author     Michaël Zasso <targos@protonmail.com>  2019-11-08 15:39:11 +0100
committer  Michaël Zasso <targos@protonmail.com>  2019-11-08 15:46:25 +0100
commit     6ca81ad72a3c6fdf16c683335be748f22aaa9a0d (patch)
tree       33c8ee75f729aed76c2c0b89c63f9bf1b4dd66aa /deps/v8
parent     1eee0b8bf8bba39b600fb16a9223e545e3bac2bc (diff)
download   android-node-v8-6ca81ad72a3c6fdf16c683335be748f22aaa9a0d.tar.gz
android-node-v8-6ca81ad72a3c6fdf16c683335be748f22aaa9a0d.tar.bz2
android-node-v8-6ca81ad72a3c6fdf16c683335be748f22aaa9a0d.zip
deps: update V8 to 7.9.317.20
PR-URL: https://github.com/nodejs/node/pull/30020
Reviewed-By: Colin Ihrig <cjihrig@gmail.com>
Reviewed-By: Jiawen Geng <technicalcute@gmail.com>
Reviewed-By: Anna Henningsen <anna@addaleax.net>
Reviewed-By: Matteo Collina <matteo.collina@gmail.com>
Diffstat (limited to 'deps/v8')
-rw-r--r--  deps/v8/.gitignore | 2
-rw-r--r--  deps/v8/AUTHORS | 2
-rw-r--r--  deps/v8/BUILD.gn | 162
-rw-r--r--  deps/v8/COMMON_OWNERS | 2
-rw-r--r--  deps/v8/ChangeLog | 1615
-rw-r--r--  deps/v8/DEPS | 47
-rw-r--r--  deps/v8/OWNERS | 3
-rw-r--r--  deps/v8/PRESUBMIT.py | 177
-rw-r--r--  deps/v8/gni/v8.gni | 23
-rw-r--r--  deps/v8/include/OWNERS | 1
-rw-r--r--  deps/v8/include/js_protocol.pdl | 21
-rw-r--r--  deps/v8/include/libplatform/libplatform.h | 11
-rw-r--r--  deps/v8/include/v8-inspector.h | 9
-rw-r--r--  deps/v8/include/v8-internal.h | 11
-rw-r--r--  deps/v8/include/v8-platform.h | 18
-rw-r--r--  deps/v8/include/v8-profiler.h | 45
-rw-r--r--  deps/v8/include/v8-version.h | 6
-rw-r--r--  deps/v8/include/v8.h | 743
-rw-r--r--  deps/v8/include/v8config.h | 113
-rw-r--r--  deps/v8/infra/mb/mb_config.pyl | 57
-rw-r--r--  deps/v8/infra/testing/builders.pyl | 107
-rw-r--r--  deps/v8/src/api/OWNERS | 2
-rw-r--r--  deps/v8/src/api/api.cc | 606
-rw-r--r--  deps/v8/src/api/api.h | 9
-rw-r--r--  deps/v8/src/asmjs/OWNERS | 2
-rw-r--r--  deps/v8/src/asmjs/asm-js.cc | 15
-rw-r--r--  deps/v8/src/asmjs/asm-js.h | 2
-rw-r--r--  deps/v8/src/asmjs/asm-parser.cc | 16
-rw-r--r--  deps/v8/src/ast/ast-function-literal-id-reindexer.cc | 21
-rw-r--r--  deps/v8/src/ast/ast-traversal-visitor.h | 8
-rw-r--r--  deps/v8/src/ast/ast.cc | 6
-rw-r--r--  deps/v8/src/ast/ast.h | 61
-rw-r--r--  deps/v8/src/ast/modules.cc | 10
-rw-r--r--  deps/v8/src/ast/prettyprinter.cc | 10
-rw-r--r--  deps/v8/src/ast/prettyprinter.h | 4
-rw-r--r--  deps/v8/src/ast/scopes.cc | 334
-rw-r--r--  deps/v8/src/ast/scopes.h | 151
-rw-r--r--  deps/v8/src/ast/source-range-ast-visitor.cc | 45
-rw-r--r--  deps/v8/src/ast/source-range-ast-visitor.h | 3
-rw-r--r--  deps/v8/src/ast/variables.h | 20
-rw-r--r--  deps/v8/src/base/OWNERS | 2
-rw-r--r--  deps/v8/src/base/adapters.h | 55
-rw-r--r--  deps/v8/src/base/cpu.cc | 71
-rw-r--r--  deps/v8/src/base/file-utils.cc | 26
-rw-r--r--  deps/v8/src/base/file-utils.h | 6
-rw-r--r--  deps/v8/src/base/free_deleter.h | 1
-rw-r--r--  deps/v8/src/base/iterator.h | 20
-rw-r--r--  deps/v8/src/base/macros.h | 22
-rw-r--r--  deps/v8/src/base/optional.h | 17
-rw-r--r--  deps/v8/src/base/platform/mutex.h | 1
-rw-r--r--  deps/v8/src/base/platform/platform-openbsd.cc | 4
-rw-r--r--  deps/v8/src/base/platform/platform-posix.cc | 1
-rw-r--r--  deps/v8/src/base/platform/semaphore.cc | 49
-rw-r--r--  deps/v8/src/base/platform/semaphore.h | 4
-rw-r--r--  deps/v8/src/base/template-utils.h | 81
-rw-r--r--  deps/v8/src/base/ubsan.cc | 50
-rw-r--r--  deps/v8/src/builtins/accessors.cc | 21
-rw-r--r--  deps/v8/src/builtins/accessors.h | 2
-rw-r--r--  deps/v8/src/builtins/arm/builtins-arm.cc | 275
-rw-r--r--  deps/v8/src/builtins/arm64/builtins-arm64.cc | 246
-rw-r--r--  deps/v8/src/builtins/base.tq | 301
-rw-r--r--  deps/v8/src/builtins/bigint.tq | 6
-rw-r--r--  deps/v8/src/builtins/builtins-arguments-gen.cc | 251
-rw-r--r--  deps/v8/src/builtins/builtins-arguments-gen.h | 34
-rw-r--r--  deps/v8/src/builtins/builtins-array-gen.cc | 507
-rw-r--r--  deps/v8/src/builtins/builtins-array-gen.h | 103
-rw-r--r--  deps/v8/src/builtins/builtins-array.cc | 7
-rw-r--r--  deps/v8/src/builtins/builtins-arraybuffer.cc | 35
-rw-r--r--  deps/v8/src/builtins/builtins-async-function-gen.cc | 2
-rw-r--r--  deps/v8/src/builtins/builtins-async-gen.cc | 104
-rw-r--r--  deps/v8/src/builtins/builtins-async-gen.h | 58
-rw-r--r--  deps/v8/src/builtins/builtins-async-generator-gen.cc | 324
-rw-r--r--  deps/v8/src/builtins/builtins-async-iterator-gen.cc | 173
-rw-r--r--  deps/v8/src/builtins/builtins-async-module.cc | 33
-rw-r--r--  deps/v8/src/builtins/builtins-bigint.cc | 29
-rw-r--r--  deps/v8/src/builtins/builtins-call-gen.cc | 6
-rw-r--r--  deps/v8/src/builtins/builtins-collections-gen.cc | 715
-rw-r--r--  deps/v8/src/builtins/builtins-collections-gen.h | 8
-rw-r--r--  deps/v8/src/builtins/builtins-console-gen.cc | 8
-rw-r--r--  deps/v8/src/builtins/builtins-console.cc | 3
-rw-r--r--  deps/v8/src/builtins/builtins-constructor-gen.cc | 221
-rw-r--r--  deps/v8/src/builtins/builtins-constructor-gen.h | 37
-rw-r--r--  deps/v8/src/builtins/builtins-conversion-gen.cc | 118
-rw-r--r--  deps/v8/src/builtins/builtins-date-gen.cc | 106
-rw-r--r--  deps/v8/src/builtins/builtins-date.cc | 42
-rw-r--r--  deps/v8/src/builtins/builtins-definitions.h | 36
-rw-r--r--  deps/v8/src/builtins/builtins-function-gen.cc | 47
-rw-r--r--  deps/v8/src/builtins/builtins-generator-gen.cc | 63
-rw-r--r--  deps/v8/src/builtins/builtins-handler-gen.cc | 32
-rw-r--r--  deps/v8/src/builtins/builtins-internal-gen.cc | 176
-rw-r--r--  deps/v8/src/builtins/builtins-intl-gen.cc | 69
-rw-r--r--  deps/v8/src/builtins/builtins-intl.cc | 33
-rw-r--r--  deps/v8/src/builtins/builtins-iterator-gen.cc | 112
-rw-r--r--  deps/v8/src/builtins/builtins-iterator-gen.h | 5
-rw-r--r--  deps/v8/src/builtins/builtins-math-gen.cc | 36
-rw-r--r--  deps/v8/src/builtins/builtins-math-gen.h | 2
-rw-r--r--  deps/v8/src/builtins/builtins-microtask-queue-gen.cc | 62
-rw-r--r--  deps/v8/src/builtins/builtins-number-gen.cc | 328
-rw-r--r--  deps/v8/src/builtins/builtins-number.cc | 11
-rw-r--r--  deps/v8/src/builtins/builtins-object-gen.cc | 532
-rw-r--r--  deps/v8/src/builtins/builtins-promise-gen.cc | 298
-rw-r--r--  deps/v8/src/builtins/builtins-promise-gen.h | 52
-rw-r--r--  deps/v8/src/builtins/builtins-proxy-gen.cc | 30
-rw-r--r--  deps/v8/src/builtins/builtins-proxy-gen.h | 7
-rw-r--r--  deps/v8/src/builtins/builtins-reflect-gen.cc | 8
-rw-r--r--  deps/v8/src/builtins/builtins-regexp-gen.cc | 955
-rw-r--r--  deps/v8/src/builtins/builtins-regexp-gen.h | 57
-rw-r--r--  deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc | 14
-rw-r--r--  deps/v8/src/builtins/builtins-string-gen.cc | 581
-rw-r--r--  deps/v8/src/builtins/builtins-string-gen.h | 61
-rw-r--r--  deps/v8/src/builtins/builtins-string.cc | 11
-rw-r--r--  deps/v8/src/builtins/builtins-typed-array-gen.cc | 148
-rw-r--r--  deps/v8/src/builtins/builtins-typed-array-gen.h | 12
-rw-r--r--  deps/v8/src/builtins/builtins-utils-inl.h | 9
-rw-r--r--  deps/v8/src/builtins/builtins-utils.h | 16
-rw-r--r--  deps/v8/src/builtins/builtins.cc | 8
-rw-r--r--  deps/v8/src/builtins/frames.tq | 4
-rw-r--r--  deps/v8/src/builtins/growable-fixed-array-gen.h | 2
-rw-r--r--  deps/v8/src/builtins/ia32/builtins-ia32.cc | 213
-rw-r--r--  deps/v8/src/builtins/internal-coverage.tq | 2
-rw-r--r--  deps/v8/src/builtins/iterator.tq | 33
-rw-r--r--  deps/v8/src/builtins/mips/builtins-mips.cc | 24
-rw-r--r--  deps/v8/src/builtins/mips64/builtins-mips64.cc | 24
-rw-r--r--  deps/v8/src/builtins/ppc/builtins-ppc.cc | 210
-rw-r--r--  deps/v8/src/builtins/regexp-exec.tq | 45
-rw-r--r--  deps/v8/src/builtins/regexp-match-all.tq | 258
-rw-r--r--  deps/v8/src/builtins/regexp-replace.tq | 5
-rw-r--r--  deps/v8/src/builtins/regexp-search.tq | 105
-rw-r--r--  deps/v8/src/builtins/regexp-source.tq | 3
-rw-r--r--  deps/v8/src/builtins/regexp-split.tq | 72
-rw-r--r--  deps/v8/src/builtins/regexp-test.tq | 2
-rw-r--r--  deps/v8/src/builtins/regexp.tq | 85
-rw-r--r--  deps/v8/src/builtins/s390/builtins-s390.cc | 535
-rw-r--r--  deps/v8/src/builtins/setup-builtins-internal.cc | 19
-rw-r--r--  deps/v8/src/builtins/string-endswith.tq | 2
-rw-r--r--  deps/v8/src/builtins/string-iterator.tq | 6
-rw-r--r--  deps/v8/src/builtins/string-slice.tq | 3
-rw-r--r--  deps/v8/src/builtins/string-startswith.tq | 5
-rw-r--r--  deps/v8/src/builtins/string-substring.tq | 3
-rw-r--r--  deps/v8/src/builtins/string.tq | 11
-rw-r--r--  deps/v8/src/builtins/typed-array-createtypedarray.tq | 25
-rw-r--r--  deps/v8/src/builtins/typed-array-slice.tq | 2
-rw-r--r--  deps/v8/src/builtins/typed-array.tq | 9
-rw-r--r--  deps/v8/src/builtins/x64/builtins-x64.cc | 279
-rw-r--r--  deps/v8/src/codegen/OWNERS | 2
-rw-r--r--  deps/v8/src/codegen/arm/assembler-arm-inl.h | 3
-rw-r--r--  deps/v8/src/codegen/arm/assembler-arm.cc | 21
-rw-r--r--  deps/v8/src/codegen/arm/assembler-arm.h | 5
-rw-r--r--  deps/v8/src/codegen/arm/macro-assembler-arm.cc | 113
-rw-r--r--  deps/v8/src/codegen/arm/macro-assembler-arm.h | 8
-rw-r--r--  deps/v8/src/codegen/arm64/assembler-arm64-inl.h | 20
-rw-r--r--  deps/v8/src/codegen/arm64/assembler-arm64.cc | 24
-rw-r--r--  deps/v8/src/codegen/arm64/assembler-arm64.h | 10
-rw-r--r--  deps/v8/src/codegen/arm64/constants-arm64.h | 4
-rw-r--r--  deps/v8/src/codegen/arm64/instructions-arm64.cc | 7
-rw-r--r--  deps/v8/src/codegen/arm64/instructions-arm64.h | 16
-rw-r--r--  deps/v8/src/codegen/arm64/macro-assembler-arm64-inl.h | 56
-rw-r--r--  deps/v8/src/codegen/arm64/macro-assembler-arm64.cc | 208
-rw-r--r--  deps/v8/src/codegen/arm64/macro-assembler-arm64.h | 25
-rw-r--r--  deps/v8/src/codegen/arm64/register-arm64.h | 13
-rw-r--r--  deps/v8/src/codegen/arm64/utils-arm64.cc | 17
-rw-r--r--  deps/v8/src/codegen/arm64/utils-arm64.h | 3
-rw-r--r--  deps/v8/src/codegen/assembler.cc | 6
-rw-r--r--  deps/v8/src/codegen/assembler.h | 1
-rw-r--r--  deps/v8/src/codegen/code-stub-assembler.cc | 2529
-rw-r--r--  deps/v8/src/codegen/code-stub-assembler.h | 738
-rw-r--r--  deps/v8/src/codegen/compilation-cache.cc | 24
-rw-r--r--  deps/v8/src/codegen/compilation-cache.h | 19
-rw-r--r--  deps/v8/src/codegen/compiler.cc | 48
-rw-r--r--  deps/v8/src/codegen/constant-pool.cc | 18
-rw-r--r--  deps/v8/src/codegen/constant-pool.h | 9
-rw-r--r--  deps/v8/src/codegen/cpu-features.h | 31
-rw-r--r--  deps/v8/src/codegen/external-reference.cc | 20
-rw-r--r--  deps/v8/src/codegen/external-reference.h | 3
-rw-r--r--  deps/v8/src/codegen/ia32/assembler-ia32-inl.h | 14
-rw-r--r--  deps/v8/src/codegen/ia32/assembler-ia32.cc | 151
-rw-r--r--  deps/v8/src/codegen/ia32/assembler-ia32.h | 144
-rw-r--r--  deps/v8/src/codegen/ia32/macro-assembler-ia32.cc | 116
-rw-r--r--  deps/v8/src/codegen/ia32/macro-assembler-ia32.h | 44
-rw-r--r--  deps/v8/src/codegen/interface-descriptors.cc | 5
-rw-r--r--  deps/v8/src/codegen/interface-descriptors.h | 66
-rw-r--r--  deps/v8/src/codegen/machine-type.h | 114
-rw-r--r--  deps/v8/src/codegen/mips/assembler-mips-inl.h | 5
-rw-r--r--  deps/v8/src/codegen/mips/assembler-mips.cc | 117
-rw-r--r--  deps/v8/src/codegen/mips/assembler-mips.h | 15
-rw-r--r--  deps/v8/src/codegen/mips/macro-assembler-mips.cc | 135
-rw-r--r--  deps/v8/src/codegen/mips/macro-assembler-mips.h | 10
-rw-r--r--  deps/v8/src/codegen/mips64/assembler-mips64-inl.h | 3
-rw-r--r--  deps/v8/src/codegen/mips64/assembler-mips64.cc | 68
-rw-r--r--  deps/v8/src/codegen/mips64/assembler-mips64.h | 4
-rw-r--r--  deps/v8/src/codegen/mips64/macro-assembler-mips64.cc | 153
-rw-r--r--  deps/v8/src/codegen/mips64/macro-assembler-mips64.h | 10
-rw-r--r--  deps/v8/src/codegen/optimized-compilation-info.cc | 11
-rw-r--r--  deps/v8/src/codegen/optimized-compilation-info.h | 8
-rw-r--r--  deps/v8/src/codegen/pending-optimization-table.cc | 2
-rw-r--r--  deps/v8/src/codegen/ppc/assembler-ppc-inl.h | 3
-rw-r--r--  deps/v8/src/codegen/ppc/assembler-ppc.cc | 4
-rw-r--r--  deps/v8/src/codegen/ppc/assembler-ppc.h | 1
-rw-r--r--  deps/v8/src/codegen/ppc/macro-assembler-ppc.cc | 43
-rw-r--r--  deps/v8/src/codegen/ppc/macro-assembler-ppc.h | 10
-rw-r--r--  deps/v8/src/codegen/reglist.h | 16
-rw-r--r--  deps/v8/src/codegen/reloc-info.cc | 2
-rw-r--r--  deps/v8/src/codegen/s390/assembler-s390-inl.h | 3
-rw-r--r--  deps/v8/src/codegen/s390/assembler-s390.cc | 4
-rw-r--r--  deps/v8/src/codegen/s390/assembler-s390.h | 1
-rw-r--r--  deps/v8/src/codegen/s390/macro-assembler-s390.cc | 137
-rw-r--r--  deps/v8/src/codegen/s390/macro-assembler-s390.h | 80
-rw-r--r--  deps/v8/src/codegen/source-position-table.cc | 45
-rw-r--r--  deps/v8/src/codegen/tnode.h | 374
-rw-r--r--  deps/v8/src/codegen/turbo-assembler.h | 4
-rw-r--r--  deps/v8/src/codegen/x64/assembler-x64-inl.h | 4
-rw-r--r--  deps/v8/src/codegen/x64/assembler-x64.cc | 55
-rw-r--r--  deps/v8/src/codegen/x64/assembler-x64.h | 49
-rw-r--r--  deps/v8/src/codegen/x64/macro-assembler-x64.cc | 409
-rw-r--r--  deps/v8/src/codegen/x64/macro-assembler-x64.h | 42
-rw-r--r--  deps/v8/src/codegen/x64/register-x64.h | 4
-rw-r--r--  deps/v8/src/codegen/x64/sse-instr.h | 1
-rw-r--r--  deps/v8/src/common/assert-scope.cc | 2
-rw-r--r--  deps/v8/src/common/assert-scope.h | 17
-rw-r--r--  deps/v8/src/common/globals.h | 22
-rw-r--r--  deps/v8/src/common/message-template.h | 8
-rw-r--r--  deps/v8/src/common/ptr-compr-inl.h | 24
-rw-r--r--  deps/v8/src/common/ptr-compr.h | 1
-rw-r--r--  deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc | 3
-rw-r--r--  deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc | 5
-rw-r--r--  deps/v8/src/compiler/OWNERS | 3
-rw-r--r--  deps/v8/src/compiler/access-builder.cc | 23
-rw-r--r--  deps/v8/src/compiler/access-builder.h | 7
-rw-r--r--  deps/v8/src/compiler/access-info.cc | 29
-rw-r--r--  deps/v8/src/compiler/access-info.h | 4
-rw-r--r--  deps/v8/src/compiler/backend/arm/code-generator-arm.cc | 81
-rw-r--r--  deps/v8/src/compiler/backend/arm/instruction-codes-arm.h | 1
-rw-r--r--  deps/v8/src/compiler/backend/arm/instruction-scheduler-arm.cc | 1
-rw-r--r--  deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc | 68
-rw-r--r--  deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc | 204
-rw-r--r--  deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h | 9
-rw-r--r--  deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc | 10
-rw-r--r--  deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc | 220
-rw-r--r--  deps/v8/src/compiler/backend/code-generator-impl.h | 8
-rw-r--r--  deps/v8/src/compiler/backend/code-generator.cc | 2
-rw-r--r--  deps/v8/src/compiler/backend/code-generator.h | 2
-rw-r--r--  deps/v8/src/compiler/backend/frame-elider.cc | 2
-rw-r--r--  deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc | 335
-rw-r--r--  deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h | 20
-rw-r--r--  deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc | 20
-rw-r--r--  deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc | 156
-rw-r--r--  deps/v8/src/compiler/backend/instruction-scheduler.cc | 2
-rw-r--r--  deps/v8/src/compiler/backend/instruction-selector-impl.h | 25
-rw-r--r--  deps/v8/src/compiler/backend/instruction-selector.cc | 44
-rw-r--r--  deps/v8/src/compiler/backend/instruction-selector.h | 15
-rw-r--r--  deps/v8/src/compiler/backend/instruction.cc | 15
-rw-r--r--  deps/v8/src/compiler/backend/instruction.h | 35
-rw-r--r--  deps/v8/src/compiler/backend/jump-threading.cc | 12
-rw-r--r--  deps/v8/src/compiler/backend/jump-threading.h | 13
-rw-r--r--  deps/v8/src/compiler/backend/mips/code-generator-mips.cc | 70
-rw-r--r--  deps/v8/src/compiler/backend/mips/instruction-codes-mips.h | 1
-rw-r--r--  deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc | 1
-rw-r--r--  deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc | 6
-rw-r--r--  deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc | 70
-rw-r--r--  deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h | 1
-rw-r--r--  deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc | 1
-rw-r--r--  deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc | 6
-rw-r--r--  deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc | 18
-rw-r--r--  deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc | 10
-rw-r--r--  deps/v8/src/compiler/backend/register-allocator-verifier.cc | 12
-rw-r--r--  deps/v8/src/compiler/backend/register-allocator-verifier.h | 1
-rw-r--r--  deps/v8/src/compiler/backend/register-allocator.cc | 344
-rw-r--r--  deps/v8/src/compiler/backend/register-allocator.h | 55
-rw-r--r--  deps/v8/src/compiler/backend/s390/code-generator-s390.cc | 5
-rw-r--r--  deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc | 87
-rw-r--r--  deps/v8/src/compiler/backend/x64/code-generator-x64.cc | 343
-rw-r--r--  deps/v8/src/compiler/backend/x64/instruction-codes-x64.h | 7
-rw-r--r--  deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc | 7
-rw-r--r--  deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc | 136
-rw-r--r--  deps/v8/src/compiler/bytecode-graph-builder.cc | 52
-rw-r--r--  deps/v8/src/compiler/c-linkage.cc | 6
-rw-r--r--  deps/v8/src/compiler/code-assembler.cc | 40
-rw-r--r--  deps/v8/src/compiler/code-assembler.h | 390
-rw-r--r--  deps/v8/src/compiler/compilation-dependencies.cc | 33
-rw-r--r--  deps/v8/src/compiler/compilation-dependencies.h | 11
-rw-r--r--  deps/v8/src/compiler/decompression-elimination.cc | 39
-rw-r--r--  deps/v8/src/compiler/decompression-elimination.h | 5
-rw-r--r--  deps/v8/src/compiler/effect-control-linearizer.cc | 184
-rw-r--r--  deps/v8/src/compiler/escape-analysis-reducer.cc | 5
-rw-r--r--  deps/v8/src/compiler/frame-states.cc | 18
-rw-r--r--  deps/v8/src/compiler/functional-list.h | 2
-rw-r--r--  deps/v8/src/compiler/graph-assembler.cc | 10
-rw-r--r--  deps/v8/src/compiler/graph-assembler.h | 2
-rw-r--r--  deps/v8/src/compiler/graph-visualizer.cc | 9
-rw-r--r--  deps/v8/src/compiler/heap-refs.h | 63
-rw-r--r--  deps/v8/src/compiler/int64-lowering.h | 2
-rw-r--r--  deps/v8/src/compiler/js-call-reducer.cc | 31
-rw-r--r--  deps/v8/src/compiler/js-context-specialization.cc | 2
-rw-r--r--  deps/v8/src/compiler/js-create-lowering.cc | 66
-rw-r--r--  deps/v8/src/compiler/js-create-lowering.h | 1
-rw-r--r--  deps/v8/src/compiler/js-generic-lowering.cc | 21
-rw-r--r--  deps/v8/src/compiler/js-heap-broker.cc | 502
-rw-r--r--  deps/v8/src/compiler/js-heap-broker.h | 32
-rw-r--r--  deps/v8/src/compiler/js-heap-copy-reducer.cc | 176
-rw-r--r--  deps/v8/src/compiler/js-inlining-heuristic.cc | 56
-rw-r--r--  deps/v8/src/compiler/js-inlining.cc | 23
-rw-r--r--  deps/v8/src/compiler/js-native-context-specialization.cc | 288
-rw-r--r--  deps/v8/src/compiler/js-native-context-specialization.h | 15
-rw-r--r--  deps/v8/src/compiler/js-operator.cc | 76
-rw-r--r--  deps/v8/src/compiler/js-operator.h | 72
-rw-r--r--  deps/v8/src/compiler/js-type-hint-lowering.cc | 26
-rw-r--r--  deps/v8/src/compiler/js-type-hint-lowering.h | 7
-rw-r--r--  deps/v8/src/compiler/js-typed-lowering.cc | 14
-rw-r--r--  deps/v8/src/compiler/machine-graph-verifier.cc | 13
-rw-r--r--  deps/v8/src/compiler/machine-operator-reducer.cc | 4
-rw-r--r--  deps/v8/src/compiler/machine-operator.cc | 10
-rw-r--r--  deps/v8/src/compiler/machine-operator.h | 17
-rw-r--r--  deps/v8/src/compiler/memory-lowering.cc | 551
-rw-r--r--  deps/v8/src/compiler/memory-lowering.h | 136
-rw-r--r--  deps/v8/src/compiler/memory-optimizer.cc | 546
-rw-r--r--  deps/v8/src/compiler/memory-optimizer.h | 101
-rw-r--r--  deps/v8/src/compiler/node-matchers.h | 14
-rw-r--r--  deps/v8/src/compiler/node.h | 2
-rw-r--r--  deps/v8/src/compiler/opcodes.h | 16
-rw-r--r--  deps/v8/src/compiler/operator-properties.cc | 1
-rw-r--r--  deps/v8/src/compiler/pipeline.cc | 347
-rw-r--r--  deps/v8/src/compiler/pipeline.h | 2
-rw-r--r--  deps/v8/src/compiler/processed-feedback.h | 31
-rw-r--r--  deps/v8/src/compiler/raw-machine-assembler.cc | 5
-rw-r--r--  deps/v8/src/compiler/raw-machine-assembler.h | 14
-rw-r--r--  deps/v8/src/compiler/representation-change.cc | 24
-rw-r--r--  deps/v8/src/compiler/scheduler.cc | 2
-rw-r--r--  deps/v8/src/compiler/select-lowering.cc | 38
-rw-r--r--  deps/v8/src/compiler/select-lowering.h | 18
-rw-r--r--  deps/v8/src/compiler/serializer-for-background-compilation.cc | 850
-rw-r--r--  deps/v8/src/compiler/serializer-for-background-compilation.h | 7
-rw-r--r--  deps/v8/src/compiler/simd-scalar-lowering.cc | 64
-rw-r--r--  deps/v8/src/compiler/simplified-lowering.cc | 42
-rw-r--r--  deps/v8/src/compiler/simplified-operator-reducer.cc | 17
-rw-r--r--  deps/v8/src/compiler/simplified-operator.cc | 29
-rw-r--r--  deps/v8/src/compiler/simplified-operator.h | 6
-rw-r--r--  deps/v8/src/compiler/store-store-elimination.cc | 365
-rw-r--r--  deps/v8/src/compiler/store-store-elimination.h | 176
-rw-r--r--  deps/v8/src/compiler/typer.cc | 12
-rw-r--r--  deps/v8/src/compiler/types.cc | 40
-rw-r--r--  deps/v8/src/compiler/verifier.cc | 10
-rw-r--r--  deps/v8/src/compiler/wasm-compiler.cc | 386
-rw-r--r--  deps/v8/src/compiler/wasm-compiler.h | 80
-rw-r--r--  deps/v8/src/compiler/zone-stats.h | 2
-rw-r--r--  deps/v8/src/d8/OWNERS | 2
-rw-r--r--  deps/v8/src/d8/d8-platforms.cc | 17
-rw-r--r--  deps/v8/src/d8/d8.cc | 264
-rw-r--r--  deps/v8/src/d8/d8.h | 76
-rw-r--r--  deps/v8/src/debug/debug-coverage.cc | 38
-rw-r--r--  deps/v8/src/debug/debug-coverage.h | 1
-rw-r--r--  deps/v8/src/debug/debug-evaluate.cc | 42
-rw-r--r--  deps/v8/src/debug/debug-evaluate.h | 4
-rw-r--r--  deps/v8/src/debug/debug-frames.cc | 6
-rw-r--r--  deps/v8/src/debug/debug-frames.h | 2
-rw-r--r--  deps/v8/src/debug/debug-interface.h | 11
-rw-r--r--  deps/v8/src/debug/debug-scopes.cc | 267
-rw-r--r--  deps/v8/src/debug/debug-scopes.h | 15
-rw-r--r--  deps/v8/src/debug/debug-stack-trace-iterator.cc | 11
-rw-r--r--  deps/v8/src/debug/debug-stack-trace-iterator.h | 2
-rw-r--r--  deps/v8/src/debug/debug-type-profile.h | 1
-rw-r--r--  deps/v8/src/debug/debug.cc | 17
-rw-r--r--  deps/v8/src/debug/debug.h | 1
-rw-r--r--  deps/v8/src/debug/interface-types.h | 2
-rw-r--r--  deps/v8/src/deoptimizer/arm/deoptimizer-arm.cc | 21
-rw-r--r--  deps/v8/src/deoptimizer/arm64/deoptimizer-arm64.cc | 18
-rw-r--r--  deps/v8/src/deoptimizer/deoptimize-reason.h | 1
-rw-r--r--  deps/v8/src/deoptimizer/deoptimizer.cc | 23
-rw-r--r--  deps/v8/src/deoptimizer/deoptimizer.h | 12
-rw-r--r--  deps/v8/src/deoptimizer/ppc/deoptimizer-ppc.cc | 21
-rw-r--r--  deps/v8/src/deoptimizer/s390/deoptimizer-s390.cc | 46
-rw-r--r--  deps/v8/src/deoptimizer/x64/deoptimizer-x64.cc | 2
-rw-r--r--  deps/v8/src/diagnostics/arm/disasm-arm.cc | 24
-rw-r--r--  deps/v8/src/diagnostics/arm64/disasm-arm64.cc | 8
-rw-r--r--  deps/v8/src/diagnostics/basic-block-profiler.h | 1
-rw-r--r--  deps/v8/src/diagnostics/ia32/disasm-ia32.cc | 115
-rw-r--r--  deps/v8/src/diagnostics/objects-debug.cc | 88
-rw-r--r--  deps/v8/src/diagnostics/objects-printer.cc | 73
-rw-r--r--  deps/v8/src/diagnostics/unwinding-info-win64.cc | 31
-rw-r--r--  deps/v8/src/diagnostics/x64/disasm-x64.cc | 14
-rw-r--r--  deps/v8/src/execution/arguments-inl.h | 6
-rw-r--r--  deps/v8/src/execution/arguments.h | 20
-rw-r--r--  deps/v8/src/execution/arm/simulator-arm.cc | 236
-rw-r--r--  deps/v8/src/execution/arm64/simulator-arm64.cc | 130
-rw-r--r--  deps/v8/src/execution/frames.cc | 24
-rw-r--r--  deps/v8/src/execution/frames.h | 20
-rw-r--r--  deps/v8/src/execution/isolate-inl.h | 55
-rw-r--r--  deps/v8/src/execution/isolate.cc | 377
-rw-r--r--  deps/v8/src/execution/isolate.h | 104
-rw-r--r--  deps/v8/src/execution/messages.cc | 6
-rw-r--r--  deps/v8/src/execution/protectors.cc | 23
-rw-r--r--  deps/v8/src/execution/protectors.h | 79
-rw-r--r--  deps/v8/src/execution/s390/frame-constants-s390.h | 16
-rw-r--r--  deps/v8/src/execution/simulator.h | 16
-rw-r--r--  deps/v8/src/execution/stack-guard.cc | 8
-rw-r--r--  deps/v8/src/execution/x64/frame-constants-x64.h | 2
-rw-r--r--  deps/v8/src/extensions/free-buffer-extension.cc | 5
-rw-r--r--  deps/v8/src/extensions/gc-extension.cc | 147
-rw-r--r--  deps/v8/src/extensions/gc-extension.h | 15
-rw-r--r--  deps/v8/src/flags/flag-definitions.h | 103
-rw-r--r--  deps/v8/src/handles/global-handles.cc | 20
-rw-r--r--  deps/v8/src/handles/global-handles.h | 1
-rw-r--r--  deps/v8/src/handles/handles.cc | 21
-rw-r--r--  deps/v8/src/handles/handles.h | 21
-rw-r--r--  deps/v8/src/heap/array-buffer-collector.cc | 25
-rw-r--r--  deps/v8/src/heap/array-buffer-collector.h | 4
-rw-r--r--  deps/v8/src/heap/array-buffer-tracker-inl.h | 105
-rw-r--r--  deps/v8/src/heap/array-buffer-tracker.cc | 33
-rw-r--r--  deps/v8/src/heap/array-buffer-tracker.h | 25
-rw-r--r--  deps/v8/src/heap/basic-memory-chunk.h | 22
-rw-r--r--  deps/v8/src/heap/concurrent-marking.cc | 49
-rw-r--r--  deps/v8/src/heap/concurrent-marking.h | 6
-rw-r--r--  deps/v8/src/heap/embedder-tracing.h | 7
-rw-r--r--  deps/v8/src/heap/factory-inl.h | 27
-rw-r--r--  deps/v8/src/heap/factory.cc | 349
-rw-r--r--  deps/v8/src/heap/factory.h | 141
-rw-r--r--  deps/v8/src/heap/heap-inl.h | 45
-rw-r--r--  deps/v8/src/heap/heap-write-barrier-inl.h | 4
-rw-r--r--  deps/v8/src/heap/heap.cc | 235
-rw-r--r--  deps/v8/src/heap/heap.h | 92
-rw-r--r--  deps/v8/src/heap/incremental-marking-job.cc | 8
-rw-r--r--  deps/v8/src/heap/invalidated-slots-inl.h | 78
-rw-r--r--  deps/v8/src/heap/invalidated-slots.cc | 49
-rw-r--r--  deps/v8/src/heap/invalidated-slots.h | 17
-rw-r--r--  deps/v8/src/heap/mark-compact-inl.h | 6
-rw-r--r--  deps/v8/src/heap/mark-compact.cc | 86
-rw-r--r--  deps/v8/src/heap/memory-measurement.cc | 80
-rw-r--r--  deps/v8/src/heap/memory-measurement.h | 29
-rw-r--r--  deps/v8/src/heap/memory-reducer.cc | 5
-rw-r--r--  deps/v8/src/heap/object-stats.cc | 16
-rw-r--r--  deps/v8/src/heap/object-stats.h | 1
-rw-r--r--  deps/v8/src/heap/objects-visiting-inl.h | 6
-rw-r--r--  deps/v8/src/heap/objects-visiting.h | 109
-rw-r--r--  deps/v8/src/heap/remembered-set.h | 188
-rw-r--r--  deps/v8/src/heap/scavenge-job.cc | 2
-rw-r--r--  deps/v8/src/heap/scavenger.cc | 64
-rw-r--r--  deps/v8/src/heap/setup-heap-internal.cc | 168
-rw-r--r--  deps/v8/src/heap/slot-set.cc | 32
-rw-r--r--  deps/v8/src/heap/slot-set.h | 133
-rw-r--r--  deps/v8/src/heap/spaces.cc | 163
-rw-r--r--  deps/v8/src/heap/spaces.h | 32
-rw-r--r--  deps/v8/src/heap/store-buffer-inl.h | 26
-rw-r--r--  deps/v8/src/heap/store-buffer.cc | 174
-rw-r--r--  deps/v8/src/heap/store-buffer.h | 153
-rw-r--r--  deps/v8/src/heap/sweeper.cc | 32
-rw-r--r--  deps/v8/src/ic/accessor-assembler.cc | 1105
-rw-r--r--  deps/v8/src/ic/accessor-assembler.h | 186
-rw-r--r--  deps/v8/src/ic/binary-op-assembler.cc | 319
-rw-r--r--  deps/v8/src/ic/binary-op-assembler.h | 60
-rw-r--r--  deps/v8/src/ic/handler-configuration-inl.h | 36
-rw-r--r--  deps/v8/src/ic/handler-configuration.cc | 2
-rw-r--r--  deps/v8/src/ic/handler-configuration.h | 18
-rw-r--r--  deps/v8/src/ic/ic-stats.cc | 3
-rw-r--r--  deps/v8/src/ic/ic-stats.h | 1
-rw-r--r--  deps/v8/src/ic/ic.cc | 126
-rw-r--r--  deps/v8/src/ic/ic.h | 35
-rw-r--r--  deps/v8/src/ic/keyed-store-generic.cc | 261
-rw-r--r--  deps/v8/src/ic/keyed-store-generic.h | 3
-rw-r--r--  deps/v8/src/ic/stub-cache.cc | 5
-rw-r--r--  deps/v8/src/ic/stub-cache.h | 18
-rw-r--r--  deps/v8/src/init/bootstrapper.cc | 317
-rw-r--r--  deps/v8/src/init/heap-symbols.h | 67
-rw-r--r--  deps/v8/src/init/icu_util.cc | 15
-rw-r--r--  deps/v8/src/init/isolate-allocator.cc | 77
-rw-r--r--  deps/v8/src/init/isolate-allocator.h | 4
-rw-r--r--  deps/v8/src/init/setup-isolate-deserialize.cc | 1
-rw-r--r--  deps/v8/src/init/startup-data-util.cc | 25
-rw-r--r--  deps/v8/src/init/startup-data-util.h | 1
-rw-r--r--  deps/v8/src/init/v8.cc | 6
-rw-r--r--  deps/v8/src/inspector/custom-preview.h | 2
-rw-r--r--  deps/v8/src/inspector/injected-script.cc | 10
-rw-r--r--  deps/v8/src/inspector/injected-script.h | 1
-rw-r--r--  deps/v8/src/inspector/inspected-context.cc | 2
-rw-r--r--  deps/v8/src/inspector/inspected-context.h | 1
-rw-r--r--  deps/v8/src/inspector/remote-object-id.h | 2
-rw-r--r--  deps/v8/src/inspector/search-util.h | 2
-rw-r--r--  deps/v8/src/inspector/string-16.cc | 7
-rw-r--r--  deps/v8/src/inspector/string-16.h | 1
-rw-r--r--  deps/v8/src/inspector/string-util.cc | 9
-rw-r--r--  deps/v8/src/inspector/string-util.h | 17
-rw-r--r--  deps/v8/src/inspector/v8-console-message.cc | 6
-rw-r--r--  deps/v8/src/inspector/v8-console-message.h | 2
-rw-r--r--  deps/v8/src/inspector/v8-console.cc | 2
-rw-r--r--  deps/v8/src/inspector/v8-console.h | 4
-rw-r--r--  deps/v8/src/inspector/v8-debugger-agent-impl.cc | 88
-rw-r--r--  deps/v8/src/inspector/v8-debugger-agent-impl.h | 4
-rw-r--r--  deps/v8/src/inspector/v8-debugger-script.cc | 22
-rw-r--r--  deps/v8/src/inspector/v8-debugger-script.h | 3
-rw-r--r--  deps/v8/src/inspector/v8-debugger.cc | 226
-rw-r--r--  deps/v8/src/inspector/v8-debugger.h | 55
-rw-r--r--  deps/v8/src/inspector/v8-heap-profiler-agent-impl.cc | 7
-rw-r--r--  deps/v8/src/inspector/v8-heap-profiler-agent-impl.h | 2
-rw-r--r--  deps/v8/src/inspector/v8-inspector-impl.cc | 2
-rw-r--r--  deps/v8/src/inspector/v8-inspector-impl.h | 1
-rw-r--r--  deps/v8/src/inspector/v8-inspector-session-impl.h | 1
-rw-r--r--  deps/v8/src/inspector/v8-profiler-agent-impl.cc | 28
-rw-r--r--  deps/v8/src/inspector/v8-profiler-agent-impl.h | 1
-rw-r--r--  deps/v8/src/inspector/v8-runtime-agent-impl.cc | 14
-rw-r--r--  deps/v8/src/inspector/v8-runtime-agent-impl.h | 3
-rw-r--r--  deps/v8/src/inspector/v8-schema-agent-impl.cc | 7
-rw-r--r--  deps/v8/src/inspector/v8-schema-agent-impl.h | 2
-rw-r--r--  deps/v8/src/inspector/v8-stack-trace-impl.cc | 44
-rw-r--r--  deps/v8/src/inspector/value-mirror.cc | 83
-rw-r--r--  deps/v8/src/inspector/wasm-translation.cc | 50
-rw-r--r--  deps/v8/src/inspector/wasm-translation.h | 1
-rw-r--r--  deps/v8/src/interpreter/bytecode-array-accessor.cc | 2
-rw-r--r--  deps/v8/src/interpreter/bytecode-array-accessor.h | 2
-rw-r--r--  deps/v8/src/interpreter/bytecode-array-builder.cc | 13
-rw-r--r--  deps/v8/src/interpreter/bytecode-array-builder.h | 7
-rw-r--r--  deps/v8/src/interpreter/bytecode-array-iterator.h | 2
-rw-r--r--  deps/v8/src/interpreter/bytecode-array-random-iterator.h | 2
-rw-r--r--  deps/v8/src/interpreter/bytecode-generator.cc | 276
-rw-r--r--  deps/v8/src/interpreter/bytecode-generator.h | 11
-rw-r--r--  deps/v8/src/interpreter/bytecodes.cc | 1
-rw-r--r--  deps/v8/src/interpreter/bytecodes.h | 3
-rw-r--r--  deps/v8/src/interpreter/constant-array-builder.cc | 2
-rw-r--r--  deps/v8/src/interpreter/interpreter-assembler.cc | 592
-rw-r--r--  deps/v8/src/interpreter/interpreter-assembler.h | 274
-rw-r--r--  deps/v8/src/interpreter/interpreter-generator.cc | 601
-rw-r--r--  deps/v8/src/interpreter/interpreter-generator.h | 4
-rw-r--r--  deps/v8/src/interpreter/interpreter-intrinsics-generator.cc | 211
-rw-r--r--  deps/v8/src/interpreter/interpreter-intrinsics-generator.h | 6
-rw-r--r--  deps/v8/src/interpreter/interpreter.cc | 8
-rw-r--r--  deps/v8/src/json/json-parser.cc | 50
-rw-r--r--  deps/v8/src/json/json-stringifier.cc | 2
-rw-r--r--  deps/v8/src/libplatform/default-foreground-task-runner.cc | 10
-rw-r--r--  deps/v8/src/libplatform/default-foreground-task-runner.h | 6
-rw-r--r--  deps/v8/src/libplatform/default-worker-threads-task-runner.cc | 2
-rw-r--r--  deps/v8/src/libplatform/default-worker-threads-task-runner.h | 1
-rw-r--r--  deps/v8/src/libplatform/delayed-task-queue.h | 1
-rw-r--r--  deps/v8/src/libplatform/task-queue.h | 1
-rw-r--r--  deps/v8/src/libplatform/tracing/DEPS | 4
-rw-r--r--  deps/v8/src/libplatform/tracing/json-trace-event-listener.cc | 4
-rw-r--r--  deps/v8/src/libplatform/tracing/trace-event-listener.cc | 2
-rw-r--r--  deps/v8/src/libplatform/tracing/trace-object.cc | 11
-rw-r--r--  deps/v8/src/libplatform/tracing/tracing-controller.cc | 6
-rw-r--r--  deps/v8/src/libsampler/sampler.cc | 2
-rw-r--r--  deps/v8/src/libsampler/sampler.h | 1
-rw-r--r--  deps/v8/src/logging/counters-definitions.h | 2
-rw-r--r--  deps/v8/src/logging/counters.cc | 2
-rw-r--r--  deps/v8/src/logging/counters.h | 6
-rw-r--r--  deps/v8/src/logging/log-utils.cc | 7
-rw-r--r--  deps/v8/src/logging/log-utils.h | 2
-rw-r--r--  deps/v8/src/logging/log.cc | 128
-rw-r--r--  deps/v8/src/logging/log.h | 26
-rw-r--r--  deps/v8/src/numbers/OWNERS | 2
-rw-r--r--  deps/v8/src/numbers/math-random.cc | 5
-rw-r--r--  deps/v8/src/objects/arguments.h | 13
-rw-r--r--  deps/v8/src/objects/backing-store.cc | 648
-rw-r--r--  deps/v8/src/objects/backing-store.h | 206
-rw-r--r--  deps/v8/src/objects/bigint.cc | 5
-rw-r--r--  deps/v8/src/objects/bigint.h | 12
-rw-r--r--  deps/v8/src/objects/code.cc | 10
-rw-r--r--  deps/v8/src/objects/code.h | 1
-rw-r--r--  deps/v8/src/objects/contexts-inl.h | 41
-rw-r--r--  deps/v8/src/objects/contexts.cc | 42
-rw-r--r--  deps/v8/src/objects/contexts.h | 67
-rw-r--r--  deps/v8/src/objects/data-handler.h | 2
-rw-r--r--  deps/v8/src/objects/debug-objects-inl.h | 14
-rw-r--r--  deps/v8/src/objects/debug-objects.h | 31
-rw-r--r--  deps/v8/src/objects/descriptor-array-inl.h | 91
-rw-r--r--  deps/v8/src/objects/descriptor-array.h | 57
-rw-r--r--  deps/v8/src/objects/elements.cc | 493
-rw-r--r--  deps/v8/src/objects/elements.h | 24
-rw-r--r--  deps/v8/src/objects/feedback-cell-inl.h | 7
-rw-r--r--  deps/v8/src/objects/feedback-cell.h | 15
-rw-r--r--  deps/v8/src/objects/feedback-vector-inl.h | 4
-rw-r--r--  deps/v8/src/objects/feedback-vector.cc | 38
-rw-r--r--  deps/v8/src/objects/feedback-vector.h | 8
-rw-r--r--  deps/v8/src/objects/field-index-inl.h | 4
-rw-r--r--  deps/v8/src/objects/field-index.h | 7
-rw-r--r--  deps/v8/src/objects/fixed-array-inl.h | 6
-rw-r--r--  deps/v8/src/objects/fixed-array.h | 19
-rw-r--r--  deps/v8/src/objects/function-kind.h | 9
-rw-r--r--  deps/v8/src/objects/heap-number-inl.h | 4
-rw-r--r--  deps/v8/src/objects/heap-number.h | 8
-rw-r--r--  deps/v8/src/objects/instance-type.h | 381
-rw-r--r--  deps/v8/src/objects/internal-index.h | 79
-rw-r--r--  deps/v8/src/objects/intl-objects.cc | 185
-rw-r--r--  deps/v8/src/objects/intl-objects.h | 17
-rw-r--r--  deps/v8/src/objects/js-array-buffer-inl.h | 80
-rw-r--r--  deps/v8/src/objects/js-array-buffer.cc | 211
-rw-r--r--  deps/v8/src/objects/js-array-buffer.h | 113
-rw-r--r--  deps/v8/src/objects/js-array.h | 4
-rw-r--r--  deps/v8/src/objects/js-break-iterator.cc | 13
-rw-r--r--  deps/v8/src/objects/js-break-iterator.h | 4
-rw-r--r--  deps/v8/src/objects/js-collator.cc | 26
-rw-r--r--  deps/v8/src/objects/js-collator.h | 4
-rw-r--r--  deps/v8/src/objects/js-collection-iterator.h | 4
-rw-r--r--  deps/v8/src/objects/js-date-time-format.cc | 216
-rw-r--r--  deps/v8/src/objects/js-date-time-format.h | 7
-rw-r--r--  deps/v8/src/objects/js-list-format.cc | 35
-rw-r--r--  deps/v8/src/objects/js-list-format.h | 2
-rw-r--r--  deps/v8/src/objects/js-locale.cc | 15
-rw-r--r--  deps/v8/src/objects/js-locale.h | 9
-rw-r--r--  deps/v8/src/objects/js-number-format-inl.h | 38
-rw-r--r--  deps/v8/src/objects/js-number-format.cc | 386
-rw-r--r--  deps/v8/src/objects/js-number-format.h | 28
-rw-r--r--  deps/v8/src/objects/js-objects-inl.h | 17
-rw-r--r--  deps/v8/src/objects/js-objects.cc | 190
-rw-r--r--  deps/v8/src/objects/js-objects.h | 79
-rw-r--r--  deps/v8/src/objects/js-plural-rules.cc | 11
-rw-r--r--  deps/v8/src/objects/js-plural-rules.h | 2
-rw-r--r--  deps/v8/src/objects/js-proxy.h | 2
-rw-r--r--  deps/v8/src/objects/js-regexp-inl.h | 10
-rw-r--r--  deps/v8/src/objects/js-regexp.cc | 118
-rw-r--r--  deps/v8/src/objects/js-regexp.h | 68
-rw-r--r--  deps/v8/src/objects/js-relative-time-format.cc | 3
-rw-r--r--  deps/v8/src/objects/js-relative-time-format.h | 2
-rw-r--r--  deps/v8/src/objects/js-segment-iterator.h | 2
-rw-r--r--  deps/v8/src/objects/js-segmenter.h | 2
-rw-r--r--  deps/v8/src/objects/js-weak-refs-inl.h | 29
-rw-r--r--  deps/v8/src/objects/js-weak-refs.h | 57
-rw-r--r--  deps/v8/src/objects/keys.cc | 47
-rw-r--r--  deps/v8/src/objects/keys.h | 6
-rw-r--r--  deps/v8/src/objects/layout-descriptor-inl.h | 4
-rw-r--r--  deps/v8/src/objects/layout-descriptor.cc | 3
-rw-r--r--  deps/v8/src/objects/literal-objects.cc | 18
-rw-r--r--  deps/v8/src/objects/lookup-inl.h | 5
-rw-r--r--  deps/v8/src/objects/lookup.cc | 133
-rw-r--r--  deps/v8/src/objects/lookup.h | 2
-rw-r--r--  deps/v8/src/objects/map-inl.h | 23
-rw-r--r--  deps/v8/src/objects/map-updater.cc | 51
-rw-r--r--  deps/v8/src/objects/map-updater.h | 20
-rw-r--r--  deps/v8/src/objects/map.cc | 153
-rw-r--r--  deps/v8/src/objects/map.h | 51
-rw-r--r--  deps/v8/src/objects/module-inl.h | 41
-rw-r--r--  deps/v8/src/objects/module.cc | 64
-rw-r--r--  deps/v8/src/objects/module.h | 14
-rw-r--r--  deps/v8/src/objects/name-inl.h | 5
-rw-r--r--  deps/v8/src/objects/name.h | 19
-rw-r--r--  deps/v8/src/objects/object-list-macros.h | 8
-rw-r--r--  deps/v8/src/objects/objects-body-descriptors-inl.h | 38
-rw-r--r--  deps/v8/src/objects/objects-definitions.h | 244
-rw-r--r--  deps/v8/src/objects/objects-inl.h | 21
-rw-r--r--  deps/v8/src/objects/objects.cc | 196
-rw-r--r--  deps/v8/src/objects/objects.h | 71
-rw-r--r--  deps/v8/src/objects/oddball.h | 5
-rw-r--r--  deps/v8/src/objects/ordered-hash-table-inl.h | 8
-rw-r--r--  deps/v8/src/objects/ordered-hash-table.cc | 26
-rw-r--r--  deps/v8/src/objects/ordered-hash-table.h | 5
-rw-r--r--  deps/v8/src/objects/osr-optimized-code-cache-inl.h | 25
-rw-r--r--  deps/v8/src/objects/osr-optimized-code-cache.cc | 223
-rw-r--r--  deps/v8/src/objects/osr-optimized-code-cache.h | 77
-rw-r--r--  deps/v8/src/objects/primitive-heap-object-inl.h | 26
-rw-r--r--  deps/v8/src/objects/primitive-heap-object.h | 33
-rw-r--r--  deps/v8/src/objects/property-descriptor.cc | 2
-rw-r--r--  deps/v8/src/objects/property-details.h | 14
-rw-r--r--  deps/v8/src/objects/scope-info.cc | 89
-rw-r--r--  deps/v8/src/objects/scope-info.h | 43
-rw-r--r--  deps/v8/src/objects/script-inl.h | 30
-rw-r--r--  deps/v8/src/objects/script.h | 18
-rw-r--r--  deps/v8/src/objects/shared-function-info-inl.h | 48
-rw-r--r--  deps/v8/src/objects/shared-function-info.h | 113
-rw-r--r--  deps/v8/src/objects/slots-inl.h | 2
-rw-r--r--  deps/v8/src/objects/source-text-module.cc | 532
-rw-r--r--  deps/v8/src/objects/source-text-module.h | 91
-rw-r--r--  deps/v8/src/objects/stack-frame-info.cc | 81
-rw-r--r--  deps/v8/src/objects/stack-frame-info.h | 6
-rw-r--r--  deps/v8/src/objects/string-inl.h | 8
-rw-r--r--  deps/v8/src/objects/string.cc | 53
-rw-r--r--  deps/v8/src/objects/string.h | 9
-rw-r--r--  deps/v8/src/objects/struct-inl.h | 7
-rw-r--r--  deps/v8/src/objects/struct.h | 17
-rw-r--r--  deps/v8/src/objects/synthetic-module.cc | 32
-rw-r--r--  deps/v8/src/objects/synthetic-module.h | 18
-rw-r--r--  deps/v8/src/objects/transitions-inl.h | 8
-rw-r--r--  deps/v8/src/objects/transitions.cc | 5
-rw-r--r--  deps/v8/src/objects/transitions.h | 1
-rw-r--r--  deps/v8/src/objects/value-serializer.cc | 101
-rw-r--r--  deps/v8/src/objects/value-serializer.h | 1
-rw-r--r--  deps/v8/src/parsing/expression-scope-reparenter.cc | 9
-rw-r--r--  deps/v8/src/parsing/expression-scope.h | 8
-rw-r--r--  deps/v8/src/parsing/parse-info.cc | 6
-rw-r--r--  deps/v8/src/parsing/parse-info.h | 3
-rw-r--r--  deps/v8/src/parsing/parser-base.h | 74
-rw-r--r--  deps/v8/src/parsing/parser.cc | 102
-rw-r--r--  deps/v8/src/parsing/parser.h | 18
-rw-r--r--  deps/v8/src/parsing/preparse-data-impl.h | 10
-rw-r--r--  deps/v8/src/parsing/preparse-data.cc | 73
-rw-r--r--  deps/v8/src/parsing/preparse-data.h | 6
-rw-r--r--  deps/v8/src/parsing/preparser.h | 40
-rw-r--r--  deps/v8/src/parsing/scanner-character-streams.cc | 2
-rw-r--r--  deps/v8/src/parsing/scanner-character-streams.h | 2
-rw-r--r--  deps/v8/src/parsing/scanner.h | 13
-rw-r--r--  deps/v8/src/parsing/token.cc | 3
-rw-r--r--  deps/v8/src/profiler/heap-snapshot-generator.cc | 3
-rw-r--r--  deps/v8/src/profiler/heap-snapshot-generator.h | 1
-rw-r--r--  deps/v8/src/profiler/profile-generator-inl.h | 2
-rw-r--r--  deps/v8/src/profiler/profile-generator.cc | 2
-rw-r--r--  deps/v8/src/profiler/profiler-listener.cc | 9
-rw-r--r--  deps/v8/src/profiler/sampling-heap-profiler.cc | 5
-rw-r--r--  deps/v8/src/profiler/tracing-cpu-profiler.h | 2
-rw-r--r--  deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc | 7
-rw-r--r--  deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc | 8
-rw-r--r--  deps/v8/src/regexp/regexp-bytecode-generator.cc | 22
-rw-r--r--  deps/v8/src/regexp/regexp-bytecode-generator.h | 6
-rw-r--r--  deps/v8/src/regexp/regexp-bytecode-peephole.cc | 1037
-rw-r--r--  deps/v8/src/regexp/regexp-bytecode-peephole.h | 31
-rw-r--r--  deps/v8/src/regexp/regexp-bytecodes.cc | 46
-rw-r--r--  deps/v8/src/regexp/regexp-bytecodes.h | 155
-rw-r--r--  deps/v8/src/regexp/regexp-compiler.cc | 21
-rw-r--r--  deps/v8/src/regexp/regexp-interpreter.cc | 265
-rw-r--r--  deps/v8/src/regexp/regexp-interpreter.h | 4
-rw-r--r--  deps/v8/src/regexp/regexp-parser.cc | 3
-rw-r--r--  deps/v8/src/regexp/regexp-stack.h | 6
-rw-r--r--  deps/v8/src/regexp/regexp-utils.cc | 9
-rw-r--r--  deps/v8/src/regexp/regexp.cc | 22
-rw-r--r--  deps/v8/src/regexp/regexp.h | 5
-rw-r--r--  deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc | 59
-rw-r--r--  deps/v8/src/regexp/s390/regexp-macro-assembler-s390.h | 27
-rw-r--r--  deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc | 20
-rw-r--r--  deps/v8/src/regexp/x64/regexp-macro-assembler-x64.h | 4
-rw-r--r--  deps/v8/src/roots/roots.h | 68
-rw-r--r--  deps/v8/src/runtime/runtime-array.cc | 5
-rw-r--r--  deps/v8/src/runtime/runtime-classes.cc | 7
-rw-r--r--  deps/v8/src/runtime/runtime-compiler.cc | 10
-rw-r--r--  deps/v8/src/runtime/runtime-debug.cc | 16
-rw-r--r--  deps/v8/src/runtime/runtime-forin.cc | 5
-rw-r--r--  deps/v8/src/runtime/runtime-internal.cc | 6
-rw-r--r--  deps/v8/src/runtime/runtime-literals.cc | 49
-rw-r--r--  deps/v8/src/runtime/runtime-object.cc | 15
-rw-r--r--  deps/v8/src/runtime/runtime-regexp.cc | 27
-rw-r--r--  deps/v8/src/runtime/runtime-test.cc | 98
-rw-r--r--  deps/v8/src/runtime/runtime-typedarray.cc | 15
-rw-r--r--  deps/v8/src/runtime/runtime-wasm.cc | 14
-rw-r--r--  deps/v8/src/runtime/runtime.h | 5
-rw-r--r--  deps/v8/src/sanitizer/OWNERS | 2
-rw-r--r--  deps/v8/src/snapshot/deserializer.cc | 49
-rw-r--r--  deps/v8/src/snapshot/deserializer.h | 5
-rw-r--r--  deps/v8/src/snapshot/embedded/embedded-file-writer.h | 1
-rw-r--r--  deps/v8/src/snapshot/embedded/platform-embedded-file-writer-base.cc | 14
-rw-r--r--  deps/v8/src/snapshot/embedded/platform-embedded-file-writer-win.cc | 7
-rw-r--r--  deps/v8/src/snapshot/natives-external.cc | 5
-rw-r--r--  deps/v8/src/snapshot/object-deserializer.cc | 2
-rw-r--r--  deps/v8/src/snapshot/partial-serializer.cc | 3
-rw-r--r--  deps/v8/src/snapshot/serializer.cc | 40
-rw-r--r--  deps/v8/src/snapshot/serializer.h | 2
-rw-r--r--  deps/v8/src/snapshot/snapshot-source-sink.h | 18
-rw-r--r--  deps/v8/src/snapshot/snapshot.h | 1
-rw-r--r--  deps/v8/src/strings/string-hasher-inl.h | 65
-rw-r--r--  deps/v8/src/strings/string-stream.cc | 3
-rw-r--r--  deps/v8/src/strings/string-stream.h | 2
-rw-r--r--  deps/v8/src/strings/uri.cc | 12
-rw-r--r--  deps/v8/src/tasks/OWNERS | 2
-rw-r--r--  deps/v8/src/tasks/task-utils.cc | 8
-rw-r--r--  deps/v8/src/torque/ast.h | 27
-rw-r--r--  deps/v8/src/torque/class-debug-reader-generator.cc | 54
-rw-r--r--  deps/v8/src/torque/constants.h | 24
-rw-r--r--  deps/v8/src/torque/csa-generator.cc | 47
-rw-r--r--  deps/v8/src/torque/declaration-visitor.cc | 14
-rw-r--r--  deps/v8/src/torque/declarations.h | 1
-rw-r--r--  deps/v8/src/torque/earley-parser.h | 4
-rw-r--r--  deps/v8/src/torque/global-context.cc | 2
-rw-r--r--  deps/v8/src/torque/global-context.h | 1
-rw-r--r--  deps/v8/src/torque/implementation-visitor.cc | 255
-rw-r--r--  deps/v8/src/torque/implementation-visitor.h | 8
-rw-r--r--  deps/v8/src/torque/instance-type-generator.cc | 376
-rw-r--r--  deps/v8/src/torque/ls/json.h | 6
-rw-r--r--  deps/v8/src/torque/ls/message-handler.cc | 17
-rw-r--r--  deps/v8/src/torque/server-data.h | 5
-rw-r--r--  deps/v8/src/torque/torque-compiler.cc | 3
-rw-r--r--  deps/v8/src/torque/torque-parser.cc | 146
-rw-r--r--  deps/v8/src/torque/type-oracle.h | 2
-rw-r--r--  deps/v8/src/torque/type-visitor.cc | 11
-rw-r--r--  deps/v8/src/torque/types.cc | 18
-rw-r--r--  deps/v8/src/torque/types.h | 27
-rw-r--r--  deps/v8/src/torque/utils.cc | 16
-rw-r--r--  deps/v8/src/utils/allocation.cc | 4
-rw-r--r--  deps/v8/src/utils/memcopy.cc | 13
-rw-r--r--  deps/v8/src/utils/memcopy.h | 331
-rw-r--r--  deps/v8/src/utils/utils-inl.h | 36
-rw-r--r--  deps/v8/src/utils/utils.h | 13
-rw-r--r--  deps/v8/src/utils/vector.h | 1
-rw-r--r--  deps/v8/src/wasm/DEPS | 7
-rw-r--r--  deps/v8/src/wasm/OWNERS | 2
-rw-r--r--  deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h | 46
-rw-r--r--  deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h | 40
-rw-r--r--  deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h | 33
-rw-r--r--  deps/v8/src/wasm/baseline/liftoff-assembler.cc | 8
-rw-r--r--  deps/v8/src/wasm/baseline/liftoff-assembler.h | 3
-rw-r--r--  deps/v8/src/wasm/baseline/liftoff-compiler.cc | 163
-rw-r--r--  deps/v8/src/wasm/baseline/liftoff-register.h | 19
-rw-r--r--  deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h | 57
-rw-r--r--  deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h | 62
-rw-r--r--  deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h | 82
-rw-r--r--  deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h | 81
-rw-r--r--  deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h | 31
-rw-r--r--  deps/v8/src/wasm/c-api.cc | 19
-rw-r--r--  deps/v8/src/wasm/decoder.h | 6
-rw-r--r--  deps/v8/src/wasm/function-body-decoder-impl.h | 80
-rw-r--r--  deps/v8/src/wasm/function-compiler.cc | 6
-rw-r--r--  deps/v8/src/wasm/function-compiler.h | 2
-rw-r--r--  deps/v8/src/wasm/graph-builder-interface.cc | 141
-rw-r--r--  deps/v8/src/wasm/jump-table-assembler.cc | 178
-rw-r--r--  deps/v8/src/wasm/jump-table-assembler.h | 108
-rw-r--r--  deps/v8/src/wasm/memory-tracing.cc | 21
-rw-r--r--  deps/v8/src/wasm/module-compiler.cc | 75
-rw-r--r--  deps/v8/src/wasm/module-decoder.cc | 149
-rw-r--r--  deps/v8/src/wasm/module-decoder.h | 2
-rw-r--r--  deps/v8/src/wasm/module-instantiate.cc | 240
-rw-r--r--  deps/v8/src/wasm/streaming-decoder.cc | 25
-rw-r--r--  deps/v8/src/wasm/value-type.h | 2
-rw-r--r--  deps/v8/src/wasm/wasm-code-manager.cc | 573
-rw-r--r--  deps/v8/src/wasm/wasm-code-manager.h | 140
-rw-r--r--  deps/v8/src/wasm/wasm-constants.h | 1
-rw-r--r--  deps/v8/src/wasm/wasm-debug.cc | 27
-rw-r--r--  deps/v8/src/wasm/wasm-engine.cc | 20
-rw-r--r--  deps/v8/src/wasm/wasm-engine.h | 9
-rw-r--r--  deps/v8/src/wasm/wasm-external-refs.cc | 4
-rw-r--r--  deps/v8/src/wasm/wasm-feature-flags.h | 2
-rw-r--r--  deps/v8/src/wasm/wasm-interpreter.cc | 233
-rw-r--r--  deps/v8/src/wasm/wasm-interpreter.h | 9
-rw-r--r--  deps/v8/src/wasm/wasm-js.cc | 53
-rw-r--r--  deps/v8/src/wasm/wasm-limits.h | 1
-rw-r--r--  deps/v8/src/wasm/wasm-memory.cc | 2
-rw-r--r--  deps/v8/src/wasm/wasm-memory.h | 289
-rw-r--r--  deps/v8/src/wasm/wasm-module-builder.cc | 8
-rw-r--r--  deps/v8/src/wasm/wasm-module.cc | 70
-rw-r--r--  deps/v8/src/wasm/wasm-module.h | 34
-rw-r--r--  deps/v8/src/wasm/wasm-objects-inl.h | 16
-rw-r--r--  deps/v8/src/wasm/wasm-objects.cc | 497
-rw-r--r--  deps/v8/src/wasm/wasm-objects.h | 102
-rw-r--r--  deps/v8/src/wasm/wasm-opcodes.cc | 23
-rw-r--r--  deps/v8/src/wasm/wasm-opcodes.h | 29
-rw-r--r--  deps/v8/src/wasm/wasm-serialization.cc | 27
-rw-r--r--  deps/v8/src/wasm/wasm-text.cc | 16
-rw-r--r--  deps/v8/src/zone/OWNERS | 2
-rw-r--r--  deps/v8/test/benchmarks/benchmarks.status | 8
-rw-r--r--  deps/v8/test/cctest/BUILD.gn | 3
-rw-r--r--  deps/v8/test/cctest/DEPS | 5
-rw-r--r--  deps/v8/test/cctest/cctest.cc | 6
-rw-r--r--  deps/v8/test/cctest/cctest.status | 14
-rw-r--r--  deps/v8/test/cctest/compiler/serializer-tester.cc | 8
-rw-r--r--  deps/v8/test/cctest/compiler/serializer-tester.h | 2
-rw-r--r--  deps/v8/test/cctest/compiler/test-code-assembler.cc | 124
-rw-r--r--  deps/v8/test/cctest/compiler/test-gap-resolver.cc | 8
-rw-r--r--  deps/v8/test/cctest/compiler/test-jump-threading.cc | 135
-rw-r--r--  deps/v8/test/cctest/compiler/test-loop-analysis.cc | 68
-rw-r--r--  deps/v8/test/cctest/compiler/test-multiple-return.cc | 86
-rw-r--r--  deps/v8/test/cctest/compiler/test-run-load-store.cc | 17
-rw-r--r--  deps/v8/test/cctest/compiler/test-run-machops.cc | 24
-rw-r--r--  deps/v8/test/cctest/compiler/test-run-native-calls.cc | 90
-rw-r--r--  deps/v8/test/cctest/heap/heap-tester.h | 2
-rw-r--r--  deps/v8/test/cctest/heap/heap-utils.cc | 14
-rw-r--r--  deps/v8/test/cctest/heap/test-array-buffer-tracker.cc | 4
-rw-r--r--  deps/v8/test/cctest/heap/test-compaction.cc | 108
-rw-r--r--  deps/v8/test/cctest/heap/test-embedder-tracing.cc | 76
-rw-r--r--  deps/v8/test/cctest/heap/test-heap.cc | 386
-rw-r--r--  deps/v8/test/cctest/heap/test-invalidated-slots.cc | 27
-rw-r--r--  deps/v8/test/cctest/heap/test-page-promotion.cc | 15
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode-expectations-printer.cc | 172
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode-expectations-printer.h | 33
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/ArrayLiterals.golden | 10
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncGenerators.golden | 63
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncModules.golden | 349
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/CallAndSpread.golden | 12
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/ClassDeclarations.golden | 155
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/DestructuringAssignment.golden | 42
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/ForAwaitOf.golden | 103
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden | 48
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/ForOfLoop.golden | 104
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden | 26
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/NewAndSpread.golden | 117
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateAccessorDeclaration.golden | 307
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateClassFields.golden | 255
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateMethodDeclaration.golden | 162
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/PublicClassFields.golden | 171
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/StaticClassFields.golden | 217
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/StaticPrivateMethodAccess.golden | 290
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/StaticPrivateMethodDeclaration.golden | 231
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/SuperCallAndSpread.golden | 12
-rw-r--r--  deps/v8/test/cctest/interpreter/generate-bytecode-expectations.cc | 68
-rw-r--r--  deps/v8/test/cctest/interpreter/test-bytecode-generator.cc | 186
-rw-r--r--  deps/v8/test/cctest/interpreter/test-interpreter.cc | 49
-rw-r--r--  deps/v8/test/cctest/libplatform/DEPS | 2
-rw-r--r--  deps/v8/test/cctest/libplatform/test-tracing.cc | 22
-rw-r--r--  deps/v8/test/cctest/manually-externalized-buffer.h | 34
-rw-r--r--  deps/v8/test/cctest/test-accessor-assembler.cc | 1
-rw-r--r--  deps/v8/test/cctest/test-api-accessors.cc | 118
-rw-r--r--  deps/v8/test/cctest/test-api-array-buffer.cc | 158
-rw-r--r--  deps/v8/test/cctest/test-api-interceptors.cc | 46
-rw-r--r--  deps/v8/test/cctest/test-api.cc | 578
-rw-r--r--  deps/v8/test/cctest/test-assembler-arm.cc | 8
-rw-r--r--  deps/v8/test/cctest/test-assembler-arm64.cc | 24
-rw-r--r--  deps/v8/test/cctest/test-assembler-mips.cc | 79
-rw-r--r--  deps/v8/test/cctest/test-assembler-mips64.cc | 79
-rw-r--r--  deps/v8/test/cctest/test-backing-store.cc | 85
-rw-r--r--  deps/v8/test/cctest/test-code-stub-assembler.cc | 201
-rw-r--r--  deps/v8/test/cctest/test-compiler.cc | 2
-rw-r--r--  deps/v8/test/cctest/test-cpu-profiler.cc | 4
-rw-r--r--  deps/v8/test/cctest/test-debug-helper.cc | 95
-rw-r--r--  deps/v8/test/cctest/test-debug.cc | 10
-rw-r--r--  deps/v8/test/cctest/test-disasm-arm.cc | 6
-rw-r--r--  deps/v8/test/cctest/test-disasm-arm64.cc | 2
-rw-r--r--  deps/v8/test/cctest/test-disasm-ia32.cc | 61
-rw-r--r--  deps/v8/test/cctest/test-disasm-x64.cc | 2
-rw-r--r--  deps/v8/test/cctest/test-field-type-tracking.cc | 363
-rw-r--r--  deps/v8/test/cctest/test-flags.cc | 6
-rw-r--r--  deps/v8/test/cctest/test-global-handles.cc | 2
-rw-r--r--  deps/v8/test/cctest/test-heap-profiler.cc | 28
-rw-r--r--  deps/v8/test/cctest/test-inobject-slack-tracking.cc | 2
-rw-r--r--  deps/v8/test/cctest/test-lockers.cc | 2
-rw-r--r--  deps/v8/test/cctest/test-macro-assembler-x64.cc | 38
-rw-r--r--  deps/v8/test/cctest/test-modules.cc | 819
-rw-r--r--  deps/v8/test/cctest/test-orderedhashtable.cc | 121
-rw-r--r--  deps/v8/test/cctest/test-parsing.cc | 87
-rw-r--r--  deps/v8/test/cctest/test-poison-disasm-arm.cc | 44
-rw-r--r--  deps/v8/test/cctest/test-poison-disasm-arm64.cc | 78
-rw-r--r--  deps/v8/test/cctest/test-profile-generator.cc | 15
-rw-r--r--  deps/v8/test/cctest/test-regexp.cc | 575
-rw-r--r--  deps/v8/test/cctest/test-roots.cc | 2
-rw-r--r--  deps/v8/test/cctest/test-serialize.cc | 55
-rw-r--r--  deps/v8/test/cctest/test-smi-lexicographic-compare.cc | 10
-rw-r--r--  deps/v8/test/cctest/test-strings.cc | 63
-rw-r--r--  deps/v8/test/cctest/test-threads.cc | 2
-rw-r--r--  deps/v8/test/cctest/test-trace-event.cc | 6
-rw-r--r--  deps/v8/test/cctest/test-typedarrays.cc | 24
-rw-r--r--  deps/v8/test/cctest/test-unboxed-doubles.cc | 39
-rw-r--r--  deps/v8/test/cctest/torque/test-torque.cc | 1
-rw-r--r--  deps/v8/test/cctest/wasm/OWNERS | 2
-rw-r--r--  deps/v8/test/cctest/wasm/test-grow-memory.cc | 131
-rw-r--r--  deps/v8/test/cctest/wasm/test-jump-table-assembler.cc | 24
-rw-r--r--  deps/v8/test/cctest/wasm/test-run-wasm-64.cc | 4
-rw-r--r--  deps/v8/test/cctest/wasm/test-run-wasm-bulk-memory.cc | 104
-rw-r--r--  deps/v8/test/cctest/wasm/test-run-wasm-interpreter.cc | 2
-rw-r--r--  deps/v8/test/cctest/wasm/test-run-wasm-module.cc | 162
-rw-r--r--  deps/v8/test/cctest/wasm/test-run-wasm-simd.cc | 416
-rw-r--r--  deps/v8/test/cctest/wasm/test-run-wasm.cc | 31
-rw-r--r--  deps/v8/test/cctest/wasm/test-streaming-compilation.cc | 40
-rw-r--r--  deps/v8/test/cctest/wasm/test-wasm-breakpoints.cc | 182
-rw-r--r--  deps/v8/test/cctest/wasm/test-wasm-import-wrapper-cache.cc | 4
-rw-r--r--  deps/v8/test/cctest/wasm/test-wasm-interpreter-entry.cc | 42
-rw-r--r--  deps/v8/test/cctest/wasm/test-wasm-serialization.cc | 30
-rw-r--r--  deps/v8/test/cctest/wasm/test-wasm-shared-engine.cc | 116
-rw-r--r--  deps/v8/test/cctest/wasm/wasm-run-utils.cc | 34
-rw-r--r--  deps/v8/test/common/assembler-tester.h | 4
-rw-r--r--  deps/v8/test/common/wasm/wasm-macro-gen.h | 28
-rw-r--r--  deps/v8/test/debugger/debug/debug-evaluate-shadowed-context-3.js | 39
-rw-r--r--  deps/v8/test/debugger/debug/es6/generators-relocation.js | 3
-rw-r--r--  deps/v8/test/debugger/debugger.status | 9
-rw-r--r--  deps/v8/test/debugger/regress/regress-9482.js | 32
-rw-r--r--  deps/v8/test/fuzzer/fuzzer-support.cc | 2
-rw-r--r--  deps/v8/test/fuzzer/fuzzer-support.h | 2
-rw-r--r--  deps/v8/test/fuzzer/wasm-compile.cc | 12
-rw-r--r--  deps/v8/test/fuzzer/wasm-fuzzer-common.h | 1
-rw-r--r--  deps/v8/test/inspector/cpu-profiler/console-profile-wasm.js | 6
-rw-r--r--  deps/v8/test/inspector/debugger/class-fields-scopes-expected.txt | 22
-rw-r--r--  deps/v8/test/inspector/debugger/pause-on-async-call-expected.txt | 70
-rw-r--r--  deps/v8/test/inspector/debugger/pause-on-async-call-set-timeout.js | 24
-rw-r--r--  deps/v8/test/inspector/debugger/pause-on-async-call.js | 46
-rw-r--r--  deps/v8/test/inspector/debugger/step-into-break-on-async-call-expected.txt | 6
-rw-r--r--  deps/v8/test/inspector/debugger/step-into-break-on-async-call.js | 30
-rw-r--r--  deps/v8/test/inspector/debugger/step-into-external-async-task-expected.txt | 1
-rw-r--r--  deps/v8/test/inspector/debugger/step-into-external-async-task-same-context-expected.txt | 1
-rw-r--r--  deps/v8/test/inspector/debugger/step-into-external-async-task-same-context.js | 7
-rw-r--r--  deps/v8/test/inspector/debugger/step-into-external-async-task.js | 7
-rw-r--r--  deps/v8/test/inspector/debugger/wasm-anyref-global.js | 2
-rw-r--r--  deps/v8/test/inspector/debugger/wasm-clone-module-expected.txt | 6
-rw-r--r--  deps/v8/test/inspector/debugger/wasm-get-breakable-locations-expected.txt | 48
-rw-r--r--  deps/v8/test/inspector/debugger/wasm-get-breakable-locations.js | 4
-rw-r--r--  deps/v8/test/inspector/debugger/wasm-reset-context-group.js | 2
-rw-r--r--  deps/v8/test/inspector/debugger/wasm-scope-info.js | 12
-rw-r--r--  deps/v8/test/inspector/debugger/wasm-scripts-expected.txt | 68
-rw-r--r--  deps/v8/test/inspector/debugger/wasm-scripts.js | 160
-rw-r--r--  deps/v8/test/inspector/debugger/wasm-set-breakpoint-expected.txt | 16
-rw-r--r--  deps/v8/test/inspector/debugger/wasm-set-breakpoint.js | 6
-rw-r--r--  deps/v8/test/inspector/debugger/wasm-stepping-expected.txt | 58
-rw-r--r--  deps/v8/test/inspector/debugger/wasm-stepping-with-source-map-expected.txt | 54
-rw-r--r--  deps/v8/test/inspector/debugger/wasm-stepping-with-source-map.js | 6
-rw-r--r--  deps/v8/test/inspector/debugger/wasm-stepping.js | 146
-rw-r--r--  deps/v8/test/inspector/inspector-test.cc | 8
-rw-r--r--  deps/v8/test/inspector/isolate-data.cc | 5
-rw-r--r--  deps/v8/test/inspector/isolate-data.h | 2
-rw-r--r--  deps/v8/test/inspector/runtime/evaluate-with-disable-breaks-expected.txt | 72
-rw-r--r--  deps/v8/test/inspector/runtime/evaluate-with-disable-breaks.js | 60
-rw-r--r--  deps/v8/test/inspector/task-runner.h | 1
-rw-r--r--  deps/v8/test/intl/assert.js | 2
-rw-r--r--  deps/v8/test/intl/bigint/tolocalestring.js | 2
-rw-r--r--  deps/v8/test/intl/date-format/check-calendar.js | 20
-rw-r--r--  deps/v8/test/intl/date-format/check-numbering-system.js | 24
-rw-r--r--  deps/v8/test/intl/date-format/constructor-calendar-numberingSytem-order.js | 35
-rw-r--r--  deps/v8/test/intl/date-format/constructor-date-style-order.js | 2
-rw-r--r--  deps/v8/test/intl/date-format/constructor-date-time-style-order.js | 2
-rw-r--r--  deps/v8/test/intl/date-format/constructor-date-time-style.js | 2
-rw-r--r--  deps/v8/test/intl/date-format/constructor-no-style-order.js | 2
-rw-r--r--  deps/v8/test/intl/date-format/constructor-time-style-order.js | 2
-rw-r--r--  deps/v8/test/intl/date-format/en-format-range-to-parts.js | 2
-rw-r--r--  deps/v8/test/intl/date-format/format-range-to-parts.js | 2
-rw-r--r--  deps/v8/test/intl/date-format/format-range.js | 2
-rw-r--r--  deps/v8/test/intl/date-format/format-with-extensions.js | 12
-rw-r--r--  deps/v8/test/intl/date-format/property-override-date-style.js | 4
-rw-r--r--  deps/v8/test/intl/date-format/property-override-date-time-style.js | 2
-rw-r--r--  deps/v8/test/intl/date-format/property-override-time-style.js | 2
-rw-r--r--  deps/v8/test/intl/date-format/related-year.js | 14
-rw-r--r--  deps/v8/test/intl/general/case-mapping.js | 3
-rw-r--r--  deps/v8/test/intl/general/grandfathered_tags_without_preferred_value.js | 10
-rw-r--r--  deps/v8/test/intl/general/language_tags_with_preferred_values.js | 2
-rw-r--r--  deps/v8/test/intl/general/supported-locales-of.js | 7
-rw-r--r--  deps/v8/test/intl/intl.status | 9
-rw-r--r--  deps/v8/test/intl/list-format/resolved-options.js | 4
-rw-r--r--  deps/v8/test/intl/number-format/check-numbering-system.js | 23
-rw-r--r--  deps/v8/test/intl/number-format/constructor-numberingSytem-order.js | 30
-rw-r--r--  deps/v8/test/intl/number-format/property-override.js | 80
-rw-r--r--  deps/v8/test/intl/number-format/unified/compact-display.js | 2
-rw-r--r--  deps/v8/test/intl/number-format/unified/constructor-order.js | 1
-rw-r--r--  deps/v8/test/intl/number-format/unified/currency-display.js | 2
-rw-r--r--  deps/v8/test/intl/number-format/unified/currency-sign.js | 2
-rw-r--r--  deps/v8/test/intl/number-format/unified/no-compact-display.js | 2
-rw-r--r--  deps/v8/test/intl/number-format/unified/notation-engineering-formatToParts.js | 2
-rw-r--r--  deps/v8/test/intl/number-format/unified/notation-scientific-formatToParts.js | 2
-rw-r--r--  deps/v8/test/intl/number-format/unified/notation.js | 2
-rw-r--r--  deps/v8/test/intl/number-format/unified/percent.js | 2
-rw-r--r--  deps/v8/test/intl/number-format/unified/sign-display.js | 2
-rw-r--r--  deps/v8/test/intl/number-format/unified/style-unit.js | 2
-rw-r--r--  deps/v8/test/intl/number-format/unified/unit-display.js | 2
-rw-r--r--  deps/v8/test/intl/regress-1003748.js | 18
-rw-r--r--  deps/v8/test/intl/regress-1012579.js | 6
-rw-r--r--  deps/v8/test/intl/regress-8725514.js | 2
-rw-r--r--  deps/v8/test/intl/regress-903566.js | 4
-rw-r--r--  deps/v8/test/intl/regress-9356.js | 14
-rw-r--r--  deps/v8/test/intl/regress-9408.js | 1
-rw-r--r--  deps/v8/test/intl/regress-9464.js | 51
-rw-r--r--  deps/v8/test/intl/regress-9475.js | 1
-rw-r--r--  deps/v8/test/intl/regress-9513.js | 1
-rw-r--r--  deps/v8/test/intl/regress-9731.js | 15
-rw-r--r--  deps/v8/test/intl/regress-9747.js | 50
-rw-r--r--  deps/v8/test/intl/regress-9786.js | 21
-rw-r--r--  deps/v8/test/intl/regress-9787.js | 10
-rw-r--r--  deps/v8/test/intl/regress-9788.js | 29
-rw-r--r--  deps/v8/test/intl/regress-9812.js | 65
-rw-r--r--  deps/v8/test/intl/regress-9849.js | 15
-rw-r--r--  deps/v8/test/intl/regress-992694.js | 12
-rw-r--r--  deps/v8/test/intl/regress-997401.js | 7
-rw-r--r--  deps/v8/test/intl/relative-time-format/check-numbering-system.js | 85
-rw-r--r--  deps/v8/test/intl/relative-time-format/resolved-options.js | 4
-rw-r--r--  deps/v8/test/js-perf-test/BytecodeHandlers/LdaGlobal.js | 60
-rw-r--r--  deps/v8/test/js-perf-test/JSTests3.json | 11
-rw-r--r--  deps/v8/test/message/fail/modules-import-top-level-await-fail-1.mjs | 9
-rw-r--r--  deps/v8/test/message/fail/modules-import-top-level-await-fail-1.out | 3
-rw-r--r--  deps/v8/test/message/fail/modules-import-top-level-await-fail-2.mjs | 9
-rw-r--r--  deps/v8/test/message/fail/modules-import-top-level-await-fail-2.out | 3
-rw-r--r--  deps/v8/test/message/fail/modules-skip-1-top-level-await-fail.mjs | 7
-rw-r--r--  deps/v8/test/message/fail/modules-skip-2-top-level-await-fail.mjs | 7
-rw-r--r--  deps/v8/test/message/fail/modules-skip-3-top-level-await-fail.mjs | 5
-rw-r--r--  deps/v8/test/message/message.status | 2
-rw-r--r--  deps/v8/test/message/wasm-trace-memory-interpreted.js | 1
-rw-r--r--  deps/v8/test/message/wasm-trace-memory-interpreted.out | 2
-rw-r--r--  deps/v8/test/message/wasm-trace-memory-liftoff.js | 4
-rw-r--r--  deps/v8/test/message/wasm-trace-memory-liftoff.out | 2
-rw-r--r--  deps/v8/test/message/wasm-trace-memory.js | 21
-rw-r--r--  deps/v8/test/message/wasm-trace-memory.out | 2
-rw-r--r--  deps/v8/test/mjsunit/asm/regress-1013920.js | 17
-rw-r--r--  deps/v8/test/mjsunit/bit-not.js | 13
-rw-r--r--  deps/v8/test/mjsunit/code-coverage-block-async.js | 122
-rw-r--r--  deps/v8/test/mjsunit/code-coverage-block.js | 44
-rw-r--r--  deps/v8/test/mjsunit/compiler/concurrent-inlining-1.js | 26
-rw-r--r--  deps/v8/test/mjsunit/compiler/concurrent-inlining-2.js | 26
-rw-r--r--  deps/v8/test/mjsunit/compiler/promise-constructor.js | 2
-rw-r--r--  deps/v8/test/mjsunit/compiler/regress-9017.js | 2
-rw-r--r--  deps/v8/test/mjsunit/d8/d8-performance-measure-memory.js | 47
-rw-r--r--  deps/v8/test/mjsunit/es6/array-iterator-detached.js | 4
-rw-r--r--  deps/v8/test/mjsunit/es6/block-conflicts-sloppy.js | 2
-rw-r--r--  deps/v8/test/mjsunit/es6/block-const-assign.js | 6
-rw-r--r--  deps/v8/test/mjsunit/es6/iterator-eager-deopt.js | 69
-rw-r--r--  deps/v8/test/mjsunit/es6/iterator-invalid-receiver-opt.js | 51
-rw-r--r--  deps/v8/test/mjsunit/es6/iterator-lazy-deopt.js | 71
-rw-r--r--  deps/v8/test/mjsunit/es6/large-classes-properties.js | 2
-rw-r--r--  deps/v8/test/mjsunit/global-proxy-globalThis.js | 91
-rw-r--r--  deps/v8/test/mjsunit/global-proxy-this.js | 91
-rw-r--r--  deps/v8/test/mjsunit/harmony/modules-import-15-top-level-await.mjs | 58
-rw-r--r--  deps/v8/test/mjsunit/harmony/modules-import-15.mjs | 3
-rw-r--r--  deps/v8/test/mjsunit/harmony/modules-import-rqstd-order-top-level-await-1.mjs | 12
-rw-r--r--  deps/v8/test/mjsunit/harmony/modules-import-rqstd-order-top-level-await-2.mjs | 15
-rw-r--r--  deps/v8/test/mjsunit/harmony/modules-import-rqstd-order-top-level-await-3.mjs | 13
-rw-r--r--  deps/v8/test/mjsunit/harmony/modules-import-rqstd-order-top-level-await-4.mjs | 17
-rw-r--r--  deps/v8/test/mjsunit/harmony/modules-import-rqstd-order-top-level-await-5.mjs | 16
-rw-r--r--  deps/v8/test/mjsunit/harmony/modules-import-rqstd-order-top-level-await-6.mjs | 16
-rw-r--r--  deps/v8/test/mjsunit/harmony/modules-import-rqstd-order-top-level-await-7.mjs | 12
-rw-r--r--  deps/v8/test/mjsunit/harmony/modules-import-rqstd-order-top-level-await-8.mjs | 12
-rw-r--r--  deps/v8/test/mjsunit/harmony/modules-import-top-level-await-1.mjs | 14
-rw-r--r--  deps/v8/test/mjsunit/harmony/modules-import-top-level-await-2.mjs | 10
-rw-r--r--  deps/v8/test/mjsunit/harmony/modules-import-top-level-await-3.mjs | 14
-rw-r--r--  deps/v8/test/mjsunit/harmony/modules-import-top-level-await-4.mjs | 9
-rw-r--r--  deps/v8/test/mjsunit/harmony/modules-import-top-level-await-5.mjs | 10
-rw-r--r--  deps/v8/test/mjsunit/harmony/modules-import-top-level-await-6.mjs | 10
-rw-r--r--  deps/v8/test/mjsunit/harmony/modules-import-top-level-await-7.mjs | 9
-rw-r--r--  deps/v8/test/mjsunit/harmony/modules-import-top-level-await-8.mjs | 9
-rw-r--r--  deps/v8/test/mjsunit/harmony/modules-import-top-level-await-cycle.mjs | 16
-rw-r--r--  deps/v8/test/mjsunit/harmony/modules-import-top-level-await-exception-1.mjs | 18
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-import-top-level-await-exception-2.mjs16
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-import-top-level-await-exception-3.mjs16
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-skip-1-rqstd-order-indirect-top-level-await.mjs6
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-skip-1-rqstd-order-indirect-unreached-top-level-await.mjs8
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-skip-1-rqstd-order-top-level-await.mjs12
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-skip-1-rqstd-order-unreached-top-level-await.mjs14
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-skip-1-rqstd-order.mjs9
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-skip-1-top-level-await-cycle.mjs8
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-skip-1-top-level-await.mjs11
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-skip-2-rqstd-order-indirect-top-level-await.mjs6
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-skip-2-rqstd-order-top-level-await.mjs8
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-skip-2-rqstd-order.mjs5
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-skip-2-top-level-await-cycle.mjs7
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-skip-2-top-level-await.mjs15
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-skip-3-rqstd-order-indirect-top-level-await.mjs6
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-skip-3-rqstd-order-top-level-await.mjs8
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-skip-3-rqstd-order.mjs5
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-skip-3-top-level-await-cycle.mjs7
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-skip-3-top-level-await.mjs12
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-skip-4-rqstd-order-indirect-top-level-await.mjs6
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-skip-4-rqstd-order-top-level-await.mjs8
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-skip-4-rqstd-order.mjs5
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-skip-4-top-level-await-cycle.mjs7
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-skip-4-top-level-await.mjs7
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-skip-5-top-level-await.mjs5
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-skip-6-top-level-await.mjs10
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-skip-7-top-level-await.mjs14
-rw-r--r--deps/v8/test/mjsunit/harmony/private-accessors.js24
-rw-r--r--deps/v8/test/mjsunit/harmony/private-fields.js9
-rw-r--r--deps/v8/test/mjsunit/harmony/private-methods.js9
-rw-r--r--deps/v8/test/mjsunit/harmony/private-name-scopes.js137
-rw-r--r--deps/v8/test/mjsunit/harmony/regexp-match-indices.js105
-rw-r--r--deps/v8/test/mjsunit/harmony/sharedarraybuffer-stress.js4
-rw-r--r--deps/v8/test/mjsunit/harmony/static-private-methods.js248
-rw-r--r--deps/v8/test/mjsunit/md5.js8
-rw-r--r--deps/v8/test/mjsunit/messages.js4
-rw-r--r--deps/v8/test/mjsunit/mjsunit.status82
-rw-r--r--deps/v8/test/mjsunit/mod.js1
-rw-r--r--deps/v8/test/mjsunit/packed-elements.js11
-rw-r--r--deps/v8/test/mjsunit/readonly.js2
-rw-r--r--deps/v8/test/mjsunit/regexp-tier-up-multiple.js101
-rw-r--r--deps/v8/test/mjsunit/regexp-tier-up.js19
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1002827.js13
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1003730.js25
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1003919.js20
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1004912.js12
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1005400.js23
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1006629.js10
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1006640.js20
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1006670.js5
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1011980.js22
-rw-r--r--deps/v8/test/mjsunit/regress/regress-752764.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-779407.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-9165.js10
-rw-r--r--deps/v8/test/mjsunit/regress/regress-9832.js35
-rw-r--r--deps/v8/test/mjsunit/regress/regress-9894.js48
-rw-r--r--deps/v8/test/mjsunit/regress/regress-996161.js43
-rw-r--r--deps/v8/test/mjsunit/regress/regress-997485.js127
-rw-r--r--deps/v8/test/mjsunit/regress/regress-997989.js27
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1002628.js22
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1003403.js10
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1003732.js25
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1004037.js23
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1004061.js55
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1006592.js21
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1008632.js24
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1012301-1.js27
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1012301.js23
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1015567.js5
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1015945.js20
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-729597.js5
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-808192.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-941743.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-v8-9758.js9
-rw-r--r--deps/v8/test/mjsunit/regress/regress-v8-9825.mjs11
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-02256.js14
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1014798.js20
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1016515.js20
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-648079.js9
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-689450.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-702460.js6
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-7049.js18
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-7353.js6
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-7366.js14
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-739768.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-7422.js10
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-7508.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-752423.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-7565.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-7579.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-763697.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-771243.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-772332.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-7785.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-791810.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-793551.js4
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-801785.js4
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-802244.js6
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-8059.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-808848.js4
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-808980.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-817380.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-834619.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-834624.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-8505.js4
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-854011.js18
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-854050.js14
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-864509.js8
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-894307.js4
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-910824.js16
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-917412.js6
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-918284.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-918917.js6
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-919308.js12
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-919533.js4
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-922432.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-922670.js4
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-922933.js26
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-924843.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-935138.js4
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-952342.js6
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-957405.js6
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-968078.js6
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-9759.js26
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-crbug-1006631.js7
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-crbug-1007608.js2
-rw-r--r--deps/v8/test/mjsunit/string-replace-gc.js33
-rw-r--r--deps/v8/test/mjsunit/tools/compiler-trace-flags-wasm.js4
-rw-r--r--deps/v8/test/mjsunit/unicodelctest-no-optimization.js1
-rw-r--r--deps/v8/test/mjsunit/wasm/OWNERS2
-rw-r--r--deps/v8/test/mjsunit/wasm/adapter-frame.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/anyfunc.js58
-rw-r--r--deps/v8/test/mjsunit/wasm/anyref-globals.js120
-rw-r--r--deps/v8/test/mjsunit/wasm/anyref.js36
-rw-r--r--deps/v8/test/mjsunit/wasm/asm-wasm-i32.js23
-rw-r--r--deps/v8/test/mjsunit/wasm/asm-wasm-u32.js23
-rw-r--r--deps/v8/test/mjsunit/wasm/atomics-stress.js18
-rw-r--r--deps/v8/test/mjsunit/wasm/atomics.js24
-rw-r--r--deps/v8/test/mjsunit/wasm/atomics64-stress.js20
-rw-r--r--deps/v8/test/mjsunit/wasm/bigint.js18
-rw-r--r--deps/v8/test/mjsunit/wasm/bounds-check-64bit.js4
-rw-r--r--deps/v8/test/mjsunit/wasm/bounds-check-turbofan.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/bulk-memory.js18
-rw-r--r--deps/v8/test/mjsunit/wasm/calls.js8
-rw-r--r--deps/v8/test/mjsunit/wasm/code-space-exhaustion.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/compare-exchange-stress.js46
-rw-r--r--deps/v8/test/mjsunit/wasm/compare-exchange64-stress.js46
-rw-r--r--deps/v8/test/mjsunit/wasm/compilation-hints-async-compilation.js8
-rw-r--r--deps/v8/test/mjsunit/wasm/compilation-hints-decoder.js44
-rw-r--r--deps/v8/test/mjsunit/wasm/compilation-hints-ignored.js4
-rw-r--r--deps/v8/test/mjsunit/wasm/compilation-hints-interpreter.js24
-rw-r--r--deps/v8/test/mjsunit/wasm/compilation-hints-lazy-validation.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/compilation-hints-streaming-compilation.js32
-rw-r--r--deps/v8/test/mjsunit/wasm/compilation-hints-streaming-lazy-validation.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/compilation-hints-sync-compilation.js8
-rw-r--r--deps/v8/test/mjsunit/wasm/compiled-module-serialization.js20
-rw-r--r--deps/v8/test/mjsunit/wasm/data-segments.js4
-rw-r--r--deps/v8/test/mjsunit/wasm/divrem-trap.js4
-rw-r--r--deps/v8/test/mjsunit/wasm/empirical_max_memory.js6
-rw-r--r--deps/v8/test/mjsunit/wasm/ensure-wasm-binaries-up-to-date.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/errors.js4
-rw-r--r--deps/v8/test/mjsunit/wasm/exceptions-anyref.js10
-rw-r--r--deps/v8/test/mjsunit/wasm/exceptions-global.js20
-rw-r--r--deps/v8/test/mjsunit/wasm/exceptions-rethrow.js24
-rw-r--r--deps/v8/test/mjsunit/wasm/exceptions-simd.js4
-rw-r--r--deps/v8/test/mjsunit/wasm/exceptions.js94
-rw-r--r--deps/v8/test/mjsunit/wasm/export-mutable-global.js4
-rw-r--r--deps/v8/test/mjsunit/wasm/ffi-error.js14
-rw-r--r--deps/v8/test/mjsunit/wasm/ffi.js14
-rw-r--r--deps/v8/test/mjsunit/wasm/float-constant-folding.js4
-rw-r--r--deps/v8/test/mjsunit/wasm/futex.js22
-rw-r--r--deps/v8/test/mjsunit/wasm/gc-buffer.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/gc-frame.js42
-rw-r--r--deps/v8/test/mjsunit/wasm/gc-memory.js41
-rw-r--r--deps/v8/test/mjsunit/wasm/gc-stress.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/globals.js28
-rw-r--r--deps/v8/test/mjsunit/wasm/graceful_shutdown.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/graceful_shutdown_during_tierup.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/grow-memory-detaching.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/grow-memory-in-branch.js44
-rw-r--r--deps/v8/test/mjsunit/wasm/grow-memory-in-call.js98
-rw-r--r--deps/v8/test/mjsunit/wasm/grow-memory-in-loop.js44
-rw-r--r--deps/v8/test/mjsunit/wasm/grow-memory.js20
-rw-r--r--deps/v8/test/mjsunit/wasm/grow-shared-memory.js22
-rw-r--r--deps/v8/test/mjsunit/wasm/huge-memory.js4
-rw-r--r--deps/v8/test/mjsunit/wasm/import-function.js10
-rw-r--r--deps/v8/test/mjsunit/wasm/import-memory.js38
-rw-r--r--deps/v8/test/mjsunit/wasm/import-mutable-global.js16
-rw-r--r--deps/v8/test/mjsunit/wasm/import-table.js14
-rw-r--r--deps/v8/test/mjsunit/wasm/indirect-call-non-zero-table.js22
-rw-r--r--deps/v8/test/mjsunit/wasm/indirect-calls.js48
-rw-r--r--deps/v8/test/mjsunit/wasm/indirect-sig-mismatch.js6
-rw-r--r--deps/v8/test/mjsunit/wasm/indirect-tables.js64
-rw-r--r--deps/v8/test/mjsunit/wasm/instance-memory-gc-stress.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/instantiate-module-basic.js10
-rw-r--r--deps/v8/test/mjsunit/wasm/interpreter-mixed.js10
-rw-r--r--deps/v8/test/mjsunit/wasm/interpreter.js36
-rw-r--r--deps/v8/test/mjsunit/wasm/lazy-compilation.js6
-rw-r--r--deps/v8/test/mjsunit/wasm/liftoff-trap-handler.js8
-rw-r--r--deps/v8/test/mjsunit/wasm/liftoff.js6
-rw-r--r--deps/v8/test/mjsunit/wasm/loop-rotation.js12
-rw-r--r--deps/v8/test/mjsunit/wasm/many-modules.js45
-rw-r--r--deps/v8/test/mjsunit/wasm/many-parameters.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/memory-external-call.js44
-rw-r--r--deps/v8/test/mjsunit/wasm/memory-instance-validation.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/memory_1gb_oob.js6
-rw-r--r--deps/v8/test/mjsunit/wasm/memory_2gb_oob.js6
-rw-r--r--deps/v8/test/mjsunit/wasm/memory_4gb_oob.js6
-rw-r--r--deps/v8/test/mjsunit/wasm/module-memory.js39
-rw-r--r--deps/v8/test/mjsunit/wasm/multi-value.js193
-rw-r--r--deps/v8/test/mjsunit/wasm/multiple-code-spaces.js54
-rw-r--r--deps/v8/test/mjsunit/wasm/origin-trial-flags.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/parallel_compilation.js10
-rw-r--r--deps/v8/test/mjsunit/wasm/params.js4
-rw-r--r--deps/v8/test/mjsunit/wasm/receiver.js4
-rw-r--r--deps/v8/test/mjsunit/wasm/return-calls.js40
-rw-r--r--deps/v8/test/mjsunit/wasm/shared-arraybuffer-worker-simple-gc.js84
-rw-r--r--deps/v8/test/mjsunit/wasm/shared-memory-gc-stress.js4
-rw-r--r--deps/v8/test/mjsunit/wasm/shared-memory-worker-gc.js6
-rw-r--r--deps/v8/test/mjsunit/wasm/shared-memory-worker-simple-gc.js85
-rw-r--r--deps/v8/test/mjsunit/wasm/shared-memory.js8
-rw-r--r--deps/v8/test/mjsunit/wasm/stackwalk.js4
-rw-r--r--deps/v8/test/mjsunit/wasm/start-function.js6
-rw-r--r--deps/v8/test/mjsunit/wasm/streaming-api.js8
-rw-r--r--deps/v8/test/mjsunit/wasm/streaming-error-position.js6
-rw-r--r--deps/v8/test/mjsunit/wasm/table-access.js10
-rw-r--r--deps/v8/test/mjsunit/wasm/table-copy-anyref.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/table-copy.js10
-rw-r--r--deps/v8/test/mjsunit/wasm/table-fill.js8
-rw-r--r--deps/v8/test/mjsunit/wasm/table-grow-from-wasm.js32
-rw-r--r--deps/v8/test/mjsunit/wasm/table-grow.js26
-rw-r--r--deps/v8/test/mjsunit/wasm/test-wasm-module-builder.js14
-rw-r--r--deps/v8/test/mjsunit/wasm/trap-location.js8
-rw-r--r--deps/v8/test/mjsunit/wasm/type-reflection-with-anyref.js4
-rw-r--r--deps/v8/test/mjsunit/wasm/type-reflection-with-mv.js80
-rw-r--r--deps/v8/test/mjsunit/wasm/type-reflection.js4
-rw-r--r--deps/v8/test/mjsunit/wasm/unicode.js6
-rw-r--r--deps/v8/test/mjsunit/wasm/user-properties-exported.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/user-properties-module.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/wasm-math-intrinsic.js6
-rw-r--r--deps/v8/test/mjsunit/wasm/wasm-module-builder.js25
-rw-r--r--deps/v8/test/mjsunit/wasm/worker-interpreter.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/worker-module.js2
-rw-r--r--deps/v8/test/mkgrokdump/mkgrokdump.cc34
-rw-r--r--deps/v8/test/mozilla/mozilla.status5
-rw-r--r--deps/v8/test/test262/OWNERS1
-rw-r--r--deps/v8/test/test262/test262.status843
-rw-r--r--deps/v8/test/test262/testcfg.py14
-rw-r--r--deps/v8/test/unittests/BUILD.gn2
-rw-r--r--deps/v8/test/unittests/api/isolate-unittest.cc3
-rw-r--r--deps/v8/test/unittests/base/template-utils-unittest.cc2
-rw-r--r--deps/v8/test/unittests/base/utils/random-number-generator-unittest.cc31
-rw-r--r--deps/v8/test/unittests/codegen/code-stub-assembler-unittest.cc1
-rw-r--r--deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-unittest.cc5
-rw-r--r--deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc239
-rw-r--r--deps/v8/test/unittests/compiler/backend/instruction-sequence-unittest.h12
-rw-r--r--deps/v8/test/unittests/compiler/common-operator-unittest.cc3
-rw-r--r--deps/v8/test/unittests/compiler/int64-lowering-unittest.cc4
-rw-r--r--deps/v8/test/unittests/compiler/js-call-reducer-unittest.cc10
-rw-r--r--deps/v8/test/unittests/compiler/js-operator-unittest.cc4
-rw-r--r--deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc7
-rw-r--r--deps/v8/test/unittests/compiler/regalloc/OWNERS4
-rw-r--r--deps/v8/test/unittests/compiler/regalloc/move-optimizer-unittest.cc44
-rw-r--r--deps/v8/test/unittests/compiler/regalloc/register-allocator-unittest.cc2
-rw-r--r--deps/v8/test/unittests/heap/heap-unittest.cc3
-rw-r--r--deps/v8/test/unittests/heap/slot-set-unittest.cc19
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc3
-rw-r--r--deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc223
-rw-r--r--deps/v8/test/unittests/libplatform/default-worker-threads-task-runner-unittest.cc38
-rw-r--r--deps/v8/test/unittests/logging/counters-unittest.cc5
-rw-r--r--deps/v8/test/unittests/objects/backing-store-unittest.cc128
-rw-r--r--deps/v8/test/unittests/objects/object-unittest.cc17
-rw-r--r--deps/v8/test/unittests/objects/osr-optimized-code-cache-unittest.cc412
-rw-r--r--deps/v8/test/unittests/objects/value-serializer-unittest.cc51
-rw-r--r--deps/v8/test/unittests/tasks/background-compile-task-unittest.cc3
-rw-r--r--deps/v8/test/unittests/tasks/cancelable-tasks-unittest.cc2
-rw-r--r--deps/v8/test/unittests/test-helpers.cc3
-rw-r--r--deps/v8/test/unittests/unittests.status15
-rw-r--r--deps/v8/test/unittests/wasm/OWNERS2
-rw-r--r--deps/v8/test/unittests/wasm/control-transfer-unittest.cc3
-rw-r--r--deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc70
-rw-r--r--deps/v8/test/unittests/wasm/loop-assignment-analysis-unittest.cc4
-rw-r--r--deps/v8/test/unittests/wasm/module-decoder-unittest.cc83
-rw-r--r--deps/v8/test/unittests/wasm/streaming-decoder-unittest.cc8
-rw-r--r--deps/v8/test/unittests/wasm/trap-handler-x64-unittest.cc37
-rw-r--r--deps/v8/test/unittests/wasm/wasm-code-manager-unittest.cc225
-rw-r--r--deps/v8/test/wasm-js/testcfg.py7
-rw-r--r--deps/v8/test/wasm-js/tests.tar.gz.sha12
-rw-r--r--deps/v8/test/wasm-js/wasm-js.status3
-rw-r--r--deps/v8/test/wasm-spec-tests/OWNERS2
-rw-r--r--deps/v8/test/wasm-spec-tests/testcfg.py7
-rw-r--r--deps/v8/test/wasm-spec-tests/tests.tar.gz.sha12
-rw-r--r--deps/v8/test/wasm-spec-tests/wasm-spec-tests.status11
-rw-r--r--deps/v8/third_party/inspector_protocol/README.v82
-rw-r--r--deps/v8/third_party/inspector_protocol/bindings/bindings.h3
-rw-r--r--deps/v8/third_party/inspector_protocol/encoding/encoding.cc23
-rw-r--r--deps/v8/third_party/inspector_protocol/encoding/encoding.h22
-rw-r--r--deps/v8/third_party/inspector_protocol/encoding/encoding_test.cc65
-rw-r--r--deps/v8/third_party/inspector_protocol/lib/DispatcherBase_cpp.template14
-rw-r--r--deps/v8/third_party/inspector_protocol/lib/DispatcherBase_h.template5
-rw-r--r--deps/v8/third_party/inspector_protocol/templates/TypeBuilder_cpp.template1
-rw-r--r--deps/v8/third_party/v8/builtins/array-sort.tq1
-rw-r--r--deps/v8/tools/clusterfuzz/toolchain/BUILD.gn15
-rwxr-xr-xdeps/v8/tools/clusterfuzz/v8_foozzie.py9
-rwxr-xr-xdeps/v8/tools/clusterfuzz/v8_foozzie_test.py3
-rw-r--r--deps/v8/tools/clusterfuzz/v8_fuzz_config.py21
-rw-r--r--deps/v8/tools/debug_helper/BUILD.gn3
-rw-r--r--deps/v8/tools/debug_helper/debug-helper-internal.cc11
-rw-r--r--deps/v8/tools/debug_helper/debug-helper-internal.h42
-rw-r--r--deps/v8/tools/debug_helper/debug-helper.h28
-rw-r--r--deps/v8/tools/debug_helper/gen-heap-constants.py19
-rw-r--r--deps/v8/tools/debug_helper/get-object-properties.cc321
-rw-r--r--deps/v8/tools/debug_helper/heap-constants.cc52
-rw-r--r--deps/v8/tools/debug_helper/heap-constants.h47
-rwxr-xr-xdeps/v8/tools/dev/gm.py3
-rw-r--r--deps/v8/tools/gcmole/BUILD.gn1
-rw-r--r--deps/v8/tools/gcmole/gcmole.lua19
-rw-r--r--deps/v8/tools/gdbinit8
-rw-r--r--deps/v8/tools/gen-postmortem-metadata.py31
-rwxr-xr-xdeps/v8/tools/generate-header-include-checks.py2
-rw-r--r--deps/v8/tools/heap-stats/categories.js30
-rw-r--r--deps/v8/tools/ic-explorer.html2
-rw-r--r--deps/v8/tools/inspect-d8.js30
-rwxr-xr-xdeps/v8/tools/js2c.py14
-rw-r--r--deps/v8/tools/mips_toolchain.tar.gz.sha11
-rwxr-xr-xdeps/v8/tools/regexp-sequences.py67
-rwxr-xr-xdeps/v8/tools/release/merge_to_branch.py2
-rw-r--r--deps/v8/tools/testrunner/base_runner.py40
-rw-r--r--deps/v8/tools/testrunner/local/junit_output.py49
-rw-r--r--deps/v8/tools/testrunner/local/statusfile.py9
-rw-r--r--deps/v8/tools/testrunner/local/variants.py2
-rwxr-xr-xdeps/v8/tools/testrunner/standard_runner.py58
-rw-r--r--deps/v8/tools/testrunner/testproc/progress.py58
-rwxr-xr-xdeps/v8/tools/torque/format-torque.py2
-rw-r--r--deps/v8/tools/ubsan/blacklist.txt6
-rwxr-xr-xdeps/v8/tools/unittests/run_tests_test.py5
-rw-r--r--deps/v8/tools/unittests/testdata/testroot1/test/sweet/sweet.status2
-rw-r--r--deps/v8/tools/unittests/testdata/testroot1/v8_build_config.json1
-rw-r--r--deps/v8/tools/unittests/testdata/testroot2/v8_build_config.json1
-rw-r--r--deps/v8/tools/v8heapconst.py757
-rw-r--r--deps/v8/tools/wasm-compilation-hints/OWNERS2
-rwxr-xr-xdeps/v8/tools/wasm/update-wasm-spec-tests.sh2
-rw-r--r--deps/v8/tools/whitespace.txt2
-rw-r--r--deps/v8/tools/windbg.js315
1433 files changed, 48332 insertions, 30291 deletions
diff --git a/deps/v8/.gitignore b/deps/v8/.gitignore
index 6a9bbd6799..c7f3cba05c 100644
--- a/deps/v8/.gitignore
+++ b/deps/v8/.gitignore
@@ -79,8 +79,6 @@
/tools/jsfunfuzz/jsfunfuzz
/tools/jsfunfuzz/jsfunfuzz.tar.gz
/tools/luci-go
-/tools/mips_toolchain
-/tools/mips_toolchain.tar.gz
/tools/oom_dump/oom_dump
/tools/oom_dump/oom_dump.o
/tools/swarming_client
diff --git a/deps/v8/AUTHORS b/deps/v8/AUTHORS
index 1198de8f35..40c4f16c81 100644
--- a/deps/v8/AUTHORS
+++ b/deps/v8/AUTHORS
@@ -111,6 +111,7 @@ Johan Bergström <johan@bergstroem.nu>
Jonathan Liu <net147@gmail.com>
Julien Brianceau <jbriance@cisco.com>
JunHo Seo <sejunho@gmail.com>
+Junming Huang <kiminghjm@gmail.com>
Kang-Hao (Kenny) Lu <kennyluck@csail.mit.edu>
Karl Skomski <karl@skomski.com>
Kevin Gibbons <bakkot@gmail.com>
@@ -124,6 +125,7 @@ Marcin Cieślak <saper@marcincieslak.com>
Marcin Wiącek <marcin@mwiacek.com>
Mateusz Czeladka <mateusz.szczap@gmail.com>
Matheus Marchini <mat@mmarchini.me>
+Matheus Marchini <mmarchini@netflix.com>
Mathias Bynens <mathias@qiwi.be>
Matt Hanselman <mjhanselman@gmail.com>
Matthew Sporleder <msporleder@gmail.com>
diff --git a/deps/v8/BUILD.gn b/deps/v8/BUILD.gn
index a026749a31..0a1f12b5de 100644
--- a/deps/v8/BUILD.gn
+++ b/deps/v8/BUILD.gn
@@ -156,9 +156,7 @@ declare_args() {
# List of extra files to snapshot. They will be snapshotted in order so
# if files export symbols used by later files, they should go first.
- #
- # This default is used by cctests. Projects using V8 will want to override.
- v8_extra_library_files = [ "//test/cctest/test-extra.js" ]
+ v8_extra_library_files = []
v8_enable_gdbjit =
((v8_current_cpu == "x86" || v8_current_cpu == "x64") &&
@@ -204,6 +202,15 @@ declare_args() {
v8_enable_regexp_interpreter_threaded_dispatch = true
}
+# Toggle pointer compression for correctness fuzzing when building the
+# clang_x64_pointer_compression toolchain. We'll correctness-compare the
+# default build with the clang_x64_pointer_compression build.
+if (v8_multi_arch_build &&
+ rebase_path(get_label_info(":d8", "root_out_dir"), root_build_dir) ==
+ "clang_x64_pointer_compression") {
+ v8_enable_pointer_compression = !v8_enable_pointer_compression
+}
+
# Derived defaults.
if (v8_enable_verify_heap == "") {
v8_enable_verify_heap = v8_enable_debugging_features
@@ -231,7 +238,7 @@ if (v8_enable_snapshot_native_code_counters == "") {
v8_enable_snapshot_native_code_counters = v8_enable_debugging_features
}
if (v8_enable_shared_ro_heap == "") {
- v8_enable_shared_ro_heap = !v8_enable_pointer_compression && v8_use_snapshot
+ v8_enable_shared_ro_heap = !v8_enable_pointer_compression
}
if (v8_enable_fast_torque == "") {
v8_enable_fast_torque = v8_enable_fast_mksnapshot
@@ -249,14 +256,14 @@ assert(v8_current_cpu != "x86" || !v8_untrusted_code_mitigations,
assert(!v8_enable_lite_mode || v8_enable_embedded_builtins,
"Lite mode requires embedded builtins")
-assert(!v8_enable_lite_mode || v8_use_snapshot,
- "Lite mode requires a snapshot build")
assert(
!v8_enable_pointer_compression || !v8_enable_shared_ro_heap,
"Pointer compression is not supported with shared read-only heap enabled")
-assert(v8_use_snapshot || !v8_enable_shared_ro_heap,
- "Shared read-only heap requires snapshot")
+
+assert(v8_extra_library_files == [],
+ "v8_extra_library_files is no longer supported. Consider implementing " +
+ "custom API in C++ instead.")
v8_random_seed = "314159265"
v8_toolset_for_shell = "host"
@@ -431,11 +438,8 @@ config("features") {
if (v8_enable_handle_zapping) {
defines += [ "ENABLE_HANDLE_ZAPPING" ]
}
- if (v8_use_snapshot) {
- defines += [ "V8_USE_SNAPSHOT" ]
- if (v8_enable_snapshot_native_code_counters) {
- defines += [ "V8_SNAPSHOT_NATIVE_CODE_COUNTERS" ]
- }
+ if (v8_enable_snapshot_native_code_counters) {
+ defines += [ "V8_SNAPSHOT_NATIVE_CODE_COUNTERS" ]
}
if (v8_enable_single_generation) {
defines += [ "V8_ENABLE_SINGLE_GENERATION" ]
@@ -644,6 +648,30 @@ config("toolchain") {
defines += [ "V8_ANDROID_LOG_STDOUT" ]
}
+ # V8_TARGET_OS_ defines. The target OS may differ from host OS e.g. in
+ # mksnapshot. We additionally set V8_HAVE_TARGET_OS to determine that a
+ # target OS has in fact been set; otherwise we internally assume that target
+ # OS == host OS (see v8config.h).
+ if (target_os == "android") {
+ defines += [ "V8_HAVE_TARGET_OS" ]
+ defines += [ "V8_TARGET_OS_ANDROID" ]
+ } else if (target_os == "fuchsia") {
+ defines += [ "V8_HAVE_TARGET_OS" ]
+ defines += [ "V8_TARGET_OS_FUCHSIA" ]
+ } else if (target_os == "ios") {
+ defines += [ "V8_HAVE_TARGET_OS" ]
+ defines += [ "V8_TARGET_OS_IOS" ]
+ } else if (target_os == "linux") {
+ defines += [ "V8_HAVE_TARGET_OS" ]
+ defines += [ "V8_TARGET_OS_LINUX" ]
+ } else if (target_os == "mac") {
+ defines += [ "V8_HAVE_TARGET_OS" ]
+ defines += [ "V8_TARGET_OS_MACOSX" ]
+ } else if (target_os == "win") {
+ defines += [ "V8_HAVE_TARGET_OS" ]
+ defines += [ "V8_TARGET_OS_WIN" ]
+ }
+
# TODO(jochen): Support v8_enable_prof on Windows.
# TODO(jochen): Add support for compiling with simulators.
@@ -895,6 +923,8 @@ action("postmortem-metadata") {
"src/objects/code.h",
"src/objects/data-handler.h",
"src/objects/data-handler-inl.h",
+ "src/objects/descriptor-array.h",
+ "src/objects/descriptor-array-inl.h",
"src/objects/feedback-cell.h",
"src/objects/feedback-cell-inl.h",
"src/objects/fixed-array-inl.h",
@@ -913,6 +943,7 @@ action("postmortem-metadata") {
"src/objects/js-promise-inl.h",
"src/objects/js-promise.h",
"src/objects/js-regexp-inl.h",
+ "src/objects/js-regexp.cc",
"src/objects/js-regexp.h",
"src/objects/js-regexp-string-iterator-inl.h",
"src/objects/js-regexp-string-iterator.h",
@@ -924,6 +955,8 @@ action("postmortem-metadata") {
"src/objects/name-inl.h",
"src/objects/oddball-inl.h",
"src/objects/oddball.h",
+ "src/objects/primitive-heap-object.h",
+ "src/objects/primitive-heap-object-inl.h",
"src/objects/scope-info.h",
"src/objects/script.h",
"src/objects/script-inl.h",
@@ -936,6 +969,7 @@ action("postmortem-metadata") {
"src/objects/string-inl.h",
"src/objects/struct.h",
"src/objects/struct-inl.h",
+ "$target_gen_dir/torque-generated/instance-types-tq.h",
]
outputs = [
@@ -944,6 +978,10 @@ action("postmortem-metadata") {
args = rebase_path(outputs, root_build_dir) +
rebase_path(sources, root_build_dir)
+
+ deps = [
+ ":run_torque",
+ ]
}
torque_files = [
@@ -993,9 +1031,13 @@ torque_files = [
"src/builtins/proxy-set-prototype-of.tq",
"src/builtins/proxy.tq",
"src/builtins/reflect.tq",
+ "src/builtins/regexp-exec.tq",
+ "src/builtins/regexp-match-all.tq",
"src/builtins/regexp-match.tq",
"src/builtins/regexp-replace.tq",
+ "src/builtins/regexp-search.tq",
"src/builtins/regexp-source.tq",
+ "src/builtins/regexp-split.tq",
"src/builtins/regexp-test.tq",
"src/builtins/regexp.tq",
"src/builtins/string.tq",
@@ -1047,6 +1089,7 @@ action("run_torque") {
outputs = [
"$target_gen_dir/torque-generated/builtin-definitions-tq.h",
+ "$target_gen_dir/torque-generated/interface-descriptors-tq.inc",
"$target_gen_dir/torque-generated/field-offsets-tq.h",
"$target_gen_dir/torque-generated/class-verifiers-tq.cc",
"$target_gen_dir/torque-generated/class-verifiers-tq.h",
@@ -1280,31 +1323,29 @@ template("run_mksnapshot") {
}
}
-if (v8_use_snapshot) {
- run_mksnapshot("default") {
+run_mksnapshot("default") {
+ args = []
+ if (v8_enable_embedded_builtins) {
+ embedded_variant = "Default"
+ }
+}
+if (emit_builtins_as_inline_asm) {
+ asm_to_inline_asm("default") {
args = []
+ }
+}
+if (v8_use_multi_snapshots) {
+ run_mksnapshot("trusted") {
+ args = [ "--no-untrusted-code-mitigations" ]
if (v8_enable_embedded_builtins) {
- embedded_variant = "Default"
+ embedded_variant = "Trusted"
}
}
if (emit_builtins_as_inline_asm) {
- asm_to_inline_asm("default") {
+ asm_to_inline_asm("trusted") {
args = []
}
}
- if (v8_use_multi_snapshots) {
- run_mksnapshot("trusted") {
- args = [ "--no-untrusted-code-mitigations" ]
- if (v8_enable_embedded_builtins) {
- embedded_variant = "Trusted"
- }
- }
- if (emit_builtins_as_inline_asm) {
- asm_to_inline_asm("trusted") {
- args = []
- }
- }
- }
}
action("v8_dump_build_config") {
@@ -1334,7 +1375,6 @@ action("v8_dump_build_config") {
"v8_enable_i18n_support=$v8_enable_i18n_support",
"v8_enable_verify_predictable=$v8_enable_verify_predictable",
"v8_target_cpu=\"$v8_target_cpu\"",
- "v8_use_snapshot=$v8_use_snapshot",
"v8_enable_embedded_builtins=$v8_enable_embedded_builtins",
"v8_enable_verify_csa=$v8_enable_verify_csa",
"v8_enable_lite_mode=$v8_enable_lite_mode",
@@ -1355,19 +1395,13 @@ action("v8_dump_build_config") {
#
source_set("v8_maybe_snapshot") {
- if (v8_use_snapshot && v8_use_external_startup_data) {
+ if (v8_use_external_startup_data) {
public_deps = [
":v8_external_snapshot",
]
- } else if (v8_use_snapshot) {
- public_deps = [
- ":v8_snapshot",
- ]
} else {
- # Ignore v8_use_external_startup_data setting if no snapshot is used.
public_deps = [
- ":v8_init",
- ":v8_nosnapshot",
+ ":v8_snapshot",
]
}
}
@@ -1393,7 +1427,7 @@ v8_source_set("v8_nosnapshot") {
configs = [ ":internal_config" ]
}
-if (v8_use_snapshot && !v8_use_external_startup_data) {
+if (!v8_use_external_startup_data) {
v8_source_set("v8_snapshot") {
# Only targets in this file and the top-level visibility target can
# depend on this.
@@ -1435,7 +1469,7 @@ if (v8_use_snapshot && !v8_use_external_startup_data) {
}
}
-if (v8_use_snapshot && v8_use_external_startup_data) {
+if (v8_use_external_startup_data) {
v8_source_set("v8_external_snapshot") {
visibility = [ ":*" ] # Only targets in this file can depend on this.
@@ -1852,6 +1886,8 @@ v8_compiler_sources = [
"src/compiler/machine-operator.h",
"src/compiler/map-inference.cc",
"src/compiler/map-inference.h",
+ "src/compiler/memory-lowering.cc",
+ "src/compiler/memory-lowering.h",
"src/compiler/memory-optimizer.cc",
"src/compiler/memory-optimizer.h",
"src/compiler/node-aux-data.h",
@@ -2047,6 +2083,7 @@ v8_source_set("v8_base_without_compiler") {
"src/builtins/builtins-api.cc",
"src/builtins/builtins-array.cc",
"src/builtins/builtins-arraybuffer.cc",
+ "src/builtins/builtins-async-module.cc",
"src/builtins/builtins-bigint.cc",
"src/builtins/builtins-call.cc",
"src/builtins/builtins-callsite.cc",
@@ -2143,6 +2180,7 @@ v8_source_set("v8_base_without_compiler") {
"src/codegen/string-constants.h",
"src/codegen/tick-counter.cc",
"src/codegen/tick-counter.h",
+ "src/codegen/tnode.h",
"src/codegen/turbo-assembler.cc",
"src/codegen/turbo-assembler.h",
"src/codegen/unoptimized-compilation-info.cc",
@@ -2316,6 +2354,8 @@ v8_source_set("v8_base_without_compiler") {
"src/heap/mark-compact.h",
"src/heap/marking.cc",
"src/heap/marking.h",
+ "src/heap/memory-measurement.cc",
+ "src/heap/memory-measurement.h",
"src/heap/memory-reducer.cc",
"src/heap/memory-reducer.h",
"src/heap/object-stats.cc",
@@ -2337,9 +2377,6 @@ v8_source_set("v8_base_without_compiler") {
"src/heap/spaces-inl.h",
"src/heap/spaces.cc",
"src/heap/spaces.h",
- "src/heap/store-buffer-inl.h",
- "src/heap/store-buffer.cc",
- "src/heap/store-buffer.h",
"src/heap/stress-marking-observer.cc",
"src/heap/stress-marking-observer.h",
"src/heap/stress-scavenge-observer.cc",
@@ -2461,6 +2498,8 @@ v8_source_set("v8_base_without_compiler") {
"src/objects/api-callbacks.h",
"src/objects/arguments-inl.h",
"src/objects/arguments.h",
+ "src/objects/backing-store.cc",
+ "src/objects/backing-store.h",
"src/objects/bigint.cc",
"src/objects/bigint.h",
"src/objects/cell-inl.h",
@@ -2515,6 +2554,7 @@ v8_source_set("v8_base_without_compiler") {
"src/objects/heap-object.h",
"src/objects/instance-type-inl.h",
"src/objects/instance-type.h",
+ "src/objects/internal-index.h",
"src/objects/intl-objects.cc",
"src/objects/intl-objects.h",
"src/objects/js-array-buffer-inl.h",
@@ -2558,6 +2598,7 @@ v8_source_set("v8_base_without_compiler") {
"src/objects/js-regexp-inl.h",
"src/objects/js-regexp-string-iterator-inl.h",
"src/objects/js-regexp-string-iterator.h",
+ "src/objects/js-regexp.cc",
"src/objects/js-regexp.h",
"src/objects/js-relative-time-format-inl.h",
"src/objects/js-relative-time-format.cc",
@@ -2613,6 +2654,11 @@ v8_source_set("v8_base_without_compiler") {
"src/objects/ordered-hash-table-inl.h",
"src/objects/ordered-hash-table.cc",
"src/objects/ordered-hash-table.h",
+ "src/objects/osr-optimized-code-cache-inl.h",
+ "src/objects/osr-optimized-code-cache.cc",
+ "src/objects/osr-optimized-code-cache.h",
+ "src/objects/primitive-heap-object-inl.h",
+ "src/objects/primitive-heap-object.h",
"src/objects/promise-inl.h",
"src/objects/promise.h",
"src/objects/property-array-inl.h",
@@ -2738,6 +2784,9 @@ v8_source_set("v8_base_without_compiler") {
"src/regexp/regexp-bytecode-generator-inl.h",
"src/regexp/regexp-bytecode-generator.cc",
"src/regexp/regexp-bytecode-generator.h",
+ "src/regexp/regexp-bytecode-peephole.cc",
+ "src/regexp/regexp-bytecode-peephole.h",
+ "src/regexp/regexp-bytecodes.cc",
"src/regexp/regexp-bytecodes.h",
"src/regexp/regexp-compiler-tonode.cc",
"src/regexp/regexp-compiler.cc",
@@ -2754,13 +2803,13 @@ v8_source_set("v8_base_without_compiler") {
"src/regexp/regexp-nodes.h",
"src/regexp/regexp-parser.cc",
"src/regexp/regexp-parser.h",
- "src/regexp/regexp-special-case.h",
"src/regexp/regexp-stack.cc",
"src/regexp/regexp-stack.h",
"src/regexp/regexp-utils.cc",
"src/regexp/regexp-utils.h",
"src/regexp/regexp.cc",
"src/regexp/regexp.h",
+ "src/regexp/special-case.h",
"src/roots/roots-inl.h",
"src/roots/roots.cc",
"src/roots/roots.h",
@@ -2953,8 +3002,6 @@ v8_source_set("v8_base_without_compiler") {
"src/wasm/wasm-js.h",
"src/wasm/wasm-limits.h",
"src/wasm/wasm-linkage.h",
- "src/wasm/wasm-memory.cc",
- "src/wasm/wasm-memory.h",
"src/wasm/wasm-module-builder.cc",
"src/wasm/wasm-module-builder.h",
"src/wasm/wasm-module-sourcemap.cc",
@@ -3386,6 +3433,7 @@ v8_source_set("torque_base") {
"src/torque/global-context.h",
"src/torque/implementation-visitor.cc",
"src/torque/implementation-visitor.h",
+ "src/torque/instance-type-generator.cc",
"src/torque/instructions.cc",
"src/torque/instructions.h",
"src/torque/server-data.cc",
@@ -3482,7 +3530,6 @@ v8_source_set("torque_ls_base") {
v8_component("v8_libbase") {
sources = [
- "src/base/adapters.h",
"src/base/address-region.h",
"src/base/atomic-utils.h",
"src/base/atomicops.h",
@@ -3668,6 +3715,12 @@ v8_component("v8_libbase") {
]
}
+ if (is_ubsan && (v8_current_cpu == "x86" || v8_current_cpu == "arm" ||
+ v8_current_cpu == "mips")) {
+ # Special UBSan 32-bit requirement.
+ sources += [ "src/base/ubsan.cc" ]
+ }
+
if (is_tsan && !build_with_chromium) {
data += [ "tools/sanitizers/tsan_suppressions.txt" ]
}
@@ -3840,7 +3893,7 @@ if (current_toolchain == v8_generator_toolchain) {
}
}
-if (v8_use_snapshot && current_toolchain == v8_snapshot_toolchain) {
+if (current_toolchain == v8_snapshot_toolchain) {
v8_executable("mksnapshot") {
visibility = [ ":*" ] # Only targets in this file can depend on this.
@@ -4037,6 +4090,7 @@ group("v8_clusterfuzz") {
":d8(//build/toolchain/linux:clang_x64_v8_arm64)",
":d8(//build/toolchain/linux:clang_x86)",
":d8(//build/toolchain/linux:clang_x86_v8_arm)",
+ ":d8(tools/clusterfuzz/toolchain:clang_x64_pointer_compression)",
]
}
}
@@ -4115,13 +4169,10 @@ if (is_component_build) {
":torque_ls_base",
":v8_base",
":v8_headers",
+ ":v8_initializers",
":v8_maybe_snapshot",
]
- if (v8_use_snapshot) {
- public_deps += [ ":v8_initializers" ]
- }
-
configs = [ ":internal_config" ]
public_configs = [ ":external_config" ]
@@ -4143,13 +4194,10 @@ if (is_component_build) {
":torque_base",
":torque_ls_base",
":v8_base",
+ ":v8_initializers",
":v8_maybe_snapshot",
]
- if (v8_use_snapshot) {
- public_deps += [ ":v8_initializers" ]
- }
-
public_configs = [ ":external_config" ]
}
}
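[Illustrative aside; not part of the diff. The "toolchain" config in the BUILD.gn hunk above now sets a V8_TARGET_OS_* define together with V8_HAVE_TARGET_OS, and its comment notes that when no target OS is declared, v8config.h assumes target OS == host OS (relevant e.g. for mksnapshot, which runs on the host while producing output for the target). A minimal C++ preprocessor sketch of that fallback, assuming the V8_OS_* host-detection macros that v8config.h defines; the exact header logic is not shown in this diff:

// Sketch only: if the build files did not declare a target OS
// (V8_HAVE_TARGET_OS unset), fall back to the detected host OS,
// i.e. assume target OS == host OS. V8_OS_ANDROID is checked first
// because Android hosts also detect as Linux.
#if !defined(V8_HAVE_TARGET_OS)
# if defined(V8_OS_ANDROID)
#  define V8_TARGET_OS_ANDROID
# elif defined(V8_OS_LINUX)
#  define V8_TARGET_OS_LINUX
# elif defined(V8_OS_MACOSX)
#  define V8_TARGET_OS_MACOSX
# elif defined(V8_OS_WIN)
#  define V8_TARGET_OS_WIN
# endif
#endif

With this split in place, target-OS-specific code paths can test V8_TARGET_OS_* instead of V8_OS_*, which only describes the host.]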
diff --git a/deps/v8/COMMON_OWNERS b/deps/v8/COMMON_OWNERS
index 79f1428658..542c5abd3e 100644
--- a/deps/v8/COMMON_OWNERS
+++ b/deps/v8/COMMON_OWNERS
@@ -4,7 +4,7 @@ bbudge@chromium.org
binji@chromium.org
bmeurer@chromium.org
cbruni@chromium.org
-clemensh@chromium.org
+clemensb@chromium.org
danno@chromium.org
delphick@chromium.org
gdeepti@chromium.org
diff --git a/deps/v8/ChangeLog b/deps/v8/ChangeLog
index be6a58859c..b3ca3548e8 100644
--- a/deps/v8/ChangeLog
+++ b/deps/v8/ChangeLog
@@ -1,3 +1,1618 @@
+2019-10-16: Version 7.9.317
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-16: Version 7.9.316
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-16: Version 7.9.315
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-15: Version 7.9.314
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-15: Version 7.9.313
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-15: Version 7.9.312
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-15: Version 7.9.311
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-15: Version 7.9.310
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-15: Version 7.9.309
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-15: Version 7.9.308
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-15: Version 7.9.307
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-14: Version 7.9.306
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-14: Version 7.9.305
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-14: Version 7.9.304
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-14: Version 7.9.303
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-14: Version 7.9.302
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-14: Version 7.9.301
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-14: Version 7.9.300
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-14: Version 7.9.299
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-14: Version 7.9.298
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-14: Version 7.9.297
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-13: Version 7.9.296
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-12: Version 7.9.295
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-12: Version 7.9.294
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-11: Version 7.9.293
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-11: Version 7.9.292
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-11: Version 7.9.291
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-11: Version 7.9.290
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-11: Version 7.9.289
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-11: Version 7.9.288
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-11: Version 7.9.287
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-11: Version 7.9.286
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-11: Version 7.9.285
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-11: Version 7.9.284
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-11: Version 7.9.283
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-11: Version 7.9.282
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-11: Version 7.9.281
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-11: Version 7.9.280
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-11: Version 7.9.279
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-11: Version 7.9.278
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-11: Version 7.9.277
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-11: Version 7.9.276
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-10: Version 7.9.275
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-10: Version 7.9.274
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-10: Version 7.9.273
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-10: Version 7.9.272
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-10: Version 7.9.271
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-10: Version 7.9.270
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-10: Version 7.9.269
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-10: Version 7.9.268
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-10: Version 7.9.267
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-10: Version 7.9.266
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-10: Version 7.9.265
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-10: Version 7.9.264
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-09: Version 7.9.263
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-09: Version 7.9.262
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-09: Version 7.9.261
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-09: Version 7.9.260
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-09: Version 7.9.259
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-09: Version 7.9.258
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-09: Version 7.9.257
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-09: Version 7.9.256
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-09: Version 7.9.255
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-09: Version 7.9.254
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-09: Version 7.9.253
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-09: Version 7.9.252
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-09: Version 7.9.251
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-09: Version 7.9.250
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-09: Version 7.9.249
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-09: Version 7.9.248
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-09: Version 7.9.247
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-09: Version 7.9.246
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-09: Version 7.9.245
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-09: Version 7.9.244
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-09: Version 7.9.243
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-09: Version 7.9.242
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-08: Version 7.9.241
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-08: Version 7.9.240
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-08: Version 7.9.239
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-08: Version 7.9.238
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-08: Version 7.9.237
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-08: Version 7.9.236
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-08: Version 7.9.235
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-08: Version 7.9.234
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-08: Version 7.9.233
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-08: Version 7.9.232
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-07: Version 7.9.231
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-07: Version 7.9.230
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-07: Version 7.9.229
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-07: Version 7.9.228
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-07: Version 7.9.227
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-07: Version 7.9.226
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-07: Version 7.9.225
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-07: Version 7.9.224
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-07: Version 7.9.223
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-07: Version 7.9.222
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-07: Version 7.9.221
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-06: Version 7.9.220
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-05: Version 7.9.219
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-04: Version 7.9.218
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-04: Version 7.9.217
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-04: Version 7.9.216
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-04: Version 7.9.215
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-04: Version 7.9.214
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-04: Version 7.9.213
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-04: Version 7.9.212
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-04: Version 7.9.211
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-04: Version 7.9.210
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-04: Version 7.9.209
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-04: Version 7.9.208
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-04: Version 7.9.207
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-04: Version 7.9.206
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-04: Version 7.9.205
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-04: Version 7.9.204
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-03: Version 7.9.203
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-03: Version 7.9.202
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-03: Version 7.9.201
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-03: Version 7.9.200
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-03: Version 7.9.199
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-02: Version 7.9.198
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-02: Version 7.9.197
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-02: Version 7.9.196
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-02: Version 7.9.195
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-02: Version 7.9.194
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-02: Version 7.9.193
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-02: Version 7.9.192
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-02: Version 7.9.191
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-02: Version 7.9.190
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-02: Version 7.9.189
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-02: Version 7.9.188
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-01: Version 7.9.187
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-01: Version 7.9.186
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-01: Version 7.9.185
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-01: Version 7.9.184
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-01: Version 7.9.183
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-01: Version 7.9.182
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-01: Version 7.9.181
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-01: Version 7.9.180
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-01: Version 7.9.179
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-01: Version 7.9.178
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-01: Version 7.9.177
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-01: Version 7.9.176
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-01: Version 7.9.175
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-01: Version 7.9.174
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-01: Version 7.9.173
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-01: Version 7.9.172
+
+ Performance and stability improvements on all platforms.
+
+
+2019-10-01: Version 7.9.171
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-30: Version 7.9.170
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-30: Version 7.9.169
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-30: Version 7.9.168
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-30: Version 7.9.167
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-30: Version 7.9.166
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-30: Version 7.9.165
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-30: Version 7.9.164
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-29: Version 7.9.163
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-28: Version 7.9.162
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-27: Version 7.9.161
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-27: Version 7.9.160
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-27: Version 7.9.159
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-27: Version 7.9.158
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-27: Version 7.9.157
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-27: Version 7.9.156
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-27: Version 7.9.155
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-27: Version 7.9.154
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-27: Version 7.9.153
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-27: Version 7.9.152
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-26: Version 7.9.151
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-26: Version 7.9.150
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-26: Version 7.9.149
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-26: Version 7.9.148
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-26: Version 7.9.147
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-26: Version 7.9.146
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-26: Version 7.9.145
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-26: Version 7.9.144
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-26: Version 7.9.143
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-26: Version 7.9.142
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-26: Version 7.9.141
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-26: Version 7.9.140
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-26: Version 7.9.139
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-26: Version 7.9.138
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-26: Version 7.9.137
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-26: Version 7.9.136
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-26: Version 7.9.135
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-26: Version 7.9.134
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-26: Version 7.9.133
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-26: Version 7.9.132
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-25: Version 7.9.131
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-25: Version 7.9.130
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-25: Version 7.9.129
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-25: Version 7.9.128
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-25: Version 7.9.127
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-25: Version 7.9.126
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-25: Version 7.9.125
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-25: Version 7.9.124
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-25: Version 7.9.123
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-25: Version 7.9.122
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-25: Version 7.9.121
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-25: Version 7.9.120
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-25: Version 7.9.119
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-25: Version 7.9.118
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-25: Version 7.9.117
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-24: Version 7.9.116
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-24: Version 7.9.115
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-24: Version 7.9.114
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-24: Version 7.9.113
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-24: Version 7.9.112
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-24: Version 7.9.111
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-24: Version 7.9.110
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-24: Version 7.9.109
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-24: Version 7.9.108
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-24: Version 7.9.107
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-24: Version 7.9.106
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-24: Version 7.9.105
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-24: Version 7.9.104
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-23: Version 7.9.103
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-23: Version 7.9.102
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-23: Version 7.9.101
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-23: Version 7.9.100
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-23: Version 7.9.99
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-23: Version 7.9.98
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-23: Version 7.9.97
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-23: Version 7.9.96
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-23: Version 7.9.95
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-23: Version 7.9.94
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-23: Version 7.9.93
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-23: Version 7.9.92
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-23: Version 7.9.91
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-22: Version 7.9.90
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-21: Version 7.9.89
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-20: Version 7.9.88
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-20: Version 7.9.87
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-20: Version 7.9.86
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-20: Version 7.9.85
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-20: Version 7.9.84
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-20: Version 7.9.83
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-20: Version 7.9.82
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-20: Version 7.9.81
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-20: Version 7.9.80
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-20: Version 7.9.79
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-20: Version 7.9.78
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-20: Version 7.9.77
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-19: Version 7.9.76
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-19: Version 7.9.75
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-19: Version 7.9.74
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-19: Version 7.9.73
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-19: Version 7.9.72
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-19: Version 7.9.71
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-19: Version 7.9.70
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-19: Version 7.9.69
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-18: Version 7.9.68
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-18: Version 7.9.67
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-18: Version 7.9.66
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-18: Version 7.9.65
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-18: Version 7.9.64
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-18: Version 7.9.63
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-18: Version 7.9.62
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-18: Version 7.9.61
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-18: Version 7.9.60
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-18: Version 7.9.59
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-18: Version 7.9.58
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-17: Version 7.9.57
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-17: Version 7.9.56
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-16: Version 7.9.55
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-16: Version 7.9.54
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-16: Version 7.9.53
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-16: Version 7.9.52
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-16: Version 7.9.51
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-16: Version 7.9.50
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-16: Version 7.9.49
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-16: Version 7.9.48
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-16: Version 7.9.47
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-16: Version 7.9.46
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-16: Version 7.9.45
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-16: Version 7.9.44
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-14: Version 7.9.43
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-13: Version 7.9.42
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-13: Version 7.9.41
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-13: Version 7.9.40
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-13: Version 7.9.39
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-13: Version 7.9.38
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-13: Version 7.9.37
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-13: Version 7.9.36
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-13: Version 7.9.35
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-12: Version 7.9.34
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-12: Version 7.9.33
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-12: Version 7.9.32
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-12: Version 7.9.31
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-12: Version 7.9.30
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-12: Version 7.9.29
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-12: Version 7.9.28
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-12: Version 7.9.27
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-12: Version 7.9.26
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-12: Version 7.9.25
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-12: Version 7.9.24
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-12: Version 7.9.23
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-12: Version 7.9.22
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-12: Version 7.9.21
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-12: Version 7.9.20
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-12: Version 7.9.19
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-11: Version 7.9.18
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-11: Version 7.9.17
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-11: Version 7.9.16
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-11: Version 7.9.15
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-11: Version 7.9.14
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-11: Version 7.9.13
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-11: Version 7.9.12
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-10: Version 7.9.11
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-10: Version 7.9.10
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-10: Version 7.9.9
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-09: Version 7.9.8
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-09: Version 7.9.7
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-09: Version 7.9.6
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-09: Version 7.9.5
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-09: Version 7.9.4
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-09: Version 7.9.3
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-09: Version 7.9.2
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-08: Version 7.9.1
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-05: Version 7.8.285
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-04: Version 7.8.284
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-04: Version 7.8.283
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-04: Version 7.8.282
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-04: Version 7.8.281
+
+ Performance and stability improvements on all platforms.
+
+
+2019-09-04: Version 7.8.280
+
+ Performance and stability improvements on all platforms.
+
+
2019-09-04: Version 7.8.279
Performance and stability improvements on all platforms.
diff --git a/deps/v8/DEPS b/deps/v8/DEPS
index a7d4081edb..0faa57e5b0 100644
--- a/deps/v8/DEPS
+++ b/deps/v8/DEPS
@@ -23,11 +23,10 @@ vars = {
'android_url': 'https://android.googlesource.com',
'download_gcmole': False,
'download_jsfunfuzz': False,
- 'download_mips_toolchain': False,
'check_v8_header_includes': False,
# GN CIPD package version.
- 'gn_version': 'git_revision:152c5144ceed9592c20f0c8fd55769646077569b',
+ 'gn_version': 'git_revision:ad9e442d92dcd9ee73a557428cfc336b55cbd533',
# luci-go CIPD package version.
'luci_go': 'git_revision:7d11fd9e66407c49cb6c8546a2ae45ea993a240c',
@@ -72,15 +71,15 @@ vars = {
deps = {
'v8/build':
- Var('chromium_url') + '/chromium/src/build.git' + '@' + '693faeda4ee025796c7e473d953a5a7b6ad64c93',
+ Var('chromium_url') + '/chromium/src/build.git' + '@' + '082f11b29976c3be67dddd74bd75c6d1793201c7',
'v8/third_party/depot_tools':
- Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + 'f38bc1796282c61087dcf15abc61b8fd18a68402',
+ Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + 'ba97f6065ed1e9336585468dd85e680cf09d5166',
'v8/third_party/icu':
- Var('chromium_url') + '/chromium/deps/icu.git' + '@' + '53f6b233a41ec982d8445996247093f7aaf41639',
+ Var('chromium_url') + '/chromium/deps/icu.git' + '@' + '5005010d694e16571b8dfbf07d70817841f80a69',
'v8/third_party/instrumented_libraries':
- Var('chromium_url') + '/chromium/src/third_party/instrumented_libraries.git' + '@' + 'b1c3ca20848c117eb935b02c25d441f03e6fbc5e',
+ Var('chromium_url') + '/chromium/src/third_party/instrumented_libraries.git' + '@' + 'e2897773b97b65f70b0bb15b753c73d9f6e3afdb',
'v8/buildtools':
- Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + '74cfb57006f83cfe050817526db359d5c8a11628',
+ Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + 'cf454b247c611167388742c7a31ef138a6031172',
'v8/buildtools/clang_format/script':
Var('chromium_url') + '/chromium/llvm-project/cfe/tools/clang-format.git' + '@' + '96636aa0e9f047f17447f2d45a094d0b59ed7917',
'v8/buildtools/linux64': {
@@ -122,7 +121,7 @@ deps = {
'v8/base/trace_event/common':
Var('chromium_url') + '/chromium/src/base/trace_event/common.git' + '@' + '5e4fce17a9d2439c44a7b57ceecef6df9287ec2f',
'v8/third_party/android_ndk': {
- 'url': Var('chromium_url') + '/android_ndk.git' + '@' + '62582753e869484bf0cc7f7e8d184ce0077033c2',
+ 'url': Var('chromium_url') + '/android_ndk.git' + '@' + '89e8db0cdf323af8bc24de875d7d2a43a66bf10e',
'condition': 'checkout_android',
},
'v8/third_party/android_sdk/public': {
@@ -168,7 +167,7 @@ deps = {
'dep_type': 'cipd',
},
'v8/third_party/catapult': {
- 'url': Var('chromium_url') + '/catapult.git' + '@' + 'e7c719c3e85f76938bf4fef0ba37c27f89246f71',
+ 'url': Var('chromium_url') + '/catapult.git' + '@' + 'b9fad2fbcc499b984d88f4c4aec26d162297efae',
'condition': 'checkout_android',
},
'v8/third_party/colorama/src': {
@@ -180,19 +179,19 @@ deps = {
'condition': 'checkout_fuchsia',
},
'v8/third_party/googletest/src':
- Var('chromium_url') + '/external/github.com/google/googletest.git' + '@' + '565f1b848215b77c3732bca345fe76a0431d8b34',
+ Var('chromium_url') + '/external/github.com/google/googletest.git' + '@' + 'f2fb48c3b3d79a75a88a99fba6576b25d42ec528',
'v8/third_party/jinja2':
Var('chromium_url') + '/chromium/src/third_party/jinja2.git' + '@' + 'b41863e42637544c2941b574c7877d3e1f663e25',
'v8/third_party/markupsafe':
Var('chromium_url') + '/chromium/src/third_party/markupsafe.git' + '@' + '8f45f5cfa0009d2a70589bcda0349b8cb2b72783',
'v8/tools/swarming_client':
- Var('chromium_url') + '/infra/luci/client-py.git' + '@' + '96f125709acfd0b48fc1e5dae7d6ea42291726ac',
+ Var('chromium_url') + '/infra/luci/client-py.git' + '@' + '885b3febcc170a60f25795304e60927b77d1e92d',
'v8/test/benchmarks/data':
Var('chromium_url') + '/v8/deps/third_party/benchmarks.git' + '@' + '05d7188267b4560491ff9155c5ee13e207ecd65f',
'v8/test/mozilla/data':
Var('chromium_url') + '/v8/deps/third_party/mozilla-tests.git' + '@' + 'f6c578a10ea707b1a8ab0b88943fe5115ce2b9be',
'v8/test/test262/data':
- Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + '59a1a016b7cf5cf43f66b274c7d1db4ec6066935',
+ Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + 'd49777de27240262fa65c3b49dc014839e6897da',
'v8/test/test262/harness':
Var('chromium_url') + '/external/github.com/test262-utils/test262-harness-py.git' + '@' + '4555345a943d0c99a9461182705543fb171dda4b',
'v8/third_party/qemu-linux-x64': {
@@ -216,7 +215,7 @@ deps = {
'dep_type': 'cipd',
},
'v8/tools/clang':
- Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + '2fef805e5b05b26a8c87c47865590b5f43218611',
+ Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + 'c5d85f1e9d3a01e4de2ccf4dfaa7847653ae9121',
'v8/tools/luci-go': {
'packages': [
{
@@ -246,7 +245,7 @@ deps = {
'dep_type': 'cipd',
},
'v8/third_party/perfetto':
- Var('android_url') + '/platform/external/perfetto.git' + '@' + '01615892494a9a8dc84414962d0a817bf97de2c2',
+ Var('android_url') + '/platform/external/perfetto.git' + '@' + '28b633cd961b50c4c75bfb7f62eeac79e27c1a79',
'v8/third_party/protobuf':
Var('chromium_url') + '/external/github.com/google/protobuf'+ '@' + 'b68a347f56137b4b1a746e8c7438495a6ac1bd91',
}
@@ -392,13 +391,6 @@ hooks = [
'--arch=x86'],
},
{
- 'name': 'sysroot_mips',
- 'pattern': '.',
- 'condition': '(checkout_linux and checkout_mips)',
- 'action': ['python', 'v8/build/linux/sysroot_scripts/install-sysroot.py',
- '--arch=mips'],
- },
- {
'name': 'sysroot_x64',
'pattern': '.',
'condition': 'checkout_linux and checkout_x64',
@@ -495,19 +487,6 @@ hooks = [
'condition': 'host_os == "mac" and checkout_fuchsia',
'action': ['python', 'v8/tools/clang/scripts/download_objdump.py'],
},
- {
- 'name': 'mips_toolchain',
- 'pattern': '.',
- 'condition': 'download_mips_toolchain',
- 'action': [ 'download_from_google_storage',
- '--no_resume',
- '--platform=linux',
- '--no_auth',
- '-u',
- '--bucket', 'chromium-v8',
- '-s', 'v8/tools/mips_toolchain.tar.gz.sha1',
- ],
- },
# Download and initialize "vpython" VirtualEnv environment packages.
{
'name': 'vpython_common',
diff --git a/deps/v8/OWNERS b/deps/v8/OWNERS
index 9ab84b1e27..e096d3c950 100644
--- a/deps/v8/OWNERS
+++ b/deps/v8/OWNERS
@@ -16,7 +16,8 @@ per-file BUILD.gn=file:COMMON_OWNERS
per-file DEPS=file:INFRA_OWNERS
# For Test262 rolls.
per-file DEPS=mathias@chromium.org
-per-file PRESUBMIT=file:INFRA_OWNERS
+per-file DEPS=syg@chromium.org
+per-file PRESUBMIT.py=file:INFRA_OWNERS
per-file codereview.settings=file:INFRA_OWNERS
per-file AUTHORS=file:COMMON_OWNERS
diff --git a/deps/v8/PRESUBMIT.py b/deps/v8/PRESUBMIT.py
index 201bf55f71..67986d8303 100644
--- a/deps/v8/PRESUBMIT.py
+++ b/deps/v8/PRESUBMIT.py
@@ -32,6 +32,7 @@ for more details about the presubmit API built into gcl.
"""
import json
+import os
import re
import sys
@@ -134,8 +135,68 @@ def _CheckUnwantedDependencies(input_api, output_api):
# Restore sys.path to what it was before.
sys.path = original_sys_path
+ def _FilesImpactedByDepsChange(files):
+ all_files = [f.AbsoluteLocalPath() for f in files]
+ deps_files = [p for p in all_files if IsDepsFile(p)]
+ impacted_files = union([_CollectImpactedFiles(path) for path in deps_files])
+ impacted_file_objs = [ImpactedFile(path) for path in impacted_files]
+ return impacted_file_objs
+
+ def IsDepsFile(p):
+ return os.path.isfile(p) and os.path.basename(p) == 'DEPS'
+
+ def union(list_of_lists):
+ """Ensure no duplicates"""
+ return set(sum(list_of_lists, []))
+
+ def _CollectImpactedFiles(deps_file):
+ # TODO(liviurau): Do not walk paths twice. Then we have no duplicates.
+ # Higher level DEPS changes may dominate lower level DEPS changes.
+ # TODO(liviurau): Check if DEPS changed in the right way.
+ # 'include_rules' impact c++ files but 'vars' or 'deps' do not.
+ # Maybe we just eval both old and new DEPS content and check
+ # if the list are the same.
+ result = []
+ parent_dir = os.path.dirname(deps_file)
+ for relative_f in input_api.change.AllFiles(parent_dir):
+ abs_f = os.path.join(parent_dir, relative_f)
+ if CppChecker.IsCppFile(abs_f):
+ result.append(abs_f)
+ return result
+
+ class ImpactedFile(object):
+ """Duck type version of AffectedFile needed to check files under directories
+ where a DEPS file changed. Extend the interface along the line of
+ AffectedFile if you need it for other checks."""
+
+ def __init__(self, path):
+ self._path = path
+
+ def LocalPath(self):
+ path = self._path.replace(os.sep, '/')
+ return os.path.normpath(path)
+
+ def ChangedContents(self):
+ with open(self._path) as f:
+ # TODO(liviurau): read only '#include' lines
+ lines = f.readlines()
+ return enumerate(lines, start=1)
+
+ def _FilterDuplicates(impacted_files, affected_files):
+ """"We include all impacted files but exclude affected files that are also
+ impacted. Files impacted by DEPS changes take precedence before files
+ affected by direct changes."""
+ result = impacted_files[:]
+ only_paths = set([imf.LocalPath() for imf in impacted_files])
+ for af in affected_files:
+ if not af.LocalPath() in only_paths:
+ result.append(af)
+ return result
+
added_includes = []
- for f in input_api.AffectedFiles():
+ affected_files = input_api.AffectedFiles()
+ impacted_by_deps = _FilesImpactedByDepsChange(affected_files)
+ for f in _FilterDuplicates(impacted_by_deps, affected_files):
if not CppChecker.IsCppFile(f.LocalPath()):
continue
@@ -301,39 +362,43 @@ def _CheckNoProductionCodeUsingTestOnlyFunctions(input_api, output_api):
return []
+def _CheckGenderNeutralInLicenses(input_api, output_api):
+ # License files are taken as is, even if they include gendered pronouns.
+ def LicenseFilter(path):
+ return input_api.FilterSourceFile(path, black_list=_LICENSE_FILE)
+
+ return input_api.canned_checks.CheckGenderNeutral(
+ input_api, output_api, source_file_filter=LicenseFilter)
+
+
+def _RunTestsWithVPythonSpec(input_api, output_api):
+ return input_api.RunTests(
+ input_api.canned_checks.CheckVPythonSpec(input_api, output_api))
+
+
def _CommonChecks(input_api, output_api):
"""Checks common to both upload and commit."""
- results = []
# TODO(machenbach): Replace some of those checks, e.g. owners and copyright,
# with the canned PanProjectChecks. Need to make sure that the checks all
# pass on all existing files.
- results.extend(input_api.canned_checks.CheckOwnersFormat(
- input_api, output_api))
- results.extend(input_api.canned_checks.CheckOwners(
- input_api, output_api))
- results.extend(_CheckCommitMessageBugEntry(input_api, output_api))
- results.extend(input_api.canned_checks.CheckPatchFormatted(
- input_api, output_api))
-
- # License files are taken as is, even if they include gendered pronouns.
- license_filter = lambda path: input_api.FilterSourceFile(
- path, black_list=_LICENSE_FILE)
- results.extend(input_api.canned_checks.CheckGenderNeutral(
- input_api, output_api, source_file_filter=license_filter))
-
- results.extend(_V8PresubmitChecks(input_api, output_api))
- results.extend(_CheckUnwantedDependencies(input_api, output_api))
- results.extend(
- _CheckNoProductionCodeUsingTestOnlyFunctions(input_api, output_api))
- results.extend(_CheckHeadersHaveIncludeGuards(input_api, output_api))
- results.extend(
- _CheckNoInlineHeaderIncludesInNormalHeaders(input_api, output_api))
- results.extend(_CheckJSONFiles(input_api, output_api))
- results.extend(_CheckMacroUndefs(input_api, output_api))
- results.extend(_CheckNoexceptAnnotations(input_api, output_api))
- results.extend(input_api.RunTests(
- input_api.canned_checks.CheckVPythonSpec(input_api, output_api)))
- return results
+ checks = [
+ input_api.canned_checks.CheckOwnersFormat,
+ input_api.canned_checks.CheckOwners,
+ _CheckCommitMessageBugEntry,
+ input_api.canned_checks.CheckPatchFormatted,
+ _CheckGenderNeutralInLicenses,
+ _V8PresubmitChecks,
+ _CheckUnwantedDependencies,
+ _CheckNoProductionCodeUsingTestOnlyFunctions,
+ _CheckHeadersHaveIncludeGuards,
+ _CheckNoInlineHeaderIncludesInNormalHeaders,
+ _CheckJSONFiles,
+ _CheckMacroUndefs,
+ _CheckNoexceptAnnotations,
+ _RunTestsWithVPythonSpec,
+ ]
+
+ return sum([check(input_api, output_api) for check in checks], [])
def _SkipTreeCheck(input_api, output_api):
@@ -395,7 +460,7 @@ def _CheckMacroUndefs(input_api, output_api):
"""
Checks that each #define in a .cc file is eventually followed by an #undef.
- TODO(clemensh): This check should eventually be enabled for all cc files via
+ TODO(clemensb): This check should eventually be enabled for all cc files via
tools/presubmit.py (https://crbug.com/v8/6811).
"""
def FilterFile(affected_file):
@@ -404,13 +469,29 @@ def _CheckMacroUndefs(input_api, output_api):
white_list = (r'.+\.cc',r'.+\.cpp',r'.+\.c')
return input_api.FilterSourceFile(affected_file, white_list=white_list)
+ def Touches(line):
+ return line.startswith('+') or line.startswith('-')
+
+ def InvolvesMacros(text):
+ return define_pattern.match(text) or undef_pattern.match(text)
+
def TouchesMacros(f):
- for line in f.GenerateScmDiff().splitlines():
- if not line.startswith('+') and not line.startswith('-'):
- continue
- if define_pattern.match(line[1:]) or undef_pattern.match(line[1:]):
- return True
- return False
+ return any(Touches(line) and InvolvesMacros(line[1:])
+ for line in f.GenerateScmDiff().splitlines())
+
+ def CollectUndefsWithNoDef(defined_macros, errors, f, line, line_nr):
+ define_match = define_pattern.match(line)
+ if define_match:
+ name = define_match.group(1)
+ defined_macros[name] = line_nr
+ undef_match = undef_pattern.match(line)
+ if undef_match and "// NOLINT" not in line:
+ name = undef_match.group(1)
+ if name in defined_macros:
+ del defined_macros[name]
+ else:
+ errors.append('{}:{}: Macro named \'{}\' was not defined before.'
+ .format(f.LocalPath(), line_nr, name))
define_pattern = input_api.re.compile(r'#define (\w+)')
undef_pattern = input_api.re.compile(r'#undef (\w+)')
@@ -422,25 +503,9 @@ def _CheckMacroUndefs(input_api, output_api):
defined_macros = dict()
with open(f.LocalPath()) as fh:
- line_nr = 0
- for line in fh:
- line_nr += 1
-
- define_match = define_pattern.match(line)
- if define_match:
- name = define_match.group(1)
- defined_macros[name] = line_nr
-
- undef_match = undef_pattern.match(line)
- if undef_match:
- if "// NOLINT" in line:
- continue
- name = undef_match.group(1)
- if not name in defined_macros:
- errors.append('{}:{}: Macro named \'{}\' was not defined before.'
- .format(f.LocalPath(), line_nr, name))
- else:
- del defined_macros[name]
+ for line_nr, line in enumerate(fh, start=1):
+ CollectUndefsWithNoDef(defined_macros, errors, f, line, line_nr)
+
for name, line_nr in sorted(defined_macros.items(), key=lambda e: e[1]):
errors.append('{}:{}: Macro missing #undef: {}'
.format(f.LocalPath(), line_nr, name))
@@ -463,7 +528,7 @@ def _CheckNoexceptAnnotations(input_api, output_api):
Omitting it at some places can result in weird compiler errors if this is
mixed with other classes that have the annotation.
- TODO(clemensh): This check should eventually be enabled for all files via
+ TODO(clemensb): This check should eventually be enabled for all files via
tools/presubmit.py (https://crbug.com/v8/8616).
"""
diff --git a/deps/v8/gni/v8.gni b/deps/v8/gni/v8.gni
index e55c4cf346..2644dea36b 100644
--- a/deps/v8/gni/v8.gni
+++ b/deps/v8/gni/v8.gni
@@ -35,15 +35,6 @@ declare_args() {
# on platform and embedder level.
v8_enable_raw_heap_snapshots = false
- # Enable the snapshot feature, for fast context creation.
- # https://v8.dev/blog/custom-startup-snapshots
- # TODO(thakis): Make snapshots work in 64-bit win/cross builds,
- # https://803591
- # On Mac hosts, 32-bit builds targeting Windows can't use snapshots, see
- # https://crbug.com/794838
- v8_use_snapshot = !(is_win && host_os != "win" && target_cpu == "x64") &&
- !(is_win && host_os == "mac" && target_cpu == "x86")
-
# Enable several snapshots side-by-side (e.g. default and for trusted code).
v8_use_multi_snapshots = false
@@ -71,8 +62,8 @@ declare_args() {
if (v8_use_external_startup_data == "") {
# If not specified as a gn arg, use external startup data by default if
- # a snapshot is used and if we're not on ios.
- v8_use_external_startup_data = v8_use_snapshot && !is_ios
+ # we're not on ios.
+ v8_use_external_startup_data = !is_ios
}
if (v8_use_multi_snapshots) {
@@ -213,9 +204,17 @@ template("v8_executable") {
template("v8_component") {
component(target_name) {
- forward_variables_from(invoker, "*", [ "configs" ])
+ forward_variables_from(invoker,
+ "*",
+ [
+ "configs",
+ "remove_configs",
+ ])
configs -= v8_remove_configs
configs += v8_add_configs
+ if (defined(invoker.remove_configs)) {
+ configs -= invoker.remove_configs
+ }
configs += invoker.configs
}
}
diff --git a/deps/v8/include/OWNERS b/deps/v8/include/OWNERS
index b64069847b..1e0794df7a 100644
--- a/deps/v8/include/OWNERS
+++ b/deps/v8/include/OWNERS
@@ -1,6 +1,7 @@
adamk@chromium.org
danno@chromium.org
ulan@chromium.org
+verwaest@chromium.org
yangguo@chromium.org
per-file *DEPS=file:../COMMON_OWNERS
diff --git a/deps/v8/include/js_protocol.pdl b/deps/v8/include/js_protocol.pdl
index c4ff51b060..51f3c6f68a 100644
--- a/deps/v8/include/js_protocol.pdl
+++ b/deps/v8/include/js_protocol.pdl
@@ -227,6 +227,15 @@ domain Debugger
# Script source.
string scriptSource
+ # Returns bytecode for the WebAssembly script with given id.
+ command getWasmBytecode
+ parameters
+ # Id of the Wasm script to get source for.
+ Runtime.ScriptId scriptId
+ returns
+ # Script source.
+ binary bytecode
+
# Returns stack trace with given `stackTraceId`.
experimental command getStackTrace
parameters
@@ -237,7 +246,7 @@ domain Debugger
# Stops on the next JavaScript statement.
command pause
- experimental command pauseOnAsyncCall
+ experimental deprecated command pauseOnAsyncCall
parameters
# Debugger will pause when async call with given stack trace is started.
Runtime.StackTraceId parentStackTraceId
@@ -435,7 +444,7 @@ domain Debugger
# Steps into the function call.
command stepInto
parameters
- # Debugger will issue additional Debugger.paused notification if any async task is scheduled
+ # Debugger will pause on the execution of the first async task which was scheduled
# before next pause.
experimental optional boolean breakOnAsyncCall
@@ -479,9 +488,8 @@ domain Debugger
optional Runtime.StackTrace asyncStackTrace
# Async stack trace, if any.
experimental optional Runtime.StackTraceId asyncStackTraceId
- # Just scheduled async call will have this stack trace as parent stack during async execution.
- # This field is available only after `Debugger.stepInto` call with `breakOnAsynCall` flag.
- experimental optional Runtime.StackTraceId asyncCallStackTraceId
+ # Never present, will be removed.
+ experimental deprecated optional Runtime.StackTraceId asyncCallStackTraceId
# Fired when the virtual machine resumed execution.
event resumed
@@ -1243,9 +1251,12 @@ domain Runtime
# resolved.
optional boolean awaitPromise
# Whether to throw an exception if side effect cannot be ruled out during evaluation.
+ # This implies `disableBreaks` below.
experimental optional boolean throwOnSideEffect
# Terminate execution after timing out (number of milliseconds).
experimental optional TimeDelta timeout
+ # Disable breakpoints during execution.
+ experimental optional boolean disableBreaks
returns
# Evaluation result.
RemoteObject result
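The js_protocol.pdl hunks above add a Debugger.getWasmBytecode command and a disableBreaks option for side-effect-free evaluation. A minimal sketch of driving the new command from an embedder follows; it assumes an already-connected V8InspectorSession and a hand-built JSON-RPC message (a real embedder would use its own protocol encoder), and the `binary` return type is typically carried base64-encoded over JSON transports.

    #include <cstdint>
    #include <string>
    #include "v8-inspector.h"

    // Sketch: issue the new Debugger.getWasmBytecode command over an existing
    // inspector session. The session and message id are assumptions; the
    // response arrives asynchronously on the session's channel.
    void RequestWasmBytecode(v8_inspector::V8InspectorSession* session,
                             const std::string& script_id) {
      std::string msg =
          "{\"id\":1,\"method\":\"Debugger.getWasmBytecode\","
          "\"params\":{\"scriptId\":\"" + script_id + "\"}}";
      session->dispatchProtocolMessage(v8_inspector::StringView(
          reinterpret_cast<const uint8_t*>(msg.data()), msg.size()));
    }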
diff --git a/deps/v8/include/libplatform/libplatform.h b/deps/v8/include/libplatform/libplatform.h
index 6908aeaa88..18d585d6d9 100644
--- a/deps/v8/include/libplatform/libplatform.h
+++ b/deps/v8/include/libplatform/libplatform.h
@@ -5,6 +5,8 @@
#ifndef V8_LIBPLATFORM_LIBPLATFORM_H_
#define V8_LIBPLATFORM_LIBPLATFORM_H_
+#include <memory>
+
#include "libplatform/libplatform-export.h"
#include "libplatform/v8-tracing.h"
#include "v8-platform.h" // NOLINT(build/include)
@@ -70,11 +72,10 @@ V8_PLATFORM_EXPORT void RunIdleTasks(v8::Platform* platform,
* The |platform| has to be created using |NewDefaultPlatform|.
*
*/
-V8_PLATFORM_EXPORT V8_DEPRECATE_SOON(
- "Access the DefaultPlatform directly",
- void SetTracingController(
- v8::Platform* platform,
- v8::platform::tracing::TracingController* tracing_controller));
+V8_DEPRECATE_SOON("Access the DefaultPlatform directly")
+V8_PLATFORM_EXPORT void SetTracingController(
+ v8::Platform* platform,
+ v8::platform::tracing::TracingController* tracing_controller);
} // namespace platform
} // namespace v8
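The change above is mechanical: V8_DEPRECATE_SOON no longer takes the whole declaration as a macro argument; it now takes only the message and expands to a standalone attribute placed in front of the declaration. A simplified sketch of the pattern (the real definitions in v8config.h are additionally guarded by V8_DEPRECATION_WARNINGS and per-compiler checks):

    // New shape: attribute-style macro taking only the message.
    #define V8_DEPRECATE_SOON(message) [[deprecated(message)]]

    // Old call site: declaration passed as second macro argument.
    //   V8_DEPRECATE_SOON("Access the DefaultPlatform directly",
    //                     void SetTracingController(...));
    // New call site: attribute stands alone before the declaration.
    V8_DEPRECATE_SOON("Access the DefaultPlatform directly")
    void SetTracingController();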
diff --git a/deps/v8/include/v8-inspector.h b/deps/v8/include/v8-inspector.h
index cfa2aaba96..5f53f21d55 100644
--- a/deps/v8/include/v8-inspector.h
+++ b/deps/v8/include/v8-inspector.h
@@ -24,6 +24,7 @@ namespace Runtime {
namespace API {
class RemoteObject;
class StackTrace;
+class StackTraceId;
}
}
namespace Schema {
@@ -229,12 +230,20 @@ class V8_EXPORT V8InspectorClient {
struct V8_EXPORT V8StackTraceId {
uintptr_t id;
std::pair<int64_t, int64_t> debugger_id;
+ bool should_pause = false;
V8StackTraceId();
+ V8StackTraceId(const V8StackTraceId&) = default;
V8StackTraceId(uintptr_t id, const std::pair<int64_t, int64_t> debugger_id);
+ V8StackTraceId(uintptr_t id, const std::pair<int64_t, int64_t> debugger_id,
+ bool should_pause);
+ explicit V8StackTraceId(const StringView&);
+ V8StackTraceId& operator=(const V8StackTraceId&) = default;
+ V8StackTraceId& operator=(V8StackTraceId&&) noexcept = default;
~V8StackTraceId() = default;
bool IsInvalid() const;
+ std::unique_ptr<StringBuffer> ToString();
};
class V8_EXPORT V8Inspector {
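V8StackTraceId gains a should_pause flag plus string round-tripping: ToString() serializes the id and the new explicit V8StackTraceId(const StringView&) constructor reconstructs it, e.g. to hand async stack trace ids across processes. A minimal sketch using only those declarations; the surrounding plumbing is assumed:

    #include <memory>
    #include "v8-inspector.h"

    // Serialize a stack trace id and rebuild it from the string form.
    v8_inspector::V8StackTraceId RoundTrip(v8_inspector::V8StackTraceId id) {
      std::unique_ptr<v8_inspector::StringBuffer> buffer = id.ToString();
      return v8_inspector::V8StackTraceId(buffer->string());
    }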
diff --git a/deps/v8/include/v8-internal.h b/deps/v8/include/v8-internal.h
index 6ecddf45d6..29f391b673 100644
--- a/deps/v8/include/v8-internal.h
+++ b/deps/v8/include/v8-internal.h
@@ -112,6 +112,8 @@ using PlatformSmiTagging = SmiTagging<kApiInt32Size>;
using PlatformSmiTagging = SmiTagging<kApiTaggedSize>;
#endif
+// TODO(ishell): Consider adding kSmiShiftBits = kSmiShiftSize + kSmiTagSize
+// since it's used much more often than the individual constants.
const int kSmiShiftSize = PlatformSmiTagging::kSmiShiftSize;
const int kSmiValueSize = PlatformSmiTagging::kSmiValueSize;
const int kSmiMinValue = static_cast<int>(PlatformSmiTagging::kSmiMinValue);
@@ -327,14 +329,11 @@ class Internals {
#ifdef V8_COMPRESS_POINTERS
// See v8:7703 or src/ptr-compr.* for details about pointer compression.
static constexpr size_t kPtrComprHeapReservationSize = size_t{1} << 32;
- static constexpr size_t kPtrComprIsolateRootBias =
- kPtrComprHeapReservationSize / 2;
static constexpr size_t kPtrComprIsolateRootAlignment = size_t{1} << 32;
V8_INLINE static internal::Address GetRootFromOnHeapAddress(
internal::Address addr) {
- return (addr + kPtrComprIsolateRootBias) &
- -static_cast<intptr_t>(kPtrComprIsolateRootAlignment);
+ return addr & -static_cast<intptr_t>(kPtrComprIsolateRootAlignment);
}
V8_INLINE static internal::Address DecompressTaggedAnyField(
@@ -381,6 +380,10 @@ V8_EXPORT internal::Isolate* IsolateFromNeverReadOnlySpaceObject(Address obj);
// language mode is strict.
V8_EXPORT bool ShouldThrowOnError(v8::internal::Isolate* isolate);
+// A base class for backing stores, which is needed due to vagaries of
+// how static casts work with std::shared_ptr.
+class BackingStoreBase {};
+
} // namespace internal
} // namespace v8
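The pointer-compression hunk drops the half-reservation bias: the isolate root is now the base of the 4 GB, 4-GB-aligned reservation, so it can be recovered from any on-heap address with a single mask. A standalone arithmetic sketch (local names, not V8's):

    #include <cstdint>

    constexpr uint64_t kAlignment = uint64_t{1} << 32;  // 4 GB

    // Clearing the low 32 bits yields the reservation base; this mirrors
    // addr & -kPtrComprIsolateRootAlignment in the patched header.
    uint64_t GetRootFromOnHeapAddress(uint64_t addr) {
      return addr & ~(kAlignment - 1);
    }

    // Example: 0x0000012389ABCDEF -> 0x0000012300000000.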
diff --git a/deps/v8/include/v8-platform.h b/deps/v8/include/v8-platform.h
index b707fafc49..c6e78f2381 100644
--- a/deps/v8/include/v8-platform.h
+++ b/deps/v8/include/v8-platform.h
@@ -367,9 +367,8 @@ class Platform {
* |isolate|. Tasks posted for the same isolate should be executed in order of
* scheduling. The definition of "foreground" is opaque to V8.
*/
- V8_DEPRECATE_SOON(
- "Use a taskrunner acquired by GetForegroundTaskRunner instead.",
- virtual void CallOnForegroundThread(Isolate* isolate, Task* task)) = 0;
+ V8_DEPRECATED("Use a taskrunner acquired by GetForegroundTaskRunner instead.")
+ virtual void CallOnForegroundThread(Isolate* isolate, Task* task) = 0;
/**
* Schedules a task to be invoked on a foreground thread wrt a specific
@@ -377,10 +376,9 @@ class Platform {
* Tasks posted for the same isolate should be executed in order of
* scheduling. The definition of "foreground" is opaque to V8.
*/
- V8_DEPRECATE_SOON(
- "Use a taskrunner acquired by GetForegroundTaskRunner instead.",
- virtual void CallDelayedOnForegroundThread(Isolate* isolate, Task* task,
- double delay_in_seconds)) = 0;
+ V8_DEPRECATED("Use a taskrunner acquired by GetForegroundTaskRunner instead.")
+ virtual void CallDelayedOnForegroundThread(Isolate* isolate, Task* task,
+ double delay_in_seconds) = 0;
/**
* Schedules a task to be invoked on a foreground thread wrt a specific
@@ -390,10 +388,8 @@ class Platform {
* starved for an arbitrarily long time if no idle time is available.
* The definition of "foreground" is opaque to V8.
*/
- V8_DEPRECATE_SOON(
- "Use a taskrunner acquired by GetForegroundTaskRunner instead.",
- virtual void CallIdleOnForegroundThread(Isolate* isolate,
- IdleTask* task)) {
+ V8_DEPRECATED("Use a taskrunner acquired by GetForegroundTaskRunner instead.")
+ virtual void CallIdleOnForegroundThread(Isolate* isolate, IdleTask* task) {
// This must be overridden if |IdleTasksEnabled()|.
abort();
}
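All three CallOnForegroundThread variants are now hard-deprecated in favor of task runners. A minimal replacement sketch, assuming an existing platform and isolate:

    #include <memory>
    #include "v8-platform.h"

    class HelloTask : public v8::Task {
     public:
      void Run() override { /* work bound to the isolate's thread */ }
    };

    void PostToForeground(v8::Platform* platform, v8::Isolate* isolate) {
      // Fetch the isolate's runner once, then post plain and delayed tasks.
      std::shared_ptr<v8::TaskRunner> runner =
          platform->GetForegroundTaskRunner(isolate);
      runner->PostTask(std::make_unique<HelloTask>());
      runner->PostDelayedTask(std::make_unique<HelloTask>(),
                              /*delay_in_seconds=*/1.0);
    }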
diff --git a/deps/v8/include/v8-profiler.h b/deps/v8/include/v8-profiler.h
index 360850b631..0e203511f5 100644
--- a/deps/v8/include/v8-profiler.h
+++ b/deps/v8/include/v8-profiler.h
@@ -6,8 +6,10 @@
#define V8_V8_PROFILER_H_
#include <limits.h>
+#include <memory>
#include <unordered_set>
#include <vector>
+
#include "v8.h" // NOLINT(build/include)
/**
@@ -143,9 +145,8 @@ class V8_EXPORT CpuProfileNode {
unsigned GetHitCount() const;
/** Returns function entry UID. */
- V8_DEPRECATE_SOON(
- "Use GetScriptId, GetLineNumber, and GetColumnNumber instead.",
- unsigned GetCallUid() const);
+ V8_DEPRECATED("Use GetScriptId, GetLineNumber, and GetColumnNumber instead.")
+ unsigned GetCallUid() const;
/** Returns id of the node. The id is unique within the tree */
unsigned GetNodeId() const;
@@ -371,20 +372,6 @@ class V8_EXPORT CpuProfiler {
CpuProfile* StopProfiling(Local<String> title);
/**
- * Force collection of a sample. Must be called on the VM thread.
- * Recording the forced sample does not contribute to the aggregated
- * profile statistics.
- */
- V8_DEPRECATED("Use static CollectSample(Isolate*) instead.",
- void CollectSample());
-
- /**
- * Tells the profiler whether the embedder is idle.
- */
- V8_DEPRECATED("Use Isolate::SetIdle(bool) instead.",
- void SetIdle(bool is_idle));
-
- /**
* Generate more detailed source positions to code objects. This results in
* better results when mapping profiling samples to script source.
*/
@@ -989,7 +976,8 @@ struct HeapStatsUpdate {
V(LazyCompile) \
V(RegExp) \
V(Script) \
- V(Stub)
+ V(Stub) \
+ V(Relocation)
/**
* Note that this enum may be extended in the future. Please include a default
@@ -1022,10 +1010,12 @@ class V8_EXPORT CodeEvent {
const char* GetComment();
static const char* GetCodeEventTypeName(CodeEventType code_event_type);
+
+ uintptr_t GetPreviousCodeStartAddress();
};
/**
- * Interface to listen to code creation events.
+ * Interface to listen to code creation and code relocation events.
*/
class V8_EXPORT CodeEventHandler {
public:
@@ -1037,9 +1027,26 @@ class V8_EXPORT CodeEventHandler {
explicit CodeEventHandler(Isolate* isolate);
virtual ~CodeEventHandler();
+ /**
+ * Handle is called every time a code object is created or moved. Information
+ * about each code event will be available through the `code_event`
+ * parameter.
+ *
+ * When the CodeEventType is kRelocationType, the code for this CodeEvent has
+ * moved from `GetPreviousCodeStartAddress()` to `GetCodeStartAddress()`.
+ */
virtual void Handle(CodeEvent* code_event) = 0;
+ /**
+ * Call `Enable()` to start listening to code creation and code relocation
+ * events. These events will be handled by `Handle()`.
+ */
void Enable();
+
+ /**
+ * Call `Disable()` to stop listening to code creation and code relocation
+ * events.
+ */
void Disable();
private:
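With the new V(Relocation) entry in the code-event list, Handle() now also fires when code moves, and GetPreviousCodeStartAddress() exposes the old location. A minimal listener sketch; an existing isolate is assumed:

    #include <cinttypes>
    #include <cstdio>
    #include "v8-profiler.h"

    class MoveLogger : public v8::CodeEventHandler {
     public:
      explicit MoveLogger(v8::Isolate* isolate)
          : v8::CodeEventHandler(isolate) {}

      void Handle(v8::CodeEvent* event) override {
        // For kRelocationType events, the code moved from the previous start
        // address to the current one.
        if (event->GetCodeType() == v8::CodeEventType::kRelocationType) {
          std::printf("code moved %" PRIxPTR " -> %" PRIxPTR "\n",
                      event->GetPreviousCodeStartAddress(),
                      event->GetCodeStartAddress());
        }
      }
    };
    // Usage: MoveLogger logger(isolate); logger.Enable(); ...; logger.Disable();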
diff --git a/deps/v8/include/v8-version.h b/deps/v8/include/v8-version.h
index ef90963d25..298b479329 100644
--- a/deps/v8/include/v8-version.h
+++ b/deps/v8/include/v8-version.h
@@ -9,9 +9,9 @@
// NOTE these macros are used by some of the tool scripts and the build
// system so their names cannot be changed without changing the scripts.
#define V8_MAJOR_VERSION 7
-#define V8_MINOR_VERSION 8
-#define V8_BUILD_NUMBER 279
-#define V8_PATCH_LEVEL 17
+#define V8_MINOR_VERSION 9
+#define V8_BUILD_NUMBER 317
+#define V8_PATCH_LEVEL 20
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
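Node's vendored copy moves from 7.8.279.17 to 7.9.317.20 here. Since the macros are plain integers, embedders can gate on them at compile time; an illustrative guard (the feature macro is hypothetical, not V8-provided):

    #include "v8-version.h"

    // Gate 7.9-only surface such as TracedReference behind a feature macro.
    #if V8_MAJOR_VERSION > 7 || (V8_MAJOR_VERSION == 7 && V8_MINOR_VERSION >= 9)
    #define EMBEDDER_HAS_TRACED_REFERENCE 1
    #else
    #define EMBEDDER_HAS_TRACED_REFERENCE 0
    #endif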
diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h
index 9d0b6a6c65..1387f74715 100644
--- a/deps/v8/include/v8.h
+++ b/deps/v8/include/v8.h
@@ -97,6 +97,10 @@ template <class T>
class Global;
template <class T>
class TracedGlobal;
+template <class T>
+class TracedReference;
+template <class T>
+class TracedReferenceBase;
template<class K, class V, class T> class PersistentValueMap;
template <class K, class V, class T>
class PersistentValueMapBase;
@@ -282,7 +286,8 @@ class Local {
V8_INLINE static Local<T> New(Isolate* isolate, Local<T> that);
V8_INLINE static Local<T> New(Isolate* isolate,
const PersistentBase<T>& that);
- V8_INLINE static Local<T> New(Isolate* isolate, const TracedGlobal<T>& that);
+ V8_INLINE static Local<T> New(Isolate* isolate,
+ const TracedReferenceBase<T>& that);
private:
friend class Utils;
@@ -312,7 +317,13 @@ class Local {
template <class F>
friend class ReturnValue;
template <class F>
+ friend class Traced;
+ template <class F>
friend class TracedGlobal;
+ template <class F>
+ friend class TracedReferenceBase;
+ template <class F>
+ friend class TracedReference;
explicit V8_INLINE Local(T* that) : val_(that) {}
V8_INLINE static Local<T> New(Isolate* isolate, T* that);
@@ -793,22 +804,10 @@ template <class T>
using UniquePersistent = Global<T>;
/**
- * Trait specifying behavior of |TracedGlobal<T>|.
+ * Deprecated. Use |TracedReference<T>| instead.
*/
template <typename T>
-struct TracedGlobalTrait {
- /**
- * Specifies whether |TracedGlobal<T>| should clear its handle on destruction.
- *
- * V8 will *not* clear the embedder-side memory of the handle. The embedder is
- * expected to report all |TracedGlobal<T>| handles through
- * |EmbedderHeapTracer| upon garabge collection.
- *
- * See |EmbedderHeapTracer::IsRootForNonTracingGC| for handling with
- * non-tracing GCs in V8.
- */
- static constexpr bool kRequiresExplicitDestruction = true;
-};
+struct TracedGlobalTrait {};
/**
* A traced handle with copy and move semantics. The handle is to be used
@@ -821,15 +820,131 @@ struct TracedGlobalTrait {
* |v8::EmbedderHeapTracer::IsRootForNonTracingGC()| whether the handle should
* be treated as root or not.
*
- * For destruction semantics see |TracedGlobalTrait<T>|.
+ * Note that the base class cannot be instantiated itself. Choose from
+ * - TracedGlobal
+ * - TracedReference
*/
template <typename T>
-class TracedGlobal {
+class TracedReferenceBase {
public:
/**
+ * Returns true if this TracedReferenceBase is empty, i.e., has not been
+ * assigned an object.
+ */
+ bool IsEmpty() const { return val_ == nullptr; }
+
+ /**
+ * If non-empty, destroy the underlying storage cell. |IsEmpty| will return
+ * true after this call.
+ */
+ V8_INLINE void Reset();
+
+ /**
+ * Construct a Local<T> from this handle.
+ */
+ Local<T> Get(Isolate* isolate) const { return Local<T>::New(isolate, *this); }
+
+ template <class S>
+ V8_INLINE bool operator==(const TracedReferenceBase<S>& that) const {
+ internal::Address* a = reinterpret_cast<internal::Address*>(val_);
+ internal::Address* b = reinterpret_cast<internal::Address*>(that.val_);
+ if (a == nullptr) return b == nullptr;
+ if (b == nullptr) return false;
+ return *a == *b;
+ }
+
+ template <class S>
+ V8_INLINE bool operator==(const Local<S>& that) const {
+ internal::Address* a = reinterpret_cast<internal::Address*>(val_);
+ internal::Address* b = reinterpret_cast<internal::Address*>(that.val_);
+ if (a == nullptr) return b == nullptr;
+ if (b == nullptr) return false;
+ return *a == *b;
+ }
+
+ template <class S>
+ V8_INLINE bool operator!=(const TracedReferenceBase<S>& that) const {
+ return !operator==(that);
+ }
+
+ template <class S>
+ V8_INLINE bool operator!=(const Local<S>& that) const {
+ return !operator==(that);
+ }
+
+ /**
+ * Assigns a wrapper class ID to the handle.
+ */
+ V8_INLINE void SetWrapperClassId(uint16_t class_id);
+
+ /**
+ * Returns the class ID previously assigned to this handle or 0 if no class ID
+ * was previously assigned.
+ */
+ V8_INLINE uint16_t WrapperClassId() const;
+
+ /**
+ * Adds a finalization callback to the handle. The type of this callback is
+ * similar to WeakCallbackType::kInternalFields, i.e., it will pass the
+ * parameter and the first two internal fields of the object.
+ *
+ * The callback is then supposed to reset the handle. No
+ * further V8 API may be called in this callback. In case additional work
+ * involving V8 needs to be done, a second callback can be scheduled using
+ * WeakCallbackInfo<void>::SetSecondPassCallback.
+ */
+ V8_INLINE void SetFinalizationCallback(
+ void* parameter, WeakCallbackInfo<void>::Callback callback);
+
+ template <class S>
+ V8_INLINE TracedReferenceBase<S>& As() const {
+ return reinterpret_cast<TracedReferenceBase<S>&>(
+ const_cast<TracedReferenceBase<T>&>(*this));
+ }
+
+ private:
+ enum DestructionMode { kWithDestructor, kWithoutDestructor };
+
+ /**
+ * An empty TracedReferenceBase without storage cell.
+ */
+ TracedReferenceBase() = default;
+
+ V8_INLINE static T* New(Isolate* isolate, T* that, void* slot,
+ DestructionMode destruction_mode);
+
+ T* val_ = nullptr;
+
+ friend class EmbedderHeapTracer;
+ template <typename F>
+ friend class Local;
+ friend class Object;
+ template <typename F>
+ friend class TracedGlobal;
+ template <typename F>
+ friend class TracedReference;
+ template <typename F>
+ friend class ReturnValue;
+};
+
+/**
+ * A traced handle with a destructor that clears the handle. For more details see
+ * TracedReferenceBase.
+ */
+template <typename T>
+class TracedGlobal : public TracedReferenceBase<T> {
+ public:
+ using TracedReferenceBase<T>::Reset;
+
+ /**
+ * Destructor resetting the handle.
+ */
+ ~TracedGlobal() { this->Reset(); }
+
+ /**
* An empty TracedGlobal without storage cell.
*/
- TracedGlobal() = default;
+ TracedGlobal() : TracedReferenceBase<T>() {}
/**
* Construct a TracedGlobal from a Local.
@@ -838,8 +953,9 @@ class TracedGlobal {
* pointing to the same object.
*/
template <class S>
- TracedGlobal(Isolate* isolate, Local<S> that)
- : val_(New(isolate, *that, &val_)) {
+ TracedGlobal(Isolate* isolate, Local<S> that) : TracedReferenceBase<T>() {
+ this->val_ = this->New(isolate, that.val_, &this->val_,
+ TracedReferenceBase<T>::kWithDestructor);
TYPE_CHECK(T, S);
}
@@ -906,121 +1022,126 @@ class TracedGlobal {
V8_INLINE TracedGlobal& operator=(const TracedGlobal<S>& rhs);
/**
- * Returns true if this TracedGlobal is empty, i.e., has not been assigned an
- * object.
- */
- bool IsEmpty() const { return val_ == nullptr; }
-
- /**
- * If non-empty, destroy the underlying storage cell. |IsEmpty| will return
- * true after this call.
- */
- V8_INLINE void Reset();
-
- /**
* If non-empty, destroy the underlying storage cell and create a new one with
* the contents of other if other is non empty
*/
template <class S>
V8_INLINE void Reset(Isolate* isolate, const Local<S>& other);
- /**
- * Construct a Local<T> from this handle.
- */
- Local<T> Get(Isolate* isolate) const { return Local<T>::New(isolate, *this); }
-
template <class S>
V8_INLINE TracedGlobal<S>& As() const {
return reinterpret_cast<TracedGlobal<S>&>(
const_cast<TracedGlobal<T>&>(*this));
}
+};
- template <class S>
- V8_INLINE bool operator==(const TracedGlobal<S>& that) const {
- internal::Address* a = reinterpret_cast<internal::Address*>(**this);
- internal::Address* b = reinterpret_cast<internal::Address*>(*that);
- if (a == nullptr) return b == nullptr;
- if (b == nullptr) return false;
- return *a == *b;
- }
+/**
+ * A traced handle without a destructor that clears the handle. The embedder needs
+ * to ensure that the handle is not accessed once the V8 object has been
+ * reclaimed. This can happen when the handle is not passed through the
+ * EmbedderHeapTracer. For more details see TracedReferenceBase.
+ */
+template <typename T>
+class TracedReference : public TracedReferenceBase<T> {
+ public:
+ using TracedReferenceBase<T>::Reset;
+ /**
+ * An empty TracedReference without storage cell.
+ */
+ TracedReference() : TracedReferenceBase<T>() {}
+
+ /**
+ * Construct a TracedReference from a Local.
+ *
+ * When the Local is non-empty, a new storage cell is created
+ * pointing to the same object.
+ */
template <class S>
- V8_INLINE bool operator==(const Local<S>& that) const {
- internal::Address* a = reinterpret_cast<internal::Address*>(**this);
- internal::Address* b = reinterpret_cast<internal::Address*>(*that);
- if (a == nullptr) return b == nullptr;
- if (b == nullptr) return false;
- return *a == *b;
+ TracedReference(Isolate* isolate, Local<S> that) : TracedReferenceBase<T>() {
+ this->val_ = this->New(isolate, that.val_, &this->val_,
+ TracedReferenceBase<T>::kWithoutDestructor);
+ TYPE_CHECK(T, S);
}
- template <class S>
- V8_INLINE bool operator!=(const TracedGlobal<S>& that) const {
- return !operator==(that);
+ /**
+ * Move constructor initializing TracedReference from an
+ * existing one.
+ */
+ V8_INLINE TracedReference(TracedReference&& other) {
+ // Forward to operator=.
+ *this = std::move(other);
}
- template <class S>
- V8_INLINE bool operator!=(const Local<S>& that) const {
- return !operator==(that);
+ /**
+ * Move constructor initializing TracedReference from an
+ * existing one.
+ */
+ template <typename S>
+ V8_INLINE TracedReference(TracedReference<S>&& other) {
+ // Forward to operator=.
+ *this = std::move(other);
}
/**
- * Assigns a wrapper class ID to the handle.
+ * Copy constructor initializing TracedReference from an
+ * existing one.
*/
- V8_INLINE void SetWrapperClassId(uint16_t class_id);
+ V8_INLINE TracedReference(const TracedReference& other) {
+ // Forward to operator=.
+ *this = other;
+ }
/**
- * Returns the class ID previously assigned to this handle or 0 if no class ID
- * was previously assigned.
+ * Copy constructor initializing TracedReference from an
+ * existing one.
*/
- V8_INLINE uint16_t WrapperClassId() const;
+ template <typename S>
+ V8_INLINE TracedReference(const TracedReference<S>& other) {
+ // Forward to operator=.
+ *this = other;
+ }
/**
- * Adds a finalization callback to the handle. The type of this callback is
- * similar to WeakCallbackType::kInternalFields, i.e., it will pass the
- * parameter and the first two internal fields of the object.
- *
- * The callback is then supposed to reset the handle in the callback. No
- * further V8 API may be called in this callback. In case additional work
- * involving V8 needs to be done, a second callback can be scheduled using
- * WeakCallbackInfo<void>::SetSecondPassCallback.
+ * Move assignment operator initializing TracedReference from an existing one.
*/
- V8_INLINE void SetFinalizationCallback(
- void* parameter, WeakCallbackInfo<void>::Callback callback);
+ V8_INLINE TracedReference& operator=(TracedReference&& rhs);
- private:
- // Wrapping type used when clearing on destruction is required.
- struct WrappedForDestruction {
- T* value;
-
- explicit WrappedForDestruction(T* val) : value(val) {}
- ~WrappedForDestruction();
- operator T*() const { return value; }
- T* operator*() const { return value; }
- T* operator->() const { return value; }
- WrappedForDestruction& operator=(const WrappedForDestruction& other) {
- value = other.value;
- return *this;
- }
- WrappedForDestruction& operator=(T* val) {
- value = val;
- return *this;
- }
- };
+ /**
+ * Move assignment operator initializing TracedReference from an existing one.
+ */
+ template <class S>
+ V8_INLINE TracedReference& operator=(TracedReference<S>&& rhs);
- V8_INLINE static T* New(Isolate* isolate, T* that, void* slot);
+ /**
+ * Copy assignment operator initializing TracedReference from an existing one.
+ *
+ * Note: Prohibited when |other| has a finalization callback set through
+ * |SetFinalizationCallback|.
+ */
+ V8_INLINE TracedReference& operator=(const TracedReference& rhs);
- T* operator*() const { return this->val_; }
+ /**
+ * Copy assignment operator initializing TracedReference from an existing one.
+ *
+ * Note: Prohibited when |other| has a finalization callback set through
+ * |SetFinalizationCallback|.
+ */
+ template <class S>
+ V8_INLINE TracedReference& operator=(const TracedReference<S>& rhs);
- typename std::conditional<
- TracedGlobalTrait<TracedGlobal<T>>::kRequiresExplicitDestruction,
- WrappedForDestruction, T*>::type val_{nullptr};
+ /**
+ * If non-empty, destroy the underlying storage cell and create a new one with
+ * the contents of other if other is non-empty.
+ */
+ template <class S>
+ V8_INLINE void Reset(Isolate* isolate, const Local<S>& other);
- friend class EmbedderHeapTracer;
- template <typename F>
- friend class Local;
- friend class Object;
- template <typename F>
- friend class ReturnValue;
+ template <class S>
+ V8_INLINE TracedReference<S>& As() const {
+ return reinterpret_cast<TracedReference<S>&>(
+ const_cast<TracedReference<T>&>(*this));
+ }
};
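The refactor splits the old TracedGlobal (and its TracedGlobalTrait destruction knob) into a shared TracedReferenceBase with two concrete handles: TracedGlobal, whose destructor resets the handle, and TracedReference, which has no clearing destructor and leaves lifetime entirely to the embedder's tracing. A hedged wrapper-object sketch using the new type:

    #include "v8.h"

    // A DOM-like wrapper node keeping a traced handle to its JS counterpart.
    // With TracedReference there is no clearing destructor, so the embedder
    // (typically via its EmbedderHeapTracer) must guarantee the handle is not
    // touched after V8 reclaims the object.
    struct WrapperNode {
      WrapperNode(v8::Isolate* isolate, v8::Local<v8::Object> js_object)
          : js_ref(isolate, js_object) {}

      v8::TracedReference<v8::Object> js_ref;
    };

    v8::Local<v8::Object> Unwrap(v8::Isolate* isolate, const WrapperNode& node) {
      return node.js_ref.Get(isolate);  // Rehydrate a Local from the handle.
    }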
/**
@@ -1154,9 +1275,8 @@ class V8_EXPORT SealHandleScope {
// --- Special objects ---
-
/**
- * The superclass of values and API object templates.
+ * The superclass of objects that can reside on V8's heap.
*/
class V8_EXPORT Data {
private:
@@ -1303,7 +1423,7 @@ class V8_EXPORT UnboundScript {
/**
* A compiled JavaScript module, not yet tied to a Context.
*/
-class V8_EXPORT UnboundModuleScript {
+class V8_EXPORT UnboundModuleScript : public Data {
// Only used as a container for code caching.
};
@@ -1326,7 +1446,7 @@ class V8_EXPORT Location {
/**
* A compiled JavaScript module.
*/
-class V8_EXPORT Module {
+class V8_EXPORT Module : public Data {
public:
/**
* The different states a module can be in.
@@ -1440,19 +1560,11 @@ class V8_EXPORT Module {
/**
* Set this module's exported value for the name export_name to the specified
* export_value. This method must be called only on Modules created via
- * CreateSyntheticModule. An error will be thrown if export_name is not one
- * of the export_names that were passed in that CreateSyntheticModule call.
- * Returns Just(true) on success, Nothing<bool>() if an error was thrown.
+ * CreateSyntheticModule. export_name must be one of the export_names that
+ * were passed in that CreateSyntheticModule call.
*/
- V8_WARN_UNUSED_RESULT Maybe<bool> SetSyntheticModuleExport(
- Isolate* isolate, Local<String> export_name, Local<Value> export_value);
- V8_DEPRECATE_SOON(
- "Use the preceding SetSyntheticModuleExport with an Isolate parameter, "
- "instead of the one that follows. The former will throw a runtime "
- "error if called for an export that doesn't exist (as per spec); "
- "the latter will crash with a failed CHECK().",
- void SetSyntheticModuleExport(Local<String> export_name,
- Local<Value> export_value));
+ void SetSyntheticModuleExport(Local<String> export_name,
+ Local<Value> export_value);
};
/**
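[Editor's note] With the Maybe-returning overload removed above, setting a synthetic export is a plain call that crash-checks on an unknown name. A hedged sketch, assuming the module came from CreateSyntheticModule with "answer" among its export_names:

    // Sketch: "answer" must have been listed in export_names when the
    // synthetic module was created, otherwise this call CHECK-fails.
    void ExportAnswer(v8::Isolate* isolate, v8::Local<v8::Module> module) {
      v8::Local<v8::String> name =
          v8::String::NewFromUtf8(isolate, "answer").ToLocalChecked();
      module->SetSyntheticModuleExport(name, v8::Number::New(isolate, 42));
    }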
@@ -1628,8 +1740,8 @@ class V8_EXPORT ScriptCompiler {
V8_DEPRECATE_SOON(
"This class takes ownership of source_stream, so use the constructor "
- "taking a unique_ptr to make these semantics clearer",
- StreamedSource(ExternalSourceStream* source_stream, Encoding encoding));
+ "taking a unique_ptr to make these semantics clearer")
+ StreamedSource(ExternalSourceStream* source_stream, Encoding encoding);
StreamedSource(std::unique_ptr<ExternalSourceStream> source_stream,
Encoding encoding);
~StreamedSource();
@@ -3405,7 +3517,7 @@ enum class IndexFilter { kIncludeIndices, kSkipIndices };
* kConvertToString will convert integer indices to strings.
* kKeepNumbers will return numbers for integer indices.
*/
-enum class KeyConversionMode { kConvertToString, kKeepNumbers };
+enum class KeyConversionMode { kConvertToString, kKeepNumbers, kNoNumbers };
/**
* Integrity level for objects.
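[Editor's note] The hunk above adds a kNoNumbers enumerator without documenting it; presumably it requests that integer indices are not reported back as numbers. A hedged sketch of passing it to GetOwnPropertyNames (this header already declares an overload taking a KeyConversionMode):

    // Sketch: collect own enumerable keys; the kNoNumbers semantics
    // assumed here are an interpretation, not documented in this hunk.
    v8::MaybeLocal<v8::Array> OwnKeys(v8::Local<v8::Context> context,
                                      v8::Local<v8::Object> object) {
      return object->GetOwnPropertyNames(
          context, v8::ONLY_ENUMERABLE, v8::KeyConversionMode::kNoNumbers);
    }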
@@ -3649,8 +3761,9 @@ class V8_EXPORT Object : public Value {
return object.val_->InternalFieldCount();
}
- /** Same as above, but works for TracedGlobal. */
- V8_INLINE static int InternalFieldCount(const TracedGlobal<Object>& object) {
+ /** Same as above, but works for TracedReferenceBase. */
+ V8_INLINE static int InternalFieldCount(
+ const TracedReferenceBase<Object>& object) {
return object.val_->InternalFieldCount();
}
@@ -3675,7 +3788,7 @@ class V8_EXPORT Object : public Value {
/** Same as above, but works for TracedGlobal. */
V8_INLINE static void* GetAlignedPointerFromInternalField(
- const TracedGlobal<Object>& object, int index) {
+ const TracedReferenceBase<Object>& object, int index) {
return object.val_->GetAlignedPointerFromInternalField(index);
}
@@ -3965,7 +4078,7 @@ class ReturnValue {
template <typename S>
V8_INLINE void Set(const Global<S>& handle);
template <typename S>
- V8_INLINE void Set(const TracedGlobal<S>& handle);
+ V8_INLINE void Set(const TracedReferenceBase<S>& handle);
template <typename S>
V8_INLINE void Set(const Local<S> handle);
// Fast primitive setters
@@ -4521,48 +4634,39 @@ class V8_EXPORT CompiledWasmModule {
// An instance of WebAssembly.Module.
class V8_EXPORT WasmModuleObject : public Object {
public:
+ WasmModuleObject() = delete;
+
/**
* An opaque, native heap object for transferring wasm modules. It
* supports move semantics, and does not support copy semantics.
- * TODO(wasm): Merge this with CompiledWasmModule once code sharing is always
- * enabled.
*/
- class TransferrableModule final {
- public:
- TransferrableModule(TransferrableModule&& src) = default;
- TransferrableModule(const TransferrableModule& src) = delete;
-
- TransferrableModule& operator=(TransferrableModule&& src) = default;
- TransferrableModule& operator=(const TransferrableModule& src) = delete;
-
- private:
- typedef std::shared_ptr<internal::wasm::NativeModule> SharedModule;
- friend class WasmModuleObject;
- explicit TransferrableModule(SharedModule shared_module)
- : shared_module_(std::move(shared_module)) {}
- TransferrableModule(OwnedBuffer serialized, OwnedBuffer bytes)
- : serialized_(std::move(serialized)), wire_bytes_(std::move(bytes)) {}
-
- SharedModule shared_module_;
- OwnedBuffer serialized_ = {nullptr, 0};
- OwnedBuffer wire_bytes_ = {nullptr, 0};
- };
+ using TransferrableModule V8_DEPRECATED("Use CompiledWasmModule directly") =
+ CompiledWasmModule;
/**
* Get an in-memory, non-persistable, and context-independent (meaning,
* suitable for transfer to another Isolate and Context) representation
* of this wasm compiled module.
*/
+ V8_DEPRECATED("Use GetCompiledModule")
TransferrableModule GetTransferrableModule();
/**
* Efficiently re-create a WasmModuleObject, without recompiling, from
* a TransferrableModule.
*/
+ V8_DEPRECATED("Use FromCompiledModule")
static MaybeLocal<WasmModuleObject> FromTransferrableModule(
Isolate* isolate, const TransferrableModule&);
/**
+ * Efficiently re-create a WasmModuleObject, without recompiling, from
+ * a CompiledWasmModule.
+ */
+ static MaybeLocal<WasmModuleObject> FromCompiledModule(
+ Isolate* isolate, const CompiledWasmModule&);
+
+ /**
* Get the compiled module for this module object. The compiled module can be
* shared by several module objects.
*/
@@ -4584,11 +4688,7 @@ class V8_EXPORT WasmModuleObject : public Object {
static MaybeLocal<WasmModuleObject> Compile(Isolate* isolate,
const uint8_t* start,
size_t length);
- static MemorySpan<const uint8_t> AsReference(const OwnedBuffer& buff) {
- return {buff.buffer.get(), buff.size};
- }
- WasmModuleObject();
static void CheckCast(Value* obj);
};
@@ -4720,6 +4820,33 @@ class V8_EXPORT WasmModuleObjectBuilderStreaming final {
enum class ArrayBufferCreationMode { kInternalized, kExternalized };
+/**
+ * A wrapper around the backing store (i.e. the raw memory) of an array buffer.
+ *
+ * The allocation and destruction of backing stores are generally managed by
+ * V8. Clients should always use standard C++ memory ownership types (i.e.
+ * std::unique_ptr and std::shared_ptr) to manage lifetimes of backing stores
+ * properly, since V8 internal objects may alias backing stores.
+ */
+class V8_EXPORT BackingStore : public v8::internal::BackingStoreBase {
+ public:
+ ~BackingStore();
+
+ /**
+ * Return a pointer to the beginning of the memory block for this backing
+ * store. The pointer is only valid as long as this backing store object
+ * lives.
+ */
+ void* Data() const;
+
+ /**
+ * The length (in bytes) of this backing store.
+ */
+ size_t ByteLength() const;
+
+ private:
+ BackingStore();
+};
/**
* An instance of the built-in ArrayBuffer constructor (ES6 draft 15.13.5).
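[Editor's note] Data() and ByteLength() are the only accessors on the new wrapper; everything else about the block stays under V8's control. A minimal sketch, valid for any shared_ptr obtained from the GetBackingStore() functions added below:

    #include <cstring>

    // Sketch: the raw pointer is only safe to use while some shared_ptr
    // to the BackingStore is still alive.
    void FillWithZeros(const std::shared_ptr<v8::BackingStore>& store) {
      std::memset(store->Data(), 0, store->ByteLength());
    }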
@@ -4857,6 +4984,21 @@ class V8_EXPORT ArrayBuffer : public Object {
ArrayBufferCreationMode mode = ArrayBufferCreationMode::kExternalized);
/**
+ * Create a new ArrayBuffer with an existing backing store.
+ * The created array keeps a reference to the backing store until the array
+ * is garbage collected. Note that the IsExternal bit does not affect this
+ * reference from the array to the backing store.
+ *
+ * In the future the IsExternal bit will be removed. Until then the bit is
+ * set as follows. If the backing store does not own the underlying buffer,
+ * the array is created in the externalized state. Otherwise, the array is
+ * created in the internalized state; in the latter case the array can be
+ * transitioned to the externalized state using Externalize(backing_store).
+ */
+ static Local<ArrayBuffer> New(Isolate* isolate,
+ std::shared_ptr<BackingStore> backing_store);
+
+ /**
* Returns true if ArrayBuffer is externalized, that is, does not
* own its memory block.
*/
@@ -4867,12 +5009,6 @@ class V8_EXPORT ArrayBuffer : public Object {
*/
bool IsDetachable() const;
- // TODO(913887): fix the use of 'neuter' in the API.
- V8_DEPRECATED("Use IsDetachable() instead.",
- inline bool IsNeuterable() const) {
- return IsDetachable();
- }
-
/**
* Detaches this ArrayBuffer and all its views (typed arrays).
* Detaching sets the byte length of the buffer and all typed arrays to zero,
@@ -4881,9 +5017,6 @@ class V8_EXPORT ArrayBuffer : public Object {
*/
void Detach();
- // TODO(913887): fix the use of 'neuter' in the API.
- V8_DEPRECATED("Use Detach() instead.", inline void Neuter()) { Detach(); }
-
/**
* Make this ArrayBuffer external. The pointer to underlying memory block
* and byte length are returned as |Contents| structure. After ArrayBuffer
@@ -4892,11 +5025,20 @@ class V8_EXPORT ArrayBuffer : public Object {
*
* The Data pointer of ArrayBuffer::Contents must be freed using the provided
* deleter, which will call ArrayBuffer::Allocator::Free if the buffer
- * was allocated with ArraryBuffer::Allocator::Allocate.
+ * was allocated with ArrayBuffer::Allocator::Allocate.
*/
Contents Externalize();
/**
+ * Marks this ArrayBuffer external given a witness that the embedder
+ * has fetched the backing store using the new GetBackingStore() function.
+ *
+ * With the new lifetime management of backing stores there is no need for
+ * externalizing, so this function exists only to make the transition easier.
+ */
+ void Externalize(const std::shared_ptr<BackingStore>& backing_store);
+
+ /**
* Get a pointer to the ArrayBuffer's underlying memory block without
* externalizing it. If the ArrayBuffer is not externalized, this pointer
* will become invalid as soon as the ArrayBuffer gets garbage collected.
@@ -4906,6 +5048,16 @@ class V8_EXPORT ArrayBuffer : public Object {
*/
Contents GetContents();
+ /**
+ * Get a shared pointer to the backing store of this array buffer. This
+ * pointer coordinates the lifetime management of the internal storage
+ * with any live ArrayBuffers on the heap, even across isolates. The embedder
+ * should not attempt to manage lifetime of the storage through other means.
+ *
+ * This function replaces both Externalize() and GetContents().
+ */
+ std::shared_ptr<BackingStore> GetBackingStore();
+
V8_INLINE static ArrayBuffer* Cast(Value* obj);
static const int kInternalFieldCount = V8_ARRAY_BUFFER_INTERNAL_FIELD_COUNT;
@@ -4914,6 +5066,7 @@ class V8_EXPORT ArrayBuffer : public Object {
private:
ArrayBuffer();
static void CheckCast(Value* obj);
+ Contents GetContents(bool externalize);
};
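[Editor's note] Taken together, GetBackingStore() and the new New(isolate, backing_store) overload let two ArrayBuffers alias one allocation, with the shared_ptr and both JS objects jointly keeping the memory alive. A minimal sketch using only the APIs added in this hunk:

    // Sketch: both the source and the returned buffer now reference the
    // same backing store; the memory lives until the last holder is gone.
    v8::Local<v8::ArrayBuffer> AliasBuffer(v8::Isolate* isolate,
                                           v8::Local<v8::ArrayBuffer> source) {
      std::shared_ptr<v8::BackingStore> store = source->GetBackingStore();
      return v8::ArrayBuffer::New(isolate, std::move(store));
    }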
@@ -5281,14 +5434,28 @@ class V8_EXPORT SharedArrayBuffer : public Object {
ArrayBufferCreationMode mode = ArrayBufferCreationMode::kExternalized);
/**
+ * Create a new SharedArrayBuffer with an existing backing store.
+ * The created array keeps a reference to the backing store until the array
+ * is garbage collected. Note that the IsExternal bit does not affect this
+ * reference from the array to the backing store.
+ *
+ * In the future the IsExternal bit will be removed. Until then the bit is
+ * set as follows. If the backing store does not own the underlying buffer,
+ * the array is created in the externalized state. Otherwise, the array is
+ * created in the internalized state; in the latter case the array can be
+ * transitioned to the externalized state using Externalize(backing_store).
+ */
+ static Local<SharedArrayBuffer> New(
+ Isolate* isolate, std::shared_ptr<BackingStore> backing_store);
+
+ /**
* Create a new SharedArrayBuffer over an existing memory block. Propagate
* flags to indicate whether the underlying buffer can be grown.
*/
- V8_DEPRECATED("Use New method with data, and byte_length instead.",
- static Local<SharedArrayBuffer> New(
- Isolate* isolate, const SharedArrayBuffer::Contents&,
- ArrayBufferCreationMode mode =
- ArrayBufferCreationMode::kExternalized));
+ V8_DEPRECATED("Use New method with data, and byte_length instead.")
+ static Local<SharedArrayBuffer> New(
+ Isolate* isolate, const SharedArrayBuffer::Contents&,
+ ArrayBufferCreationMode mode = ArrayBufferCreationMode::kExternalized);
/**
* Returns true if SharedArrayBuffer is externalized, that is, does not
@@ -5311,6 +5478,15 @@ class V8_EXPORT SharedArrayBuffer : public Object {
Contents Externalize();
/**
+ * Marks this SharedArrayBuffer external given a witness that the embedder
+ * has fetched the backing store using the new GetBackingStore() function.
+ *
+ * With the new lifetime management of backing stores there is no need for
+ * externalizing, so this function exists only to make the transition easier.
+ */
+ void Externalize(const std::shared_ptr<BackingStore>& backing_store);
+
+ /**
* Get a pointer to the ArrayBuffer's underlying memory block without
* externalizing it. If the ArrayBuffer is not externalized, this pointer
 * will become invalid as soon as the ArrayBuffer gets garbage collected.
@@ -5324,6 +5500,16 @@ class V8_EXPORT SharedArrayBuffer : public Object {
*/
Contents GetContents();
+ /**
+ * Get a shared pointer to the backing store of this array buffer. This
+ * pointer coordinates the lifetime management of the internal storage
+ * with any live ArrayBuffers on the heap, even across isolates. The embedder
+ * should not attempt to manage the lifetime of the storage through other
+ * means.
+ *
+ * This function replaces both Externalize() and GetContents().
+ */
+ std::shared_ptr<BackingStore> GetBackingStore();
+
V8_INLINE static SharedArrayBuffer* Cast(Value* obj);
static const int kInternalFieldCount = V8_ARRAY_BUFFER_INTERNAL_FIELD_COUNT;
@@ -5331,6 +5517,7 @@ class V8_EXPORT SharedArrayBuffer : public Object {
private:
SharedArrayBuffer();
static void CheckCast(Value* obj);
+ Contents GetContents(bool externalize);
};
@@ -6655,34 +6842,26 @@ class V8_EXPORT ResourceConstraints {
/**
* Deprecated functions. Do not use in new code.
*/
- V8_DEPRECATE_SOON("Use code_range_size_in_bytes.",
- size_t code_range_size() const) {
- return code_range_size_ / kMB;
- }
- V8_DEPRECATE_SOON("Use set_code_range_size_in_bytes.",
- void set_code_range_size(size_t limit_in_mb)) {
+ V8_DEPRECATE_SOON("Use code_range_size_in_bytes.")
+ size_t code_range_size() const { return code_range_size_ / kMB; }
+ V8_DEPRECATE_SOON("Use set_code_range_size_in_bytes.")
+ void set_code_range_size(size_t limit_in_mb) {
code_range_size_ = limit_in_mb * kMB;
}
- V8_DEPRECATE_SOON("Use max_young_generation_size_in_bytes.",
- size_t max_semi_space_size_in_kb() const);
- V8_DEPRECATE_SOON("Use set_max_young_generation_size_in_bytes.",
- void set_max_semi_space_size_in_kb(size_t limit_in_kb));
- V8_DEPRECATE_SOON("Use max_old_generation_size_in_bytes.",
- size_t max_old_space_size() const) {
- return max_old_generation_size_ / kMB;
- }
- V8_DEPRECATE_SOON("Use set_max_old_generation_size_in_bytes.",
- void set_max_old_space_size(size_t limit_in_mb)) {
+ V8_DEPRECATE_SOON("Use max_young_generation_size_in_bytes.")
+ size_t max_semi_space_size_in_kb() const;
+ V8_DEPRECATE_SOON("Use set_max_young_generation_size_in_bytes.")
+ void set_max_semi_space_size_in_kb(size_t limit_in_kb);
+ V8_DEPRECATE_SOON("Use max_old_generation_size_in_bytes.")
+ size_t max_old_space_size() const { return max_old_generation_size_ / kMB; }
+ V8_DEPRECATE_SOON("Use set_max_old_generation_size_in_bytes.")
+ void set_max_old_space_size(size_t limit_in_mb) {
max_old_generation_size_ = limit_in_mb * kMB;
}
- V8_DEPRECATE_SOON("Zone does not pool memory any more.",
- size_t max_zone_pool_size() const) {
- return max_zone_pool_size_;
- }
- V8_DEPRECATE_SOON("Zone does not pool memory any more.",
- void set_max_zone_pool_size(size_t bytes)) {
- max_zone_pool_size_ = bytes;
- }
+ V8_DEPRECATE_SOON("Zone does not pool memory any more.")
+ size_t max_zone_pool_size() const { return max_zone_pool_size_; }
+ V8_DEPRECATE_SOON("Zone does not pool memory any more.")
+ void set_max_zone_pool_size(size_t bytes) { max_zone_pool_size_ = bytes; }
private:
static constexpr size_t kMB = 1048576u;
@@ -6756,6 +6935,7 @@ enum class CrashKeyId {
kReadonlySpaceFirstPageAddress,
kMapSpaceFirstPageAddress,
kCodeSpaceFirstPageAddress,
+ kDumpType,
};
typedef void (*AddCrashKeyCallback)(CrashKeyId id, const std::string& value);
@@ -6875,6 +7055,7 @@ class PromiseRejectMessage {
typedef void (*PromiseRejectCallback)(PromiseRejectMessage message);
// --- Microtasks Callbacks ---
+V8_DEPRECATE_SOON("Use *WithData version.")
typedef void (*MicrotasksCompletedCallback)(Isolate*);
typedef void (*MicrotasksCompletedCallbackWithData)(Isolate*, void*);
typedef void (*MicrotaskCallback)(void* data);
@@ -7384,7 +7565,8 @@ class V8_EXPORT EmbedderHeapTracer {
class V8_EXPORT TracedGlobalHandleVisitor {
public:
virtual ~TracedGlobalHandleVisitor() = default;
- virtual void VisitTracedGlobalHandle(const TracedGlobal<Value>& value) = 0;
+ virtual void VisitTracedGlobalHandle(const TracedGlobal<Value>& handle) {}
+ virtual void VisitTracedReference(const TracedReference<Value>& handle) {}
};
/**
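[Editor's note] Since VisitTracedGlobalHandle is no longer pure virtual, an embedder's visitor overrides only the handle flavor it actually uses. A sketch (MyHandleVisitor is illustrative; handles are iterated via the tracer's IterateTracedGlobalHandles):

    // Sketch: only the TracedReference hook is overridden here.
    class MyHandleVisitor final
        : public v8::EmbedderHeapTracer::TracedGlobalHandleVisitor {
     public:
      void VisitTracedReference(
          const v8::TracedReference<v8::Value>& handle) final {
        if (handle.WrapperClassId() == 0x2a) {
          // ... treat as one of the embedder's wrappers ...
        }
      }
    };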
@@ -7422,13 +7604,14 @@ class V8_EXPORT EmbedderHeapTracer {
virtual void RegisterV8References(
const std::vector<std::pair<void*, void*> >& embedder_fields) = 0;
- void RegisterEmbedderReference(const TracedGlobal<v8::Value>& ref);
+ V8_DEPRECATE_SOON("Use version taking TracedReferenceBase<v8::Data> argument")
+ void RegisterEmbedderReference(const TracedReferenceBase<v8::Value>& ref);
+ void RegisterEmbedderReference(const TracedReferenceBase<v8::Data>& ref);
/**
* Called at the beginning of a GC cycle.
*/
- V8_DEPRECATED("Use version with flags.", virtual void TracePrologue()) {}
- virtual void TracePrologue(TraceFlags flags);
+ virtual void TracePrologue(TraceFlags flags) {}
/**
* Called to advance tracing in the embedder.
@@ -7455,8 +7638,7 @@ class V8_EXPORT EmbedderHeapTracer {
 * overridden to fill a |TraceSummary| that is used by V8 to schedule future
* garbage collections.
*/
- V8_DEPRECATE_SOON("Use version with parameter.",
- virtual void TraceEpilogue()) {}
+ V8_DEPRECATED("Use version with parameter.") virtual void TraceEpilogue() {}
virtual void TraceEpilogue(TraceSummary* trace_summary);
/**
@@ -7483,32 +7665,35 @@ class V8_EXPORT EmbedderHeapTracer {
*
* If this returns false, then V8 may decide that the object referred to by
* such a handle is reclaimed. In that case:
- * - No action is required if handles are used with destructors.
- * - When run without destructors (by specializing
- * |TracedGlobalTrait::kRequiresExplicitDestruction|) V8 calls
- * |ResetHandleInNonTracingGC|.
+ * - No action is required if handles are used with destructors, i.e., by just
+ * using |TracedGlobal|.
+ * - When run without destructors, i.e., by using
+ * |TracedReference|, V8 calls |ResetHandleInNonTracingGC|.
*
- * Note that the |handle| is different from the |TracedGlobal<T>| handle that
- * the embedder holds for retaining the object. The embedder may use
- * |TracedGlobal<T>::WrapperClassId()| to distinguish cases where it wants
- * handles to be treated as roots from not being treated as roots.
+ * Note that the |handle| is different from the handle that the embedder holds
+ * for retaining the object. The embedder may use |WrapperClassId()| to
+ * distinguish cases where it wants handles to be treated as roots from not
+ * being treated as roots.
*/
virtual bool IsRootForNonTracingGC(
- const v8::TracedGlobal<v8::Value>& handle) {
- return true;
- }
+ const v8::TracedReference<v8::Value>& handle);
+ virtual bool IsRootForNonTracingGC(const v8::TracedGlobal<v8::Value>& handle);
/**
* Used in combination with |IsRootForNonTracingGC|. Called by V8 when an
* object that is backed by a handle is reclaimed by a non-tracing garbage
* collection. It is up to the embedder to reset the original handle.
*
- * Note that the |handle| is different from the |TracedGlobal<T>| handle that
- * the embedder holds for retaining the object. It is up to the embedder to
- * find the orignal |TracedGlobal<T>| handle via the object or class id.
+ * Note that the |handle| is different from the handle that the embedder holds
+ * for retaining the object. It is up to the embedder to find the original
+ * handle via the object or class id.
*/
virtual void ResetHandleInNonTracingGC(
- const v8::TracedGlobal<v8::Value>& handle) {}
+ const v8::TracedReference<v8::Value>& handle);
+ V8_DEPRECATE_SOON(
+ "Use TracedReference version when not requiring destructors.")
+ virtual void ResetHandleInNonTracingGC(
+ const v8::TracedGlobal<v8::Value>& handle);
/*
* Called by the embedder to immediately perform a full garbage collection.
@@ -7575,6 +7760,8 @@ struct DeserializeInternalFieldsCallback {
};
typedef DeserializeInternalFieldsCallback DeserializeEmbedderFieldsCallback;
+enum class MeasureMemoryMode { kSummary, kDetailed };
+
/**
* Isolate represents an isolated instance of the V8 engine. V8 isolates have
* completely separate states. Objects from one isolate must not be used in
@@ -8096,6 +8283,17 @@ class V8_EXPORT Isolate {
bool GetHeapCodeAndMetadataStatistics(HeapCodeStatistics* object_statistics);
/**
+ * Enqueues a memory measurement request for the given context and mode.
+ * This API is experimental and may change significantly.
+ *
+ * \param mode Indicates whether the result should include per-context
+ * memory usage or just the total memory usage.
+ * \returns a promise that will be resolved with the memory usage estimate.
+ */
+ v8::MaybeLocal<v8::Promise> MeasureMemory(v8::Local<v8::Context> context,
+ MeasureMemoryMode mode);
+
+ /**
* Get a call stack sample from the isolate.
* \param state Execution state.
* \param frames Caller allocated buffer to store stack frames.
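[Editor's note] A hedged sketch of driving the experimental MeasureMemory API added above: the embedder gets a promise back and reads the estimate once it settles (the shape of the result object is not specified by this header):

    // Sketch: request a summary measurement; error handling elided.
    void RequestMemoryEstimate(v8::Isolate* isolate,
                               v8::Local<v8::Context> context) {
      v8::MaybeLocal<v8::Promise> maybe =
          isolate->MeasureMemory(context, v8::MeasureMemoryMode::kSummary);
      v8::Local<v8::Promise> promise;
      if (maybe.ToLocal(&promise)) {
        // Read promise->Result() once it settles (after microtasks run),
        // or chain a callback with promise->Then(context, ...).
      }
    }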
@@ -8156,8 +8354,8 @@ class V8_EXPORT Isolate {
Local<Context> GetCurrentContext();
/** Returns the last context entered through V8's C++ API. */
- V8_DEPRECATED("Use GetEnteredOrMicrotaskContext().",
- Local<Context> GetEnteredContext());
+ V8_DEPRECATED("Use GetEnteredOrMicrotaskContext().")
+ Local<Context> GetEnteredContext();
/**
* Returns either the last context entered through V8's C++ API, or the
@@ -8469,18 +8667,16 @@ class V8_EXPORT Isolate {
* Executing scripts inside the callback will not re-trigger microtasks and
* the callback.
*/
- V8_DEPRECATE_SOON("Use *WithData version.",
- void AddMicrotasksCompletedCallback(
- MicrotasksCompletedCallback callback));
+ V8_DEPRECATE_SOON("Use *WithData version.")
+ void AddMicrotasksCompletedCallback(MicrotasksCompletedCallback callback);
void AddMicrotasksCompletedCallback(
MicrotasksCompletedCallbackWithData callback, void* data = nullptr);
/**
* Removes callback that was installed by AddMicrotasksCompletedCallback.
*/
- V8_DEPRECATE_SOON("Use *WithData version.",
- void RemoveMicrotasksCompletedCallback(
- MicrotasksCompletedCallback callback));
+ V8_DEPRECATE_SOON("Use *WithData version.")
+ void RemoveMicrotasksCompletedCallback(MicrotasksCompletedCallback callback);
void RemoveMicrotasksCompletedCallback(
MicrotasksCompletedCallbackWithData callback, void* data = nullptr);
@@ -8910,6 +9106,7 @@ class V8_EXPORT V8 {
* handled entirely on the embedders' side.
* - The call will abort if the data is invalid.
*/
+ V8_DEPRECATED("The natives blob is deprecated (https://crbug.com/v8/7624).")
static void SetNativesDataBlob(StartupData* startup_blob);
static void SetSnapshotDataBlob(StartupData* startup_blob);
@@ -8922,8 +9119,6 @@ class V8_EXPORT V8 {
*/
static void SetFlagsFromString(const char* str);
static void SetFlagsFromString(const char* str, size_t length);
- V8_DEPRECATED("use size_t version",
- static void SetFlagsFromString(const char* str, int length));
/**
* Sets V8 flags from the command line.
@@ -9006,8 +9201,11 @@ class V8_EXPORT V8 {
* not perform any file IO.
*/
static void InitializeExternalStartupData(const char* directory_path);
+ V8_DEPRECATED("The natives blob is deprecated (https://crbug.com/v8/7624).")
static void InitializeExternalStartupData(const char* natives_blob,
const char* snapshot_blob);
+ static void InitializeExternalStartupDataFromFile(const char* snapshot_blob);
+
/**
* Sets the v8::Platform to use. This should be invoked before V8 is
* initialized.
@@ -9040,9 +9238,8 @@ class V8_EXPORT V8 {
* \param context The third argument passed to the Linux signal handler, which
* points to a ucontext_t structure.
*/
- V8_DEPRECATE_SOON("Use TryHandleWebAssemblyTrapPosix",
- static bool TryHandleSignal(int signal_number, void* info,
- void* context));
+ V8_DEPRECATE_SOON("Use TryHandleWebAssemblyTrapPosix")
+ static bool TryHandleSignal(int signal_number, void* info, void* context);
#endif // V8_OS_POSIX
/**
@@ -9111,8 +9308,12 @@ class V8_EXPORT V8 {
template <class T>
friend class Maybe;
template <class T>
+ friend class TracedReferenceBase;
+ template <class T>
friend class TracedGlobal;
template <class T>
+ friend class TracedReference;
+ template <class T>
friend class WeakCallbackInfo;
template <class T> friend class Eternal;
template <class T> friend class PersistentBase;
@@ -9983,7 +10184,7 @@ Local<T> Local<T>::New(Isolate* isolate, const PersistentBase<T>& that) {
}
template <class T>
-Local<T> Local<T>::New(Isolate* isolate, const TracedGlobal<T>& that) {
+Local<T> Local<T>::New(Isolate* isolate, const TracedReferenceBase<T>& that) {
return New(isolate, that.val_);
}
@@ -10164,26 +10365,20 @@ Global<T>& Global<T>::operator=(Global<S>&& rhs) {
}
template <class T>
-TracedGlobal<T>::WrappedForDestruction::~WrappedForDestruction() {
- if (value == nullptr) return;
- V8::DisposeTracedGlobal(reinterpret_cast<internal::Address*>(value));
- value = nullptr;
-}
-
-template <class T>
-T* TracedGlobal<T>::New(Isolate* isolate, T* that, void* slot) {
+T* TracedReferenceBase<T>::New(Isolate* isolate, T* that, void* slot,
+ DestructionMode destruction_mode) {
if (that == nullptr) return nullptr;
internal::Address* p = reinterpret_cast<internal::Address*>(that);
return reinterpret_cast<T*>(V8::GlobalizeTracedReference(
reinterpret_cast<internal::Isolate*>(isolate), p,
reinterpret_cast<internal::Address*>(slot),
- TracedGlobalTrait<TracedGlobal<T>>::kRequiresExplicitDestruction));
+ destruction_mode == kWithDestructor));
}
template <class T>
-void TracedGlobal<T>::Reset() {
+void TracedReferenceBase<T>::Reset() {
if (IsEmpty()) return;
- V8::DisposeTracedGlobal(reinterpret_cast<internal::Address*>(**this));
+ V8::DisposeTracedGlobal(reinterpret_cast<internal::Address*>(val_));
val_ = nullptr;
}
@@ -10193,7 +10388,8 @@ void TracedGlobal<T>::Reset(Isolate* isolate, const Local<S>& other) {
TYPE_CHECK(T, S);
Reset();
if (other.IsEmpty()) return;
- this->val_ = New(isolate, other.val_, &val_);
+ this->val_ = this->New(isolate, other.val_, &this->val_,
+ TracedReferenceBase<T>::kWithDestructor);
}
template <class T>
@@ -10241,28 +10437,83 @@ TracedGlobal<T>& TracedGlobal<T>::operator=(const TracedGlobal& rhs) {
}
template <class T>
-void TracedGlobal<T>::SetWrapperClassId(uint16_t class_id) {
+template <class S>
+void TracedReference<T>::Reset(Isolate* isolate, const Local<S>& other) {
+ TYPE_CHECK(T, S);
+ Reset();
+ if (other.IsEmpty()) return;
+ this->val_ = this->New(isolate, other.val_, &this->val_,
+ TracedReferenceBase<T>::kWithoutDestructor);
+}
+
+template <class T>
+template <class S>
+TracedReference<T>& TracedReference<T>::operator=(TracedReference<S>&& rhs) {
+ TYPE_CHECK(T, S);
+ *this = std::move(rhs.template As<T>());
+ return *this;
+}
+
+template <class T>
+template <class S>
+TracedReference<T>& TracedReference<T>::operator=(
+ const TracedReference<S>& rhs) {
+ TYPE_CHECK(T, S);
+ *this = rhs.template As<T>();
+ return *this;
+}
+
+template <class T>
+TracedReference<T>& TracedReference<T>::operator=(TracedReference&& rhs) {
+ if (this != &rhs) {
+ this->Reset();
+ if (rhs.val_ != nullptr) {
+ this->val_ = rhs.val_;
+ V8::MoveTracedGlobalReference(
+ reinterpret_cast<internal::Address**>(&rhs.val_),
+ reinterpret_cast<internal::Address**>(&this->val_));
+ rhs.val_ = nullptr;
+ }
+ }
+ return *this;
+}
+
+template <class T>
+TracedReference<T>& TracedReference<T>::operator=(const TracedReference& rhs) {
+ if (this != &rhs) {
+ this->Reset();
+ if (rhs.val_ != nullptr) {
+ V8::CopyTracedGlobalReference(
+ reinterpret_cast<const internal::Address* const*>(&rhs.val_),
+ reinterpret_cast<internal::Address**>(&this->val_));
+ }
+ }
+ return *this;
+}
+
+template <class T>
+void TracedReferenceBase<T>::SetWrapperClassId(uint16_t class_id) {
typedef internal::Internals I;
if (IsEmpty()) return;
- internal::Address* obj = reinterpret_cast<internal::Address*>(**this);
+ internal::Address* obj = reinterpret_cast<internal::Address*>(val_);
uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + I::kNodeClassIdOffset;
*reinterpret_cast<uint16_t*>(addr) = class_id;
}
template <class T>
-uint16_t TracedGlobal<T>::WrapperClassId() const {
+uint16_t TracedReferenceBase<T>::WrapperClassId() const {
typedef internal::Internals I;
if (IsEmpty()) return 0;
- internal::Address* obj = reinterpret_cast<internal::Address*>(**this);
+ internal::Address* obj = reinterpret_cast<internal::Address*>(val_);
uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + I::kNodeClassIdOffset;
return *reinterpret_cast<uint16_t*>(addr);
}
template <class T>
-void TracedGlobal<T>::SetFinalizationCallback(
+void TracedReferenceBase<T>::SetFinalizationCallback(
void* parameter, typename WeakCallbackInfo<void>::Callback callback) {
- V8::SetFinalizationCallbackTraced(
- reinterpret_cast<internal::Address*>(**this), parameter, callback);
+ V8::SetFinalizationCallbackTraced(reinterpret_cast<internal::Address*>(val_),
+ parameter, callback);
}
template <typename T>
@@ -10281,12 +10532,12 @@ void ReturnValue<T>::Set(const Global<S>& handle) {
template <typename T>
template <typename S>
-void ReturnValue<T>::Set(const TracedGlobal<S>& handle) {
+void ReturnValue<T>::Set(const TracedReferenceBase<S>& handle) {
TYPE_CHECK(T, S);
if (V8_UNLIKELY(handle.IsEmpty())) {
*value_ = GetDefaultValue();
} else {
- *value_ = *reinterpret_cast<internal::Address*>(*handle);
+ *value_ = *reinterpret_cast<internal::Address*>(handle.val_);
}
}
diff --git a/deps/v8/include/v8config.h b/deps/v8/include/v8config.h
index 7670c0e449..882dc8a23c 100644
--- a/deps/v8/include/v8config.h
+++ b/deps/v8/include/v8config.h
@@ -54,7 +54,7 @@
// -----------------------------------------------------------------------------
-// Operating system detection
+// Operating system detection (host)
//
// V8_OS_ANDROID - Android
// V8_OS_BSD - BSDish (Mac OS X, Net/Free/Open/DragonFlyBSD)
@@ -122,6 +122,67 @@
# define V8_OS_WIN 1
#endif
+// -----------------------------------------------------------------------------
+// Operating system detection (target)
+//
+// V8_TARGET_OS_ANDROID
+// V8_TARGET_OS_FUCHSIA
+// V8_TARGET_OS_IOS
+// V8_TARGET_OS_LINUX
+// V8_TARGET_OS_MACOSX
+// V8_TARGET_OS_WIN
+//
+// If not set explicitly, these fall back to corresponding V8_OS_ values.
+
+#ifdef V8_HAVE_TARGET_OS
+
+// The target OS is provided; just check that at least one known value is set.
+# if !defined(V8_TARGET_OS_ANDROID) \
+ && !defined(V8_TARGET_OS_FUCHSIA) \
+ && !defined(V8_TARGET_OS_IOS) \
+ && !defined(V8_TARGET_OS_LINUX) \
+ && !defined(V8_TARGET_OS_MACOSX) \
+ && !defined(V8_TARGET_OS_WIN)
+# error No known target OS defined.
+# endif
+
+#else // V8_HAVE_TARGET_OS
+
+# if defined(V8_TARGET_OS_ANDROID) \
+ || defined(V8_TARGET_OS_FUCHSIA) \
+ || defined(V8_TARGET_OS_IOS) \
+ || defined(V8_TARGET_OS_LINUX) \
+ || defined(V8_TARGET_OS_MACOSX) \
+ || defined(V8_TARGET_OS_WIN)
+# error A target OS is defined but V8_HAVE_TARGET_OS is unset.
+# endif
+
+// Fall back to the detected host OS.
+#ifdef V8_OS_ANDROID
+# define V8_TARGET_OS_ANDROID
+#endif
+
+#ifdef V8_OS_FUCHSIA
+# define V8_TARGET_OS_FUCHSIA
+#endif
+
+#ifdef V8_OS_IOS
+# define V8_TARGET_OS_IOS
+#endif
+
+#ifdef V8_OS_LINUX
+# define V8_TARGET_OS_LINUX
+#endif
+
+#ifdef V8_OS_MACOSX
+# define V8_TARGET_OS_MACOSX
+#endif
+
+#ifdef V8_OS_WIN
+# define V8_TARGET_OS_WIN
+#endif
+
+#endif // V8_HAVE_TARGET_OS
// -----------------------------------------------------------------------------
// C library detection
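[Editor's note] The new V8_TARGET_OS_* family mirrors V8_OS_* but describes the code being generated rather than the machine doing the compiling, which only diverges in cross-compiles (e.g. a Linux host building for Windows). A minimal sketch of consuming it:

    // Sketch: pick the separator for the *target*, not the build host.
    #if defined(V8_TARGET_OS_WIN)
    static const char kPathSeparator = '\\';
    #else
    static const char kPathSeparator = '/';
    #endif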
@@ -169,7 +230,7 @@
//
// V8_HAS_ATTRIBUTE_ALWAYS_INLINE - __attribute__((always_inline))
// supported
-// V8_HAS_ATTRIBUTE_DEPRECATED - __attribute__((deprecated)) supported
+// V8_HAS_ATTRIBUTE_NONNULL - __attribute__((nonnull)) supported
// V8_HAS_ATTRIBUTE_NOINLINE - __attribute__((noinline)) supported
// V8_HAS_ATTRIBUTE_UNUSED - __attribute__((unused)) supported
// V8_HAS_ATTRIBUTE_VISIBILITY - __attribute__((visibility)) supported
@@ -188,10 +249,8 @@
// V8_HAS_BUILTIN_UADD_OVERFLOW - __builtin_uadd_overflow() supported
// V8_HAS_COMPUTED_GOTO - computed goto/labels as values
// supported
-// V8_HAS_DECLSPEC_DEPRECATED - __declspec(deprecated) supported
// V8_HAS_DECLSPEC_NOINLINE - __declspec(noinline) supported
// V8_HAS_DECLSPEC_SELECTANY - __declspec(selectany) supported
-// V8_HAS_DECLSPEC_NORETURN - __declspec(noreturn) supported
// V8_HAS___FORCEINLINE - __forceinline supported
//
// Note that testing for compilers and/or features must be done using #if
@@ -207,9 +266,7 @@
#endif
# define V8_HAS_ATTRIBUTE_ALWAYS_INLINE (__has_attribute(always_inline))
-# define V8_HAS_ATTRIBUTE_DEPRECATED (__has_attribute(deprecated))
-# define V8_HAS_ATTRIBUTE_DEPRECATED_MESSAGE \
- (__has_extension(attribute_deprecated_with_message))
+# define V8_HAS_ATTRIBUTE_NONNULL (__has_attribute(nonnull))
# define V8_HAS_ATTRIBUTE_NOINLINE (__has_attribute(noinline))
# define V8_HAS_ATTRIBUTE_UNUSED (__has_attribute(unused))
# define V8_HAS_ATTRIBUTE_VISIBILITY (__has_attribute(visibility))
@@ -255,8 +312,6 @@
// Works around "sorry, unimplemented: inlining failed" build errors with
// older compilers.
# define V8_HAS_ATTRIBUTE_ALWAYS_INLINE (V8_GNUC_PREREQ(4, 4, 0))
-# define V8_HAS_ATTRIBUTE_DEPRECATED (V8_GNUC_PREREQ(3, 4, 0))
-# define V8_HAS_ATTRIBUTE_DEPRECATED_MESSAGE (V8_GNUC_PREREQ(4, 5, 0))
# define V8_HAS_ATTRIBUTE_NOINLINE (V8_GNUC_PREREQ(3, 4, 0))
# define V8_HAS_ATTRIBUTE_UNUSED (V8_GNUC_PREREQ(2, 95, 0))
# define V8_HAS_ATTRIBUTE_VISIBILITY (V8_GNUC_PREREQ(4, 3, 0))
@@ -278,10 +333,8 @@
#if defined(_MSC_VER)
# define V8_CC_MSVC 1
-# define V8_HAS_DECLSPEC_DEPRECATED 1
# define V8_HAS_DECLSPEC_NOINLINE 1
# define V8_HAS_DECLSPEC_SELECTANY 1
-# define V8_HAS_DECLSPEC_NORETURN 1
# define V8_HAS___FORCEINLINE 1
@@ -306,9 +359,20 @@
# define V8_ASSUME_ALIGNED(ptr, alignment) \
__builtin_assume_aligned((ptr), (alignment))
#else
-# define V8_ASSUME_ALIGNED(ptr) (ptr)
+# define V8_ASSUME_ALIGNED(ptr, alignment) (ptr)
#endif
+
+// A macro to mark specific arguments as non-null.
+// Use like:
+// int add(int* x, int y, int* z) V8_NONNULL(1, 3) { return *x + y + *z; }
+#if V8_HAS_ATTRIBUTE_NONNULL
+# define V8_NONNULL(...) __attribute__((nonnull(__VA_ARGS__)))
+#else
+# define V8_NONNULL(...) /* NOT SUPPORTED */
+#endif
+
+
// A macro used to tell the compiler to never inline a particular function.
// Don't bother for debug builds.
// Use like:
@@ -323,31 +387,18 @@
// A macro (V8_DEPRECATED) to mark classes or functions as deprecated.
-#if defined(V8_DEPRECATION_WARNINGS) && V8_HAS_ATTRIBUTE_DEPRECATED_MESSAGE
-#define V8_DEPRECATED(message, declarator) \
- declarator __attribute__((deprecated(message)))
-#elif defined(V8_DEPRECATION_WARNINGS) && V8_HAS_ATTRIBUTE_DEPRECATED
-#define V8_DEPRECATED(message, declarator) \
- declarator __attribute__((deprecated))
-#elif defined(V8_DEPRECATION_WARNINGS) && V8_HAS_DECLSPEC_DEPRECATED
-#define V8_DEPRECATED(message, declarator) __declspec(deprecated) declarator
+#if defined(V8_DEPRECATION_WARNINGS)
+# define V8_DEPRECATED(message) [[deprecated(message)]]
#else
-#define V8_DEPRECATED(message, declarator) declarator
+# define V8_DEPRECATED(message)
#endif
// A macro (V8_DEPRECATE_SOON) to make it easier to see what will be deprecated.
-#if defined(V8_IMMINENT_DEPRECATION_WARNINGS) && \
- V8_HAS_ATTRIBUTE_DEPRECATED_MESSAGE
-#define V8_DEPRECATE_SOON(message, declarator) \
- declarator __attribute__((deprecated(message)))
-#elif defined(V8_IMMINENT_DEPRECATION_WARNINGS) && V8_HAS_ATTRIBUTE_DEPRECATED
-#define V8_DEPRECATE_SOON(message, declarator) \
- declarator __attribute__((deprecated))
-#elif defined(V8_IMMINENT_DEPRECATION_WARNINGS) && V8_HAS_DECLSPEC_DEPRECATED
-#define V8_DEPRECATE_SOON(message, declarator) __declspec(deprecated) declarator
+#if defined(V8_IMMINENT_DEPRECATION_WARNINGS)
+# define V8_DEPRECATE_SOON(message) [[deprecated(message)]]
#else
-#define V8_DEPRECATE_SOON(message, declarator) declarator
+# define V8_DEPRECATE_SOON(message)
#endif
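[Editor's note] Both macros now expand to the standard [[deprecated(message)]] attribute and precede the declaration instead of wrapping it as a second macro argument, which is what drives the mechanical V8_DEPRECATED(...) rewrites throughout the v8.h hunks above. A sketch of the before/after spelling (Widget is illustrative):

    // Old form: the declaration rode along as the second macro argument.
    //   V8_DEPRECATED("Use Paint().", void Draw());
    // New form: the macro is a plain attribute before the declaration.
    class Widget {
     public:
      V8_DEPRECATED("Use Paint().")
      void Draw();
      void Paint();
    };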
diff --git a/deps/v8/infra/mb/mb_config.pyl b/deps/v8/infra/mb/mb_config.pyl
index d5d192fb20..fed7fa24bf 100644
--- a/deps/v8/infra/mb/mb_config.pyl
+++ b/deps/v8/infra/mb/mb_config.pyl
@@ -99,8 +99,6 @@
'release_x64_pointer_compression_without_dchecks',
'V8 Linux64 - arm64 - sim - pointer compression - builder':
'release_simulate_arm64_pointer_compression',
- 'V8 Linux - noembed': 'release_x86_noembed',
- 'V8 Linux - noembed - debug': 'debug_x86_noembed',
'V8 Fuchsia': 'release_x64_fuchsia',
'V8 Fuchsia - debug': 'debug_x64_fuchsia',
'V8 Linux64 - cfi': 'release_x64_cfi',
@@ -162,7 +160,6 @@
'debug_simulate_arm64_no_snap',
'V8 Linux - arm64 - sim - gc stress': 'debug_simulate_arm64',
# Mips.
- 'V8 Mips - builder': 'release_mips_no_snap_no_i18n',
'V8 Linux - mipsel - sim - builder': 'release_simulate_mipsel',
'V8 Linux - mips64el - sim - builder': 'release_simulate_mips64el',
# IBM.
@@ -197,7 +194,6 @@
'v8_android_arm64_n5x_rel_ng': 'release_android_arm64',
'v8_fuchsia_rel_ng': 'release_x64_fuchsia_trybot',
'v8_ios_simulator': 'release_x64_ios_simulator',
- 'v8_linux_noembed_rel_ng': 'release_x86_noembed_trybot',
'v8_linux_rel_ng': 'release_x86_gcmole_trybot',
'v8_linux_optional_rel_ng': 'release_x86_trybot',
'v8_linux_verify_csa_rel_ng': 'release_x86_verify_csa',
@@ -253,7 +249,6 @@
'v8_mac64_dbg': 'debug_x64',
'v8_mac64_dbg_ng': 'debug_x64',
'v8_mac64_asan_rel': 'release_x64_asan_no_lsan',
- 'v8_mips_compile_rel': 'release_mips_no_snap_no_i18n',
'v8_linux_arm_rel_ng': 'release_simulate_arm_trybot',
'v8_linux_arm_lite_rel_ng': 'release_simulate_arm_lite_trybot',
'v8_linux_arm_dbg': 'debug_simulate_arm',
@@ -434,7 +429,7 @@
'release_x64_cfi_clusterfuzz': [
'release_bot', 'x64', 'cfi_clusterfuzz'],
'release_x64_msvc': [
- 'release_bot', 'x64', 'msvc'],
+ 'release_bot_no_goma', 'x64', 'minimal_symbols', 'msvc'],
'release_x64_correctness_fuzzer' : [
'release_bot', 'x64', 'v8_correctness_fuzzer'],
'release_x64_fuchsia': [
@@ -515,8 +510,6 @@
# Debug configs for x86.
'debug_x86': [
'debug_bot', 'x86'],
- 'debug_x86_noembed': [
- 'debug_bot', 'x86', 'v8_no_enable_embedded_builtins'],
'debug_x86_minimal_symbols': [
'debug_bot', 'x86', 'minimal_symbols'],
'debug_x86_no_i18n': [
@@ -535,10 +528,6 @@
# Release configs for x86.
'release_x86': [
'release_bot', 'x86'],
- 'release_x86_noembed': [
- 'release_bot', 'x86', 'v8_no_enable_embedded_builtins'],
- 'release_x86_noembed_trybot': [
- 'release_trybot', 'x86', 'v8_no_enable_embedded_builtins'],
'release_x86_gcc': [
'release_bot', 'x86', 'gcc', 'v8_check_header_includes'],
'release_x86_gcc_minimal_symbols': [
@@ -568,11 +557,6 @@
'release_x86_verify_csa': [
'release_bot', 'x86', 'dcheck_always_on',
'v8_enable_slow_dchecks', 'v8_verify_csa'],
-
- # Release configs for mips.
- 'release_mips_no_snap_no_i18n': [
- 'release', 'mips', 'no_sysroot', 'static', 'v8_no_i18n',
- 'v8_snapshot_none'],
},
'mixins': {
@@ -648,8 +632,7 @@
},
'gcc': {
- # TODO(machenbach): Remove cxx11 restriction when updating gcc version.
- 'gn_args': 'is_clang=false use_cxx11=true',
+ 'gn_args': 'is_clang=false',
},
'gcmole': {
@@ -685,28 +668,14 @@
'gn_args': 'symbol_level=1',
},
- 'mips': {
- 'mixins': ['mips_bundled_toolchain'],
- 'gn_args': 'target_cpu="mips"',
- },
-
- 'mips_bundled_toolchain': {
- 'gn_args': 'custom_toolchain="tools/toolchain:mips-bundled" '
- 'ldso_path="tools/mips_toolchain/sysroot/usr/lib/ld.so.1" '
- 'gcc_target_rpath="tools/mips_toolchain/sysroot/usr/lib:'
- 'tools/mips_toolchain/mips-mti-linux-gnu/lib:\$ORIGIN/."',
- },
-
'msan': {
'mixins': ['v8_enable_test_features'],
- 'gn_args': ('is_msan=true msan_track_origins=2 '
- 'use_prebuilt_instrumented_libraries=true'),
+ 'gn_args': 'is_msan=true msan_track_origins=2',
},
'msan_no_origins': {
'mixins': ['v8_enable_test_features'],
- 'gn_args': ('is_msan=true msan_track_origins=0 '
- 'use_prebuilt_instrumented_libraries=true'),
+ 'gn_args': 'is_msan=true msan_track_origins=0',
},
'msvc': {
@@ -717,6 +686,10 @@
'gn_args': 'use_custom_libcxx=false',
},
+ 'no_goma': {
+ 'gn_args': 'use_goma=false',
+ },
+
'no_sysroot': {
'gn_args': 'use_sysroot=false',
},
@@ -733,6 +706,10 @@
'mixins': ['release', 'static', 'goma'],
},
+ 'release_bot_no_goma': {
+ 'mixins': ['release', 'static', 'no_goma'],
+ },
+
'release_trybot': {
'mixins': ['release_bot', 'minimal_symbols', 'dcheck_always_on'],
},
@@ -803,10 +780,6 @@
'gn_args': 'v8_correctness_fuzzer=true v8_multi_arch_build=true',
},
- 'v8_no_enable_embedded_builtins': {
- 'gn_args': 'v8_enable_embedded_builtins=false',
- },
-
'v8_enable_lite_mode': {
'gn_args': 'v8_enable_lite_mode=true',
},
@@ -852,9 +825,9 @@
'gn_args': 'v8_use_external_startup_data=false',
},
- 'v8_snapshot_none': {
- 'gn_args': 'v8_use_snapshot=false',
- },
+ # TODO(https://crbug.com/v8/8531): Remove this config and all bots, since
+ # no-snapshot was deprecated and removed.
+ 'v8_snapshot_none': {},
'v8_verify_heap': {
'gn_args': 'v8_enable_verify_heap=true',
diff --git a/deps/v8/infra/testing/builders.pyl b/deps/v8/infra/testing/builders.pyl
index 0d340db00e..1b805df959 100644
--- a/deps/v8/infra/testing/builders.pyl
+++ b/deps/v8/infra/testing/builders.pyl
@@ -43,7 +43,7 @@
{'name': 'v8testing', 'variant': 'default', 'shards': 4},
{'name': 'v8testing', 'variant': 'trusted', 'shards': 4},
{'name': 'mozilla', 'variant': 'default'},
- {'name': 'test262', 'variant': 'default', 'shards': 7},
+ {'name': 'test262', 'variant': 'default', 'shards': 9},
],
},
##############################################################################
@@ -60,7 +60,7 @@
{'name': 'mozilla'},
{'name': 'mozilla', 'variant': 'extra'},
{'name': 'test262', 'variant': 'default', 'shards': 2},
- {'name': 'test262_variants', 'variant': 'extra', 'shards': 3},
+ {'name': 'test262', 'variant': 'extra', 'shards': 3},
{'name': 'v8testing', 'shards': 3},
{'name': 'v8testing', 'variant': 'extra', 'shards': 2},
],
@@ -92,20 +92,12 @@
{'name': 'benchmarks', 'variant': 'extra'},
{'name': 'mozilla'},
{'name': 'mozilla', 'variant': 'extra'},
- {'name': 'test262_variants', 'shards': 2},
- {'name': 'test262_variants', 'variant': 'extra', 'shards': 2},
+ {'name': 'test262', 'shards': 2},
+ {'name': 'test262', 'variant': 'extra', 'shards': 2},
{'name': 'v8testing', 'shards': 2},
{'name': 'v8testing', 'variant': 'extra'},
],
},
- 'v8_linux_noembed_rel_ng_triggered': {
- 'swarming_dimensions' : {
- 'os': 'Ubuntu-16.04',
- },
- 'tests': [
- {'name': 'v8testing', 'shards': 2},
- ],
- },
'v8_linux_noi18n_rel_ng_triggered': {
'swarming_dimensions' : {
'os': 'Ubuntu-16.04',
@@ -145,8 +137,8 @@
{'name': 'mozilla'},
{'name': 'mozilla', 'variant': 'extra'},
{'name': 'optimize_for_size'},
- {'name': 'test262_variants', 'shards': 4},
- {'name': 'test262_variants', 'variant': 'extra', 'shards': 3},
+ {'name': 'test262', 'shards': 4},
+ {'name': 'test262', 'variant': 'extra', 'shards': 3},
{'name': 'v8testing', 'shards': 2},
{'name': 'v8testing', 'suffix': 'isolates', 'test_args': ['--isolates'], 'shards': 2},
{'name': 'v8testing', 'variant': 'extra'},
@@ -162,7 +154,7 @@
{'name': 'benchmarks', 'variant': 'code_serializer', 'shards': 1},
{'name': 'd8testing', 'variant': 'code_serializer', 'shards': 1},
{'name': 'mozilla', 'variant': 'code_serializer', 'shards': 1},
- {'name': 'test262_variants', 'variant': 'code_serializer', 'shards': 1},
+ {'name': 'test262', 'variant': 'code_serializer', 'shards': 1},
# No SSE3.
{
'name': 'mozilla',
@@ -229,7 +221,7 @@
{'name': 'mozilla'},
{'name': 'test262', 'variant': 'default'},
{'name': 'v8testing', 'shards': 7},
- {'name': 'v8testing', 'variant': 'extra', 'shards': 3},
+ {'name': 'v8testing', 'variant': 'extra', 'shards': 6},
{'name': 'v8testing', 'variant': 'trusted', 'shards': 2},
],
},
@@ -261,7 +253,7 @@
'os': 'Ubuntu-16.04',
},
'tests': [
- {'name': 'test262_variants', 'shards': 7},
+ {'name': 'test262', 'shards': 7},
{'name': 'v8testing', 'shards': 3},
{'name': 'v8testing', 'variant': 'extra', 'shards': 2},
{'name': 'v8testing', 'variant': 'slow_path'},
@@ -291,7 +283,7 @@
{'name': 'mozilla'},
{'name': 'mozilla', 'variant': 'extra'},
{'name': 'test262', 'variant': 'default'},
- {'name': 'test262_variants', 'variant': 'extra', 'shards': 3},
+ {'name': 'test262', 'variant': 'extra', 'shards': 3},
{'name': 'v8testing', 'shards': 3},
{'name': 'v8testing', 'variant': 'extra', 'shards': 2},
{'name': 'v8testing', 'variant': 'minor_mc', 'shards': 1},
@@ -318,7 +310,7 @@
{'name': 'mjsunit', 'variant': 'stress_sampling'},
{'name': 'webkit', 'variant': 'stress_sampling'},
# Infra staging.
- {'name': 'test262_variants', 'variant': 'infra_staging', 'shards': 2},
+ {'name': 'test262', 'variant': 'infra_staging', 'shards': 2},
{'name': 'v8testing', 'variant': 'infra_staging', 'shards': 2},
],
},
@@ -345,9 +337,9 @@
{'name': 'mozilla', 'variant': 'assert_types'},
{'name': 'mozilla', 'variant': 'extra'},
{'name': 'perf_integration'},
- {'name': 'test262_variants', 'shards': 2},
- {'name': 'test262_variants', 'variant': 'assert_types', 'shards': 2},
- {'name': 'test262_variants', 'variant': 'extra', 'shards': 2},
+ {'name': 'test262', 'shards': 2},
+ {'name': 'test262', 'variant': 'assert_types', 'shards': 2},
+ {'name': 'test262', 'variant': 'extra', 'shards': 2},
{'name': 'v8testing', 'shards': 2},
{'name': 'v8testing', 'variant': 'assert_types'},
{'name': 'v8testing', 'variant': 'extra'},
@@ -379,8 +371,8 @@
# TODO(machenbach): Add mozilla tests.
{'name': 'mjsunit_sp_frame_access'},
{'name': 'optimize_for_size'},
- {'name': 'test262_variants', 'shards': 4},
- {'name': 'test262_variants', 'variant': 'extra', 'shards': 3},
+ {'name': 'test262', 'shards': 4},
+ {'name': 'test262', 'variant': 'extra', 'shards': 3},
{'name': 'v8initializers'},
{'name': 'v8testing', 'shards': 2},
{'name': 'v8testing', 'variant': 'extra'},
@@ -658,8 +650,8 @@
{'name': 'mozilla'},
{'name': 'mozilla', 'variant': 'extra'},
{'name': 'optimize_for_size'},
- {'name': 'test262_variants', 'shards': 2},
- {'name': 'test262_variants', 'variant': 'extra'},
+ {'name': 'test262', 'shards': 2},
+ {'name': 'test262', 'variant': 'extra'},
{'name': 'v8initializers'},
{'name': 'v8testing'},
{'name': 'v8testing', 'suffix': 'isolates', 'test_args': ['--isolates']},
@@ -712,9 +704,9 @@
{'name': 'mozilla', 'variant': 'code_serializer', 'shards': 1},
{'name': 'mozilla', 'variant': 'extra'},
{'name': 'optimize_for_size'},
- {'name': 'test262_variants', 'shards': 6},
- {'name': 'test262_variants', 'variant': 'code_serializer', 'shards': 2},
- {'name': 'test262_variants', 'variant': 'extra', 'shards': 3},
+ {'name': 'test262', 'shards': 6},
+ {'name': 'test262', 'variant': 'code_serializer', 'shards': 2},
+ {'name': 'test262', 'variant': 'extra', 'shards': 3},
{'name': 'v8testing', 'shards': 3},
{
'name': 'v8testing',
@@ -761,22 +753,6 @@
},
],
},
- 'V8 Linux - noembed': {
- 'swarming_dimensions': {
- 'os': 'Ubuntu-16.04',
- },
- 'tests': [
- {'name': 'v8testing'},
- ],
- },
- 'V8 Linux - noembed - debug': {
- 'swarming_dimensions': {
- 'os': 'Ubuntu-16.04',
- },
- 'tests': [
- {'name': 'v8testing', 'shards': 3},
- ],
- },
'V8 Linux - full debug': {
'swarming_dimensions': {
'os': 'Ubuntu-16.04',
@@ -898,9 +874,9 @@
{'name': 'mozilla', 'variant': 'extra'},
{'name': 'optimize_for_size'},
{'name': 'perf_integration'},
- {'name': 'test262_variants', 'shards': 2},
- {'name': 'test262_variants', 'variant': 'assert_types'},
- {'name': 'test262_variants', 'variant': 'extra'},
+ {'name': 'test262', 'shards': 2},
+ {'name': 'test262', 'variant': 'assert_types'},
+ {'name': 'test262', 'variant': 'extra'},
{'name': 'v8initializers'},
{'name': 'v8testing'},
{'name': 'v8testing', 'variant': 'assert_types'},
@@ -957,8 +933,8 @@
{'name': 'mozilla'},
{'name': 'mozilla', 'variant': 'extra'},
{'name': 'optimize_for_size'},
- {'name': 'test262_variants', 'shards': 5},
- {'name': 'test262_variants', 'variant': 'extra', 'shards': 3},
+ {'name': 'test262', 'shards': 5},
+ {'name': 'test262', 'variant': 'extra', 'shards': 3},
{'name': 'v8testing', 'shards': 2},
{'name': 'v8testing', 'variant': 'extra'},
{'name': 'v8testing', 'variant': 'minor_mc', 'shards': 1},
@@ -1067,7 +1043,7 @@
'os': 'Ubuntu-16.04',
},
'tests': [
- {'name': 'test262_variants', 'shards': 5},
+ {'name': 'test262', 'shards': 5},
{'name': 'v8testing', 'shards': 2},
{'name': 'v8testing', 'variant': 'extra'},
{'name': 'v8testing', 'variant': 'slow_path', 'shards': 1},
@@ -1271,7 +1247,7 @@
},
'tests': [
{'name': 'mozilla', 'variant': 'default'},
- {'name': 'test262', 'variant': 'default', 'shards': 6},
+ {'name': 'test262', 'variant': 'default', 'shards': 8},
{'name': 'v8testing', 'variant': 'default', 'shards': 3},
{'name': 'v8testing', 'variant': 'trusted', 'shards': 3},
],
@@ -1411,7 +1387,7 @@
{'name': 'mozilla', 'shards': 4},
{'name': 'test262', 'variant': 'default'},
{'name': 'v8testing', 'shards': 8},
- {'name': 'v8testing', 'variant': 'extra', 'shards': 4},
+ {'name': 'v8testing', 'variant': 'extra', 'shards': 6},
{'name': 'v8testing', 'variant': 'trusted', 'shards': 2},
# Armv8-a.
{
@@ -1585,19 +1561,6 @@
{'name': 'v8testing', 'shards': 7},
],
},
- 'V8 Mips - big endian - nosnap': {
- 'swarming_dimensions': {
- 'cpu': 'mips-32',
- 'os': 'Debian-8.7',
- },
- 'swarming_task_attrs': {
- 'expiration': 18000,
- 'hard_timeout': 18000,
- },
- 'tests': [
- {'name': 'v8testing', 'variant': 'default', 'shards': 2},
- ],
- },
##############################################################################
# Clusterfuzz.
'V8 NumFuzz': {
@@ -1817,9 +1780,9 @@
'os': 'Ubuntu-16.04',
},
'tests': [
- {'name': 'mozilla'},
- {'name': 'test262', 'variant': 'default'},
- {'name': 'v8testing', 'shards': 8},
+ {'name': 'mozilla', 'shards': 2},
+ {'name': 'test262', 'variant': 'default', 'shards': 2},
+ {'name': 'v8testing', 'shards': 10},
],
},
'V8 arm - sim - stable branch': {
@@ -1837,9 +1800,9 @@
'os': 'Ubuntu-16.04',
},
'tests': [
- {'name': 'mozilla'},
- {'name': 'test262', 'variant': 'default'},
- {'name': 'v8testing', 'shards': 8},
+ {'name': 'mozilla', 'shards': 2},
+ {'name': 'test262', 'variant': 'default', 'shards': 2},
+ {'name': 'v8testing', 'shards': 10},
],
},
'V8 mips64el - sim - beta branch': {
diff --git a/deps/v8/src/api/OWNERS b/deps/v8/src/api/OWNERS
index ef5a56dbfc..f51e220309 100644
--- a/deps/v8/src/api/OWNERS
+++ b/deps/v8/src/api/OWNERS
@@ -1,5 +1,5 @@
file:../../include/OWNERS
-clemensh@chromium.org
+clemensb@chromium.org
ishell@chromium.org
jkummerow@chromium.org
leszeks@chromium.org
diff --git a/deps/v8/src/api/api.cc b/deps/v8/src/api/api.cc
index 8be7f8558c..0d80f986f1 100644
--- a/deps/v8/src/api/api.cc
+++ b/deps/v8/src/api/api.cc
@@ -127,6 +127,11 @@
#endif // V8_OS_WIN64
#endif // V8_OS_WIN
+#define TRACE_BS(...) \
+ do { \
+ if (i::FLAG_trace_backing_store) PrintF(__VA_ARGS__); \
+ } while (false)
+
namespace v8 {
/*
@@ -902,11 +907,6 @@ void V8::SetFlagsFromString(const char* str, size_t length) {
i::FlagList::EnforceFlagImplications();
}
-void V8::SetFlagsFromString(const char* str, int length) {
- CHECK_LE(0, length);
- SetFlagsFromString(str, static_cast<size_t>(length));
-}
-
void V8::SetFlagsFromCommandLine(int* argc, char** argv, bool remove_flags) {
i::FlagList::SetFlagsFromCommandLine(argc, argv, remove_flags);
}
@@ -1314,7 +1314,6 @@ void Context::SetEmbedderData(int index, v8::Local<Value> value) {
void* Context::SlowGetAlignedPointerFromEmbedderData(int index) {
const char* location = "v8::Context::GetAlignedPointerFromEmbedderData()";
- HandleScope handle_scope(GetIsolate());
i::Handle<i::EmbedderDataArray> data =
EmbedderDataFor(this, index, false, location);
if (data.is_null()) return nullptr;
@@ -2363,28 +2362,6 @@ Local<Module> Module::CreateSyntheticModule(
i_module_name, i_export_names, evaluation_steps)));
}
-Maybe<bool> Module::SetSyntheticModuleExport(Isolate* isolate,
- Local<String> export_name,
- Local<v8::Value> export_value) {
- auto i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- i::Handle<i::String> i_export_name = Utils::OpenHandle(*export_name);
- i::Handle<i::Object> i_export_value = Utils::OpenHandle(*export_value);
- i::Handle<i::Module> self = Utils::OpenHandle(this);
- Utils::ApiCheck(self->IsSyntheticModule(),
- "v8::Module::SyntheticModuleSetExport",
- "v8::Module::SyntheticModuleSetExport must only be called on "
- "a SyntheticModule");
- ENTER_V8_NO_SCRIPT(i_isolate, isolate->GetCurrentContext(), Module,
- SetSyntheticModuleExport, Nothing<bool>(), i::HandleScope);
- has_pending_exception =
- i::SyntheticModule::SetExport(i_isolate,
- i::Handle<i::SyntheticModule>::cast(self),
- i_export_name, i_export_value)
- .IsNothing();
- RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
- return Just(true);
-}
-
void Module::SetSyntheticModuleExport(Local<String> export_name,
Local<v8::Value> export_value) {
i::Handle<i::String> i_export_name = Utils::OpenHandle(*export_name);
@@ -2394,9 +2371,9 @@ void Module::SetSyntheticModuleExport(Local<String> export_name,
"v8::Module::SetSyntheticModuleExport",
"v8::Module::SetSyntheticModuleExport must only be called on "
"a SyntheticModule");
- i::SyntheticModule::SetExportStrict(self->GetIsolate(),
- i::Handle<i::SyntheticModule>::cast(self),
- i_export_name, i_export_value);
+ i::SyntheticModule::SetExport(self->GetIsolate(),
+ i::Handle<i::SyntheticModule>::cast(self),
+ i_export_name, i_export_value);
}
namespace {
@@ -2631,7 +2608,7 @@ ScriptCompiler::ScriptStreamingTask* ScriptCompiler::StartStreamingScript(
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
i::ScriptStreamingData* data = source->impl();
std::unique_ptr<i::BackgroundCompileTask> task =
- base::make_unique<i::BackgroundCompileTask>(data, isolate);
+ std::make_unique<i::BackgroundCompileTask>(data, isolate);
data->task = std::move(task);
return new ScriptCompiler::ScriptStreamingTask(data);
}
@@ -3743,6 +3720,42 @@ void v8::WasmModuleObject::CheckCast(Value* that) {
"Could not convert to wasm module object");
}
+v8::BackingStore::~BackingStore() {
+ auto i_this = reinterpret_cast<const i::BackingStore*>(this);
+ i_this->~BackingStore(); // manually call internal destructor
+}
+
+void* v8::BackingStore::Data() const {
+ return reinterpret_cast<const i::BackingStore*>(this)->buffer_start();
+}
+
+size_t v8::BackingStore::ByteLength() const {
+ return reinterpret_cast<const i::BackingStore*>(this)->byte_length();
+}
+
+std::shared_ptr<v8::BackingStore> v8::ArrayBuffer::GetBackingStore() {
+ i::Handle<i::JSArrayBuffer> self = Utils::OpenHandle(this);
+ std::shared_ptr<i::BackingStore> backing_store = self->GetBackingStore();
+ if (!backing_store) {
+ backing_store =
+ i::BackingStore::EmptyBackingStore(i::SharedFlag::kNotShared);
+ }
+ i::GlobalBackingStoreRegistry::Register(backing_store);
+ std::shared_ptr<i::BackingStoreBase> bs_base = backing_store;
+ return std::static_pointer_cast<v8::BackingStore>(bs_base);
+}
+
+std::shared_ptr<v8::BackingStore> v8::SharedArrayBuffer::GetBackingStore() {
+ i::Handle<i::JSArrayBuffer> self = Utils::OpenHandle(this);
+ std::shared_ptr<i::BackingStore> backing_store = self->GetBackingStore();
+ if (!backing_store) {
+ backing_store = i::BackingStore::EmptyBackingStore(i::SharedFlag::kShared);
+ }
+ i::GlobalBackingStoreRegistry::Register(backing_store);
+ std::shared_ptr<i::BackingStoreBase> bs_base = backing_store;
+ return std::static_pointer_cast<v8::BackingStore>(bs_base);
+}
+
void v8::ArrayBuffer::CheckCast(Value* that) {
i::Handle<i::Object> obj = Utils::OpenHandle(that);
Utils::ApiCheck(
@@ -5307,7 +5320,7 @@ static inline int WriteHelper(i::Isolate* isolate, const String* string,
int end = start + length;
if ((length == -1) || (length > str->length() - start)) end = str->length();
if (end < 0) return 0;
- i::String::WriteToFlat(*str, buffer, start, end);
+ if (start < end) i::String::WriteToFlat(*str, buffer, start, end);
if (!(options & String::NO_NULL_TERMINATION) &&
(length == -1 || end - start < length)) {
buffer[end - start] = '\0';
@@ -5704,6 +5717,11 @@ void v8::V8::InitializeExternalStartupData(const char* natives_blob,
i::InitializeExternalStartupData(natives_blob, snapshot_blob);
}
+// static
+void v8::V8::InitializeExternalStartupDataFromFile(const char* snapshot_blob) {
+ i::InitializeExternalStartupDataFromFile(snapshot_blob);
+}
+
const char* v8::V8::GetVersion() { return i::Version::GetVersion(); }
template <typename ObjectType>
@@ -7070,21 +7088,7 @@ MemorySpan<const uint8_t> CompiledWasmModule::GetWireBytesRef() {
WasmModuleObject::TransferrableModule
WasmModuleObject::GetTransferrableModule() {
- if (i::FLAG_wasm_shared_code) {
- i::Handle<i::WasmModuleObject> obj =
- i::Handle<i::WasmModuleObject>::cast(Utils::OpenHandle(this));
- return TransferrableModule(obj->shared_native_module());
- } else {
- CompiledWasmModule compiled_module = GetCompiledModule();
- OwnedBuffer serialized_module = compiled_module.Serialize();
- MemorySpan<const uint8_t> wire_bytes_ref =
- compiled_module.GetWireBytesRef();
- size_t wire_size = wire_bytes_ref.size();
- std::unique_ptr<uint8_t[]> wire_bytes_copy(new uint8_t[wire_size]);
- memcpy(wire_bytes_copy.get(), wire_bytes_ref.data(), wire_size);
- return TransferrableModule(std::move(serialized_module),
- {std::move(wire_bytes_copy), wire_size});
- }
+ return GetCompiledModule();
}
CompiledWasmModule WasmModuleObject::GetCompiledModule() {
@@ -7096,17 +7100,17 @@ CompiledWasmModule WasmModuleObject::GetCompiledModule() {
MaybeLocal<WasmModuleObject> WasmModuleObject::FromTransferrableModule(
Isolate* isolate,
const WasmModuleObject::TransferrableModule& transferrable_module) {
- if (i::FLAG_wasm_shared_code) {
- i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- i::Handle<i::WasmModuleObject> module_object =
- i_isolate->wasm_engine()->ImportNativeModule(
- i_isolate, transferrable_module.shared_module_);
- return Local<WasmModuleObject>::Cast(
- Utils::ToLocal(i::Handle<i::JSObject>::cast(module_object)));
- } else {
- return Deserialize(isolate, AsReference(transferrable_module.serialized_),
- AsReference(transferrable_module.wire_bytes_));
- }
+ return FromCompiledModule(isolate, transferrable_module);
+}
+
+MaybeLocal<WasmModuleObject> WasmModuleObject::FromCompiledModule(
+ Isolate* isolate, const CompiledWasmModule& compiled_module) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ i::Handle<i::WasmModuleObject> module_object =
+ i_isolate->wasm_engine()->ImportNativeModule(
+ i_isolate, Utils::Open(compiled_module));
+ return Local<WasmModuleObject>::Cast(
+ Utils::ToLocal(i::Handle<i::JSObject>::cast(module_object)));
}
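// Editorial sketch: with the simplification above, a CompiledWasmModule
// carries the shared NativeModule, so moving a module between isolates no
// longer serializes anything (isolates assumed live; names illustrative).
#include "include/v8.h"

v8::MaybeLocal<v8::WasmModuleObject> Transfer(
    v8::Isolate* receiver, v8::Local<v8::WasmModuleObject> module) {
  // GetCompiledModule() must run on the sending isolate's thread.
  v8::CompiledWasmModule compiled = module->GetCompiledModule();
  return v8::WasmModuleObject::FromCompiledModule(receiver, compiled);
}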
MaybeLocal<WasmModuleObject> WasmModuleObject::Deserialize(
@@ -7219,20 +7223,78 @@ bool v8::ArrayBuffer::IsDetachable() const {
return Utils::OpenHandle(this)->is_detachable();
}
-v8::ArrayBuffer::Contents v8::ArrayBuffer::Externalize() {
- i::Handle<i::JSArrayBuffer> self = Utils::OpenHandle(this);
- i::Isolate* isolate = self->GetIsolate();
- Utils::ApiCheck(!self->is_external(), "v8_ArrayBuffer_Externalize",
- "ArrayBuffer already externalized");
- self->set_is_external(true);
+namespace {
+// The backing store deleter just deletes the indirection, which downrefs
+// the shared pointer. It will get collected normally.
+void BackingStoreDeleter(void* buffer, size_t length, void* info) {
+ std::shared_ptr<i::BackingStore>* bs_indirection =
+ reinterpret_cast<std::shared_ptr<i::BackingStore>*>(info);
+ if (bs_indirection) {
+ i::BackingStore* backing_store = bs_indirection->get();
+ TRACE_BS("API:delete bs=%p mem=%p (length=%zu)\n", backing_store,
+ backing_store->buffer_start(), backing_store->byte_length());
+ USE(backing_store);
+ }
+ delete bs_indirection;
+}
- const v8::ArrayBuffer::Contents contents = GetContents();
- isolate->heap()->UnregisterArrayBuffer(*self);
+void* MakeDeleterData(std::shared_ptr<i::BackingStore> backing_store) {
+ if (!backing_store) return nullptr;
+ TRACE_BS("API:extern bs=%p mem=%p (length=%zu)\n", backing_store.get(),
+ backing_store->buffer_start(), backing_store->byte_length());
+ return new std::shared_ptr<i::BackingStore>(backing_store);
+}
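// Editorial sketch of the indirection pattern above, with generic names: the
// deleter's `info` is a heap-allocated shared_ptr, so a plain C callback can
// participate in shared ownership.
#include <cstddef>
#include <memory>

struct Blob {};

void* MakeInfo(std::shared_ptr<Blob> blob) {
  // Park one extra reference on the heap.
  return new std::shared_ptr<Blob>(std::move(blob));
}

void DeleteInfo(void* /*data*/, size_t /*length*/, void* info) {
  // Dropping the indirection releases that reference; the Blob is freed
  // only when its last owner lets go.
  delete static_cast<std::shared_ptr<Blob>*>(info);
}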
- // A regular copy is good enough. No move semantics needed.
- return contents;
+std::shared_ptr<i::BackingStore> LookupOrCreateBackingStore(
+ i::Isolate* i_isolate, void* data, size_t byte_length, i::SharedFlag shared,
+ ArrayBufferCreationMode mode) {
+ // "internalized" means that the storage was allocated by the
+ // ArrayBufferAllocator and thus should be freed upon destruction.
+ bool free_on_destruct = mode == ArrayBufferCreationMode::kInternalized;
+
+ // Try to lookup a previously-registered backing store in the global
+ // registry. If found, use that instead of wrapping an embedder allocation.
+ std::shared_ptr<i::BackingStore> backing_store =
+ i::GlobalBackingStoreRegistry::Lookup(data, byte_length);
+
+ if (backing_store) {
+ // Check invariants for a previously-found backing store.
+
+ // 1. We cannot allow an embedder to first allocate a backing store that
+ // should not be freed on destruction, and then allocate an alias that
+ // should free it on destruction. The other order is fine.
+ bool changing_destruct_mode =
+ free_on_destruct && !backing_store->free_on_destruct();
+ Utils::ApiCheck(
+ !changing_destruct_mode, "v8_[Shared]ArrayBuffer_New",
+ "previous backing store found that should not be freed on destruct");
+
+ // 2. We cannot allow embedders to use the same backing store for both
+ // SharedArrayBuffers and regular ArrayBuffers.
+ bool changing_shared_flag =
+ (shared == i::SharedFlag::kShared) != backing_store->is_shared();
+ Utils::ApiCheck(
+ !changing_shared_flag, "v8_[Shared]ArrayBuffer_New",
+ "previous backing store found that does not match shared flag");
+ } else {
+ // No previous backing store found.
+ backing_store = i::BackingStore::WrapAllocation(
+ i_isolate, data, byte_length, shared, free_on_destruct);
+
+ // The embedder already has a direct pointer to the buffer start, so
+ // globally register the backing store in case they come back with the
+ // same buffer start and the backing store is marked as free_on_destruct.
+ i::GlobalBackingStoreRegistry::Register(backing_store);
+ }
+ return backing_store;
}
+std::shared_ptr<i::BackingStore> ToInternal(
+ std::shared_ptr<i::BackingStoreBase> backing_store) {
+ return std::static_pointer_cast<i::BackingStore>(backing_store);
+}
+} // namespace
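// Editorial sketch (hypothetical embedder calls) of the two ApiChecks above:
// re-wrapping a pointer with a stricter lifetime, or with a different shared
// flag, is rejected.
#include "include/v8.h"

void DemoInvariants(v8::Isolate* isolate, void* data, size_t len) {
  // Externalized: V8 wraps `data` but never frees it. This is fine.
  v8::Local<v8::ArrayBuffer> ab = v8::ArrayBuffer::New(
      isolate, data, len, v8::ArrayBufferCreationMode::kExternalized);
  (void)ab;
  // Wrapping `data` again as kInternalized would flip free_on_destruct and
  // trip check 1; reusing it for a SharedArrayBuffer would trip check 2.
}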
+
v8::ArrayBuffer::Contents::Contents(void* data, size_t byte_length,
void* allocation_base,
size_t allocation_length,
@@ -7249,29 +7311,70 @@ v8::ArrayBuffer::Contents::Contents(void* data, size_t byte_length,
DCHECK_LE(byte_length_, allocation_length_);
}
-void WasmMemoryDeleter(void* buffer, size_t lenght, void* info) {
- internal::wasm::WasmEngine* engine =
- reinterpret_cast<internal::wasm::WasmEngine*>(info);
- CHECK(engine->memory_tracker()->FreeWasmMemory(nullptr, buffer));
+v8::ArrayBuffer::Contents v8::ArrayBuffer::Externalize() {
+ return GetContents(true);
}
-void ArrayBufferDeleter(void* buffer, size_t length, void* info) {
- v8::ArrayBuffer::Allocator* allocator =
- reinterpret_cast<v8::ArrayBuffer::Allocator*>(info);
- allocator->Free(buffer, length);
+void v8::ArrayBuffer::Externalize(
+ const std::shared_ptr<BackingStore>& backing_store) {
+ i::Handle<i::JSArrayBuffer> self = Utils::OpenHandle(this);
+ Utils::ApiCheck(!self->is_external(), "v8_ArrayBuffer_Externalize",
+ "ArrayBuffer already externalized");
+ self->set_is_external(true);
+ DCHECK_EQ(self->backing_store(), backing_store->Data());
}
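// Editorial sketch: with the overload above, externalizing means taking
// shared ownership first and then informing V8 (handle assumed live).
#include <memory>
#include "include/v8.h"

void TakeOwnership(v8::Local<v8::ArrayBuffer> ab) {
  std::shared_ptr<v8::BackingStore> store = ab->GetBackingStore();
  ab->Externalize(store);  // V8 stops freeing; the shared_ptr now decides.
}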
v8::ArrayBuffer::Contents v8::ArrayBuffer::GetContents() {
+ return GetContents(false);
+}
+
+v8::ArrayBuffer::Contents v8::ArrayBuffer::GetContents(bool externalize) {
+ // TODO(titzer): reduce duplication between shared/unshared GetContents()
+ using BufferType = v8::ArrayBuffer;
+
i::Handle<i::JSArrayBuffer> self = Utils::OpenHandle(this);
- Contents contents(
- self->backing_store(), self->byte_length(), self->allocation_base(),
- self->allocation_length(),
- self->is_wasm_memory() ? Allocator::AllocationMode::kReservation
- : Allocator::AllocationMode::kNormal,
- self->is_wasm_memory() ? WasmMemoryDeleter : ArrayBufferDeleter,
- self->is_wasm_memory()
- ? static_cast<void*>(self->GetIsolate()->wasm_engine())
- : static_cast<void*>(self->GetIsolate()->array_buffer_allocator()));
+
+ std::shared_ptr<i::BackingStore> backing_store = self->GetBackingStore();
+
+ void* deleter_data = nullptr;
+ if (externalize) {
+ Utils::ApiCheck(!self->is_external(), "v8_ArrayBuffer_Externalize",
+ "ArrayBuffer already externalized");
+ self->set_is_external(true);
+ // When externalizing, upref the shared pointer to the backing store
+ // and store that as the deleter data. When the embedder calls the deleter
+ // callback, we will delete the additional (on-heap) shared_ptr.
+ deleter_data = MakeDeleterData(backing_store);
+ }
+
+ if (!backing_store) {
+ // If the array buffer has zero length or was detached, return empty
+ // contents.
+ DCHECK_EQ(0, self->byte_length());
+ BufferType::Contents contents(
+ nullptr, 0, nullptr, 0,
+ v8::ArrayBuffer::Allocator::AllocationMode::kNormal,
+ BackingStoreDeleter, deleter_data);
+ return contents;
+ }
+
+ // Backing stores that are given to the embedder might be passed back
+ // through the API using only the start of the buffer. We need to find such
+ // backing stores using global registration until the API is changed.
+ i::GlobalBackingStoreRegistry::Register(backing_store);
+
+ auto allocation_mode =
+ backing_store->is_wasm_memory()
+ ? v8::ArrayBuffer::Allocator::AllocationMode::kReservation
+ : v8::ArrayBuffer::Allocator::AllocationMode::kNormal;
+
+ BufferType::Contents contents(backing_store->buffer_start(), // --
+ backing_store->byte_length(), // --
+ backing_store->buffer_start(), // --
+ backing_store->byte_length(), // --
+ allocation_mode, // --
+ BackingStoreDeleter, // --
+ deleter_data);
return contents;
}
@@ -7296,30 +7399,56 @@ Local<ArrayBuffer> v8::ArrayBuffer::New(Isolate* isolate, size_t byte_length) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
LOG_API(i_isolate, ArrayBuffer, New);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
- i::Handle<i::JSArrayBuffer> obj =
- i_isolate->factory()->NewJSArrayBuffer(i::SharedFlag::kNotShared);
- // TODO(jbroman): It may be useful in the future to provide a MaybeLocal
- // version that throws an exception or otherwise does not crash.
- if (!i::JSArrayBuffer::SetupAllocatingData(obj, i_isolate, byte_length)) {
+ i::MaybeHandle<i::JSArrayBuffer> result =
+ i_isolate->factory()->NewJSArrayBufferAndBackingStore(
+ byte_length, i::InitializedFlag::kZeroInitialized);
+
+ i::Handle<i::JSArrayBuffer> array_buffer;
+ if (!result.ToHandle(&array_buffer)) {
+ // TODO(jbroman): It may be useful in the future to provide a MaybeLocal
+ // version that throws an exception or otherwise does not crash.
i::FatalProcessOutOfMemory(i_isolate, "v8::ArrayBuffer::New");
}
- return Utils::ToLocal(obj);
+
+ return Utils::ToLocal(array_buffer);
}
Local<ArrayBuffer> v8::ArrayBuffer::New(Isolate* isolate, void* data,
size_t byte_length,
ArrayBufferCreationMode mode) {
// Embedders must guarantee that the external backing store is valid.
- CHECK(byte_length == 0 || data != nullptr);
+ CHECK_IMPLIES(byte_length != 0, data != nullptr);
CHECK_LE(byte_length, i::JSArrayBuffer::kMaxByteLength);
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
LOG_API(i_isolate, ArrayBuffer, New);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
+
+ std::shared_ptr<i::BackingStore> backing_store = LookupOrCreateBackingStore(
+ i_isolate, data, byte_length, i::SharedFlag::kNotShared, mode);
+
+ i::Handle<i::JSArrayBuffer> obj =
+ i_isolate->factory()->NewJSArrayBuffer(std::move(backing_store));
+ if (mode == ArrayBufferCreationMode::kExternalized) {
+ obj->set_is_external(true);
+ }
+ return Utils::ToLocal(obj);
+}
+
+Local<ArrayBuffer> v8::ArrayBuffer::New(
+ Isolate* isolate, std::shared_ptr<BackingStore> backing_store) {
+ CHECK_IMPLIES(backing_store->ByteLength() != 0,
+ backing_store->Data() != nullptr);
+ CHECK_LE(backing_store->ByteLength(), i::JSArrayBuffer::kMaxByteLength);
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ LOG_API(i_isolate, ArrayBuffer, New);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
+ std::shared_ptr<i::BackingStore> i_backing_store(
+ ToInternal(std::move(backing_store)));
+ Utils::ApiCheck(
+ !i_backing_store->is_shared(), "v8_ArrayBuffer_New",
+ "Cannot construct ArrayBuffer with a BackingStore of SharedArrayBuffer");
i::Handle<i::JSArrayBuffer> obj =
- i_isolate->factory()->NewJSArrayBuffer(i::SharedFlag::kNotShared);
- i::JSArrayBuffer::Setup(obj, i_isolate,
- mode == ArrayBufferCreationMode::kExternalized, data,
- byte_length);
+ i_isolate->factory()->NewJSArrayBuffer(std::move(i_backing_store));
return Utils::ToLocal(obj);
}
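// Editorial sketch: the overload above makes aliasing two ArrayBuffers over
// one allocation a pure shared_ptr operation.
#include "include/v8.h"

v8::Local<v8::ArrayBuffer> CloneView(v8::Isolate* isolate,
                                     v8::Local<v8::ArrayBuffer> src) {
  // Both buffers now share the memory, which stays alive until the last
  // owner (JS object or embedder copy) is gone.
  return v8::ArrayBuffer::New(isolate, src->GetBackingStore());
}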
@@ -7362,9 +7491,9 @@ size_t v8::ArrayBufferView::CopyContents(void* dest, size_t byte_length) {
bool v8::ArrayBufferView::HasBuffer() const {
i::Handle<i::JSArrayBufferView> self = Utils::OpenHandle(this);
- i::Handle<i::JSArrayBuffer> buffer(i::JSArrayBuffer::cast(self->buffer()),
- self->GetIsolate());
- return buffer->backing_store() != nullptr;
+ if (!self->IsJSTypedArray()) return true;
+ auto typed_array = i::Handle<i::JSTypedArray>::cast(self);
+ return !typed_array->is_on_heap();
}
size_t v8::ArrayBufferView::ByteOffset() {
@@ -7460,13 +7589,16 @@ i::Handle<i::JSArrayBuffer> SetupSharedArrayBuffer(
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
LOG_API(i_isolate, SharedArrayBuffer, New);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
+
+ std::shared_ptr<i::BackingStore> backing_store = LookupOrCreateBackingStore(
+ i_isolate, data, byte_length, i::SharedFlag::kShared, mode);
+
i::Handle<i::JSArrayBuffer> obj =
- i_isolate->factory()->NewJSArrayBuffer(i::SharedFlag::kShared);
- bool is_wasm_memory =
- i_isolate->wasm_engine()->memory_tracker()->IsWasmMemory(data);
- i::JSArrayBuffer::Setup(obj, i_isolate,
- mode == ArrayBufferCreationMode::kExternalized, data,
- byte_length, i::SharedFlag::kShared, is_wasm_memory);
+ i_isolate->factory()->NewJSSharedArrayBuffer(std::move(backing_store));
+
+ if (mode == ArrayBufferCreationMode::kExternalized) {
+ obj->set_is_external(true);
+ }
return obj;
}
@@ -7476,20 +7608,6 @@ bool v8::SharedArrayBuffer::IsExternal() const {
return Utils::OpenHandle(this)->is_external();
}
-v8::SharedArrayBuffer::Contents v8::SharedArrayBuffer::Externalize() {
- i::Handle<i::JSArrayBuffer> self = Utils::OpenHandle(this);
- i::Isolate* isolate = self->GetIsolate();
- Utils::ApiCheck(!self->is_external(), "v8_SharedArrayBuffer_Externalize",
- "SharedArrayBuffer already externalized");
- self->set_is_external(true);
-
- const v8::SharedArrayBuffer::Contents contents = GetContents();
- isolate->heap()->UnregisterArrayBuffer(*self);
-
- // A regular copy is good enough. No move semantics needed.
- return contents;
-}
-
v8::SharedArrayBuffer::Contents::Contents(
void* data, size_t byte_length, void* allocation_base,
size_t allocation_length, Allocator::AllocationMode allocation_mode,
@@ -7505,20 +7623,72 @@ v8::SharedArrayBuffer::Contents::Contents(
DCHECK_LE(byte_length_, allocation_length_);
}
+v8::SharedArrayBuffer::Contents v8::SharedArrayBuffer::Externalize() {
+ return GetContents(true);
+}
+
+void v8::SharedArrayBuffer::Externalize(
+ const std::shared_ptr<BackingStore>& backing_store) {
+ i::Handle<i::JSArrayBuffer> self = Utils::OpenHandle(this);
+ Utils::ApiCheck(!self->is_external(), "v8_SharedArrayBuffer_Externalize",
+ "SharedArrayBuffer already externalized");
+ self->set_is_external(true);
+
+ DCHECK_EQ(self->backing_store(), backing_store->Data());
+}
+
v8::SharedArrayBuffer::Contents v8::SharedArrayBuffer::GetContents() {
+ return GetContents(false);
+}
+
+v8::SharedArrayBuffer::Contents v8::SharedArrayBuffer::GetContents(
+ bool externalize) {
+ // TODO(titzer): reduce duplication between shared/unshared GetContents()
+ using BufferType = v8::SharedArrayBuffer;
+
i::Handle<i::JSArrayBuffer> self = Utils::OpenHandle(this);
- Contents contents(
- self->backing_store(), self->byte_length(), self->allocation_base(),
- self->allocation_length(),
- self->is_wasm_memory()
- ? ArrayBuffer::Allocator::AllocationMode::kReservation
- : ArrayBuffer::Allocator::AllocationMode::kNormal,
- self->is_wasm_memory()
- ? reinterpret_cast<Contents::DeleterCallback>(WasmMemoryDeleter)
- : reinterpret_cast<Contents::DeleterCallback>(ArrayBufferDeleter),
- self->is_wasm_memory()
- ? static_cast<void*>(self->GetIsolate()->wasm_engine())
- : static_cast<void*>(self->GetIsolate()->array_buffer_allocator()));
+
+ std::shared_ptr<i::BackingStore> backing_store = self->GetBackingStore();
+
+ void* deleter_data = nullptr;
+ if (externalize) {
+ Utils::ApiCheck(!self->is_external(), "v8_SharedArrayBuffer_Externalize",
+ "SharedArrayBuffer already externalized");
+ self->set_is_external(true);
+ // When externalizing, upref the shared pointer to the backing store
+ // and store that as the deleter data. When the embedder calls the deleter
+ // callback, we will delete the additional (on-heap) shared_ptr.
+ deleter_data = MakeDeleterData(backing_store);
+ }
+
+ if (!backing_store) {
+ // If the array buffer has zero length or was detached, return empty
+ // contents.
+ DCHECK_EQ(0, self->byte_length());
+ BufferType::Contents contents(
+ nullptr, 0, nullptr, 0,
+ v8::ArrayBuffer::Allocator::AllocationMode::kNormal,
+ BackingStoreDeleter, deleter_data);
+ return contents;
+ }
+
+ // Backing stores that are given to the embedder might be passed back
+ // through the API using only the start of the buffer. We need to find such
+ // backing stores using global registration until the API is changed.
+ i::GlobalBackingStoreRegistry::Register(backing_store);
+
+ auto allocation_mode =
+ backing_store->is_wasm_memory()
+ ? v8::ArrayBuffer::Allocator::AllocationMode::kReservation
+ : v8::ArrayBuffer::Allocator::AllocationMode::kNormal;
+
+ BufferType::Contents contents(backing_store->buffer_start(), // --
+ backing_store->byte_length(), // --
+ backing_store->buffer_start(), // --
+ backing_store->byte_length(), // --
+ allocation_mode, // --
+ BackingStoreDeleter, // --
+ deleter_data);
return contents;
}
@@ -7533,14 +7703,19 @@ Local<SharedArrayBuffer> v8::SharedArrayBuffer::New(Isolate* isolate,
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
LOG_API(i_isolate, SharedArrayBuffer, New);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
- i::Handle<i::JSArrayBuffer> obj =
- i_isolate->factory()->NewJSArrayBuffer(i::SharedFlag::kShared);
- // TODO(jbroman): It may be useful in the future to provide a MaybeLocal
- // version that throws an exception or otherwise does not crash.
- if (!i::JSArrayBuffer::SetupAllocatingData(obj, i_isolate, byte_length, true,
- i::SharedFlag::kShared)) {
+
+ std::unique_ptr<i::BackingStore> backing_store =
+ i::BackingStore::Allocate(i_isolate, byte_length, i::SharedFlag::kShared,
+ i::InitializedFlag::kZeroInitialized);
+
+ if (!backing_store) {
+ // TODO(jbroman): It may be useful in the future to provide a MaybeLocal
+ // version that throws an exception or otherwise does not crash.
i::FatalProcessOutOfMemory(i_isolate, "v8::SharedArrayBuffer::New");
}
+
+ i::Handle<i::JSArrayBuffer> obj =
+ i_isolate->factory()->NewJSSharedArrayBuffer(std::move(backing_store));
return Utils::ToLocalShared(obj);
}
@@ -7553,6 +7728,24 @@ Local<SharedArrayBuffer> v8::SharedArrayBuffer::New(
}
Local<SharedArrayBuffer> v8::SharedArrayBuffer::New(
+ Isolate* isolate, std::shared_ptr<BackingStore> backing_store) {
+ CHECK(i::FLAG_harmony_sharedarraybuffer);
+ CHECK_IMPLIES(backing_store->ByteLength() != 0,
+ backing_store->Data() != nullptr);
+ CHECK_LE(backing_store->ByteLength(), i::JSArrayBuffer::kMaxByteLength);
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ LOG_API(i_isolate, SharedArrayBuffer, New);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
+ std::shared_ptr<i::BackingStore> i_backing_store(ToInternal(backing_store));
+ Utils::ApiCheck(
+ i_backing_store->is_shared(), "v8_SharedArrayBuffer_New",
+ "Cannot construct SharedArrayBuffer with BackingStore of ArrayBuffer");
+ i::Handle<i::JSArrayBuffer> obj =
+ i_isolate->factory()->NewJSSharedArrayBuffer(std::move(i_backing_store));
+ return Utils::ToLocalShared(obj);
+}
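// Editorial sketch: the overload above is the building block for
// postMessage-style sharing of memory across isolates (names illustrative).
#include <memory>
#include "include/v8.h"

v8::Local<v8::SharedArrayBuffer> ImportShared(
    v8::Isolate* receiver, std::shared_ptr<v8::BackingStore> store) {
  // `store` must originate from a SharedArrayBuffer; the ApiCheck above
  // rejects backing stores of ordinary ArrayBuffers.
  return v8::SharedArrayBuffer::New(receiver, std::move(store));
}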
+
+Local<SharedArrayBuffer> v8::SharedArrayBuffer::New(
Isolate* isolate, const SharedArrayBuffer::Contents& contents,
ArrayBufferCreationMode mode) {
i::Handle<i::JSArrayBuffer> buffer = SetupSharedArrayBuffer(
@@ -8235,6 +8428,15 @@ bool Isolate::GetHeapCodeAndMetadataStatistics(
return true;
}
+v8::MaybeLocal<v8::Promise> Isolate::MeasureMemory(
+ v8::Local<v8::Context> context, MeasureMemoryMode mode) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ i::Handle<i::NativeContext> native_context =
+ handle(Utils::OpenHandle(*context)->native_context(), isolate);
+ return v8::Utils::PromiseToLocal(
+ isolate->heap()->MeasureMemory(native_context, mode));
+}
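// Editorial sketch of the new API above (context assumed entered);
// MeasureMemoryMode::kSummary requests the cheap, aggregated estimate.
#include "include/v8.h"

void RequestEstimate(v8::Isolate* isolate, v8::Local<v8::Context> context) {
  v8::Local<v8::Promise> promise;
  if (isolate->MeasureMemory(context, v8::MeasureMemoryMode::kSummary)
          .ToLocal(&promise)) {
    // The promise resolves asynchronously with the estimate; chain a handler
    // via promise->Then(context, ...).
  }
}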
+
void Isolate::GetStackSample(const RegisterState& state, void** frames,
size_t frames_limit, SampleInfo* sample_info) {
RegisterState regs = state;
@@ -9062,9 +9264,9 @@ bool debug::Script::GetPossibleBreakpoints(
i::Handle<i::Script> script = Utils::OpenHandle(this);
if (script->type() == i::Script::TYPE_WASM &&
this->SourceMappingURL().IsEmpty()) {
- i::WasmModuleObject module_object =
- i::WasmModuleObject::cast(script->wasm_module_object());
- return module_object.GetPossibleBreakpoints(start, end, locations);
+ i::wasm::NativeModule* native_module = script->wasm_native_module();
+ return i::WasmModuleObject::GetPossibleBreakpoints(native_module, start,
+ end, locations);
}
i::Script::InitLineEnds(script);
@@ -9113,8 +9315,9 @@ int debug::Script::GetSourceOffset(const debug::Location& location) const {
i::Handle<i::Script> script = Utils::OpenHandle(this);
if (script->type() == i::Script::TYPE_WASM) {
if (this->SourceMappingURL().IsEmpty()) {
- return i::WasmModuleObject::cast(script->wasm_module_object())
- .GetFunctionOffset(location.GetLineNumber()) +
+ i::wasm::NativeModule* native_module = script->wasm_native_module();
+ const i::wasm::WasmModule* module = native_module->module();
+ return i::wasm::GetWasmFunctionOffset(module, location.GetLineNumber()) +
location.GetColumnNumber();
}
DCHECK_EQ(0, location.GetLineNumber());
@@ -9202,9 +9405,8 @@ int debug::WasmScript::NumFunctions() const {
i::DisallowHeapAllocation no_gc;
i::Handle<i::Script> script = Utils::OpenHandle(this);
DCHECK_EQ(i::Script::TYPE_WASM, script->type());
- i::WasmModuleObject module_object =
- i::WasmModuleObject::cast(script->wasm_module_object());
- const i::wasm::WasmModule* module = module_object.module();
+ i::wasm::NativeModule* native_module = script->wasm_native_module();
+ const i::wasm::WasmModule* module = native_module->module();
DCHECK_GE(i::kMaxInt, module->functions.size());
return static_cast<int>(module->functions.size());
}
@@ -9213,21 +9415,26 @@ int debug::WasmScript::NumImportedFunctions() const {
i::DisallowHeapAllocation no_gc;
i::Handle<i::Script> script = Utils::OpenHandle(this);
DCHECK_EQ(i::Script::TYPE_WASM, script->type());
- i::WasmModuleObject module_object =
- i::WasmModuleObject::cast(script->wasm_module_object());
- const i::wasm::WasmModule* module = module_object.module();
+ i::wasm::NativeModule* native_module = script->wasm_native_module();
+ const i::wasm::WasmModule* module = native_module->module();
DCHECK_GE(i::kMaxInt, module->num_imported_functions);
return static_cast<int>(module->num_imported_functions);
}
+MemorySpan<const uint8_t> debug::WasmScript::Bytecode() const {
+ i::Handle<i::Script> script = Utils::OpenHandle(this);
+ i::Vector<const uint8_t> wire_bytes =
+ script->wasm_native_module()->wire_bytes();
+ return {wire_bytes.begin(), wire_bytes.size()};
+}
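// Editorial sketch: with the accessor above, debugger clients can read the
// raw wire bytes directly from the script.
#include <cstdio>
#include "src/debug/debug-interface.h"

void DumpWasmSize(v8::debug::WasmScript* script) {
  v8::MemorySpan<const uint8_t> bytes = script->Bytecode();
  std::printf("wasm module: %zu bytes\n", bytes.size());
}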
+
std::pair<int, int> debug::WasmScript::GetFunctionRange(
int function_index) const {
i::DisallowHeapAllocation no_gc;
i::Handle<i::Script> script = Utils::OpenHandle(this);
DCHECK_EQ(i::Script::TYPE_WASM, script->type());
- i::WasmModuleObject module_object =
- i::WasmModuleObject::cast(script->wasm_module_object());
- const i::wasm::WasmModule* module = module_object.module();
+ i::wasm::NativeModule* native_module = script->wasm_native_module();
+ const i::wasm::WasmModule* module = native_module->module();
DCHECK_LE(0, function_index);
DCHECK_GT(module->functions.size(), function_index);
const i::wasm::WasmFunction& func = module->functions[function_index];
@@ -9241,14 +9448,12 @@ uint32_t debug::WasmScript::GetFunctionHash(int function_index) {
i::DisallowHeapAllocation no_gc;
i::Handle<i::Script> script = Utils::OpenHandle(this);
DCHECK_EQ(i::Script::TYPE_WASM, script->type());
- i::WasmModuleObject module_object =
- i::WasmModuleObject::cast(script->wasm_module_object());
- const i::wasm::WasmModule* module = module_object.module();
+ i::wasm::NativeModule* native_module = script->wasm_native_module();
+ const i::wasm::WasmModule* module = native_module->module();
DCHECK_LE(0, function_index);
DCHECK_GT(module->functions.size(), function_index);
const i::wasm::WasmFunction& func = module->functions[function_index];
- i::wasm::ModuleWireBytes wire_bytes(
- module_object.native_module()->wire_bytes());
+ i::wasm::ModuleWireBytes wire_bytes(native_module->wire_bytes());
i::Vector<const i::byte> function_bytes = wire_bytes.GetFunctionBytes(&func);
// TODO(herhut): Maybe also take module, name and signature into account.
return i::StringHasher::HashSequentialString(function_bytes.begin(),
@@ -9260,9 +9465,10 @@ debug::WasmDisassembly debug::WasmScript::DisassembleFunction(
i::DisallowHeapAllocation no_gc;
i::Handle<i::Script> script = Utils::OpenHandle(this);
DCHECK_EQ(i::Script::TYPE_WASM, script->type());
- i::WasmModuleObject module_object =
- i::WasmModuleObject::cast(script->wasm_module_object());
- return module_object.DisassembleFunction(function_index);
+ i::wasm::NativeModule* native_module = script->wasm_native_module();
+ const i::wasm::WasmModule* module = native_module->module();
+ i::wasm::ModuleWireBytes wire_bytes(native_module->wire_bytes());
+ return DisassembleWasmFunction(module, wire_bytes, function_index);
}
debug::Location::Location(int line_number, int column_number)
@@ -9438,7 +9644,7 @@ debug::ConsoleCallArguments::ConsoleCallArguments(
}
debug::ConsoleCallArguments::ConsoleCallArguments(
- internal::BuiltinArguments& args)
+ const internal::BuiltinArguments& args)
: v8::FunctionCallbackInfo<v8::Value>(
nullptr,
// Drop the first argument (receiver, i.e. the "console" object).
@@ -9501,14 +9707,14 @@ v8::Local<debug::GeneratorObject> debug::GeneratorObject::Cast(
MaybeLocal<v8::Value> debug::EvaluateGlobal(v8::Isolate* isolate,
v8::Local<v8::String> source,
- bool throw_on_side_effect) {
+ EvaluateGlobalMode mode) {
i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
PREPARE_FOR_DEBUG_INTERFACE_EXECUTION_WITH_ISOLATE(internal_isolate, Value);
Local<Value> result;
- has_pending_exception = !ToLocal<Value>(
- i::DebugEvaluate::Global(internal_isolate, Utils::OpenHandle(*source),
- throw_on_side_effect),
- &result);
+ has_pending_exception =
+ !ToLocal<Value>(i::DebugEvaluate::Global(
+ internal_isolate, Utils::OpenHandle(*source), mode),
+ &result);
RETURN_ON_FAILED_EXECUTION(Value);
RETURN_ESCAPED(result);
}
@@ -9933,10 +10139,6 @@ void CpuProfiler::SetUsePreciseSampling(bool use_precise_sampling) {
use_precise_sampling);
}
-void CpuProfiler::CollectSample() {
- reinterpret_cast<i::CpuProfiler*>(this)->CollectSample();
-}
-
void CpuProfiler::StartProfiling(Local<String> title,
CpuProfilingOptions options) {
reinterpret_cast<i::CpuProfiler*>(this)->StartProfiling(
@@ -9964,12 +10166,6 @@ CpuProfile* CpuProfiler::StopProfiling(Local<String> title) {
*Utils::OpenHandle(*title)));
}
-void CpuProfiler::SetIdle(bool is_idle) {
- i::CpuProfiler* profiler = reinterpret_cast<i::CpuProfiler*>(this);
- i::Isolate* isolate = profiler->isolate();
- isolate->SetIdle(is_idle);
-}
-
void CpuProfiler::UseDetailedSourcePositionsForProfiling(Isolate* isolate) {
reinterpret_cast<i::Isolate*>(isolate)
->set_detailed_source_positions_for_profiling(true);
@@ -10009,6 +10205,10 @@ const char* CodeEvent::GetComment() {
return reinterpret_cast<i::CodeEvent*>(this)->comment;
}
+uintptr_t CodeEvent::GetPreviousCodeStartAddress() {
+ return reinterpret_cast<i::CodeEvent*>(this)->previous_code_start_address;
+}
+
const char* CodeEvent::GetCodeEventTypeName(CodeEventType code_event_type) {
switch (code_event_type) {
case kUnknownType:
@@ -10303,17 +10503,6 @@ void Testing::DeoptimizeAll(Isolate* isolate) {
i::Deoptimizer::DeoptimizeAll(i_isolate);
}
-void EmbedderHeapTracer::TracePrologue(TraceFlags flags) {
-#if __clang__
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wdeprecated"
-#endif
- TracePrologue();
-#if __clang__
-#pragma clang diagnostic pop
-#endif
-}
-
void EmbedderHeapTracer::TraceEpilogue(TraceSummary* trace_summary) {
#if __clang__
#pragma clang diagnostic push
@@ -10369,11 +10558,21 @@ void EmbedderHeapTracer::DecreaseAllocatedSize(size_t bytes) {
}
void EmbedderHeapTracer::RegisterEmbedderReference(
- const TracedGlobal<v8::Value>& ref) {
+ const TracedReferenceBase<v8::Data>& ref) {
if (ref.IsEmpty()) return;
i::Heap* const heap = reinterpret_cast<i::Isolate*>(isolate_)->heap();
- heap->RegisterExternallyReferencedObject(reinterpret_cast<i::Address*>(*ref));
+ heap->RegisterExternallyReferencedObject(
+ reinterpret_cast<i::Address*>(ref.val_));
+}
+
+void EmbedderHeapTracer::RegisterEmbedderReference(
+ const TracedReferenceBase<v8::Value>& ref) {
+ if (ref.IsEmpty()) return;
+
+ i::Heap* const heap = reinterpret_cast<i::Isolate*>(isolate_)->heap();
+ heap->RegisterExternallyReferencedObject(
+ reinterpret_cast<i::Address*>(ref.val_));
}
void EmbedderHeapTracer::IterateTracedGlobalHandles(
@@ -10383,6 +10582,26 @@ void EmbedderHeapTracer::IterateTracedGlobalHandles(
isolate->global_handles()->IterateTracedNodes(visitor);
}
+bool EmbedderHeapTracer::IsRootForNonTracingGC(
+ const v8::TracedReference<v8::Value>& handle) {
+ return true;
+}
+
+bool EmbedderHeapTracer::IsRootForNonTracingGC(
+ const v8::TracedGlobal<v8::Value>& handle) {
+ return true;
+}
+
+void EmbedderHeapTracer::ResetHandleInNonTracingGC(
+ const v8::TracedReference<v8::Value>& handle) {
+ UNREACHABLE();
+}
+
+void EmbedderHeapTracer::ResetHandleInNonTracingGC(
+ const v8::TracedGlobal<v8::Value>& handle) {
+ UNREACHABLE();
+}
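// Editorial sketch: the defaults above are conservative (every traced handle
// is a root; the reset hooks are unreachable). A tracer that lets unmodified
// wrappers die in young-generation GCs would override them; hypothetical
// subclass, remaining pure-virtual tracing hooks omitted.
#include "include/v8.h"

class DroppableWrapperTracer : public v8::EmbedderHeapTracer {
 public:
  bool IsRootForNonTracingGC(
      const v8::TracedReference<v8::Value>& handle) override {
    return false;  // handle may be reclaimed by a non-tracing (scavenge) GC
  }
  void ResetHandleInNonTracingGC(
      const v8::TracedReference<v8::Value>& handle) override {
    // Clear the embedder-side object that owns `handle` here.
  }
};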
+
namespace internal {
const size_t HandleScopeImplementer::kEnteredContextsOffset =
@@ -10473,9 +10692,10 @@ char* HandleScopeImplementer::Iterate(RootVisitor* v, char* storage) {
return storage + ArchiveSpacePerThread();
}
-DeferredHandles* HandleScopeImplementer::Detach(Address* prev_limit) {
- DeferredHandles* deferred =
- new DeferredHandles(isolate()->handle_scope_data()->next, isolate());
+std::unique_ptr<DeferredHandles> HandleScopeImplementer::Detach(
+ Address* prev_limit) {
+ std::unique_ptr<DeferredHandles> deferred(
+ new DeferredHandles(isolate()->handle_scope_data()->next, isolate()));
while (!blocks_.empty()) {
Address* block_start = blocks_.back();
@@ -10584,3 +10804,5 @@ void InvokeFunctionCallback(const v8::FunctionCallbackInfo<v8::Value>& info,
} // namespace internal
} // namespace v8
+
+#undef TRACE_BS
diff --git a/deps/v8/src/api/api.h b/deps/v8/src/api/api.h
index 21bbb3a101..a518670c79 100644
--- a/deps/v8/src/api/api.h
+++ b/deps/v8/src/api/api.h
@@ -5,6 +5,8 @@
#ifndef V8_API_API_H_
#define V8_API_API_H_
+#include <memory>
+
#include "include/v8-testing.h"
#include "src/execution/isolate.h"
#include "src/heap/factory.h"
@@ -274,6 +276,11 @@ class Utils {
return CompiledWasmModule{std::move(native_module)};
}
+ static inline const std::shared_ptr<i::wasm::NativeModule>& Open(
+ const CompiledWasmModule& compiled_module) {
+ return compiled_module.native_module_;
+ }
+
private:
static void ReportApiFailure(const char* location, const char* message);
};
@@ -431,7 +438,7 @@ class HandleScopeImplementer {
}
void BeginDeferredScope();
- DeferredHandles* Detach(Address* prev_limit);
+ std::unique_ptr<DeferredHandles> Detach(Address* prev_limit);
Isolate* isolate_;
DetachableVector<Address*> blocks_;
diff --git a/deps/v8/src/asmjs/OWNERS b/deps/v8/src/asmjs/OWNERS
index 08f39f8d6a..072ba58211 100644
--- a/deps/v8/src/asmjs/OWNERS
+++ b/deps/v8/src/asmjs/OWNERS
@@ -1,5 +1,5 @@
ahaas@chromium.org
-clemensh@chromium.org
+clemensb@chromium.org
mstarzinger@chromium.org
titzer@chromium.org
diff --git a/deps/v8/src/asmjs/asm-js.cc b/deps/v8/src/asmjs/asm-js.cc
index 7433b6a12c..22714ac16b 100644
--- a/deps/v8/src/asmjs/asm-js.cc
+++ b/deps/v8/src/asmjs/asm-js.cc
@@ -322,7 +322,7 @@ void AsmJsCompilationJob::RecordHistograms(Isolate* isolate) {
std::unique_ptr<UnoptimizedCompilationJob> AsmJs::NewCompilationJob(
ParseInfo* parse_info, FunctionLiteral* literal,
AccountingAllocator* allocator) {
- return base::make_unique<AsmJsCompilationJob>(parse_info, literal, allocator);
+ return std::make_unique<AsmJsCompilationJob>(parse_info, literal, allocator);
}
namespace {
@@ -387,7 +387,18 @@ MaybeHandle<Object> AsmJs::InstantiateAsmWasm(Isolate* isolate,
ReportInstantiationFailure(script, position, "Requires heap buffer");
return MaybeHandle<Object>();
}
- wasm_engine->memory_tracker()->MarkWasmMemoryNotGrowable(memory);
+ // AsmJs memory must be an ArrayBuffer.
+ if (memory->is_shared()) {
+ ReportInstantiationFailure(script, position,
+ "Invalid heap type: SharedArrayBuffer");
+ return MaybeHandle<Object>();
+ }
+ // Mark the buffer as being used as an asm.js memory. This implies two
+ // things: 1) if the buffer is from a Wasm memory, that memory can no longer
+ // be grown, since that would detach this buffer, and 2) the buffer cannot
+ // be postMessage()'d, as that also detaches the buffer.
+ memory->set_is_asmjs_memory(true);
+ memory->set_is_detachable(false);
size_t size = memory->byte_length();
// Check the asm.js heap size against the valid limits.
if (!IsValidAsmjsMemorySize(size)) {
diff --git a/deps/v8/src/asmjs/asm-js.h b/deps/v8/src/asmjs/asm-js.h
index 3e714cba7a..80a75d0372 100644
--- a/deps/v8/src/asmjs/asm-js.h
+++ b/deps/v8/src/asmjs/asm-js.h
@@ -7,6 +7,8 @@
// Clients of this interface shouldn't depend on lots of asmjs internals.
// Do not include anything from src/asmjs here!
+#include <memory>
+
#include "src/common/globals.h"
namespace v8 {
diff --git a/deps/v8/src/asmjs/asm-parser.cc b/deps/v8/src/asmjs/asm-parser.cc
index 6ac39dc89c..3387239926 100644
--- a/deps/v8/src/asmjs/asm-parser.cc
+++ b/deps/v8/src/asmjs/asm-parser.cc
@@ -387,8 +387,8 @@ void AsmJsParser::ValidateModule() {
uint32_t import_index = module_builder_->AddGlobalImport(
global_import.import_name, global_import.value_type,
false /* mutability */);
- start->EmitWithI32V(kExprGetGlobal, import_index);
- start->EmitWithI32V(kExprSetGlobal, VarIndex(global_import.var_info));
+ start->EmitWithI32V(kExprGlobalGet, import_index);
+ start->EmitWithI32V(kExprGlobalSet, VarIndex(global_import.var_info));
}
start->Emit(kExprEnd);
FunctionSig::Builder b(zone(), 0, 0);
@@ -952,8 +952,8 @@ void AsmJsParser::ValidateFunctionLocals(size_t param_count,
} else {
FAIL("Bad local variable definition");
}
- current_function_builder_->EmitWithI32V(kExprGetGlobal,
- VarIndex(sinfo));
+ current_function_builder_->EmitWithI32V(kExprGlobalGet,
+ VarIndex(sinfo));
current_function_builder_->EmitSetLocal(info->index);
} else if (sinfo->type->IsA(stdlib_fround_)) {
EXPECT_TOKEN('(');
@@ -1447,7 +1447,7 @@ AsmType* AsmJsParser::Identifier() {
if (info->kind != VarKind::kGlobal) {
FAILn("Undefined global variable");
}
- current_function_builder_->EmitWithI32V(kExprGetGlobal, VarIndex(info));
+ current_function_builder_->EmitWithI32V(kExprGlobalGet, VarIndex(info));
return info->type;
}
UNREACHABLE();
@@ -1558,8 +1558,8 @@ AsmType* AsmJsParser::AssignmentExpression() {
if (info->kind == VarKind::kLocal) {
current_function_builder_->EmitTeeLocal(info->index);
} else if (info->kind == VarKind::kGlobal) {
- current_function_builder_->EmitWithU32V(kExprSetGlobal, VarIndex(info));
- current_function_builder_->EmitWithU32V(kExprGetGlobal, VarIndex(info));
+ current_function_builder_->EmitWithU32V(kExprGlobalSet, VarIndex(info));
+ current_function_builder_->EmitWithU32V(kExprGlobalGet, VarIndex(info));
} else {
UNREACHABLE();
}
@@ -2489,7 +2489,7 @@ void AsmJsParser::ValidateFloatCoercion() {
// because imported functions are not allowed to have float return type.
call_coercion_position_ = scanner_.Position();
AsmType* ret;
- RECURSE(ret = ValidateExpression());
+ RECURSE(ret = AssignmentExpression());
if (ret->IsA(AsmType::Floatish())) {
// Do nothing, as already a float.
} else if (ret->IsA(AsmType::DoubleQ())) {
diff --git a/deps/v8/src/ast/ast-function-literal-id-reindexer.cc b/deps/v8/src/ast/ast-function-literal-id-reindexer.cc
index 95bd94d8d4..b583b5e421 100644
--- a/deps/v8/src/ast/ast-function-literal-id-reindexer.cc
+++ b/deps/v8/src/ast/ast-function-literal-id-reindexer.cc
@@ -46,15 +46,28 @@ void AstFunctionLiteralIdReindexer::VisitClassLiteral(ClassLiteral* expr) {
if (expr->instance_members_initializer_function() != nullptr) {
Visit(expr->instance_members_initializer_function());
}
- ZonePtrList<ClassLiteral::Property>* props = expr->properties();
+ ZonePtrList<ClassLiteral::Property>* private_members =
+ expr->private_members();
+ for (int i = 0; i < private_members->length(); ++i) {
+ ClassLiteralProperty* prop = private_members->at(i);
+
+ // Private fields have their key and value present in
+ // instance_members_initializer_function, so they will
+ // already have been visited.
+ if (prop->value()->IsFunctionLiteral()) {
+ Visit(prop->value());
+ } else {
+ CheckVisited(prop->value());
+ }
+ }
+ ZonePtrList<ClassLiteral::Property>* props = expr->public_members();
for (int i = 0; i < props->length(); ++i) {
ClassLiteralProperty* prop = props->at(i);
- // Private fields and public fields with computed names have both their key
+ // Public fields with computed names have their key
// and value present in instance_members_initializer_function, so they will
// already have been visited.
- if ((prop->is_computed_name() || prop->is_private()) &&
- !prop->value()->IsFunctionLiteral()) {
+ if (prop->is_computed_name() && !prop->value()->IsFunctionLiteral()) {
if (!prop->key()->IsLiteral()) {
CheckVisited(prop->key());
}
diff --git a/deps/v8/src/ast/ast-traversal-visitor.h b/deps/v8/src/ast/ast-traversal-visitor.h
index 2796e59a8d..a52c5f946e 100644
--- a/deps/v8/src/ast/ast-traversal-visitor.h
+++ b/deps/v8/src/ast/ast-traversal-visitor.h
@@ -490,7 +490,13 @@ void AstTraversalVisitor<Subclass>::VisitClassLiteral(ClassLiteral* expr) {
if (expr->instance_members_initializer_function() != nullptr) {
RECURSE_EXPRESSION(Visit(expr->instance_members_initializer_function()));
}
- ZonePtrList<ClassLiteral::Property>* props = expr->properties();
+ ZonePtrList<ClassLiteral::Property>* private_members =
+ expr->private_members();
+ for (int i = 0; i < private_members->length(); ++i) {
+ ClassLiteralProperty* prop = private_members->at(i);
+ RECURSE_EXPRESSION(Visit(prop->value()));
+ }
+ ZonePtrList<ClassLiteral::Property>* props = expr->public_members();
for (int i = 0; i < props->length(); ++i) {
ClassLiteralProperty* prop = props->at(i);
if (!prop->key()->IsLiteral()) {
diff --git a/deps/v8/src/ast/ast.cc b/deps/v8/src/ast/ast.cc
index 4b6c4805de..130d34dffa 100644
--- a/deps/v8/src/ast/ast.cc
+++ b/deps/v8/src/ast/ast.cc
@@ -293,6 +293,10 @@ bool FunctionLiteral::requires_brand_initialization() const {
return outer->AsClassScope()->brand() != nullptr;
}
+bool FunctionLiteral::private_name_lookup_skips_outer_class() const {
+ return scope()->private_name_lookup_skips_outer_class();
+}
+
ObjectLiteralProperty::ObjectLiteralProperty(Expression* key, Expression* value,
Kind kind, bool is_computed_name)
: LiteralProperty(key, value, is_computed_name),
@@ -886,7 +890,7 @@ Handle<Object> Literal::BuildValue(Isolate* isolate) const {
case kSmi:
return handle(Smi::FromInt(smi_), isolate);
case kHeapNumber:
- return isolate->factory()->NewNumber(number_, AllocationType::kOld);
+ return isolate->factory()->NewNumber<AllocationType::kOld>(number_);
case kString:
return string_->string();
case kSymbol:
diff --git a/deps/v8/src/ast/ast.h b/deps/v8/src/ast/ast.h
index ced9f775dd..d706dbc37f 100644
--- a/deps/v8/src/ast/ast.h
+++ b/deps/v8/src/ast/ast.h
@@ -724,11 +724,14 @@ class BreakStatement final : public JumpStatement {
class ReturnStatement final : public JumpStatement {
public:
- enum Type { kNormal, kAsyncReturn };
+ enum Type { kNormal, kAsyncReturn, kSyntheticAsyncReturn };
Expression* expression() const { return expression_; }
Type type() const { return TypeField::decode(bit_field_); }
- bool is_async_return() const { return type() == kAsyncReturn; }
+ bool is_async_return() const { return type() != kNormal; }
+ bool is_synthetic_async_return() const {
+ return type() == kSyntheticAsyncReturn;
+ }
int end_position() const { return end_position_; }
@@ -745,7 +748,7 @@ class ReturnStatement final : public JumpStatement {
Expression* expression_;
int end_position_;
- using TypeField = JumpStatement::NextBitField<Type, 1>;
+ using TypeField = JumpStatement::NextBitField<Type, 2>;
};
@@ -917,6 +920,10 @@ class TryCatchStatement final : public TryStatement {
outer_catch_prediction != HandlerTable::UNCAUGHT;
}
+ bool is_try_catch_for_async() {
+ return catch_prediction_ == HandlerTable::ASYNC_AWAIT;
+ }
+
private:
friend class AstNodeFactory;
@@ -2343,6 +2350,8 @@ class FunctionLiteral final : public Expression {
bool requires_brand_initialization() const;
+ bool private_name_lookup_skips_outer_class() const;
+
ProducedPreparseData* produced_preparse_data() const {
return produced_preparse_data_;
}
@@ -2481,10 +2490,10 @@ class ClassLiteral final : public Expression {
using Property = ClassLiteralProperty;
ClassScope* scope() const { return scope_; }
- Variable* class_variable() const { return class_variable_; }
Expression* extends() const { return extends_; }
FunctionLiteral* constructor() const { return constructor_; }
- ZonePtrList<Property>* properties() const { return properties_; }
+ ZonePtrList<Property>* public_members() const { return public_members_; }
+ ZonePtrList<Property>* private_members() const { return private_members_; }
int start_position() const { return position(); }
int end_position() const { return end_position_; }
bool has_name_static_property() const {
@@ -2497,6 +2506,9 @@ class ClassLiteral final : public Expression {
bool is_anonymous_expression() const {
return IsAnonymousExpression::decode(bit_field_);
}
+ bool has_private_methods() const {
+ return HasPrivateMethods::decode(bit_field_);
+ }
bool IsAnonymousFunctionDefinition() const {
return is_anonymous_expression();
}
@@ -2512,39 +2524,43 @@ class ClassLiteral final : public Expression {
private:
friend class AstNodeFactory;
- ClassLiteral(ClassScope* scope, Variable* class_variable, Expression* extends,
- FunctionLiteral* constructor, ZonePtrList<Property>* properties,
+ ClassLiteral(ClassScope* scope, Expression* extends,
+ FunctionLiteral* constructor,
+ ZonePtrList<Property>* public_members,
+ ZonePtrList<Property>* private_members,
FunctionLiteral* static_fields_initializer,
FunctionLiteral* instance_members_initializer_function,
int start_position, int end_position,
bool has_name_static_property, bool has_static_computed_names,
- bool is_anonymous)
+ bool is_anonymous, bool has_private_methods)
: Expression(start_position, kClassLiteral),
end_position_(end_position),
scope_(scope),
- class_variable_(class_variable),
extends_(extends),
constructor_(constructor),
- properties_(properties),
+ public_members_(public_members),
+ private_members_(private_members),
static_fields_initializer_(static_fields_initializer),
instance_members_initializer_function_(
instance_members_initializer_function) {
bit_field_ |= HasNameStaticProperty::encode(has_name_static_property) |
HasStaticComputedNames::encode(has_static_computed_names) |
- IsAnonymousExpression::encode(is_anonymous);
+ IsAnonymousExpression::encode(is_anonymous) |
+ HasPrivateMethods::encode(has_private_methods);
}
int end_position_;
ClassScope* scope_;
- Variable* class_variable_;
Expression* extends_;
FunctionLiteral* constructor_;
- ZonePtrList<Property>* properties_;
+ ZonePtrList<Property>* public_members_;
+ ZonePtrList<Property>* private_members_;
FunctionLiteral* static_fields_initializer_;
FunctionLiteral* instance_members_initializer_function_;
using HasNameStaticProperty = Expression::NextBitField<bool, 1>;
using HasStaticComputedNames = HasNameStaticProperty::Next<bool, 1>;
using IsAnonymousExpression = HasStaticComputedNames::Next<bool, 1>;
+ using HasPrivateMethods = IsAnonymousExpression::Next<bool, 1>;
};
@@ -2885,6 +2901,12 @@ class AstNodeFactory final {
expression, ReturnStatement::kAsyncReturn, pos, end_position);
}
+ ReturnStatement* NewSyntheticAsyncReturnStatement(
+ Expression* expression, int pos, int end_position = kNoSourcePosition) {
+ return new (zone_) ReturnStatement(
+ expression, ReturnStatement::kSyntheticAsyncReturn, pos, end_position);
+ }
+
WithStatement* NewWithStatement(Scope* scope,
Expression* expression,
Statement* statement,
@@ -3244,18 +3266,19 @@ class AstNodeFactory final {
}
ClassLiteral* NewClassLiteral(
- ClassScope* scope, Variable* variable, Expression* extends,
- FunctionLiteral* constructor,
- ZonePtrList<ClassLiteral::Property>* properties,
+ ClassScope* scope, Expression* extends, FunctionLiteral* constructor,
+ ZonePtrList<ClassLiteral::Property>* public_members,
+ ZonePtrList<ClassLiteral::Property>* private_members,
FunctionLiteral* static_fields_initializer,
FunctionLiteral* instance_members_initializer_function,
int start_position, int end_position, bool has_name_static_property,
- bool has_static_computed_names, bool is_anonymous) {
+ bool has_static_computed_names, bool is_anonymous,
+ bool has_private_methods) {
return new (zone_) ClassLiteral(
- scope, variable, extends, constructor, properties,
+ scope, extends, constructor, public_members, private_members,
static_fields_initializer, instance_members_initializer_function,
start_position, end_position, has_name_static_property,
- has_static_computed_names, is_anonymous);
+ has_static_computed_names, is_anonymous, has_private_methods);
}
NativeFunctionLiteral* NewNativeFunctionLiteral(const AstRawString* name,
diff --git a/deps/v8/src/ast/modules.cc b/deps/v8/src/ast/modules.cc
index dbd20f50a8..9c122fca86 100644
--- a/deps/v8/src/ast/modules.cc
+++ b/deps/v8/src/ast/modules.cc
@@ -84,11 +84,11 @@ void SourceTextModuleDescriptor::AddStarExport(
}
namespace {
-Handle<HeapObject> ToStringOrUndefined(Isolate* isolate,
- const AstRawString* s) {
- return (s == nullptr)
- ? Handle<HeapObject>::cast(isolate->factory()->undefined_value())
- : Handle<HeapObject>::cast(s->string());
+Handle<PrimitiveHeapObject> ToStringOrUndefined(Isolate* isolate,
+ const AstRawString* s) {
+ return (s == nullptr) ? Handle<PrimitiveHeapObject>::cast(
+ isolate->factory()->undefined_value())
+ : Handle<PrimitiveHeapObject>::cast(s->string());
}
} // namespace
diff --git a/deps/v8/src/ast/prettyprinter.cc b/deps/v8/src/ast/prettyprinter.cc
index 581517ee4e..5bf9362fb8 100644
--- a/deps/v8/src/ast/prettyprinter.cc
+++ b/deps/v8/src/ast/prettyprinter.cc
@@ -217,8 +217,11 @@ void CallPrinter::VisitFunctionLiteral(FunctionLiteral* node) {
void CallPrinter::VisitClassLiteral(ClassLiteral* node) {
if (node->extends()) Find(node->extends());
- for (int i = 0; i < node->properties()->length(); i++) {
- Find(node->properties()->at(i)->value());
+ for (int i = 0; i < node->public_members()->length(); i++) {
+ Find(node->public_members()->at(i)->value());
+ }
+ for (int i = 0; i < node->private_members()->length(); i++) {
+ Find(node->private_members()->at(i)->value());
}
}
@@ -1106,7 +1109,8 @@ void AstPrinter::VisitClassLiteral(ClassLiteral* node) {
PrintIndentedVisit("INSTANCE MEMBERS INITIALIZER",
node->instance_members_initializer_function());
}
- PrintClassProperties(node->properties());
+ PrintClassProperties(node->private_members());
+ PrintClassProperties(node->public_members());
}
void AstPrinter::VisitInitializeClassMembersStatement(
diff --git a/deps/v8/src/ast/prettyprinter.h b/deps/v8/src/ast/prettyprinter.h
index 322fd9fb14..795436d422 100644
--- a/deps/v8/src/ast/prettyprinter.h
+++ b/deps/v8/src/ast/prettyprinter.h
@@ -5,10 +5,12 @@
#ifndef V8_AST_PRETTYPRINTER_H_
#define V8_AST_PRETTYPRINTER_H_
+#include <memory>
+
#include "src/ast/ast.h"
#include "src/base/compiler-specific.h"
-#include "src/utils/allocation.h"
#include "src/objects/function-kind.h"
+#include "src/utils/allocation.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/ast/scopes.cc b/deps/v8/src/ast/scopes.cc
index c8002dd088..3e1f8f53ae 100644
--- a/deps/v8/src/ast/scopes.cc
+++ b/deps/v8/src/ast/scopes.cc
@@ -40,7 +40,7 @@ Variable* VariableMap::Declare(Zone* zone, Scope* scope,
VariableKind kind,
InitializationFlag initialization_flag,
MaybeAssignedFlag maybe_assigned_flag,
- bool* was_added) {
+ IsStaticFlag is_static_flag, bool* was_added) {
// AstRawStrings are unambiguous, i.e., the same string is always represented
// by the same AstRawString*.
// FIXME(marja): fix the type of Lookup.
@@ -51,8 +51,9 @@ Variable* VariableMap::Declare(Zone* zone, Scope* scope,
if (*was_added) {
// The variable has not been declared yet -> insert it.
DCHECK_EQ(name, p->key);
- Variable* variable = new (zone) Variable(
- scope, name, mode, kind, initialization_flag, maybe_assigned_flag);
+ Variable* variable =
+ new (zone) Variable(scope, name, mode, kind, initialization_flag,
+ maybe_assigned_flag, is_static_flag);
p->value = variable;
}
return reinterpret_cast<Variable*>(p->value);
@@ -102,6 +103,9 @@ Scope::Scope(Zone* zone, Scope* outer_scope, ScopeType scope_type)
DCHECK_NE(SCRIPT_SCOPE, scope_type);
SetDefaults();
set_language_mode(outer_scope->language_mode());
+ private_name_lookup_skips_outer_class_ =
+ outer_scope->is_class_scope() &&
+ outer_scope->AsClassScope()->IsParsingHeritage();
outer_scope_->AddInnerScope(this);
}
@@ -140,14 +144,18 @@ ModuleScope::ModuleScope(Isolate* isolate, Handle<ScopeInfo> scope_info,
set_language_mode(LanguageMode::kStrict);
}
-ClassScope::ClassScope(Zone* zone, Scope* outer_scope)
- : Scope(zone, outer_scope, CLASS_SCOPE) {
+ClassScope::ClassScope(Zone* zone, Scope* outer_scope, bool is_anonymous)
+ : Scope(zone, outer_scope, CLASS_SCOPE),
+ rare_data_and_is_parsing_heritage_(nullptr),
+ is_anonymous_class_(is_anonymous) {
set_language_mode(LanguageMode::kStrict);
}
-ClassScope::ClassScope(Zone* zone, AstValueFactory* ast_value_factory,
+ClassScope::ClassScope(Isolate* isolate, Zone* zone,
+ AstValueFactory* ast_value_factory,
Handle<ScopeInfo> scope_info)
- : Scope(zone, CLASS_SCOPE, scope_info) {
+ : Scope(zone, CLASS_SCOPE, scope_info),
+ rare_data_and_is_parsing_heritage_(nullptr) {
set_language_mode(LanguageMode::kStrict);
if (scope_info->HasClassBrand()) {
Variable* brand =
@@ -155,6 +163,25 @@ ClassScope::ClassScope(Zone* zone, AstValueFactory* ast_value_factory,
DCHECK_NOT_NULL(brand);
EnsureRareData()->brand = brand;
}
+
+ // If the class variable is context-allocated and its index is
+ // saved for deserialization, deserialize it.
+ if (scope_info->HasSavedClassVariableIndex()) {
+ int index = scope_info->SavedClassVariableContextLocalIndex();
+ DCHECK_GE(index, 0);
+ DCHECK_LT(index, scope_info->ContextLocalCount());
+ String name = scope_info->ContextLocalName(index);
+ DCHECK_EQ(scope_info->ContextLocalMode(index), VariableMode::kConst);
+ DCHECK_EQ(scope_info->ContextLocalInitFlag(index),
+ InitializationFlag::kNeedsInitialization);
+ DCHECK_EQ(scope_info->ContextLocalMaybeAssignedFlag(index),
+ MaybeAssignedFlag::kMaybeAssigned);
+ Variable* var = DeclareClassVariable(
+ ast_value_factory, ast_value_factory->GetString(handle(name, isolate)),
+ kNoSourcePosition);
+ var->AllocateTo(VariableLocation::CONTEXT,
+ Context::MIN_CONTEXT_SLOTS + index);
+ }
}
Scope::Scope(Zone* zone, ScopeType scope_type, Handle<ScopeInfo> scope_info)
@@ -171,6 +198,8 @@ Scope::Scope(Zone* zone, ScopeType scope_type, Handle<ScopeInfo> scope_info)
set_language_mode(scope_info->language_mode());
num_heap_slots_ = scope_info->ContextLength();
DCHECK_LE(Context::MIN_CONTEXT_SLOTS, num_heap_slots_);
+ private_name_lookup_skips_outer_class_ =
+ scope_info->PrivateNameLookupSkipsOuterClass();
// We don't really need to use the preparsed scope data; this is just to
// shorten the recursion in SetMustUsePreparseData.
must_use_preparsed_scope_data_ = true;
@@ -222,6 +251,7 @@ void DeclarationScope::SetDefaults() {
has_this_reference_ = false;
has_this_declaration_ =
(is_function_scope() && !is_arrow_scope()) || is_module_scope();
+ needs_private_name_context_chain_recalc_ = false;
has_rest_ = false;
receiver_ = nullptr;
new_target_ = nullptr;
@@ -270,6 +300,8 @@ void Scope::SetDefaults() {
is_declaration_scope_ = false;
+ private_name_lookup_skips_outer_class_ = false;
+
must_use_preparsed_scope_data_ = false;
}
@@ -343,8 +375,8 @@ Scope* Scope::DeserializeScopeChain(Isolate* isolate, Zone* zone,
outer_scope = new (zone)
DeclarationScope(zone, EVAL_SCOPE, handle(scope_info, isolate));
} else if (scope_info.scope_type() == CLASS_SCOPE) {
- outer_scope = new (zone)
- ClassScope(zone, ast_value_factory, handle(scope_info, isolate));
+ outer_scope = new (zone) ClassScope(isolate, zone, ast_value_factory,
+ handle(scope_info, isolate));
} else if (scope_info.scope_type() == BLOCK_SCOPE) {
if (scope_info.is_declaration_scope()) {
outer_scope = new (zone)
@@ -546,7 +578,8 @@ bool DeclarationScope::Analyze(ParseInfo* info) {
if (scope->must_use_preparsed_scope_data_) {
DCHECK_EQ(scope->scope_type_, ScopeType::FUNCTION_SCOPE);
allow_deref.emplace();
- info->consumed_preparse_data()->RestoreScopeAllocationData(scope);
+ info->consumed_preparse_data()->RestoreScopeAllocationData(
+ scope, info->ast_value_factory());
}
if (!scope->AllocateVariables(info)) return false;
@@ -787,11 +820,13 @@ Variable* Scope::LookupInScopeInfo(const AstRawString* name, Scope* cache) {
VariableMode mode;
InitializationFlag init_flag;
MaybeAssignedFlag maybe_assigned_flag;
+ IsStaticFlag is_static_flag;
{
location = VariableLocation::CONTEXT;
index = ScopeInfo::ContextSlotIndex(*scope_info_, name_handle, &mode,
- &init_flag, &maybe_assigned_flag);
+ &init_flag, &maybe_assigned_flag,
+ &is_static_flag);
found = index >= 0;
}
@@ -816,9 +851,9 @@ Variable* Scope::LookupInScopeInfo(const AstRawString* name, Scope* cache) {
}
bool was_added;
- Variable* var =
- cache->variables_.Declare(zone(), this, name, mode, NORMAL_VARIABLE,
- init_flag, maybe_assigned_flag, &was_added);
+ Variable* var = cache->variables_.Declare(
+ zone(), this, name, mode, NORMAL_VARIABLE, init_flag, maybe_assigned_flag,
+ IsStaticFlag::kNotStatic, &was_added);
DCHECK(was_added);
var->AllocateTo(location, index);
return var;
@@ -1047,7 +1082,7 @@ Variable* DeclarationScope::DeclareDynamicGlobal(const AstRawString* name,
bool was_added;
return cache->variables_.Declare(
zone(), this, name, VariableMode::kDynamicGlobal, kind,
- kCreatedInitialized, kNotAssigned, &was_added);
+ kCreatedInitialized, kNotAssigned, IsStaticFlag::kNotStatic, &was_added);
// TODO(neis): Mark variable as maybe-assigned?
}
@@ -1165,9 +1200,9 @@ bool DeclarationScope::AllocateVariables(ParseInfo* info) {
// to ensure that UpdateNeedsHoleCheck() can detect import variables.
if (is_module_scope()) AsModuleScope()->AllocateModuleVariables();
- ClassScope* closest_class_scope = GetClassScope();
- if (closest_class_scope != nullptr &&
- !closest_class_scope->ResolvePrivateNames(info)) {
+ PrivateNameScopeIterator private_name_scope_iter(this);
+ if (!private_name_scope_iter.Done() &&
+ !private_name_scope_iter.GetScope()->ResolvePrivateNames(info)) {
DCHECK(info->pending_error_handler()->has_pending_error());
return false;
}
@@ -1177,7 +1212,7 @@ bool DeclarationScope::AllocateVariables(ParseInfo* info) {
return false;
}
- // // Don't allocate variables of preparsed scopes.
+ // Don't allocate variables of preparsed scopes.
if (!was_lazily_parsed()) AllocateVariablesRecursively();
return true;
@@ -1254,17 +1289,6 @@ int Scope::ContextChainLengthUntilOutermostSloppyEval() const {
return result;
}
-ClassScope* Scope::GetClassScope() {
- Scope* scope = this;
- while (scope != nullptr && !scope->is_class_scope()) {
- scope = scope->outer_scope();
- }
- if (scope != nullptr && scope->is_class_scope()) {
- return scope->AsClassScope();
- }
- return nullptr;
-}
-
DeclarationScope* Scope::GetDeclarationScope() {
Scope* scope = this;
while (!scope->is_declaration_scope()) {
@@ -1688,11 +1712,17 @@ void Scope::Print(int n) {
if (is_declaration_scope() && AsDeclarationScope()->NeedsHomeObject()) {
Indent(n1, "// scope needs home object\n");
}
+ if (private_name_lookup_skips_outer_class()) {
+ Indent(n1, "// scope skips outer class for #-names\n");
+ }
if (inner_scope_calls_eval_) Indent(n1, "// inner scope calls 'eval'\n");
if (is_declaration_scope()) {
DeclarationScope* scope = AsDeclarationScope();
if (scope->was_lazily_parsed()) Indent(n1, "// lazily parsed\n");
if (scope->ShouldEagerCompile()) Indent(n1, "// will be compiled\n");
+ if (scope->needs_private_name_context_chain_recalc()) {
+ Indent(n1, "// needs #-name context chain recalc\n");
+ }
}
if (num_stack_slots_ > 0) {
Indent(n1, "// ");
@@ -1729,15 +1759,24 @@ void Scope::Print(int n) {
if (is_class_scope()) {
ClassScope* class_scope = AsClassScope();
- if (class_scope->rare_data_ != nullptr) {
+ if (class_scope->GetRareData() != nullptr) {
PrintMap(n1, "// private name vars:\n",
- &(class_scope->rare_data_->private_name_map), true, function);
+ &(class_scope->GetRareData()->private_name_map), true, function);
Variable* brand = class_scope->brand();
if (brand != nullptr) {
Indent(n1, "// brand var:\n");
PrintVar(n1, brand);
}
}
+ if (class_scope->class_variable() != nullptr) {
+ Indent(n1, "// class var");
+ PrintF("%s%s:\n",
+ class_scope->class_variable()->is_used() ? ", used" : ", unused",
+ class_scope->should_save_class_variable_index()
+ ? ", index saved"
+ : ", index not saved");
+ PrintVar(n1, class_scope->class_variable());
+ }
}
// Print inner scopes (disable by providing negative n).
@@ -1780,9 +1819,9 @@ Variable* Scope::NonLocal(const AstRawString* name, VariableMode mode) {
// Declare a new non-local.
DCHECK(IsDynamicVariableMode(mode));
bool was_added;
- Variable* var =
- variables_.Declare(zone(), this, name, mode, NORMAL_VARIABLE,
- kCreatedInitialized, kNotAssigned, &was_added);
+ Variable* var = variables_.Declare(zone(), this, name, mode, NORMAL_VARIABLE,
+ kCreatedInitialized, kNotAssigned,
+ IsStaticFlag::kNotStatic, &was_added);
// Allocate it by giving it a dynamic lookup.
var->AllocateTo(VariableLocation::LOOKUP, -1);
return var;
@@ -2103,8 +2142,7 @@ bool Scope::MustAllocateInContext(Variable* var) {
if (mode == VariableMode::kTemporary) return false;
if (is_catch_scope()) return true;
if (is_script_scope() || is_eval_scope()) {
- if (IsLexicalVariableMode(mode) ||
- IsPrivateMethodOrAccessorVariableMode(mode)) {
+ if (IsLexicalVariableMode(mode)) {
return true;
}
}
@@ -2308,6 +2346,47 @@ void Scope::AllocateScopeInfosRecursively(Isolate* isolate,
}
}
+void DeclarationScope::RecalcPrivateNameContextChain() {
+ // The outermost scope in a class heritage expression is marked to skip the
+ // class scope during private name resolution. It is possible, however, that
+ // either the class scope won't require a Context and ScopeInfo, or the
+ // outermost scope in the heritage position won't. Simply copying the bit from
+ // full parse into the ScopeInfo will break lazy compilation. In the former
+ // case the scope that is marked to skip its outer scope will incorrectly skip
+ // a different class scope than the one we intended to skip. In the latter
+ // case variables resolved through an inner scope will incorrectly check the
+ // class scope since we lost the skip bit from the outermost heritage scope.
+ //
+ // This method fixes both cases by, in outermost to innermost order, copying
+ // the value of the skip bit from outer scopes that don't require a Context.
+ DCHECK(needs_private_name_context_chain_recalc_);
+ this->ForEach([](Scope* scope) {
+ Scope* outer = scope->outer_scope();
+ if (!outer) return Iteration::kDescend;
+ if (!outer->NeedsContext()) {
+ scope->private_name_lookup_skips_outer_class_ =
+ outer->private_name_lookup_skips_outer_class();
+ }
+ if (!scope->is_function_scope() ||
+ scope->AsDeclarationScope()->ShouldEagerCompile()) {
+ return Iteration::kDescend;
+ }
+ return Iteration::kContinue;
+ });
+}
+
+void DeclarationScope::RecordNeedsPrivateNameContextChainRecalc() {
+ DCHECK_EQ(GetClosureScope(), this);
+ DeclarationScope* scope;
+ for (scope = this; scope != nullptr;
+ scope = scope->outer_scope() != nullptr
+ ? scope->outer_scope()->GetClosureScope()
+ : nullptr) {
+ if (scope->needs_private_name_context_chain_recalc_) return;
+ scope->needs_private_name_context_chain_recalc_ = true;
+ }
+}
+
// static
void DeclarationScope::AllocateScopeInfos(ParseInfo* info, Isolate* isolate) {
DeclarationScope* scope = info->literal()->scope();
@@ -2318,6 +2397,9 @@ void DeclarationScope::AllocateScopeInfos(ParseInfo* info, Isolate* isolate) {
outer_scope = scope->outer_scope_->scope_info_;
}
+ if (scope->needs_private_name_context_chain_recalc()) {
+ scope->RecalcPrivateNameContextChain();
+ }
scope->AllocateScopeInfosRecursively(isolate, outer_scope);
// The debugger expects all shared function infos to contain a scope info.
@@ -2359,14 +2441,20 @@ bool IsComplementaryAccessorPair(VariableMode a, VariableMode b) {
}
Variable* ClassScope::DeclarePrivateName(const AstRawString* name,
- VariableMode mode, bool* was_added) {
+ VariableMode mode,
+ IsStaticFlag is_static_flag,
+ bool* was_added) {
Variable* result = EnsureRareData()->private_name_map.Declare(
zone(), this, name, mode, NORMAL_VARIABLE,
InitializationFlag::kNeedsInitialization,
- MaybeAssignedFlag::kMaybeAssigned, was_added);
+ MaybeAssignedFlag::kMaybeAssigned, is_static_flag, was_added);
if (*was_added) {
locals_.Add(result);
- } else if (IsComplementaryAccessorPair(result->mode(), mode)) {
+ has_static_private_methods_ |=
+ (result->is_static() &&
+ IsPrivateMethodOrAccessorVariableMode(result->mode()));
+ } else if (IsComplementaryAccessorPair(result->mode(), mode) &&
+ result->is_static_flag() == is_static_flag) {
*was_added = true;
result->set_mode(VariableMode::kPrivateGetterAndSetter);
}
@@ -2375,38 +2463,42 @@ Variable* ClassScope::DeclarePrivateName(const AstRawString* name,
}
Variable* ClassScope::LookupLocalPrivateName(const AstRawString* name) {
- if (rare_data_ == nullptr) {
+ RareData* rare_data = GetRareData();
+ if (rare_data == nullptr) {
return nullptr;
}
- return rare_data_->private_name_map.Lookup(name);
+ return rare_data->private_name_map.Lookup(name);
}
UnresolvedList::Iterator ClassScope::GetUnresolvedPrivateNameTail() {
- if (rare_data_ == nullptr) {
+ RareData* rare_data = GetRareData();
+ if (rare_data == nullptr) {
return UnresolvedList::Iterator();
}
- return rare_data_->unresolved_private_names.end();
+ return rare_data->unresolved_private_names.end();
}
void ClassScope::ResetUnresolvedPrivateNameTail(UnresolvedList::Iterator tail) {
- if (rare_data_ == nullptr ||
- rare_data_->unresolved_private_names.end() == tail) {
+ RareData* rare_data = GetRareData();
+ if (rare_data == nullptr ||
+ rare_data->unresolved_private_names.end() == tail) {
return;
}
bool tail_is_empty = tail == UnresolvedList::Iterator();
if (tail_is_empty) {
// If the saved tail is empty, the list used to be empty, so clear it.
- rare_data_->unresolved_private_names.Clear();
+ rare_data->unresolved_private_names.Clear();
} else {
- rare_data_->unresolved_private_names.Rewind(tail);
+ rare_data->unresolved_private_names.Rewind(tail);
}
}
void ClassScope::MigrateUnresolvedPrivateNameTail(
AstNodeFactory* ast_node_factory, UnresolvedList::Iterator tail) {
- if (rare_data_ == nullptr ||
- rare_data_->unresolved_private_names.end() == tail) {
+ RareData* rare_data = GetRareData();
+ if (rare_data == nullptr ||
+ rare_data->unresolved_private_names.end() == tail) {
return;
}
UnresolvedList migrated_names;
@@ -2415,9 +2507,9 @@ void ClassScope::MigrateUnresolvedPrivateNameTail(
// migrate everything after the head.
bool tail_is_empty = tail == UnresolvedList::Iterator();
UnresolvedList::Iterator it =
- tail_is_empty ? rare_data_->unresolved_private_names.begin() : tail;
+ tail_is_empty ? rare_data->unresolved_private_names.begin() : tail;
- for (; it != rare_data_->unresolved_private_names.end(); ++it) {
+ for (; it != rare_data->unresolved_private_names.end(); ++it) {
VariableProxy* proxy = *it;
VariableProxy* copy = ast_node_factory->CopyVariableProxy(proxy);
migrated_names.Add(copy);
@@ -2425,20 +2517,11 @@ void ClassScope::MigrateUnresolvedPrivateNameTail(
// Replace with the migrated copies.
if (tail_is_empty) {
- rare_data_->unresolved_private_names.Clear();
+ rare_data->unresolved_private_names.Clear();
} else {
- rare_data_->unresolved_private_names.Rewind(tail);
+ rare_data->unresolved_private_names.Rewind(tail);
}
- rare_data_->unresolved_private_names.Append(std::move(migrated_names));
-}
-
-void ClassScope::AddUnresolvedPrivateName(VariableProxy* proxy) {
- // During a reparse, already_resolved_ may be true here, because
- // the class scope is deserialized while the function scope inside may
- // be new.
- DCHECK(!proxy->is_resolved());
- DCHECK(proxy->IsPrivateName());
- EnsureRareData()->unresolved_private_names.Add(proxy);
+ rare_data->unresolved_private_names.Append(std::move(migrated_names));
}
Variable* ClassScope::LookupPrivateNameInScopeInfo(const AstRawString* name) {
@@ -2450,8 +2533,10 @@ Variable* ClassScope::LookupPrivateNameInScopeInfo(const AstRawString* name) {
VariableMode mode;
InitializationFlag init_flag;
MaybeAssignedFlag maybe_assigned_flag;
- int index = ScopeInfo::ContextSlotIndex(*scope_info_, name_handle, &mode,
- &init_flag, &maybe_assigned_flag);
+ IsStaticFlag is_static_flag;
+ int index =
+ ScopeInfo::ContextSlotIndex(*scope_info_, name_handle, &mode, &init_flag,
+ &maybe_assigned_flag, &is_static_flag);
if (index < 0) {
return nullptr;
}
@@ -2463,7 +2548,7 @@ Variable* ClassScope::LookupPrivateNameInScopeInfo(const AstRawString* name) {
// Add the found private name to the map to speed up subsequent
// lookups for the same name.
bool was_added;
- Variable* var = DeclarePrivateName(name, mode, &was_added);
+ Variable* var = DeclarePrivateName(name, mode, is_static_flag, &was_added);
DCHECK(was_added);
var->AllocateTo(VariableLocation::CONTEXT, index);
return var;
@@ -2472,15 +2557,14 @@ Variable* ClassScope::LookupPrivateNameInScopeInfo(const AstRawString* name) {
Variable* ClassScope::LookupPrivateName(VariableProxy* proxy) {
DCHECK(!proxy->is_resolved());
- for (Scope* scope = this; !scope->is_script_scope();
- scope = scope->outer_scope_) {
- if (!scope->is_class_scope()) continue; // Only search in class scopes
- ClassScope* class_scope = scope->AsClassScope();
+ for (PrivateNameScopeIterator scope_iter(this); !scope_iter.Done();
+ scope_iter.Next()) {
+ ClassScope* scope = scope_iter.GetScope();
// Try finding it in the private name map first, if it can't be found,
// try the deseralized scope info.
- Variable* var = class_scope->LookupLocalPrivateName(proxy->raw_name());
- if (var == nullptr && !class_scope->scope_info_.is_null()) {
- var = class_scope->LookupPrivateNameInScopeInfo(proxy->raw_name());
+ Variable* var = scope->LookupLocalPrivateName(proxy->raw_name());
+ if (var == nullptr && !scope->scope_info_.is_null()) {
+ var = scope->LookupPrivateNameInScopeInfo(proxy->raw_name());
}
if (var != nullptr) {
return var;
@@ -2490,22 +2574,24 @@ Variable* ClassScope::LookupPrivateName(VariableProxy* proxy) {
}
bool ClassScope::ResolvePrivateNames(ParseInfo* info) {
- if (rare_data_ == nullptr ||
- rare_data_->unresolved_private_names.is_empty()) {
+ RareData* rare_data = GetRareData();
+ if (rare_data == nullptr || rare_data->unresolved_private_names.is_empty()) {
return true;
}
- UnresolvedList& list = rare_data_->unresolved_private_names;
+ UnresolvedList& list = rare_data->unresolved_private_names;
for (VariableProxy* proxy : list) {
Variable* var = LookupPrivateName(proxy);
if (var == nullptr) {
+ // It's only possible to fail to resolve private names here if
+ // this is at the top level or the private name is accessed through eval.
+ DCHECK(info->is_eval() || outer_scope_->is_script_scope());
Scanner::Location loc = proxy->location();
info->pending_error_handler()->ReportMessageAt(
loc.beg_pos, loc.end_pos,
MessageTemplate::kInvalidPrivateFieldResolution, proxy->raw_name());
return false;
} else {
- var->set_is_used();
proxy->BindTo(var);
}
}
@@ -2517,20 +2603,20 @@ bool ClassScope::ResolvePrivateNames(ParseInfo* info) {
}
VariableProxy* ClassScope::ResolvePrivateNamesPartially() {
- if (rare_data_ == nullptr ||
- rare_data_->unresolved_private_names.is_empty()) {
+ RareData* rare_data = GetRareData();
+ if (rare_data == nullptr || rare_data->unresolved_private_names.is_empty()) {
return nullptr;
}
- ClassScope* outer_class_scope =
- outer_scope_ == nullptr ? nullptr : outer_scope_->GetClassScope();
- UnresolvedList& unresolved = rare_data_->unresolved_private_names;
- bool has_private_names = rare_data_->private_name_map.capacity() > 0;
+ PrivateNameScopeIterator private_name_scope_iter(this);
+ private_name_scope_iter.Next();
+ UnresolvedList& unresolved = rare_data->unresolved_private_names;
+ bool has_private_names = rare_data->private_name_map.capacity() > 0;
// If the class itself does not have private names, nor does it have
- // an outer class scope, then we are certain any private name access
+ // an outer private name scope, then we are certain any private name access
// inside cannot be resolved.
- if (!has_private_names && outer_class_scope == nullptr &&
+ if (!has_private_names && private_name_scope_iter.Done() &&
!unresolved.is_empty()) {
return unresolved.first();
}
@@ -2548,21 +2634,27 @@ VariableProxy* ClassScope::ResolvePrivateNamesPartially() {
if (var != nullptr) {
var->set_is_used();
proxy->BindTo(var);
+ // If the variable being accessed is a static private method, we need to
+ // save the class variable in the context to check that the receiver is
+ // the class at runtime.
+ has_explicit_static_private_methods_access_ |=
+ (var->is_static() &&
+ IsPrivateMethodOrAccessorVariableMode(var->mode()));
}
}
// If the current scope does not have declared private names,
// try looking from the outer class scope later.
if (var == nullptr) {
- // There's no outer class scope so we are certain that the variable
+ // There's no outer private name scope so we are certain that the variable
// cannot be resolved later.
- if (outer_class_scope == nullptr) {
+ if (private_name_scope_iter.Done()) {
return proxy;
}
- // The private name may be found later in the outer class scope,
- // so push it to the outer sopce.
- outer_class_scope->AddUnresolvedPrivateName(proxy);
+ // The private name may be found later in the outer private name scope, so
+ // push it to the outer scope.
+ private_name_scope_iter.AddUnresolvedPrivateName(proxy);
}
proxy = next;
@@ -2573,14 +2665,16 @@ VariableProxy* ClassScope::ResolvePrivateNamesPartially() {
}
Variable* ClassScope::DeclareBrandVariable(AstValueFactory* ast_value_factory,
+ IsStaticFlag is_static_flag,
int class_token_pos) {
- DCHECK_IMPLIES(rare_data_ != nullptr, rare_data_->brand == nullptr);
+ DCHECK_IMPLIES(GetRareData() != nullptr, GetRareData()->brand == nullptr);
bool was_added;
Variable* brand = Declare(zone(), ast_value_factory->dot_brand_string(),
VariableMode::kConst, NORMAL_VARIABLE,
InitializationFlag::kNeedsInitialization,
MaybeAssignedFlag::kMaybeAssigned, &was_added);
DCHECK(was_added);
+ brand->set_is_static_flag(is_static_flag);
brand->ForceContextAllocation();
brand->set_is_used();
EnsureRareData()->brand = brand;
@@ -2588,5 +2682,61 @@ Variable* ClassScope::DeclareBrandVariable(AstValueFactory* ast_value_factory,
return brand;
}
+Variable* ClassScope::DeclareClassVariable(AstValueFactory* ast_value_factory,
+ const AstRawString* name,
+ int class_token_pos) {
+ DCHECK_NULL(class_variable_);
+ bool was_added;
+ class_variable_ =
+ Declare(zone(), name == nullptr ? ast_value_factory->dot_string() : name,
+ VariableMode::kConst, NORMAL_VARIABLE,
+ InitializationFlag::kNeedsInitialization,
+ MaybeAssignedFlag::kMaybeAssigned, &was_added);
+ DCHECK(was_added);
+ class_variable_->set_initializer_position(class_token_pos);
+ return class_variable_;
+}
+
+PrivateNameScopeIterator::PrivateNameScopeIterator(Scope* start)
+ : start_scope_(start), current_scope_(start) {
+ if (!start->is_class_scope() || start->AsClassScope()->IsParsingHeritage()) {
+ Next();
+ }
+}
+
+void PrivateNameScopeIterator::Next() {
+ DCHECK(!Done());
+ Scope* inner = current_scope_;
+ Scope* scope = inner->outer_scope();
+ while (scope != nullptr) {
+ if (scope->is_class_scope()) {
+ if (!inner->private_name_lookup_skips_outer_class()) {
+ current_scope_ = scope;
+ return;
+ }
+ skipped_any_scopes_ = true;
+ }
+ inner = scope;
+ scope = scope->outer_scope();
+ }
+ current_scope_ = nullptr;
+}
+
+void PrivateNameScopeIterator::AddUnresolvedPrivateName(VariableProxy* proxy) {
+ // During a reparse, current_scope_->already_resolved_ may be true here,
+ // because the class scope is deserialized while the function scope inside may
+ // be new.
+ DCHECK(!proxy->is_resolved());
+ DCHECK(proxy->IsPrivateName());
+ GetScope()->EnsureRareData()->unresolved_private_names.Add(proxy);
+ // Any closure scope that contains uses of private names that skip over a
+ // class scope due to heritage expressions needs private name context chain
+ // recalculation, since not all scopes require a Context or ScopeInfo. See
+ // the comment in DeclarationScope::RecalcPrivateNameContextChain.
+ if (V8_UNLIKELY(skipped_any_scopes_)) {
+ start_scope_->GetClosureScope()->RecordNeedsPrivateNameContextChainRecalc();
+ }
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/ast/scopes.h b/deps/v8/src/ast/scopes.h
index 73e6e8fd89..30838db28b 100644
--- a/deps/v8/src/ast/scopes.h
+++ b/deps/v8/src/ast/scopes.h
@@ -44,7 +44,7 @@ class VariableMap : public ZoneHashMap {
VariableMode mode, VariableKind kind,
InitializationFlag initialization_flag,
MaybeAssignedFlag maybe_assigned_flag,
- bool* was_added);
+ IsStaticFlag is_static_flag, bool* was_added);
V8_EXPORT_PRIVATE Variable* Lookup(const AstRawString* name);
void Remove(Variable* var);
@@ -360,6 +360,9 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
bool is_class_scope() const { return scope_type_ == CLASS_SCOPE; }
bool inner_scope_calls_eval() const { return inner_scope_calls_eval_; }
+ bool private_name_lookup_skips_outer_class() const {
+ return private_name_lookup_skips_outer_class_;
+ }
bool IsAsmModule() const;
// Returns true if this scope or any inner scopes that might be eagerly
// compiled are asm modules.
@@ -464,10 +467,6 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
// sloppy eval call. One if this->sloppy_eval_can_extend_vars().
int ContextChainLengthUntilOutermostSloppyEval() const;
- // Find the closest class scope in the current scope and outer scopes. If no
- // class scope is found, nullptr will be returned.
- ClassScope* GetClassScope();
-
// Find the first function, script, eval or (declaration) block scope. This is
// the scope where var declarations will be hoisted to in the implementation.
DeclarationScope* GetDeclarationScope();
@@ -557,9 +556,10 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
Variable* Declare(Zone* zone, const AstRawString* name, VariableMode mode,
VariableKind kind, InitializationFlag initialization_flag,
MaybeAssignedFlag maybe_assigned_flag, bool* was_added) {
- Variable* result =
- variables_.Declare(zone, this, name, mode, kind, initialization_flag,
- maybe_assigned_flag, was_added);
+ // Static variables can only be declared using ClassScope methods.
+ Variable* result = variables_.Declare(
+ zone, this, name, mode, kind, initialization_flag, maybe_assigned_flag,
+ IsStaticFlag::kNotStatic, was_added);
if (*was_added) locals_.Add(result);
return result;
}
@@ -713,7 +713,8 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
// This scope's declarations might not be executed in order (e.g., switch).
bool scope_nonlinear_ : 1;
bool is_hidden_ : 1;
- // Temporary workaround that allows masking of 'this' in debug-evalute scopes.
+ // Temporary workaround that allows masking of 'this' in debug-evaluate
+ // scopes.
bool is_debug_evaluate_scope_ : 1;
// True if one of the inner scopes or the scope itself calls eval.
@@ -723,6 +724,11 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
// True if it holds 'var' declarations.
bool is_declaration_scope_ : 1;
+ // True if the outer scope is a class scope and should be skipped when
+ // resolving private names, i.e. if the scope is in a class heritage
+ // expression.
+ bool private_name_lookup_skips_outer_class_ : 1;
+
bool must_use_preparsed_scope_data_ : 1;
};
@@ -859,6 +865,11 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
return IsClassMembersInitializerFunction(function_kind());
}
+ void set_is_async_module() {
+ DCHECK(IsModule(function_kind_));
+ function_kind_ = kAsyncModule;
+ }
+
void DeclareThis(AstValueFactory* ast_value_factory);
void DeclareArguments(AstValueFactory* ast_value_factory);
void DeclareDefaultFunctionVariables(AstValueFactory* ast_value_factory);
@@ -1082,6 +1093,11 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
GetReceiverScope()->receiver()->ForceContextAllocation();
}
+ bool needs_private_name_context_chain_recalc() const {
+ return needs_private_name_context_chain_recalc_;
+ }
+ void RecordNeedsPrivateNameContextChainRecalc();
+
private:
V8_INLINE void AllocateParameter(Variable* var, int index);
@@ -1099,6 +1115,12 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
void SetDefaults();
+ // Recalculate the private name context chain from the existing skip bit in
+ // preparation for AllocateScopeInfos. Because the private name scope is
+ // implemented with a skip bit for scopes in heritage position, that bit may
+ // need to be recomputed due to scopes that do not need contexts.
+ void RecalcPrivateNameContextChain();
+
bool has_simple_parameters_ : 1;
// This scope contains an "use asm" annotation.
bool is_asm_module_ : 1;
@@ -1120,9 +1142,10 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
bool has_checked_syntax_ : 1;
bool has_this_reference_ : 1;
bool has_this_declaration_ : 1;
+ bool needs_private_name_context_chain_recalc_ : 1;
// If the scope is a function scope, this is the function kind.
- const FunctionKind function_kind_;
+ FunctionKind function_kind_;
int num_parameters_ = 0;
@@ -1220,17 +1243,26 @@ class ModuleScope final : public DeclarationScope {
class V8_EXPORT_PRIVATE ClassScope : public Scope {
public:
- ClassScope(Zone* zone, Scope* outer_scope);
+ ClassScope(Zone* zone, Scope* outer_scope, bool is_anonymous);
// Deserialization.
- ClassScope(Zone* zone, AstValueFactory* ast_value_factory,
+ ClassScope(Isolate* isolate, Zone* zone, AstValueFactory* ast_value_factory,
Handle<ScopeInfo> scope_info);
+ struct HeritageParsingScope {
+ explicit HeritageParsingScope(ClassScope* class_scope)
+ : class_scope_(class_scope) {
+ class_scope_->SetIsParsingHeritage(true);
+ }
+ ~HeritageParsingScope() { class_scope_->SetIsParsingHeritage(false); }
+
+ private:
+ ClassScope* class_scope_;
+ };
+
// Declare a private name in the private name map and add it to the
// local variables of this scope.
Variable* DeclarePrivateName(const AstRawString* name, VariableMode mode,
- bool* was_added);
-
- void AddUnresolvedPrivateName(VariableProxy* proxy);
+ IsStaticFlag is_static_flag, bool* was_added);
// Try resolving all unresolved private names found in the current scope.
// Called from DeclarationScope::AllocateVariables() when reparsing a
@@ -1261,13 +1293,53 @@ class V8_EXPORT_PRIVATE ClassScope : public Scope {
void MigrateUnresolvedPrivateNameTail(AstNodeFactory* ast_node_factory,
UnresolvedList::Iterator tail);
Variable* DeclareBrandVariable(AstValueFactory* ast_value_factory,
+ IsStaticFlag is_static_flag,
int class_token_pos);
+
+ Variable* DeclareClassVariable(AstValueFactory* ast_value_factory,
+ const AstRawString* name, int class_token_pos);
+
Variable* brand() {
- return rare_data_ == nullptr ? nullptr : rare_data_->brand;
+ return GetRareData() == nullptr ? nullptr : GetRareData()->brand;
+ }
+
+ Variable* class_variable() { return class_variable_; }
+
+ V8_INLINE bool IsParsingHeritage() {
+ return rare_data_and_is_parsing_heritage_.GetPayload();
+ }
+
+ // Only maintained when the scope is parsed, not when the scope is
+ // deserialized.
+ bool has_static_private_methods() const {
+ return has_static_private_methods_;
+ }
+
+ // Returns whether the index of the class variable of this class scope
+ // should be recorded in the ScopeInfo.
+ // If any inner scope accesses static private names directly, the class
+ // variable will be forced to be context-allocated.
+ // An inner scope may also call eval, which may result in access to
+ // static private names.
+ // Only maintained when the scope is parsed.
+ bool should_save_class_variable_index() const {
+ return should_save_class_variable_index_ ||
+ has_explicit_static_private_methods_access_ ||
+ (has_static_private_methods_ && inner_scope_calls_eval_);
+ }
+
+ // Only maintained when the scope is parsed.
+ bool is_anonymous_class() const { return is_anonymous_class_; }
+
+ // Overridden during reparsing.
+ void set_should_save_class_variable_index() {
+ should_save_class_variable_index_ = true;
}
private:
friend class Scope;
+ friend class PrivateNameScopeIterator;
+
// Find the private name declared in the private name map first,
// if it cannot be found there, try scope info if there is any.
// Returns nullptr if it cannot be found.
@@ -1285,14 +1357,53 @@ class V8_EXPORT_PRIVATE ClassScope : public Scope {
Variable* brand = nullptr;
};
+ V8_INLINE RareData* GetRareData() {
+ return rare_data_and_is_parsing_heritage_.GetPointer();
+ }
V8_INLINE RareData* EnsureRareData() {
- if (rare_data_ == nullptr) {
- rare_data_ = new (zone_) RareData(zone_);
+ if (GetRareData() == nullptr) {
+ rare_data_and_is_parsing_heritage_.SetPointer(new (zone_)
+ RareData(zone_));
}
- return rare_data_;
+ return GetRareData();
+ }
+ V8_INLINE void SetIsParsingHeritage(bool v) {
+ rare_data_and_is_parsing_heritage_.SetPayload(v);
}
- RareData* rare_data_ = nullptr;
+ PointerWithPayload<RareData, bool, 1> rare_data_and_is_parsing_heritage_;
+ Variable* class_variable_ = nullptr;
+ // These are only maintained when the scope is parsed, not when the
+ // scope is deserialized.
+ bool has_static_private_methods_ = false;
+ bool has_explicit_static_private_methods_access_ = false;
+ bool is_anonymous_class_ = false;
+ // This is only maintained during reparsing, restored from the
+ // preparsed data.
+ bool should_save_class_variable_index_ = false;
+};
+
+// Iterate over the private name scope chain. The iteration proceeds from the
+// innermost private name scope outwards.
+class PrivateNameScopeIterator {
+ public:
+ explicit PrivateNameScopeIterator(Scope* start);
+
+ bool Done() const { return current_scope_ == nullptr; }
+ void Next();
+
+ // Add an unresolved private name to the current scope.
+ void AddUnresolvedPrivateName(VariableProxy* proxy);
+
+ ClassScope* GetScope() const {
+ DCHECK(!Done());
+ return current_scope_->AsClassScope();
+ }
+
+ private:
+ bool skipped_any_scopes_ = false;
+ Scope* start_scope_;
+ Scope* current_scope_;
};
} // namespace internal
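
The new PrivateNameScopeIterator centralizes the outward walk that ClassScope::LookupPrivateName and ResolvePrivateNamesPartially previously spelled out against raw outer_scope_ chains, with heritage-position skips applied by Next(). A sketch of the call pattern, mirroring ClassScope::LookupPrivateName in scopes.cc above (V8-internal types assumed; LookupPrivateNameSketch is an illustrative name, not part of the patch):

    // Resolve a private name against the visible private-name scope chain.
    Variable* LookupPrivateNameSketch(Scope* start, const AstRawString* name) {
      for (PrivateNameScopeIterator it(start); !it.Done(); it.Next()) {
        ClassScope* scope = it.GetScope();
        // Scopes in heritage position were already stepped over by Next(),
        // so every scope seen here may legally declare the name.
        if (Variable* var = scope->LookupLocalPrivateName(name)) return var;
      }
      return nullptr;  // unresolvable, e.g. a top-level #name access
    }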
diff --git a/deps/v8/src/ast/source-range-ast-visitor.cc b/deps/v8/src/ast/source-range-ast-visitor.cc
index 2fcf151999..7470991615 100644
--- a/deps/v8/src/ast/source-range-ast-visitor.cc
+++ b/deps/v8/src/ast/source-range-ast-visitor.cc
@@ -39,6 +39,11 @@ void SourceRangeAstVisitor::VisitFunctionLiteral(FunctionLiteral* expr) {
MaybeRemoveLastContinuationRange(stmts);
}
+void SourceRangeAstVisitor::VisitTryCatchStatement(TryCatchStatement* stmt) {
+ AstTraversalVisitor::VisitTryCatchStatement(stmt);
+ MaybeRemoveContinuationRangeOfAsyncReturn(stmt);
+}
+
bool SourceRangeAstVisitor::VisitNode(AstNode* node) {
AstNodeSourceRanges* range = source_range_map_->Find(node);
@@ -59,11 +64,8 @@ bool SourceRangeAstVisitor::VisitNode(AstNode* node) {
return true;
}
-void SourceRangeAstVisitor::MaybeRemoveLastContinuationRange(
- ZonePtrList<Statement>* statements) {
- if (statements->is_empty()) return;
-
- Statement* last_statement = statements->last();
+void SourceRangeAstVisitor::MaybeRemoveContinuationRange(
+ Statement* last_statement) {
AstNodeSourceRanges* last_range = nullptr;
if (last_statement->IsExpressionStatement() &&
@@ -83,5 +85,38 @@ void SourceRangeAstVisitor::MaybeRemoveLastContinuationRange(
}
}
+void SourceRangeAstVisitor::MaybeRemoveLastContinuationRange(
+ ZonePtrList<Statement>* statements) {
+ if (statements->is_empty()) return;
+ MaybeRemoveContinuationRange(statements->last());
+}
+
+namespace {
+Statement* FindLastNonSyntheticReturn(ZonePtrList<Statement>* statements) {
+ for (int i = statements->length() - 1; i >= 0; --i) {
+ Statement* stmt = statements->at(i);
+ if (!stmt->IsReturnStatement()) break;
+ if (stmt->AsReturnStatement()->is_synthetic_async_return()) continue;
+ return stmt;
+ }
+ return nullptr;
+}
+} // namespace
+
+void SourceRangeAstVisitor::MaybeRemoveContinuationRangeOfAsyncReturn(
+ TryCatchStatement* try_catch_stmt) {
+ // Detect try-catch inserted by NewTryCatchStatementForAsyncAwait in the
+ // parser (issued for async functions, including async generators), and
+ // remove the continuation ranges of return statements corresponding to
+ // returns at function end in the untransformed source.
+ if (try_catch_stmt->is_try_catch_for_async()) {
+ Statement* last_non_synthetic =
+ FindLastNonSyntheticReturn(try_catch_stmt->try_block()->statements());
+ if (last_non_synthetic) {
+ MaybeRemoveContinuationRange(last_non_synthetic);
+ }
+ }
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/ast/source-range-ast-visitor.h b/deps/v8/src/ast/source-range-ast-visitor.h
index 4ba5feb2d2..8b6b99c434 100644
--- a/deps/v8/src/ast/source-range-ast-visitor.h
+++ b/deps/v8/src/ast/source-range-ast-visitor.h
@@ -37,8 +37,11 @@ class SourceRangeAstVisitor final
void VisitSwitchStatement(SwitchStatement* stmt);
void VisitFunctionLiteral(FunctionLiteral* expr);
bool VisitNode(AstNode* node);
+ void VisitTryCatchStatement(TryCatchStatement* stmt);
+ void MaybeRemoveContinuationRange(Statement* last_statement);
void MaybeRemoveLastContinuationRange(ZonePtrList<Statement>* stmts);
+ void MaybeRemoveContinuationRangeOfAsyncReturn(TryCatchStatement* stmt);
SourceRangeMap* source_range_map_ = nullptr;
std::unordered_set<int> continuation_positions_;
diff --git a/deps/v8/src/ast/variables.h b/deps/v8/src/ast/variables.h
index 1ff6f9f422..7be99adc7c 100644
--- a/deps/v8/src/ast/variables.h
+++ b/deps/v8/src/ast/variables.h
@@ -21,7 +21,8 @@ class Variable final : public ZoneObject {
public:
Variable(Scope* scope, const AstRawString* name, VariableMode mode,
VariableKind kind, InitializationFlag initialization_flag,
- MaybeAssignedFlag maybe_assigned_flag = kNotAssigned)
+ MaybeAssignedFlag maybe_assigned_flag = kNotAssigned,
+ IsStaticFlag is_static_flag = IsStaticFlag::kNotStatic)
: scope_(scope),
name_(name),
local_if_not_shadowed_(nullptr),
@@ -35,10 +36,13 @@ class Variable final : public ZoneObject {
ForceContextAllocationField::encode(false) |
ForceHoleInitializationField::encode(false) |
LocationField::encode(VariableLocation::UNALLOCATED) |
- VariableKindField::encode(kind)) {
+ VariableKindField::encode(kind) |
+ IsStaticFlagField::encode(is_static_flag)) {
// Var declared variables never need initialization.
DCHECK(!(mode == VariableMode::kVar &&
initialization_flag == kNeedsInitialization));
+ DCHECK_IMPLIES(is_static_flag == IsStaticFlag::kStatic,
+ IsConstVariableMode(mode));
}
explicit Variable(Variable* other);
@@ -59,6 +63,14 @@ class Variable final : public ZoneObject {
void set_mode(VariableMode mode) {
bit_field_ = VariableModeField::update(bit_field_, mode);
}
+ void set_is_static_flag(IsStaticFlag is_static_flag) {
+ bit_field_ = IsStaticFlagField::update(bit_field_, is_static_flag);
+ }
+ IsStaticFlag is_static_flag() const {
+ return IsStaticFlagField::decode(bit_field_);
+ }
+ bool is_static() const { return is_static_flag() == IsStaticFlag::kStatic; }
+
bool has_forced_context_allocation() const {
return ForceContextAllocationField::decode(bit_field_);
}
@@ -72,6 +84,9 @@ class Variable final : public ZoneObject {
MaybeAssignedFlag maybe_assigned() const {
return MaybeAssignedFlagField::decode(bit_field_);
}
+ void clear_maybe_assigned() {
+ bit_field_ = MaybeAssignedFlagField::update(bit_field_, kNotAssigned);
+ }
void SetMaybeAssigned() {
if (mode() == VariableMode::kConst) return;
@@ -249,6 +264,7 @@ class Variable final : public ZoneObject {
using ForceHoleInitializationField = InitializationFlagField::Next<bool, 1>;
using MaybeAssignedFlagField =
ForceHoleInitializationField::Next<MaybeAssignedFlag, 1>;
+ using IsStaticFlagField = MaybeAssignedFlagField::Next<IsStaticFlag, 1>;
Variable** next() { return &next_; }
friend List;
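
IsStaticFlagField packs the new flag into Variable's existing bit_field_ word by chaining off MaybeAssignedFlagField with Next<>. A rough self-contained sketch of that chaining idea (a simplified stand-in; V8's real BitField helper is parameterized differently):

    #include <cstdint>

    template <typename T, int kShift, int kSize>
    struct BitField {
      static constexpr uint32_t kMask = ((uint32_t{1} << kSize) - 1) << kShift;
      // Chain the next field directly after this one, as
      // MaybeAssignedFlagField::Next<IsStaticFlag, 1> does above.
      template <typename T2, int kSize2>
      using Next = BitField<T2, kShift + kSize, kSize2>;
      static constexpr uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << kShift;
      }
      static constexpr T decode(uint32_t packed) {
        return static_cast<T>((packed & kMask) >> kShift);
      }
      static constexpr uint32_t update(uint32_t packed, T value) {
        return (packed & ~kMask) | encode(value);
      }
    };

    enum class IsStaticFlag { kNotStatic, kStatic };
    using FirstField = BitField<bool, 0, 1>;
    using IsStaticFlagField = FirstField::Next<IsStaticFlag, 1>;

    static_assert(IsStaticFlagField::decode(IsStaticFlagField::encode(
                      IsStaticFlag::kStatic)) == IsStaticFlag::kStatic,
                  "flags round-trip through the packed word");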
diff --git a/deps/v8/src/base/OWNERS b/deps/v8/src/base/OWNERS
index 9c6fd3c859..3654b400ad 100644
--- a/deps/v8/src/base/OWNERS
+++ b/deps/v8/src/base/OWNERS
@@ -1,4 +1,4 @@
-clemensh@chromium.org
+clemensb@chromium.org
mlippautz@chromium.org
# COMPONENT: Blink>JavaScript
diff --git a/deps/v8/src/base/adapters.h b/deps/v8/src/base/adapters.h
deleted file mode 100644
index f684b52ccb..0000000000
--- a/deps/v8/src/base/adapters.h
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Slightly adapted for inclusion in V8.
-// Copyright 2014 the V8 project authors. All rights reserved.
-
-#ifndef V8_BASE_ADAPTERS_H_
-#define V8_BASE_ADAPTERS_H_
-
-#include <iterator>
-
-#include "src/base/macros.h"
-
-namespace v8 {
-namespace base {
-
-// Internal adapter class for implementing base::Reversed.
-template <typename T>
-class ReversedAdapter {
- public:
- using Iterator =
- std::reverse_iterator<decltype(std::begin(std::declval<T>()))>;
-
- explicit ReversedAdapter(T& t) : t_(t) {}
- ReversedAdapter(const ReversedAdapter& ra) V8_NOEXCEPT = default;
-
- // TODO(clemensh): Use std::rbegin/std::rend once we have C++14 support.
- Iterator begin() const { return Iterator(std::end(t_)); }
- Iterator end() const { return Iterator(std::begin(t_)); }
-
- private:
- T& t_;
-
- DISALLOW_ASSIGN(ReversedAdapter);
-};
-
-// Reversed returns a container adapter usable in a range-based "for" statement
-// for iterating a reversible container in reverse order.
-//
-// Example:
-//
-// std::vector<int> v = ...;
-// for (int i : base::Reversed(v)) {
-// // iterates through v from back to front
-// }
-template <typename T>
-ReversedAdapter<T> Reversed(T&& t) {
- return ReversedAdapter<T>(t);
-}
-
-} // namespace base
-} // namespace v8
-
-#endif // V8_BASE_ADAPTERS_H_
diff --git a/deps/v8/src/base/cpu.cc b/deps/v8/src/base/cpu.cc
index 6ab0ffee29..4f4ac2b328 100644
--- a/deps/v8/src/base/cpu.cc
+++ b/deps/v8/src/base/cpu.cc
@@ -9,6 +9,7 @@
#endif
#if V8_OS_LINUX
#include <linux/auxvec.h> // AT_HWCAP
+extern "C" char** environ;
#endif
#if V8_GLIBC_PREREQ(2, 16)
#include <sys/auxv.h> // getauxval()
@@ -16,7 +17,7 @@
#if V8_OS_QNX
#include <sys/syspage.h> // cpuinfo
#endif
-#if V8_OS_LINUX && V8_HOST_ARCH_PPC
+#if (V8_OS_LINUX && V8_HOST_ARCH_PPC) || V8_OS_ANDROID
#include <elf.h>
#endif
#if V8_OS_AIX
@@ -109,28 +110,25 @@ static V8_INLINE void __cpuid(int cpu_info[4], int info_type) {
#define HWCAP_LPAE (1 << 20)
static uint32_t ReadELFHWCaps() {
- uint32_t result = 0;
#if V8_GLIBC_PREREQ(2, 16)
- result = static_cast<uint32_t>(getauxval(AT_HWCAP));
+ return static_cast<uint32_t>(getauxval(AT_HWCAP));
#else
- // Read the ELF HWCAP flags by parsing /proc/self/auxv.
- FILE* fp = fopen("/proc/self/auxv", "r");
- if (fp != nullptr) {
- struct { uint32_t tag; uint32_t value; } entry;
- for (;;) {
- size_t n = fread(&entry, sizeof(entry), 1, fp);
- if (n == 0 || (entry.tag == 0 && entry.value == 0)) {
- break;
- }
- if (entry.tag == AT_HWCAP) {
- result = entry.value;
- break;
- }
+ char** head = environ;
+ while (*head++ != nullptr) {
+ }
+#ifdef __LP64__
+ using elf_auxv_t = Elf64_auxv_t;
+#else
+ using elf_auxv_t = Elf32_auxv_t;
+#endif
+ for (elf_auxv_t* entry = reinterpret_cast<elf_auxv_t*>(head);
+ entry->a_type != AT_NULL; ++entry) {
+ if (entry->a_type == AT_HWCAP) {
+ return entry->a_un.a_val;
}
- fclose(fp);
}
+ return 0u;
#endif
- return result;
}
#endif // V8_HOST_ARCH_ARM
@@ -608,33 +606,28 @@ CPU::CPU()
#ifndef USE_SIMULATOR
#if V8_OS_LINUX
- // Read processor info from /proc/self/auxv.
char* auxv_cpu_type = nullptr;
- FILE* fp = fopen("/proc/self/auxv", "r");
- if (fp != nullptr) {
+ char** head = environ;
+ while (*head++ != nullptr) {
+ }
#if V8_TARGET_ARCH_PPC64
- Elf64_auxv_t entry;
+ using elf_auxv_t = Elf64_auxv_t;
#else
- Elf32_auxv_t entry;
+ using elf_auxv_t = Elf32_auxv_t;
#endif
- for (;;) {
- size_t n = fread(&entry, sizeof(entry), 1, fp);
- if (n == 0 || entry.a_type == AT_NULL) {
+ for (elf_auxv_t* entry = reinterpret_cast<elf_auxv_t*>(head);
+ entry->a_type != AT_NULL; ++entry) {
+ switch (entry->a_type) {
+ case AT_PLATFORM:
+ auxv_cpu_type = reinterpret_cast<char*>(entry->a_un.a_val);
+ break;
+ case AT_ICACHEBSIZE:
+ icache_line_size_ = entry->a_un.a_val;
+ break;
+ case AT_DCACHEBSIZE:
+ dcache_line_size_ = entry->a_un.a_val;
break;
- }
- switch (entry.a_type) {
- case AT_PLATFORM:
- auxv_cpu_type = reinterpret_cast<char*>(entry.a_un.a_val);
- break;
- case AT_ICACHEBSIZE:
- icache_line_size_ = entry.a_un.a_val;
- break;
- case AT_DCACHEBSIZE:
- dcache_line_size_ = entry.a_un.a_val;
- break;
- }
}
- fclose(fp);
}
part_ = -1;
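
Both cpu.cc hunks replace the fopen("/proc/self/auxv") parsing with a direct walk of the initial process stack, where the kernel places the ELF auxiliary vector immediately after envp's terminating nullptr. A condensed sketch of the technique (Linux-only; the 64-bit auxv layout is hard-coded here, whereas the real code selects Elf32/Elf64 via __LP64__, and environ must still point into the original stack block):

    #include <elf.h>  // Elf64_auxv_t, AT_NULL

    extern "C" char** environ;

    unsigned long ReadAuxvalSketch(unsigned long type) {
      // Skip to one past envp's nullptr terminator; the auxv entries start
      // right there on the initial stack.
      char** head = environ;
      while (*head++ != nullptr) {
      }
      for (Elf64_auxv_t* entry = reinterpret_cast<Elf64_auxv_t*>(head);
           entry->a_type != AT_NULL; ++entry) {
        if (entry->a_type == type) return entry->a_un.a_val;
      }
      return 0;
    }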
diff --git a/deps/v8/src/base/file-utils.cc b/deps/v8/src/base/file-utils.cc
index 31b1b41190..6e1c492144 100644
--- a/deps/v8/src/base/file-utils.cc
+++ b/deps/v8/src/base/file-utils.cc
@@ -12,24 +12,18 @@
namespace v8 {
namespace base {
-char* RelativePath(char** buffer, const char* exec_path, const char* name) {
+std::unique_ptr<char[]> RelativePath(const char* exec_path, const char* name) {
DCHECK(exec_path);
- int path_separator = static_cast<int>(strlen(exec_path)) - 1;
- while (path_separator >= 0 &&
- !OS::isDirectorySeparator(exec_path[path_separator])) {
- path_separator--;
+ size_t basename_start = strlen(exec_path);
+ while (basename_start > 0 &&
+ !OS::isDirectorySeparator(exec_path[basename_start - 1])) {
+ --basename_start;
}
- if (path_separator >= 0) {
- int name_length = static_cast<int>(strlen(name));
- *buffer =
- reinterpret_cast<char*>(calloc(path_separator + name_length + 2, 1));
- *buffer[0] = '\0';
- strncat(*buffer, exec_path, path_separator + 1);
- strncat(*buffer, name, name_length);
- } else {
- *buffer = strdup(name);
- }
- return *buffer;
+ size_t name_length = strlen(name);
+ auto buffer = std::make_unique<char[]>(basename_start + name_length + 1);
+ if (basename_start > 0) memcpy(buffer.get(), exec_path, basename_start);
+ memcpy(buffer.get() + basename_start, name, name_length);
+ return buffer;
}
} // namespace base
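
RelativePath now returns an owning std::unique_ptr<char[]> instead of filling a caller-provided char** with calloc/strdup results. A hedged usage sketch (the paths are made up; assumes src/base/file-utils.h is included):

    #include <cstdio>
    #include <memory>

    int main() {
      std::unique_ptr<char[]> path =
          v8::base::RelativePath("/usr/local/bin/d8", "snapshot_blob.bin");
      // make_unique<char[]> value-initializes, so the buffer is already
      // NUL-terminated: "/usr/local/bin/snapshot_blob.bin".
      std::printf("%s\n", path.get());
      return 0;  // the unique_ptr frees the buffer; no free() bookkeeping
    }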
diff --git a/deps/v8/src/base/file-utils.h b/deps/v8/src/base/file-utils.h
index afd9a1fc25..84b57fb40b 100644
--- a/deps/v8/src/base/file-utils.h
+++ b/deps/v8/src/base/file-utils.h
@@ -5,6 +5,8 @@
#ifndef V8_BASE_FILE_UTILS_H_
#define V8_BASE_FILE_UTILS_H_
+#include <memory>
+
#include "src/base/base-export.h"
namespace v8 {
@@ -12,8 +14,8 @@ namespace base {
// Helper functions to manipulate file paths.
-V8_BASE_EXPORT char* RelativePath(char** buffer, const char* exec_path,
- const char* name);
+V8_BASE_EXPORT
+std::unique_ptr<char[]> RelativePath(const char* exec_path, const char* name);
} // namespace base
} // namespace v8
diff --git a/deps/v8/src/base/free_deleter.h b/deps/v8/src/base/free_deleter.h
index 77e4f0ed14..a556926685 100644
--- a/deps/v8/src/base/free_deleter.h
+++ b/deps/v8/src/base/free_deleter.h
@@ -9,6 +9,7 @@
#define V8_BASE_FREE_DELETER_H_
#include <stdlib.h>
+#include <memory>
namespace v8 {
namespace base {
diff --git a/deps/v8/src/base/iterator.h b/deps/v8/src/base/iterator.h
index b081af62ae..baaf324e21 100644
--- a/deps/v8/src/base/iterator.h
+++ b/deps/v8/src/base/iterator.h
@@ -59,6 +59,26 @@ class iterator_range {
const_iterator const end_;
};
+template <typename ForwardIterator>
+auto make_iterator_range(ForwardIterator&& begin, ForwardIterator&& end) {
+ return iterator_range<ForwardIterator>{std::forward<ForwardIterator>(begin),
+ std::forward<ForwardIterator>(end)};
+}
+
+// {Reversed} returns a container adapter usable in a range-based "for"
+// statement for iterating a reversible container in reverse order.
+//
+// Example:
+//
+// std::vector<int> v = ...;
+// for (int i : base::Reversed(v)) {
+// // iterates through v from back to front
+// }
+template <typename T>
+auto Reversed(T& t) { // NOLINT(runtime/references): match {rbegin} and {rend}
+ return make_iterator_range(std::rbegin(t), std::rend(t));
+}
+
} // namespace base
} // namespace v8
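
The relocated Reversed adapter keeps the documented contract. Expanding the header comment's own example into a complete program (assumes src/base/iterator.h is on the include path):

    #include <iostream>
    #include <vector>

    int main() {
      std::vector<int> v = {1, 2, 3, 4};
      for (int i : v8::base::Reversed(v)) {
        std::cout << i << ' ';  // prints: 4 3 2 1
      }
      std::cout << '\n';
      return 0;
    }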
diff --git a/deps/v8/src/base/macros.h b/deps/v8/src/base/macros.h
index ad70e9820d..72ef64cfbe 100644
--- a/deps/v8/src/base/macros.h
+++ b/deps/v8/src/base/macros.h
@@ -232,35 +232,16 @@ struct is_trivially_copyable {
// the standard does not, so let's skip this check.)
// Trivial non-deleted destructor.
std::is_trivially_destructible<T>::value;
-
-#elif defined(__GNUC__) && __GNUC__ < 5
- // WARNING:
- // On older libstdc++ versions, there is no way to correctly implement
- // is_trivially_copyable. The workaround below is an approximation (neither
- // over- nor underapproximation). E.g. it wrongly returns true if the move
- // constructor is non-trivial, and it wrongly returns false if the copy
- // constructor is deleted, but copy assignment is trivial.
- // TODO(rongjie) Remove this workaround once we require gcc >= 5.0
- static constexpr bool value =
- __has_trivial_copy(T) && __has_trivial_destructor(T);
-
#else
static constexpr bool value = std::is_trivially_copyable<T>::value;
#endif
};
-#if defined(__GNUC__) && __GNUC__ < 5
-// On older libstdc++ versions, base::is_trivially_copyable<T>::value is only an
-// approximation (see above), so make ASSERT_{NOT_,}TRIVIALLY_COPYABLE a noop.
-#define ASSERT_TRIVIALLY_COPYABLE(T) static_assert(true, "check disabled")
-#define ASSERT_NOT_TRIVIALLY_COPYABLE(T) static_assert(true, "check disabled")
-#else
#define ASSERT_TRIVIALLY_COPYABLE(T) \
static_assert(::v8::base::is_trivially_copyable<T>::value, \
#T " should be trivially copyable")
#define ASSERT_NOT_TRIVIALLY_COPYABLE(T) \
static_assert(!::v8::base::is_trivially_copyable<T>::value, \
#T " should not be trivially copyable")
-#endif
// The USE(x, ...) template is used to silence C++ compiler warnings
// issued for (yet) unused variables (typically parameters).
@@ -407,6 +388,9 @@ bool is_inbounds(float_t v) {
constexpr bool kUpperBoundIsMax =
static_cast<biggest_int_t>(kUpperBound) ==
static_cast<biggest_int_t>(std::numeric_limits<int_t>::max());
+ // Using USE(var) is only a workaround for a GCC 8.1 bug.
+ USE(kLowerBoundIsMin);
+ USE(kUpperBoundIsMax);
return (kLowerBoundIsMin ? (kLowerBound <= v) : (kLowerBound < v)) &&
(kUpperBoundIsMax ? (v <= kUpperBound) : (v < kUpperBound));
}
diff --git a/deps/v8/src/base/optional.h b/deps/v8/src/base/optional.h
index b8df88d844..6610c7ffc3 100644
--- a/deps/v8/src/base/optional.h
+++ b/deps/v8/src/base/optional.h
@@ -131,21 +131,8 @@ struct OptionalStorageBase<T, true /* trivially destructible */> {
// the condition of constexpr-ness is satisfied because the base class also has
// compiler generated constexpr {copy,move} constructors). Note that
// placement-new is prohibited in constexpr.
-#if defined(__GNUC__) && __GNUC__ < 5
-// gcc <5 does not implement std::is_trivially_copy_constructible.
-// Conservatively assume false for this configuration.
-// TODO(clemensh): Remove this once we drop support for gcc <5.
-#define TRIVIALLY_COPY_CONSTRUCTIBLE(T) false
-#define TRIVIALLY_MOVE_CONSTRUCTIBLE(T) false
-#else
-#define TRIVIALLY_COPY_CONSTRUCTIBLE(T) \
- std::is_trivially_copy_constructible<T>::value
-#define TRIVIALLY_MOVE_CONSTRUCTIBLE(T) \
- std::is_trivially_move_constructible<T>::value
-#endif
-template <typename T, bool = TRIVIALLY_COPY_CONSTRUCTIBLE(T),
- bool = TRIVIALLY_MOVE_CONSTRUCTIBLE(T)>
-#undef TRIVIALLY_COPY_CONSTRUCTIBLE
+template <typename T, bool = std::is_trivially_copy_constructible<T>::value,
+ bool = std::is_trivially_move_constructible<T>::value>
struct OptionalStorage : OptionalStorageBase<T> {
// This is no trivially {copy,move} constructible case. Other cases are
// defined below as specializations.
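
With the gcc < 5 fallback macros gone, storage selection reduces to the plain trait-dispatch idiom: defaulted template arguments computed from <type_traits> pick the specialization. A stripped-down sketch of the idiom (illustrative names, not the ones in optional.h):

    #include <type_traits>

    // Primary template: T's copy or move constructor is not trivial.
    template <typename T,
              bool = std::is_trivially_copy_constructible<T>::value,
              bool = std::is_trivially_move_constructible<T>::value>
    struct Storage {
      static constexpr bool kTrivial = false;
    };

    // Specialization: both constructors trivial, so Storage<T> can keep the
    // compiler-generated (trivial, constexpr-friendly) constructors.
    template <typename T>
    struct Storage<T, true, true> {
      static constexpr bool kTrivial = true;
    };

    static_assert(Storage<int>::kTrivial, "int selects the trivial storage");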
diff --git a/deps/v8/src/base/platform/mutex.h b/deps/v8/src/base/platform/mutex.h
index c48cf8d339..5b3b31ec1e 100644
--- a/deps/v8/src/base/platform/mutex.h
+++ b/deps/v8/src/base/platform/mutex.h
@@ -290,6 +290,7 @@ class LockGuard final {
};
using MutexGuard = LockGuard<Mutex>;
+using RecursiveMutexGuard = LockGuard<RecursiveMutex>;
enum MutexSharedType : bool { kShared = true, kExclusive = false };
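
RecursiveMutexGuard extends the same scoped-locking convenience MutexGuard already provides. A minimal re-entrancy sketch (assumes src/base/platform/mutex.h is included and that LockGuard's constructor takes a pointer, as MutexGuard call sites do):

    struct Node {
      Node* left;
      Node* right;
    };

    v8::base::RecursiveMutex graph_mutex;

    void Visit(Node* node) {
      // The owning thread may re-acquire a RecursiveMutex, so the recursive
      // calls below do not deadlock; the guard unlocks on every return.
      v8::base::RecursiveMutexGuard guard(&graph_mutex);
      if (node == nullptr) return;
      Visit(node->left);
      Visit(node->right);
    }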
diff --git a/deps/v8/src/base/platform/platform-openbsd.cc b/deps/v8/src/base/platform/platform-openbsd.cc
index c133ffb68d..e4a3cb6f35 100644
--- a/deps/v8/src/base/platform/platform-openbsd.cc
+++ b/deps/v8/src/base/platform/platform-openbsd.cc
@@ -107,7 +107,7 @@ void OS::SignalCodeMovingGC() {
// it. This injects a GC marker into the stream of events generated
// by the kernel and allows us to synchronize V8 code log and the
// kernel log.
- int size = sysconf(_SC_PAGESIZE);
+ long size = sysconf(_SC_PAGESIZE);  // NOLINT: better fit than uint64_t
FILE* f = fopen(OS::GetGCFakeMMapFile(), "w+");
if (f == nullptr) {
OS::PrintError("Failed to open %s\n", OS::GetGCFakeMMapFile());
@@ -116,7 +116,7 @@ void OS::SignalCodeMovingGC() {
void* addr =
mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_PRIVATE, fileno(f), 0);
DCHECK(addr != MAP_FAILED);
- OS::Free(addr, size);
+ CHECK(OS::Free(addr, size));
fclose(f);
}
diff --git a/deps/v8/src/base/platform/platform-posix.cc b/deps/v8/src/base/platform/platform-posix.cc
index c50cdd7a98..99abcd5568 100644
--- a/deps/v8/src/base/platform/platform-posix.cc
+++ b/deps/v8/src/base/platform/platform-posix.cc
@@ -48,6 +48,7 @@
#if V8_OS_MACOSX
#include <dlfcn.h>
+#include <mach/mach.h>
#endif
#if V8_OS_LINUX
diff --git a/deps/v8/src/base/platform/semaphore.cc b/deps/v8/src/base/platform/semaphore.cc
index a7e50f5880..66464d8258 100644
--- a/deps/v8/src/base/platform/semaphore.cc
+++ b/deps/v8/src/base/platform/semaphore.cc
@@ -5,8 +5,7 @@
#include "src/base/platform/semaphore.h"
#if V8_OS_MACOSX
-#include <mach/mach_init.h>
-#include <mach/task.h>
+#include <dispatch/dispatch.h>
#endif
#include <errno.h>
@@ -21,53 +20,23 @@ namespace base {
#if V8_OS_MACOSX
Semaphore::Semaphore(int count) {
- kern_return_t result = semaphore_create(
- mach_task_self(), &native_handle_, SYNC_POLICY_FIFO, count);
- DCHECK_EQ(KERN_SUCCESS, result);
- USE(result);
+ native_handle_ = dispatch_semaphore_create(count);
+ DCHECK(native_handle_);
}
+Semaphore::~Semaphore() { dispatch_release(native_handle_); }
-Semaphore::~Semaphore() {
- kern_return_t result = semaphore_destroy(mach_task_self(), native_handle_);
- DCHECK_EQ(KERN_SUCCESS, result);
- USE(result);
-}
-
-void Semaphore::Signal() {
- kern_return_t result = semaphore_signal(native_handle_);
- DCHECK_EQ(KERN_SUCCESS, result);
- USE(result);
-}
-
+void Semaphore::Signal() { dispatch_semaphore_signal(native_handle_); }
void Semaphore::Wait() {
- while (true) {
- kern_return_t result = semaphore_wait(native_handle_);
- if (result == KERN_SUCCESS) return; // Semaphore was signalled.
- DCHECK_EQ(KERN_ABORTED, result);
- }
+ dispatch_semaphore_wait(native_handle_, DISPATCH_TIME_FOREVER);
}
bool Semaphore::WaitFor(const TimeDelta& rel_time) {
- TimeTicks now = TimeTicks::Now();
- TimeTicks end = now + rel_time;
- while (true) {
- mach_timespec_t ts;
- if (now >= end) {
- // Return immediately if semaphore was not signalled.
- ts.tv_sec = 0;
- ts.tv_nsec = 0;
- } else {
- ts = (end - now).ToMachTimespec();
- }
- kern_return_t result = semaphore_timedwait(native_handle_, ts);
- if (result == KERN_SUCCESS) return true; // Semaphore was signalled.
- if (result == KERN_OPERATION_TIMED_OUT) return false; // Timeout.
- DCHECK_EQ(KERN_ABORTED, result);
- now = TimeTicks::Now();
- }
+ dispatch_time_t timeout =
+ dispatch_time(DISPATCH_TIME_NOW, rel_time.InNanoseconds());
+ return dispatch_semaphore_wait(native_handle_, timeout) == 0;
}
#elif V8_OS_POSIX
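
The macOS backend now maps one-to-one onto libdispatch. The underlying calls, isolated into a sketch (macOS-only; note that dispatch_semaphore_wait returns zero on success and nonzero on timeout, which is what WaitFor's == 0 comparison relies on):

    #include <dispatch/dispatch.h>

    void DispatchSemaphoreSketch() {
      dispatch_semaphore_t sem = dispatch_semaphore_create(0);  // Semaphore(0)
      dispatch_semaphore_signal(sem);                           // Signal()
      dispatch_semaphore_wait(sem, DISPATCH_TIME_FOREVER);      // Wait()
      // WaitFor(100ms): a nonzero result means the timeout elapsed.
      dispatch_time_t deadline =
          dispatch_time(DISPATCH_TIME_NOW, 100 * 1000 * 1000);
      bool timed_out = dispatch_semaphore_wait(sem, deadline) != 0;
      (void)timed_out;  // true here: the semaphore's count is back to zero
      dispatch_release(sem);                                    // ~Semaphore()
    }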
diff --git a/deps/v8/src/base/platform/semaphore.h b/deps/v8/src/base/platform/semaphore.h
index 11ff0b9199..c4937acadd 100644
--- a/deps/v8/src/base/platform/semaphore.h
+++ b/deps/v8/src/base/platform/semaphore.h
@@ -12,7 +12,7 @@
#endif
#if V8_OS_MACOSX
-#include <mach/semaphore.h> // NOLINT
+#include <dispatch/dispatch.h> // NOLINT
#elif V8_OS_POSIX
#include <semaphore.h> // NOLINT
#endif
@@ -50,7 +50,7 @@ class V8_BASE_EXPORT Semaphore final {
bool WaitFor(const TimeDelta& rel_time) V8_WARN_UNUSED_RESULT;
#if V8_OS_MACOSX
- using NativeHandle = semaphore_t;
+ using NativeHandle = dispatch_semaphore_t;
#elif V8_OS_POSIX
using NativeHandle = sem_t;
#elif V8_OS_WIN
diff --git a/deps/v8/src/base/template-utils.h b/deps/v8/src/base/template-utils.h
index 530114a8e2..146f8d6e6a 100644
--- a/deps/v8/src/base/template-utils.h
+++ b/deps/v8/src/base/template-utils.h
@@ -6,32 +6,20 @@
#define V8_BASE_TEMPLATE_UTILS_H_
#include <array>
-#include <memory>
+#include <type_traits>
+#include <utility>
namespace v8 {
namespace base {
namespace detail {
-// make_array_helper statically iteratively creates the index list 0 .. Size-1.
-// A specialization for the base case (first index is 0) finally constructs the
-// array.
-// TODO(clemensh): Use std::index_sequence once we have C++14 support.
-template <class Function, std::size_t... Indexes>
-struct make_array_helper;
-
-template <class Function, std::size_t... Indexes>
-struct make_array_helper<Function, 0, Indexes...> {
- constexpr static std::array<typename std::result_of<Function(size_t)>::type,
- sizeof...(Indexes) + 1>
- make_array(Function f) {
- return {{f(0), f(Indexes)...}};
- }
-};
-
-template <class Function, std::size_t FirstIndex, std::size_t... Indexes>
-struct make_array_helper<Function, FirstIndex, Indexes...>
- : make_array_helper<Function, FirstIndex - 1, FirstIndex, Indexes...> {};
+template <typename Function, std::size_t... Indexes>
+constexpr inline auto make_array_helper(Function f,
+ std::index_sequence<Indexes...>)
+ -> std::array<decltype(f(0)), sizeof...(Indexes)> {
+ return {{f(Indexes)...}};
+}
} // namespace detail
@@ -42,18 +30,8 @@ struct make_array_helper<Function, FirstIndex, Indexes...>
// [](std::size_t i) { return static_cast<int>(2 * i); });
// The resulting array will be constexpr if the passed function is constexpr.
template <std::size_t Size, class Function>
-constexpr std::array<typename std::result_of<Function(size_t)>::type, Size>
-make_array(Function f) {
- static_assert(Size > 0, "Can only create non-empty arrays");
- return detail::make_array_helper<Function, Size - 1>::make_array(f);
-}
-
-// base::make_unique<T>: Construct an object of type T and wrap it in a
-// std::unique_ptr.
-// Replacement for C++14's std::make_unique.
-template <typename T, typename... Args>
-std::unique_ptr<T> make_unique(Args&&... args) {
- return std::unique_ptr<T>(new T(std::forward<Args>(args)...));
+constexpr auto make_array(Function f) {
+ return detail::make_array_helper(f, std::make_index_sequence<Size>{});
}
// Helper to determine how to pass values: Pass scalars and arrays by value,
@@ -80,38 +58,17 @@ struct has_output_operator<T, decltype(void(std::declval<std::ostream&>()
<< std::declval<T>()))>
: std::true_type {};
-namespace detail {
-
-template <typename Func, typename T, typename... Ts>
-struct fold_helper {
- static_assert(sizeof...(Ts) == 0, "this is the base case");
- using result_t = typename std::remove_reference<T>::type;
- static constexpr T&& fold(Func func, T&& first) {
- return std::forward<T>(first);
- }
-};
+// Fold all arguments from left to right with a given function.
+template <typename Func, typename T>
+constexpr auto fold(Func func, T&& t) {
+ return std::forward<T>(t);
+}
template <typename Func, typename T1, typename T2, typename... Ts>
-struct fold_helper<Func, T1, T2, Ts...> {
- using folded_t = typename std::result_of<Func(T1, T2)>::type;
- using next_fold_helper = fold_helper<Func, folded_t&&, Ts...>;
- using result_t = typename next_fold_helper::result_t;
- static constexpr result_t fold(Func func, T1&& first, T2&& second,
- Ts&&... more) {
- return next_fold_helper::fold(
- func, func(std::forward<T1>(first), std::forward<T2>(second)),
- std::forward<Ts>(more)...);
- }
-};
-
-} // namespace detail
-
-// Fold all arguments from left to right with a given function.
-template <typename Func, typename... Ts>
-constexpr auto fold(Func func, Ts&&... more) ->
- typename detail::fold_helper<Func, Ts...>::result_t {
- return detail::fold_helper<Func, Ts...>::fold(func,
- std::forward<Ts>(more)...);
+constexpr auto fold(Func func, T1&& first, T2&& second, Ts&&... more) {
+ auto&& folded = func(std::forward<T1>(first), std::forward<T2>(second));
+ return fold(std::move(func), std::forward<decltype(folded)>(folded),
+ std::forward<Ts>(more)...);
}
// {is_same<Ts...>::value} is true if all Ts are the same, false otherwise.
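
make_array and fold now deduce everything through std::index_sequence and return-type deduction instead of hand-rolled recursive helpers; observable behavior is meant to be unchanged. Expanding the header's own example (assumes src/base/template-utils.h is included):

    #include <cstddef>

    int Demo() {
      auto doubled = v8::base::make_array<4>(
          [](std::size_t i) { return static_cast<int>(2 * i); });
      // doubled == {0, 2, 4, 6}
      int sum = v8::base::fold([](int a, int b) { return a + b; }, 1, 2, 3, 4);
      // sum == 10, folded left to right: ((1 + 2) + 3) + 4
      return doubled[3] + sum;  // 16
    }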
diff --git a/deps/v8/src/base/ubsan.cc b/deps/v8/src/base/ubsan.cc
new file mode 100644
index 0000000000..fc77156eb1
--- /dev/null
+++ b/deps/v8/src/base/ubsan.cc
@@ -0,0 +1,50 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stdint.h>
+#include <limits>
+
+#include "src/base/build_config.h"
+
+#if !defined(UNDEFINED_SANITIZER) || !defined(V8_TARGET_ARCH_32_BIT)
+#error "This file is only needed for 32-bit UBSan builds."
+#endif
+
+// Compiling with -fsanitize=undefined on 32-bit platforms requires __mulodi4
+// to be available. Usually it comes from libcompiler_rt, which our build
+// doesn't provide, so here is a custom implementation (inspired by digit_mul
+// in src/objects/bigint.cc).
+extern "C" int64_t __mulodi4(int64_t a, int64_t b, int* overflow) {
+ // Multiply in 32-bit chunks.
+ // For inputs [AH AL]*[BH BL], the result is:
+ //
+ // [AL*BL] // r_low
+ // + [AL*BH] // r_mid1
+ // + [AH*BL] // r_mid2
+ // + [AH*BH] // r_high
+ // = [R4 R3 R2 R1] // high = [R4 R3], low = [R2 R1]
+ //
+ // Where of course we must be careful with carries between the columns.
+ uint64_t a_low = a & 0xFFFFFFFFu;
+ uint64_t a_high = static_cast<uint64_t>(a) >> 32;
+ uint64_t b_low = b & 0xFFFFFFFFu;
+ uint64_t b_high = static_cast<uint64_t>(b) >> 32;
+
+ uint64_t r_low = a_low * b_low;
+ uint64_t r_mid1 = a_low * b_high;
+ uint64_t r_mid2 = a_high * b_low;
+ uint64_t r_high = a_high * b_high;
+
+ uint64_t result1 = r_low + (r_mid1 << 32);
+ if (result1 < r_low) r_high++;
+ uint64_t result2 = result1 + (r_mid2 << 32);
+ if (result2 < result1) r_high++;
+ r_high += (r_mid1 >> 32) + (r_mid2 >> 32);
+ int64_t result = static_cast<int64_t>(result2);
+ uint64_t result_sign = (result >> 63);
+ uint64_t expected_result_sign = (a >> 63) ^ (b >> 63);
+
+ *overflow = (r_high > 0 || result_sign != expected_result_sign) ? 1 : 0;
+ return result;
+}
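
A quick way to sanity-check the carry handling at the boundary (a hypothetical test, linkable only in the 32-bit UBSan builds where this definition is compiled; 3037000499 is the largest int64_t whose square still fits):

    #include <cassert>
    #include <cstdint>

    extern "C" int64_t __mulodi4(int64_t a, int64_t b, int* overflow);

    void TestMulodi4() {
      int overflow = 0;
      int64_t r =
          __mulodi4(INT64_C(3037000499), INT64_C(3037000499), &overflow);
      assert(overflow == 0 && r == INT64_C(9223372030926249001));
      __mulodi4(INT64_C(3037000500), INT64_C(3037000500), &overflow);
      assert(overflow == 1);  // 3037000500^2 exceeds INT64_MAX
    }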
diff --git a/deps/v8/src/builtins/accessors.cc b/deps/v8/src/builtins/accessors.cc
index ea6308622d..fa39142cb4 100644
--- a/deps/v8/src/builtins/accessors.cc
+++ b/deps/v8/src/builtins/accessors.cc
@@ -16,6 +16,7 @@
#include "src/objects/contexts.h"
#include "src/objects/field-index-inl.h"
#include "src/objects/js-array-inl.h"
+#include "src/objects/js-regexp-inl.h"
#include "src/objects/module-inl.h"
#include "src/objects/property-details.h"
#include "src/objects/prototype.h"
@@ -840,5 +841,25 @@ Handle<AccessorInfo> Accessors::MakeErrorStackInfo(Isolate* isolate) {
&ErrorStackGetter, &ErrorStackSetter);
}
+//
+// Accessors::RegExpResultIndices
+//
+
+void Accessors::RegExpResultIndicesGetter(
+ v8::Local<v8::Name> key, const v8::PropertyCallbackInfo<v8::Value>& info) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
+ HandleScope scope(isolate);
+ Handle<JSRegExpResult> regexp_result(
+ Handle<JSRegExpResult>::cast(Utils::OpenHandle(*info.Holder())));
+ Handle<Object> indices(
+ JSRegExpResult::GetAndCacheIndices(isolate, regexp_result));
+ info.GetReturnValue().Set(Utils::ToLocal(indices));
+}
+
+Handle<AccessorInfo> Accessors::MakeRegExpResultIndicesInfo(Isolate* isolate) {
+ return MakeAccessor(isolate, isolate->factory()->indices_string(),
+ &RegExpResultIndicesGetter, nullptr);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/builtins/accessors.h b/deps/v8/src/builtins/accessors.h
index 43a6534296..b6a8e65446 100644
--- a/deps/v8/src/builtins/accessors.h
+++ b/deps/v8/src/builtins/accessors.h
@@ -43,6 +43,8 @@ class JavaScriptFrame;
kHasSideEffectToReceiver) \
V(_, function_prototype, FunctionPrototype, kHasNoSideEffect, \
kHasSideEffectToReceiver) \
+ V(_, regexp_result_indices, RegExpResultIndices, kHasSideEffectToReceiver, \
+ kHasSideEffectToReceiver) \
V(_, string_length, StringLength, kHasNoSideEffect, kHasSideEffectToReceiver)
#define ACCESSOR_SETTER_LIST(V) \
diff --git a/deps/v8/src/builtins/arm/builtins-arm.cc b/deps/v8/src/builtins/arm/builtins-arm.cc
index e9b562620f..164c09db25 100644
--- a/deps/v8/src/builtins/arm/builtins-arm.cc
+++ b/deps/v8/src/builtins/arm/builtins-arm.cc
@@ -885,102 +885,70 @@ static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
__ bind(&no_match);
}
-static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
- Register feedback_vector,
- Register scratch1,
- Register scratch2) {
+static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
+ Register optimized_code_entry,
+ Register scratch) {
// ----------- S t a t e -------------
// -- r3 : new target (preserved for callee if needed, and caller)
// -- r1 : target function (preserved for callee if needed, and caller)
- // -- feedback vector (preserved for caller if needed)
// -----------------------------------
- DCHECK(!AreAliased(feedback_vector, r1, r3, scratch1, scratch2));
-
- Label optimized_code_slot_is_weak_ref, fallthrough;
+ DCHECK(!AreAliased(r1, r3, optimized_code_entry, scratch));
Register closure = r1;
- Register optimized_code_entry = scratch1;
-
- __ ldr(
- optimized_code_entry,
- FieldMemOperand(feedback_vector,
- FeedbackVector::kOptimizedCodeWeakOrSmiOffset));
- // Check if the code entry is a Smi. If yes, we interpret it as an
- // optimisation marker. Otherwise, interpret it as a weak reference to a code
- // object.
- __ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_weak_ref);
-
- {
- // Optimized code slot is a Smi optimization marker.
-
- // Fall through if no optimization trigger.
- __ cmp(optimized_code_entry,
- Operand(Smi::FromEnum(OptimizationMarker::kNone)));
- __ b(eq, &fallthrough);
-
- // TODO(v8:8394): The logging of first execution will break if
- // feedback vectors are not allocated. We need to find a different way of
- // logging these events if required.
- TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
- OptimizationMarker::kLogFirstExecution,
- Runtime::kFunctionFirstExecution);
- TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
- OptimizationMarker::kCompileOptimized,
- Runtime::kCompileOptimized_NotConcurrent);
- TailCallRuntimeIfMarkerEquals(
- masm, optimized_code_entry,
- OptimizationMarker::kCompileOptimizedConcurrent,
- Runtime::kCompileOptimized_Concurrent);
+ // Check if the optimized code is marked for deopt. If it is, call the
+ // runtime to clear it.
+ Label found_deoptimized_code;
+ __ ldr(scratch,
+ FieldMemOperand(optimized_code_entry, Code::kCodeDataContainerOffset));
+ __ ldr(scratch,
+ FieldMemOperand(scratch, CodeDataContainer::kKindSpecificFlagsOffset));
+ __ tst(scratch, Operand(1 << Code::kMarkedForDeoptimizationBit));
+ __ b(ne, &found_deoptimized_code);
- {
- // Otherwise, the marker is InOptimizationQueue, so fall through hoping
- // that an interrupt will eventually update the slot with optimized code.
- if (FLAG_debug_code) {
- __ cmp(
- optimized_code_entry,
- Operand(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue)));
- __ Assert(eq, AbortReason::kExpectedOptimizationSentinel);
- }
- __ jmp(&fallthrough);
- }
- }
+ // Optimized code is good, get it into the closure and link the closure
+ // into the optimized functions list, then tail call the optimized code.
+ ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure);
+ static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
+ __ LoadCodeObjectEntry(r2, optimized_code_entry);
+ __ Jump(r2);
- {
- // Optimized code slot is a weak reference.
- __ bind(&optimized_code_slot_is_weak_ref);
-
- __ LoadWeakValue(optimized_code_entry, optimized_code_entry, &fallthrough);
-
- // Check if the optimized code is marked for deopt. If it is, call the
- // runtime to clear it.
- Label found_deoptimized_code;
- __ ldr(scratch2, FieldMemOperand(optimized_code_entry,
- Code::kCodeDataContainerOffset));
- __ ldr(
- scratch2,
- FieldMemOperand(scratch2, CodeDataContainer::kKindSpecificFlagsOffset));
- __ tst(scratch2, Operand(1 << Code::kMarkedForDeoptimizationBit));
- __ b(ne, &found_deoptimized_code);
-
- // Optimized code is good, get it into the closure and link the closure into
- // the optimized functions list, then tail call the optimized code.
- // The feedback vector is no longer used, so re-use it as a scratch
- // register.
- ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure);
- static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
- __ LoadCodeObjectEntry(r2, optimized_code_entry);
- __ Jump(r2);
+ // Optimized code slot contains deoptimized code; evict it and re-enter
+ // the closure's code.
+ __ bind(&found_deoptimized_code);
+ GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
+}
- // Optimized code slot contains deoptimized code, evict it and re-enter the
- // closure's code.
- __ bind(&found_deoptimized_code);
- GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
+static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
+ Register optimization_marker) {
+ // ----------- S t a t e -------------
+ // -- r3 : new target (preserved for callee if needed, and caller)
+ // -- r1 : target function (preserved for callee if needed, and caller)
+ // -- feedback vector (preserved for caller if needed)
+ // -- optimization_marker : a Smi containing a non-zero optimization marker.
+ // -----------------------------------
+ DCHECK(!AreAliased(feedback_vector, r1, r3, optimization_marker));
+
+ // TODO(v8:8394): The logging of first execution will break if
+ // feedback vectors are not allocated. We need to find a different way of
+ // logging these events if required.
+ TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
+ OptimizationMarker::kLogFirstExecution,
+ Runtime::kFunctionFirstExecution);
+ TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
+ OptimizationMarker::kCompileOptimized,
+ Runtime::kCompileOptimized_NotConcurrent);
+ TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
+ OptimizationMarker::kCompileOptimizedConcurrent,
+ Runtime::kCompileOptimized_Concurrent);
+
+ // Otherwise, the marker is InOptimizationQueue, so fall through hoping
+ // that an interrupt will eventually update the slot with optimized code.
+ if (FLAG_debug_code) {
+ __ cmp(optimization_marker,
+ Operand(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue)));
+ __ Assert(eq, AbortReason::kExpectedOptimizationSentinel);
}
-
- // Fall-through if the optimized code cell is clear and there is no
- // optimization marker.
- __ bind(&fallthrough);
}
// Advance the current bytecode offset. This simulates what all bytecode
@@ -999,7 +967,7 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
ExternalReference::bytecode_size_table_address());
// Check if the bytecode is a Wide or ExtraWide prefix bytecode.
- Label process_bytecode, extra_wide;
+ Label process_bytecode;
STATIC_ASSERT(0 == static_cast<int>(interpreter::Bytecode::kWide));
STATIC_ASSERT(1 == static_cast<int>(interpreter::Bytecode::kExtraWide));
STATIC_ASSERT(2 == static_cast<int>(interpreter::Bytecode::kDebugBreakWide));
@@ -1008,31 +976,34 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
__ cmp(bytecode, Operand(0x3));
__ b(hi, &process_bytecode);
__ tst(bytecode, Operand(0x1));
- __ b(ne, &extra_wide);
-
- // Load the next bytecode and update table to the wide scaled table.
+ // Load the next bytecode.
__ add(bytecode_offset, bytecode_offset, Operand(1));
__ ldrb(bytecode, MemOperand(bytecode_array, bytecode_offset));
+
+ // Update table to the wide scaled table.
__ add(bytecode_size_table, bytecode_size_table,
Operand(kIntSize * interpreter::Bytecodes::kBytecodeCount));
- __ jmp(&process_bytecode);
-
- __ bind(&extra_wide);
- // Load the next bytecode and update table to the extra wide scaled table.
- __ add(bytecode_offset, bytecode_offset, Operand(1));
- __ ldrb(bytecode, MemOperand(bytecode_array, bytecode_offset));
+ // Conditionally update table to the extra wide scaled table. We are taking
+ // advantage of the fact that the extra wide follows the wide one.
__ add(bytecode_size_table, bytecode_size_table,
- Operand(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount));
+ Operand(kIntSize * interpreter::Bytecodes::kBytecodeCount), LeaveCC,
+ ne);
__ bind(&process_bytecode);
// Bailout to the return label if this is a return bytecode.
-#define JUMP_IF_EQUAL(NAME) \
- __ cmp(bytecode, Operand(static_cast<int>(interpreter::Bytecode::k##NAME))); \
- __ b(if_return, eq);
+
+ // Create cmp, cmpne, ..., cmpne to check for a return bytecode.
+ Condition flag = al;
+#define JUMP_IF_EQUAL(NAME) \
+ __ cmp(bytecode, Operand(static_cast<int>(interpreter::Bytecode::k##NAME)), \
+ flag); \
+ flag = ne;
RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
#undef JUMP_IF_EQUAL
+ __ b(if_return, eq);
+
// Otherwise, load the size of the current bytecode and advance the offset.
__ ldr(scratch1, MemOperand(bytecode_size_table, bytecode, LSL, 2));
__ add(bytecode_offset, bytecode_offset, scratch1);
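The JUMP_IF_EQUAL rewrite leans on ARM conditional execution: the first cmp runs unconditionally, every later cmp is predicated on ne, so once the flags read "equal" the rest of the chain is skipped and the Z flag survives to the single b(if_return, eq). In plain C++ terms the chain computes the following (a sketch, not V8 code):

#include <cstdint>
#include <initializer_list>

// Models the predicated cmp/cmpne chain: 'equal' plays the role of the
// Z flag, and a cmp predicated on ne only executes while Z is still clear.
inline bool IsReturnBytecode(uint8_t bytecode,
                             std::initializer_list<uint8_t> return_bytecodes) {
  bool equal = false;
  for (uint8_t candidate : return_bytecodes) {
    if (!equal) equal = (bytecode == candidate);
  }
  return equal;  // corresponds to __ b(if_return, eq)
}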
@@ -1084,9 +1055,21 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ cmp(r4, Operand(FEEDBACK_VECTOR_TYPE));
__ b(ne, &push_stack_frame);
- // Read off the optimized code slot in the feedback vector, and if there
- // is optimized code or an optimization marker, call that instead.
- MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r4, r6);
+ Register optimized_code_entry = r4;
+
+ // Read off the optimized code slot in the feedback vector.
+ __ ldr(optimized_code_entry,
+ FieldMemOperand(feedback_vector,
+ FeedbackVector::kOptimizedCodeWeakOrSmiOffset));
+
+ // Check if the optimized code slot is not empty.
+ Label optimized_code_slot_not_empty;
+ __ cmp(optimized_code_entry,
+ Operand(Smi::FromEnum(OptimizationMarker::kNone)));
+ __ b(ne, &optimized_code_slot_not_empty);
+
+ Label not_optimized;
+ __ bind(&not_optimized);
// Increment invocation count for the function.
__ ldr(r9, FieldMemOperand(feedback_vector,
@@ -1121,28 +1104,26 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Push(kInterpreterBytecodeArrayRegister, r0);
// Allocate the local and temporary register file on the stack.
+ Label stack_overflow;
{
// Load frame size from the BytecodeArray object.
__ ldr(r4, FieldMemOperand(kInterpreterBytecodeArrayRegister,
BytecodeArray::kFrameSizeOffset));
// Do a stack check to ensure we don't go over the limit.
- Label ok;
__ sub(r9, sp, Operand(r4));
LoadRealStackLimit(masm, r2);
__ cmp(r9, Operand(r2));
- __ b(hs, &ok);
- __ CallRuntime(Runtime::kThrowStackOverflow);
- __ bind(&ok);
+ __ b(lo, &stack_overflow);
// If ok, push undefined as the initial value for all register file entries.
Label loop_header;
Label loop_check;
- __ LoadRoot(r9, RootIndex::kUndefinedValue);
+ __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
__ b(&loop_check, al);
__ bind(&loop_header);
// TODO(rmcilroy): Consider doing more than one push per loop iteration.
- __ push(r9);
+ __ push(kInterpreterAccumulatorRegister);
// Continue loop if not done.
__ bind(&loop_check);
__ sub(r4, r4, Operand(kPointerSize), SetCC);
@@ -1157,8 +1138,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ cmp(r9, Operand::Zero());
__ str(r3, MemOperand(fp, r9, LSL, kPointerSizeLog2), ne);
- // Load accumulator with undefined.
- __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
+ // The accumulator is already loaded with undefined.
// Load the dispatch table into a register and dispatch to the bytecode
// handler at the current bytecode offset.
@@ -1199,8 +1179,26 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
LeaveInterpreterFrame(masm, r2);
__ Jump(lr);
+ __ bind(&optimized_code_slot_not_empty);
+ Label maybe_has_optimized_code;
+ // Check if optimized code marker is actually a weak reference to the
+ // optimized code.
+ __ JumpIfNotSmi(optimized_code_entry, &maybe_has_optimized_code);
+ MaybeOptimizeCode(masm, feedback_vector, optimized_code_entry);
+ // Fall through if there's no runnable optimized code.
+ __ jmp(&not_optimized);
+
+ __ bind(&maybe_has_optimized_code);
+ // Load the code entry from the weak reference. If it was cleared, resume
+ // execution of unoptimized code.
+ __ LoadWeakValue(optimized_code_entry, optimized_code_entry, &not_optimized);
+ TailCallOptimizedCodeSlot(masm, optimized_code_entry, r6);
+
__ bind(&compile_lazy);
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
+
+ __ bind(&stack_overflow);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
__ bkpt(0); // Should not return.
}
@@ -1565,14 +1563,8 @@ void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
}
void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
- // Lookup the function in the JavaScript frame.
- __ ldr(r0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ ldr(r0, MemOperand(r0, JavaScriptFrameConstants::kFunctionOffset));
-
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- // Pass function as argument.
- __ push(r0);
__ CallRuntime(Runtime::kCompileForOnStackReplacement);
}
@@ -2182,7 +2174,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// -- r1 : the target to call (can be any Object).
// -----------------------------------
- Label non_callable, non_function, non_smi;
+ Label non_callable, non_smi;
__ JumpIfSmi(r1, &non_callable);
__ bind(&non_smi);
__ CompareObjectType(r1, r4, r5, JS_FUNCTION_TYPE);
@@ -2199,12 +2191,10 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// Check if target is a proxy and call CallProxy external builtin
__ cmp(r5, Operand(JS_PROXY_TYPE));
- __ b(ne, &non_function);
- __ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), RelocInfo::CODE_TARGET);
+ __ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), RelocInfo::CODE_TARGET, eq);
// 2. Call to something else, which might have a [[Call]] internal method (if
// not we raise an exception).
- __ bind(&non_function);
// Overwrite the original receiver with the (original) target.
__ str(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2));
// Let the "call_as_function_delegate" take care of the rest.
@@ -3167,51 +3157,6 @@ void Builtins::Generate_MemCopyUint8Uint8(MacroAssembler* masm) {
__ Ret();
}
-void Builtins::Generate_MemCopyUint16Uint8(MacroAssembler* masm) {
- Register dest = r0;
- Register src = r1;
- Register chars = r2;
-
- {
- UseScratchRegisterScope temps(masm);
-
- Register temp1 = r3;
- Register temp2 = temps.Acquire();
- Register temp3 = lr;
- Register temp4 = r4;
- Label loop;
- Label not_two;
-
- __ Push(lr, r4);
- __ bic(temp2, chars, Operand(0x3));
- __ add(temp2, dest, Operand(temp2, LSL, 1));
-
- __ bind(&loop);
- __ ldr(temp1, MemOperand(src, 4, PostIndex));
- __ uxtb16(temp3, temp1);
- __ uxtb16(temp4, temp1, 8);
- __ pkhbt(temp1, temp3, Operand(temp4, LSL, 16));
- __ str(temp1, MemOperand(dest));
- __ pkhtb(temp1, temp4, Operand(temp3, ASR, 16));
- __ str(temp1, MemOperand(dest, 4));
- __ add(dest, dest, Operand(8));
- __ cmp(dest, temp2);
- __ b(&loop, ne);
-
- __ mov(chars, Operand(chars, LSL, 31), SetCC); // bit0 => ne, bit1 => cs
- __ b(&not_two, cc);
- __ ldrh(temp1, MemOperand(src, 2, PostIndex));
- __ uxtb(temp3, temp1, 8);
- __ mov(temp3, Operand(temp3, LSL, 16));
- __ uxtab(temp3, temp3, temp1);
- __ str(temp3, MemOperand(dest, 4, PostIndex));
- __ bind(&not_two);
- __ ldrb(temp1, MemOperand(src), ne);
- __ strh(temp1, MemOperand(dest), ne);
- __ Pop(pc, r4);
- }
-}
-
#undef __
} // namespace internal
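Taken together, these arm changes split the old MaybeTailCallOptimizedCodeSlot so that the common case, an empty optimized-code slot, falls straight through into the bytecode path after a single compare, while the marker and weak-reference cases are handled out of line. A rough C++ rendering of the new control flow (hypothetical names standing in for the assembly labels above, not V8 code):

#include <cstdio>

enum class Marker { kNone, kLogFirstExecution, kCompileOptimized,
                    kCompileOptimizedConcurrent, kInOptimizationQueue };

struct OptimizedCodeSlot {
  bool is_smi;            // Smi => optimization marker; else weak code ref
  Marker marker;          // valid when is_smi
  const void* weak_code;  // nullptr models a cleared weak reference
};

void EnterFunction(const OptimizedCodeSlot& slot) {
  if (!slot.is_smi) {
    if (slot.weak_code != nullptr) {
      std::puts("tail-call optimized code");  // TailCallOptimizedCodeSlot
      return;
    }
    // Weak reference was cleared: resume unoptimized execution.
  } else if (slot.marker != Marker::kNone) {
    std::puts("request optimization");  // MaybeOptimizeCode, then fall through
  }
  std::puts("run bytecode");  // the 'not_optimized' path
}

The arm64 file that follows applies the same restructuring.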
diff --git a/deps/v8/src/builtins/arm64/builtins-arm64.cc b/deps/v8/src/builtins/arm64/builtins-arm64.cc
index 4e159a69b7..9edd074e3d 100644
--- a/deps/v8/src/builtins/arm64/builtins-arm64.cc
+++ b/deps/v8/src/builtins/arm64/builtins-arm64.cc
@@ -1001,108 +1001,78 @@ static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
OptimizationMarker marker,
Runtime::FunctionId function_id) {
Label no_match;
- __ CompareAndBranch(smi_entry, Operand(Smi::FromEnum(marker)), ne, &no_match);
+ __ CompareTaggedAndBranch(smi_entry, Operand(Smi::FromEnum(marker)), ne,
+ &no_match);
GenerateTailCallToReturnedCode(masm, function_id);
__ bind(&no_match);
}
-static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
- Register feedback_vector,
- Register scratch1,
- Register scratch2) {
+static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
+ Register optimized_code_entry,
+ Register scratch) {
// ----------- S t a t e -------------
// -- x3 : new target (preserved for callee if needed, and caller)
// -- x1 : target function (preserved for callee if needed, and caller)
- // -- feedback vector (preserved for caller if needed)
// -----------------------------------
- DCHECK(!AreAliased(feedback_vector, x1, x3, scratch1, scratch2));
-
- Label optimized_code_slot_is_weak_ref, fallthrough;
+ DCHECK(!AreAliased(x1, x3, optimized_code_entry, scratch));
Register closure = x1;
- Register optimized_code_entry = scratch1;
-
- __ LoadAnyTaggedField(
- optimized_code_entry,
- FieldMemOperand(feedback_vector,
- FeedbackVector::kOptimizedCodeWeakOrSmiOffset));
-
- // Check if the code entry is a Smi. If yes, we interpret it as an
- // optimisation marker. Otherwise, interpret it as a weak reference to a code
- // object.
- __ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_weak_ref);
-
- {
- // Optimized code slot is a Smi optimization marker.
-
- // Fall through if no optimization trigger.
- __ CompareAndBranch(optimized_code_entry,
- Operand(Smi::FromEnum(OptimizationMarker::kNone)), eq,
- &fallthrough);
-
- // TODO(v8:8394): The logging of first execution will break if
- // feedback vectors are not allocated. We need to find a different way of
- // logging these events if required.
- TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
- OptimizationMarker::kLogFirstExecution,
- Runtime::kFunctionFirstExecution);
- TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
- OptimizationMarker::kCompileOptimized,
- Runtime::kCompileOptimized_NotConcurrent);
- TailCallRuntimeIfMarkerEquals(
- masm, optimized_code_entry,
- OptimizationMarker::kCompileOptimizedConcurrent,
- Runtime::kCompileOptimized_Concurrent);
-
- {
- // Otherwise, the marker is InOptimizationQueue, so fall through hoping
- // that an interrupt will eventually update the slot with optimized code.
- if (FLAG_debug_code) {
- __ Cmp(
- optimized_code_entry,
- Operand(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue)));
- __ Assert(eq, AbortReason::kExpectedOptimizationSentinel);
- }
- __ B(&fallthrough);
- }
- }
- {
- // Optimized code slot is a weak reference.
- __ bind(&optimized_code_slot_is_weak_ref);
-
- __ LoadWeakValue(optimized_code_entry, optimized_code_entry, &fallthrough);
+ // Check if the optimized code is marked for deopt. If it is, call the
+ // runtime to clear it.
+ Label found_deoptimized_code;
+ __ LoadTaggedPointerField(
+ scratch,
+ FieldMemOperand(optimized_code_entry, Code::kCodeDataContainerOffset));
+ __ Ldr(scratch.W(),
+ FieldMemOperand(scratch, CodeDataContainer::kKindSpecificFlagsOffset));
+ __ Tbnz(scratch.W(), Code::kMarkedForDeoptimizationBit,
+ &found_deoptimized_code);
+
+ // Optimized code is good, get it into the closure and link the closure into
+ // the optimized functions list, then tail call the optimized code.
+ ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure);
+ static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
+ __ LoadCodeObjectEntry(x2, optimized_code_entry);
+ __ Jump(x2);
- // Check if the optimized code is marked for deopt. If it is, call the
- // runtime to clear it.
- Label found_deoptimized_code;
- __ LoadTaggedPointerField(
- scratch2,
- FieldMemOperand(optimized_code_entry, Code::kCodeDataContainerOffset));
- __ Ldr(
- scratch2.W(),
- FieldMemOperand(scratch2, CodeDataContainer::kKindSpecificFlagsOffset));
- __ Tbnz(scratch2.W(), Code::kMarkedForDeoptimizationBit,
- &found_deoptimized_code);
-
- // Optimized code is good, get it into the closure and link the closure into
- // the optimized functions list, then tail call the optimized code.
- // The feedback vector is no longer used, so re-use it as a scratch
- // register.
- ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure);
- static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
- __ LoadCodeObjectEntry(x2, optimized_code_entry);
- __ Jump(x2);
+ // Optimized code slot contains deoptimized code; evict it and re-enter the
+ // closure's code.
+ __ bind(&found_deoptimized_code);
+ GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
+}
- // Optimized code slot contains deoptimized code, evict it and re-enter the
- // closure's code.
- __ bind(&found_deoptimized_code);
- GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
+static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
+ Register optimization_marker) {
+ // ----------- S t a t e -------------
+ // -- x3 : new target (preserved for callee if needed, and caller)
+ // -- x1 : target function (preserved for callee if needed, and caller)
+ // -- feedback vector (preserved for caller if needed)
+ // -- optimization_marker : a Smi containing a non-zero optimization marker.
+ // -----------------------------------
+ DCHECK(!AreAliased(feedback_vector, x1, x3, optimization_marker));
+
+ // TODO(v8:8394): The logging of first execution will break if
+ // feedback vectors are not allocated. We need to find a different way of
+ // logging these events if required.
+ TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
+ OptimizationMarker::kLogFirstExecution,
+ Runtime::kFunctionFirstExecution);
+ TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
+ OptimizationMarker::kCompileOptimized,
+ Runtime::kCompileOptimized_NotConcurrent);
+ TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
+ OptimizationMarker::kCompileOptimizedConcurrent,
+ Runtime::kCompileOptimized_Concurrent);
+
+ // Otherwise, the marker is InOptimizationQueue, so fall through hoping
+ // that an interrupt will eventually update the slot with optimized code.
+ if (FLAG_debug_code) {
+ __ CmpTagged(
+ optimization_marker,
+ Operand(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue)));
+ __ Assert(eq, AbortReason::kExpectedOptimizationSentinel);
}
-
- // Fall-through if the optimized code cell is clear and there is no
- // optimization marker.
- __ bind(&fallthrough);
}
// Advance the current bytecode offset. This simulates what all bytecode
@@ -1129,19 +1099,19 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
__ Cmp(bytecode, Operand(0x3));
__ B(hi, &process_bytecode);
__ Tst(bytecode, Operand(0x1));
- __ B(ne, &extra_wide);
-
- // Load the next bytecode and update table to the wide scaled table.
+ // The code to load the next bytecode is common to both wide and extra wide.
+ // We can hoist it up here since it does not modify the flags after Tst.
__ Add(bytecode_offset, bytecode_offset, Operand(1));
__ Ldrb(bytecode, MemOperand(bytecode_array, bytecode_offset));
+ __ B(ne, &extra_wide);
+
+ // Update table to the wide scaled table.
__ Add(bytecode_size_table, bytecode_size_table,
Operand(kIntSize * interpreter::Bytecodes::kBytecodeCount));
__ B(&process_bytecode);
__ Bind(&extra_wide);
- // Load the next bytecode and update table to the extra wide scaled table.
- __ Add(bytecode_offset, bytecode_offset, Operand(1));
- __ Ldrb(bytecode, MemOperand(bytecode_array, bytecode_offset));
+ // Update table to the extra wide scaled table.
__ Add(bytecode_size_table, bytecode_size_table,
Operand(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount));
@@ -1211,7 +1181,20 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Read off the optimized code slot in the feedback vector, and if there
// is optimized code or an optimization marker, call that instead.
- MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, x7, x4);
+ Register optimized_code_entry = x7;
+ __ LoadAnyTaggedField(
+ optimized_code_entry,
+ FieldMemOperand(feedback_vector,
+ FeedbackVector::kOptimizedCodeWeakOrSmiOffset));
+
+ // Check if the optimized code slot is not empty.
+ Label optimized_code_slot_not_empty;
+ __ CompareTaggedAndBranch(optimized_code_entry,
+ Operand(Smi::FromEnum(OptimizationMarker::kNone)),
+ ne, &optimized_code_slot_not_empty);
+
+ Label not_optimized;
+ __ bind(&not_optimized);
// Increment invocation count for the function.
// MaybeTailCallOptimizedCodeSlot preserves feedback_vector, so safe to reuse
@@ -1248,13 +1231,13 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Push(kInterpreterBytecodeArrayRegister, x0);
// Allocate the local and temporary register file on the stack.
+ Label stack_overflow;
{
// Load frame size from the BytecodeArray object.
__ Ldr(w11, FieldMemOperand(kInterpreterBytecodeArrayRegister,
BytecodeArray::kFrameSizeOffset));
// Do a stack check to ensure we don't go over the limit.
- Label ok;
__ Sub(x10, sp, Operand(x11));
{
UseScratchRegisterScope temps(masm);
@@ -1262,21 +1245,19 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
LoadRealStackLimit(masm, scratch);
__ Cmp(x10, scratch);
}
- __ B(hs, &ok);
- __ CallRuntime(Runtime::kThrowStackOverflow);
- __ Bind(&ok);
+ __ B(lo, &stack_overflow);
// If ok, push undefined as the initial value for all register file entries.
// Note: there should always be at least one stack slot for the return
// register in the register file.
Label loop_header;
- __ LoadRoot(x10, RootIndex::kUndefinedValue);
+ __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
__ Lsr(x11, x11, kSystemPointerSizeLog2);
// Round up the number of registers to a multiple of 2, to align the stack
// to 16 bytes.
__ Add(x11, x11, 1);
__ Bic(x11, x11, 1);
- __ PushMultipleTimes(x10, x11);
+ __ PushMultipleTimes(kInterpreterAccumulatorRegister, x11);
__ Bind(&loop_header);
}
@@ -1291,8 +1272,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Str(x3, MemOperand(fp, x10, LSL, kSystemPointerSizeLog2));
__ Bind(&no_incoming_new_target_or_generator_register);
- // Load accumulator with undefined.
- __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
+ // The accumulator is already loaded with undefined.
// Load the dispatch table into a register and dispatch to the bytecode
// handler at the current bytecode offset.
@@ -1315,9 +1295,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Get bytecode array and bytecode offset from the stack frame.
__ Ldr(kInterpreterBytecodeArrayRegister,
MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
- __ Ldr(kInterpreterBytecodeOffsetRegister,
- MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
- __ SmiUntag(kInterpreterBytecodeOffsetRegister);
+ __ SmiUntag(kInterpreterBytecodeOffsetRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
// Either return, or advance to the next bytecode and dispatch.
Label do_return;
@@ -1333,9 +1312,28 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
LeaveInterpreterFrame(masm, x2);
__ Ret();
+ __ bind(&optimized_code_slot_not_empty);
+ Label maybe_has_optimized_code;
+ // Check if optimized code marker is actually a weak reference to the
+ // optimized code as opposed to an optimization marker.
+ __ JumpIfNotSmi(optimized_code_entry, &maybe_has_optimized_code);
+ MaybeOptimizeCode(masm, feedback_vector, optimized_code_entry);
+ // Fall through if there's no runnable optimized code.
+ __ jmp(&not_optimized);
+
+ __ bind(&maybe_has_optimized_code);
+ // Load the code entry from the weak reference. If it was cleared, resume
+ // execution of unoptimized code.
+ __ LoadWeakValue(optimized_code_entry, optimized_code_entry, &not_optimized);
+ TailCallOptimizedCodeSlot(masm, optimized_code_entry, x4);
+
__ bind(&compile_lazy);
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
__ Unreachable(); // Should not return.
+
+ __ bind(&stack_overflow);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
+ __ Unreachable(); // Should not return.
}
static void Generate_InterpreterPushArgs(MacroAssembler* masm,
@@ -1543,9 +1541,8 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
}
// Get the target bytecode offset from the frame.
- __ Ldr(kInterpreterBytecodeOffsetRegister,
- MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
- __ SmiUntag(kInterpreterBytecodeOffsetRegister);
+ __ SmiUntag(kInterpreterBytecodeOffsetRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
// Dispatch to the target bytecode.
__ Ldrb(x23, MemOperand(kInterpreterBytecodeArrayRegister,
@@ -1560,9 +1557,8 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
// Get bytecode array and bytecode offset from the stack frame.
__ ldr(kInterpreterBytecodeArrayRegister,
MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
- __ ldr(kInterpreterBytecodeOffsetRegister,
- MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
- __ SmiUntag(kInterpreterBytecodeOffsetRegister);
+ __ SmiUntag(kInterpreterBytecodeOffsetRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
// Load the current bytecode.
__ Ldrb(x1, MemOperand(kInterpreterBytecodeArrayRegister,
@@ -1633,7 +1629,7 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
// Set flags for determining the value of smi-tagged argc.
// lt => 1, eq => 2, gt => 3.
- __ Cmp(argc, Smi::FromInt(2));
+ __ CmpTagged(argc, Smi::FromInt(2));
__ B(gt, &three_args);
// One or two arguments.
@@ -1769,20 +1765,14 @@ void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
}
void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
- // Lookup the function in the JavaScript frame.
- __ Ldr(x0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ Ldr(x0, MemOperand(x0, JavaScriptFrameConstants::kFunctionOffset));
-
{
FrameScope scope(masm, StackFrame::INTERNAL);
- // Pass function as argument.
- __ PushArgument(x0);
__ CallRuntime(Runtime::kCompileForOnStackReplacement);
}
// If the code object is null, just return to the caller.
Label skip;
- __ CompareAndBranch(x0, Smi::zero(), ne, &skip);
+ __ CompareTaggedAndBranch(x0, Smi::zero(), ne, &skip);
__ Ret();
__ Bind(&skip);
@@ -1878,8 +1868,8 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// 3. Tail call with no arguments if argArray is null or undefined.
Label no_arguments;
- __ Cmp(arg_array, null_value);
- __ Ccmp(arg_array, undefined_value, ZFlag, ne);
+ __ CmpTagged(arg_array, null_value);
+ __ CcmpTagged(arg_array, undefined_value, ZFlag, ne);
__ B(eq, &no_arguments);
// 4a. Apply the receiver to the given argArray.
@@ -2261,7 +2251,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
__ Bind(&loop);
__ Sub(len, len, 1);
__ LoadAnyTaggedField(scratch, MemOperand(src, kTaggedSize, PostIndex));
- __ Cmp(scratch, the_hole_value);
+ __ CmpTagged(scratch, the_hole_value);
__ Csel(scratch, scratch, undefined_value, ne);
__ Poke(scratch, Operand(len, LSL, kSystemPointerSizeLog2));
__ Cbnz(len, &loop);
@@ -2319,7 +2309,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ Ldr(args_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
__ Ldr(x4, MemOperand(args_fp,
CommonFrameConstants::kContextOrFrameTypeOffset));
- __ Cmp(x4, StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR));
+ __ CmpTagged(x4, StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR));
__ B(eq, &arguments_adaptor);
{
__ Ldr(scratch,
@@ -2626,7 +2616,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// -- x1 : the target to call (can be any Object).
// -----------------------------------
- Label non_callable, non_function, non_smi;
+ Label non_callable, non_smi;
__ JumpIfSmi(x1, &non_callable);
__ Bind(&non_smi);
__ CompareObjectType(x1, x4, x5, JS_FUNCTION_TYPE);
@@ -2642,12 +2632,10 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// Check if target is a proxy and call CallProxy external builtin
__ Cmp(x5, JS_PROXY_TYPE);
- __ B(ne, &non_function);
- __ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), RelocInfo::CODE_TARGET);
+ __ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), RelocInfo::CODE_TARGET, eq);
// 2. Call to something else, which might have a [[Call]] internal method (if
// not we raise an exception).
- __ Bind(&non_function);
// Overwrite the original receiver with the (original) target.
__ Poke(x1, Operand(x0, LSL, kXRegSizeLog2));
// Let the "call_as_function_delegate" take care of the rest.
@@ -2712,7 +2700,7 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
// Patch new.target to [[BoundTargetFunction]] if new.target equals target.
{
Label done;
- __ Cmp(x1, x3);
+ __ CmpTagged(x1, x3);
__ B(ne, &done);
__ LoadTaggedPointerField(
x3, FieldMemOperand(x1, JSBoundFunction::kBoundTargetFunctionOffset));
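Much of the remaining arm64 churn swaps Cmp/Ccmp/CompareAndBranch for their Tagged variants. Under pointer compression a tagged value occupies only the low 32 bits of a 64-bit register, so a full-width compare can spuriously differ in the upper halves; the tagged variants compare just the compressed payload. A sketch of the distinction (the layout here is an assumption for illustration, not the V8 implementation):

#include <cstdint>

using Tagged_t = uint32_t;  // compressed tagged value: low 32 register bits

// Full-width compare can be fooled by stale upper register halves; the
// tagged compare truncates both operands to the compressed payload first.
inline bool FullEqual(uint64_t a, uint64_t b) { return a == b; }
inline bool TaggedEqual(uint64_t a, uint64_t b) {
  return static_cast<Tagged_t>(a) == static_cast<Tagged_t>(b);
}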
diff --git a/deps/v8/src/builtins/base.tq b/deps/v8/src/builtins/base.tq
index aa5d4cc50a..065cd08e4c 100644
--- a/deps/v8/src/builtins/base.tq
+++ b/deps/v8/src/builtins/base.tq
@@ -90,11 +90,28 @@ type bool generates 'TNode<BoolT>' constexpr 'bool';
type bint generates 'TNode<BInt>' constexpr 'BInt';
type string constexpr 'const char*';
-type NameDictionary extends FixedArray;
+// The HashTable inheritance hierarchy doesn't actually look like this in C++
+// because it uses some class templates that we can't yet (and may never)
+// express in Torque, but this is the expected organization of instance types.
+@abstract @dirtyInstantiatedAbstractClass
+extern class HashTable extends FixedArray generates 'TNode<FixedArray>';
+extern class OrderedHashMap extends HashTable;
+extern class OrderedHashSet extends HashTable;
+extern class OrderedNameDictionary extends HashTable;
+extern class NameDictionary extends HashTable;
+extern class GlobalDictionary extends HashTable;
+extern class SimpleNumberDictionary extends HashTable;
+extern class StringTable extends HashTable;
+extern class EphemeronHashTable extends HashTable;
+type ObjectHashTable extends HashTable
+ generates 'TNode<ObjectHashTable>';
+extern class NumberDictionary extends HashTable;
type RawPtr generates 'TNode<RawPtrT>' constexpr 'void*';
-type Code extends HeapObject generates 'TNode<Code>';
+extern class Code extends HeapObject;
type BuiltinPtr extends Smi generates 'TNode<BuiltinPtr>';
+
+@abstract
extern class Context extends HeapObject {
length: Smi;
scope_info: ScopeInfo;
@@ -102,10 +119,27 @@ extern class Context extends HeapObject {
extension: Object;
native_context: Object;
}
-type NativeContext extends Context generates 'TNode<NativeContext>';
+extern class AwaitContext extends Context generates 'TNode<Context>';
+extern class BlockContext extends Context generates 'TNode<Context>';
+extern class CatchContext extends Context generates 'TNode<Context>';
+extern class DebugEvaluateContext extends Context
+ generates 'TNode<Context>';
+extern class EvalContext extends Context generates 'TNode<Context>';
+extern class FunctionContext extends Context generates 'TNode<Context>';
+extern class ModuleContext extends Context generates 'TNode<Context>';
+extern class NativeContext extends Context;
+extern class ScriptContext extends Context generates 'TNode<Context>';
+extern class WithContext extends Context generates 'TNode<Context>';
+
+@generateCppClass
+@abstract
+extern class PrimitiveHeapObject extends HeapObject {
+}
@generateCppClass
-extern class Oddball extends HeapObject {
+@apiExposedInstanceTypeValue(0x43)
+@highestInstanceTypeWithinParentClassRange
+extern class Oddball extends PrimitiveHeapObject {
to_number_raw: float64;
to_string: String;
to_number: Number;
@@ -113,13 +147,13 @@ extern class Oddball extends HeapObject {
kind: Smi;
}
-extern class HeapNumber extends HeapObject { value: float64; }
+extern class HeapNumber extends PrimitiveHeapObject { value: float64; }
type Number = Smi | HeapNumber;
type Numeric = Number | BigInt;
@abstract
@generateCppClass
-extern class Name extends HeapObject {
+extern class Name extends PrimitiveHeapObject {
hash_field: uint32;
}
// This is the same as Name, but with the information that there are no other
@@ -137,6 +171,7 @@ type PrivateSymbol extends Symbol;
@abstract
@generateCppClass
+@reserveBitsInInstanceType(6)
extern class String extends Name {
length: int32;
}
@@ -222,20 +257,35 @@ extern class FixedArrayBase extends HeapObject {
length: Smi;
}
-extern class FixedArray extends FixedArrayBase { objects[length]: Object; }
+@abstract
+@dirtyInstantiatedAbstractClass
+extern class FixedArray extends FixedArrayBase {
+ objects[length]: Object;
+}
extern class FixedDoubleArray extends FixedArrayBase {
floats[length]: float64;
}
-extern class WeakFixedArray extends HeapObject { length: Smi; }
+@abstract
+@dirtyInstantiatedAbstractClass
+extern class WeakFixedArray extends HeapObject {
+ length: Smi;
+}
extern class ByteArray extends FixedArrayBase {}
+@hasSameInstanceTypeAsParent
+extern class ArrayList extends FixedArray {
+}
+
+extern class ObjectBoilerplateDescription extends FixedArray;
+extern class ClosureFeedbackCellArray extends FixedArray;
+extern class ScriptContextTable extends FixedArray;
+
type LayoutDescriptor extends ByteArray
generates 'TNode<LayoutDescriptor>';
-type TransitionArray extends WeakFixedArray
- generates 'TNode<TransitionArray>';
+extern class TransitionArray extends WeakFixedArray;
type InstanceType extends uint16 constexpr 'v8::internal::InstanceType';
@@ -282,6 +332,7 @@ extern class SourcePositionTableWithFrameCache extends Struct {
// We make this class abstract because it is missing the variable-sized part,
// which is still impossible to express in Torque.
@abstract
+@dirtyInstantiatedAbstractClass
extern class DescriptorArray extends HeapObject {
number_of_all_descriptors: uint16;
number_of_descriptors: uint16;
@@ -327,7 +378,9 @@ intrinsic
}
}
+// JSReceiver corresponds to objects in the JS sense.
@abstract
+@highestInstanceTypeWithinParentClassRange
extern class JSReceiver extends HeapObject {
properties_or_hash: FixedArrayBase | PropertyArray | Smi;
}
@@ -337,6 +390,8 @@ type Constructor extends JSReceiver;
@abstract
@dirtyInstantiatedAbstractClass
@generateCppClass
+@apiExposedInstanceTypeValue(0x421)
+@highestInstanceTypeWithinParentClassRange
extern class JSObject extends JSReceiver {
// [elements]: The elements (properties with names that are integers).
//
@@ -368,6 +423,18 @@ macro NewJSObject(implicit context: Context)(): JSObject {
};
}
+@abstract
+@generateCppClass
+@lowestInstanceTypeWithinParentClassRange
+extern class JSCustomElementsObject extends JSObject {
+}
+
+@abstract
+@generateCppClass
+@lowestInstanceTypeWithinParentClassRange
+extern class JSSpecialObject extends JSCustomElementsObject {
+}
+
extern macro HasPrototypeSlot(JSFunction): bool;
macro GetDerivedMap(implicit context: Context)(
@@ -401,7 +468,8 @@ macro AllocateFastOrSlowJSObjectFromMap(implicit context: Context)(map: Map):
map, properties, kEmptyFixedArray, kNone, kWithSlackTracking);
}
-extern class JSFunction extends JSObject {
+@highestInstanceTypeWithinParentClassRange
+extern class JSFunction extends JSFunctionOrBoundFunction {
shared_function_info: SharedFunctionInfo;
context: Context;
feedback_cell: FeedbackCell;
@@ -419,6 +487,7 @@ extern class JSProxy extends JSReceiver {
// Just a starting shape for JSObject; properties can move after initialization.
@noVerifier
+@hasSameInstanceTypeAsParent
extern class JSProxyRevocableResult extends JSObject {
proxy: JSAny;
revoke: JSAny;
@@ -436,14 +505,14 @@ macro NewJSProxyRevocableResult(implicit context: Context)(
}
@generateCppClass
-extern class JSGlobalProxy extends JSObject {
+extern class JSGlobalProxy extends JSSpecialObject {
// [native_context]: the owner native context of this global proxy object.
// It is null value if this object is not used by any context.
native_context: Object;
}
@generateCppClass
-extern class JSPrimitiveWrapper extends JSObject {
+extern class JSPrimitiveWrapper extends JSCustomElementsObject {
value: JSAny;
}
@@ -531,8 +600,6 @@ extern class CallHandlerInfo extends Struct {
data: Object;
}
-type ObjectHashTable extends FixedArray;
-
@abstract
extern class Module extends HeapObject {
exports: ObjectHashTable;
@@ -569,9 +636,12 @@ extern class SourceTextModule extends Module {
// Lazily initialized on first access. It's the hole before first access and
// a JSObject afterwards.
import_meta: TheHole | JSObject;
-
+ async_parent_modules: ArrayList;
+ top_level_capability: JSPromise | Undefined;
dfs_index: Smi;
dfs_ancestor_index: Smi;
+ pending_async_dependencies: Smi;
+ flags: Smi;
}
@generateCppClass
@@ -583,7 +653,8 @@ extern class SyntheticModule extends Module {
@abstract
@generateCppClass
-extern class JSModuleNamespace extends JSObject {
+@dirtyInstantiatedAbstractClass
+extern class JSModuleNamespace extends JSSpecialObject {
module: Module;
}
@@ -606,6 +677,7 @@ extern class JSWeakMap extends JSWeakCollection {
}
@generateCppClass
+@abstract
extern class JSCollectionIterator extends JSObject {
// The backing hash table mapping keys to values.
table: Object;
@@ -613,6 +685,20 @@ extern class JSCollectionIterator extends JSObject {
index: Object;
}
+@abstract extern class JSMapIterator extends JSCollectionIterator;
+extern class JSMapKeyIterator extends JSMapIterator
+ generates 'TNode<JSMapIterator>';
+extern class JSMapKeyValueIterator extends JSMapIterator
+ generates 'TNode<JSMapIterator>';
+extern class JSMapValueIterator extends JSMapIterator
+ generates 'TNode<JSMapIterator>';
+
+@abstract extern class JSSetIterator extends JSCollectionIterator;
+extern class JSSetKeyValueIterator extends JSSetIterator
+ generates 'TNode<JSSetIterator>';
+extern class JSSetValueIterator extends JSSetIterator
+ generates 'TNode<JSSetIterator>';
+
extern class JSMessageObject extends JSObject {
// Tagged fields.
message_type: Smi;
@@ -656,7 +742,7 @@ extern class Script extends Struct {
line_ends: Object;
id: Smi;
eval_from_shared_or_wrapped_arguments: Object;
- eval_from_position: Smi;
+ eval_from_position: Smi | Foreign; // Smi or Managed<wasm::NativeModule>
shared_function_infos: Object;
flags: Smi;
source_url: Object;
@@ -669,12 +755,13 @@ extern class EmbedderDataArray extends HeapObject {
length: Smi;
}
-type ScopeInfo extends HeapObject generates 'TNode<ScopeInfo>';
+extern class ScopeInfo extends FixedArray;
+@generateCppClass
extern class PreparseData extends HeapObject {
// TODO(v8:8983): Add declaration for variable-sized region.
data_length: int32;
- inner_length: int32;
+ children_length: int32;
}
extern class InterpreterData extends Struct {
@@ -697,13 +784,36 @@ extern class SharedFunctionInfo extends HeapObject {
@if(V8_SFI_HAS_UNIQUE_ID) unique_id: int32;
}
+@abstract
+@generateCppClass
+extern class UncompiledData extends HeapObject {
+ inferred_name: String;
+ start_position: int32;
+ end_position: int32;
+}
+
+@generateCppClass
+extern class UncompiledDataWithoutPreparseData extends UncompiledData {
+}
+
+@generateCppClass
+extern class UncompiledDataWithPreparseData extends UncompiledData {
+ preparse_data: PreparseData;
+}
+
+@abstract
+@generateCppClass
+@highestInstanceTypeWithinParentClassRange
+extern class JSFunctionOrBoundFunction extends JSObject {
+}
+
@generateCppClass
-extern class JSBoundFunction extends JSObject {
+extern class JSBoundFunction extends JSFunctionOrBoundFunction {
// The wrapped function object.
bound_target_function: Callable;
// The value that is always passed as the this value when calling the wrapped
// function.
- bound_this: JSAny;
+ bound_this: JSAny | SourceTextModule;
// A list of values whose elements are used as the first arguments to any call
// to the wrapped function.
bound_arguments: FixedArray;
@@ -728,8 +838,6 @@ extern operator '.length_intptr' macro LoadAndUntagFixedArrayBaseLength(
FixedArrayBase): intptr;
type SloppyArgumentsElements extends FixedArray;
-type NumberDictionary extends HeapObject
- generates 'TNode<NumberDictionary>';
extern class FreeSpace extends HeapObject {
size: Smi;
@@ -763,6 +871,8 @@ const PROXY_REVOCABLE_RESULT_MAP_INDEX: constexpr NativeContextSlot
generates 'Context::PROXY_REVOCABLE_RESULT_MAP_INDEX';
const REFLECT_APPLY_INDEX: constexpr NativeContextSlot
generates 'Context::REFLECT_APPLY_INDEX';
+const REGEXP_FUNCTION_INDEX: constexpr NativeContextSlot
+ generates 'Context::REGEXP_FUNCTION_INDEX';
const REGEXP_LAST_MATCH_INFO_INDEX: constexpr NativeContextSlot
generates 'Context::REGEXP_LAST_MATCH_INFO_INDEX';
const INITIAL_STRING_ITERATOR_MAP_INDEX: constexpr NativeContextSlot
@@ -834,7 +944,7 @@ extern class JSDate extends JSObject {
cache_stamp: Undefined | Smi | NaN;
}
-extern class JSGlobalObject extends JSObject {
+extern class JSGlobalObject extends JSSpecialObject {
native_context: NativeContext;
global_proxy: JSGlobalProxy;
}
@@ -847,9 +957,12 @@ extern class JSAsyncFromSyncIterator extends JSObject {
next: Object;
}
+@generateCppClass
extern class JSStringIterator extends JSObject {
+ // The [[IteratedString]] inobject property.
string: String;
- next_index: Smi;
+ // The [[StringIteratorNextIndex]] inobject property.
+ index: Smi;
}
@abstract
@@ -885,7 +998,7 @@ extern class FunctionTemplateRareData extends Struct {
@generateCppClass
extern class FunctionTemplateInfo extends TemplateInfo {
// Handler invoked when calling an instance of this FunctionTemplateInfo.
- // Either CallInfoHandler or Undefined.
+ // Either CallHandlerInfo or Undefined.
call_code: Object;
class_name: Object;
// If the signature is a FunctionTemplateInfo it is used to check whether the
@@ -946,7 +1059,10 @@ const UTF16:
const UTF32:
constexpr UnicodeEncoding generates 'UnicodeEncoding::UTF32';
-extern class Foreign extends HeapObject { foreign_address: RawPtr; }
+@apiExposedInstanceTypeValue(0x46)
+extern class Foreign extends HeapObject {
+ foreign_address: RawPtr;
+}
@generateCppClass
extern class InterceptorInfo extends Struct {
@@ -985,6 +1101,7 @@ extern class Cell extends HeapObject {
value: Object;
}
+@abstract
extern class DataHandler extends Struct {
smi_handler: Smi | Code;
validity_cell: Smi | Cell;
@@ -996,6 +1113,9 @@ extern class DataHandler extends Struct {
@noVerifier weak data_3: Object;
}
+extern class LoadHandler extends DataHandler;
+extern class StoreHandler extends DataHandler;
+
@abstract
@dirtyInstantiatedAbstractClass
@generateCppClass
@@ -1087,7 +1207,7 @@ extern class ClassPositions extends Struct {
end: Smi;
}
-type WasmInstanceObject extends JSObject;
+extern class WasmInstanceObject extends JSObject;
extern class WasmExportedFunctionData extends Struct {
wrapper_code: Code;
@@ -1129,6 +1249,7 @@ extern class WasmIndirectFunctionTable extends Struct {
extern class WasmDebugInfo extends Struct {
instance: WasmInstanceObject;
interpreter_handle: Foreign | Undefined;
+ interpreter_reference_stack: Cell;
locals_names: FixedArray | Undefined;
c_wasm_entries: FixedArray | Undefined;
c_wasm_entry_map: Foreign | Undefined; // Managed<wasm::SignatureMap>
@@ -1305,9 +1426,6 @@ const kStrictReadOnlyProperty: constexpr MessageTemplate
const kString: constexpr PrimitiveType
generates 'PrimitiveType::kString';
-const kExternalPointerForOnHeapArray: constexpr RawPtr
- generates 'JSTypedArray::ExternalPointerForOnHeapArray()';
-
const kNameDictionaryInitialCapacity:
constexpr int32 generates 'NameDictionary::kInitialCapacity';
@@ -1332,6 +1450,7 @@ extern macro EmptyStringConstant(): EmptyString;
extern macro LengthStringConstant(): String;
extern macro NanConstant(): NaN;
extern macro IteratorSymbolConstant(): Symbol;
+extern macro MatchSymbolConstant(): Symbol;
const TheHole: TheHole = TheHoleConstant();
const Null: Null = NullConstant();
@@ -1443,15 +1562,30 @@ RegExpBuiltinsAssembler::FastStoreLastIndex(FastJSRegExp, Smi): void;
@hasSameInstanceTypeAsParent
extern class JSRegExpResult extends JSArray {
+ // In-object properties:
+ // The below fields are externally exposed.
index: JSAny;
input: JSAny;
groups: JSAny;
+
+ // The below fields are for internal use only.
+ cached_indices_or_match_info: JSRegExpResultIndices | RegExpMatchInfo;
+ names: FixedArray | Undefined;
}
+@hasSameInstanceTypeAsParent
+extern class JSRegExpResultIndices extends JSArray {
+ // In-object properties:
+ // The groups field is externally exposed.
+ groups: JSAny;
+}
+
+transient type FastJSRegExpResult extends JSRegExpResult;
+
@generateCppClass
extern class JSRegExpStringIterator extends JSObject {
// The [[IteratingRegExp]] internal property.
- iterating_reg_exp: JSAny;
+ iterating_reg_exp: JSReceiver;
// The [[IteratedString]] internal property.
iterated_string: String;
flags: Smi;
@@ -1493,21 +1627,33 @@ extern class AccessorInfo extends Struct {
data: Object;
}
+@generateCppClass
extern class AccessorPair extends Struct {
getter: Object;
setter: Object;
}
-extern class BreakPoint extends Tuple2 {}
-extern class BreakPointInfo extends Tuple2 {}
+@hasSameInstanceTypeAsParent
+extern class BreakPoint extends Tuple2 {
+}
+@hasSameInstanceTypeAsParent
+extern class BreakPointInfo extends Tuple2 {
+}
type CoverageInfo extends FixedArray;
+@generateCppClass
extern class DebugInfo extends Struct {
- shared_function_info: SharedFunctionInfo;
+ shared: SharedFunctionInfo;
debugger_hints: Smi;
+ // Script field from shared function info.
script: Undefined | Script;
+ // The original uninstrumented bytecode array for functions with break
+ // points - the instrumented bytecode is held in the shared function info.
original_bytecode_array: Undefined | BytecodeArray;
+ // The debug instrumented bytecode array for functions with break points
+ // - also pointed to by the shared function info.
debug_bytecode_array: Undefined | BytecodeArray;
+ // Fixed array holding status information for each active break point.
break_points: FixedArray;
flags: Smi;
coverage_info: CoverageInfo | Undefined;
@@ -1527,12 +1673,15 @@ extern class FeedbackVector extends HeapObject {
padding: uint32;
}
+@generateCppClass
extern class FeedbackCell extends Struct {
value: Undefined | FeedbackVector | FixedArray;
interrupt_budget: int32;
}
-type AllocationSite extends Struct;
+extern class FeedbackMetadata extends HeapObject;
+
+extern class AllocationSite extends Struct;
extern class AllocationMemento extends Struct {
allocation_site: AllocationSite;
}
@@ -1541,9 +1690,7 @@ extern class WasmModuleObject extends JSObject {
native_module: Foreign;
export_wrappers: FixedArray;
script: Script;
- weak_instance_list: WeakArrayList;
asm_js_offset_table: ByteArray | Undefined;
- break_point_infos: FixedArray | Undefined;
}
extern class WasmTableObject extends JSObject {
@@ -1590,22 +1737,35 @@ extern class JSFinalizationGroup extends JSObject {
flags: Smi;
}
+@generateCppClass
extern class JSFinalizationGroupCleanupIterator extends JSObject {
finalization_group: JSFinalizationGroup;
}
+@generateCppClass
extern class WeakCell extends HeapObject {
finalization_group: Undefined | JSFinalizationGroup;
target: Undefined | JSReceiver;
holdings: Object;
+
+ // For storing doubly linked lists of WeakCells in JSFinalizationGroup's
+ // "active_cells" and "cleared_cells" lists.
prev: Undefined | WeakCell;
next: Undefined | WeakCell;
+
+ // For storing doubly linked lists of WeakCells per key in
+ // JSFinalizationGroup's key-based hashmap. WeakCell also needs to know its
+ // key, so that we can remove the key from the key_map when we remove the last
+ // WeakCell associated with it.
key: Object;
key_list_prev: Undefined | WeakCell;
key_list_next: Undefined | WeakCell;
}
-extern class JSWeakRef extends JSObject { target: Undefined | JSReceiver; }
+@generateCppClass
+extern class JSWeakRef extends JSObject {
+ target: Undefined | JSReceiver;
+}
extern class BytecodeArray extends FixedArrayBase {
// TODO(v8:8983): bytecode array object sizes vary based on their contents.
@@ -1620,6 +1780,29 @@ extern class BytecodeArray extends FixedArrayBase {
bytecode_age: int8;
}
+extern class Filler extends HeapObject generates 'TNode<HeapObject>';
+extern class CodeDataContainer extends HeapObject;
+@abstract
+extern class SmallOrderedHashTable extends HeapObject
+ generates 'TNode<HeapObject>';
+extern class SmallOrderedHashMap extends SmallOrderedHashTable;
+extern class SmallOrderedHashSet extends SmallOrderedHashTable;
+extern class SmallOrderedNameDictionary extends SmallOrderedHashTable;
+
+// Various logical subclasses of JSObject, which have their own instance types
+// but not their own class definitions:
+
+// Like JSObject, but created from API function.
+@apiExposedInstanceTypeValue(0x420)
+extern class JSApiObject extends JSObject generates 'TNode<JSObject>';
+// Like JSApiObject, but requires access checks and/or has interceptors.
+@apiExposedInstanceTypeValue(0x410)
+extern class JSSpecialApiObject extends JSSpecialObject
+ generates 'TNode<JSSpecialObject>';
+extern class JSContextExtensionObject extends JSObject
+ generates 'TNode<JSObject>';
+extern class JSError extends JSObject generates 'TNode<JSObject>';
+
extern macro Is64(): constexpr bool;
extern macro SelectBooleanConstant(bool): Boolean;
@@ -1657,6 +1840,8 @@ extern transitioning builtin HasProperty(implicit context: Context)(
extern transitioning macro HasProperty_Inline(implicit context: Context)(
JSReceiver, JSAny): Boolean;
extern builtin LoadIC(Context, JSAny, JSAny, Smi, FeedbackVector): JSAny;
+extern macro CollectCallFeedback(
+ JSAny, Context, Undefined | FeedbackVector, uintptr);
extern macro ThrowRangeError(implicit context: Context)(
constexpr MessageTemplate): never;
@@ -1674,6 +1859,10 @@ extern macro ThrowTypeError(implicit context: Context)(
constexpr MessageTemplate, Object, Object, Object): never;
extern transitioning runtime ThrowTypeErrorIfStrict(implicit context: Context)(
Smi, Object, Object): void;
+extern transitioning runtime ThrowCalledNonCallable(implicit context: Context)(
+ JSAny): never;
+extern transitioning runtime ThrowSymbolIteratorInvalid(
+ implicit context: Context)(): never;
extern transitioning macro ThrowIfNotJSReceiver(implicit context: Context)(
JSAny, constexpr MessageTemplate, constexpr string): void;
@@ -2232,6 +2421,14 @@ Cast<JSStringIterator>(o: HeapObject): JSStringIterator
return HeapObjectToJSStringIterator(o) otherwise CastError;
}
+Cast<JSRegExpStringIterator>(o: HeapObject): JSRegExpStringIterator
+ labels CastError {
+ if (IsJSRegExpStringIterator(o)) {
+ return %RawDownCast<JSRegExpStringIterator>(o);
+ }
+ goto CastError;
+}
+
Cast<JSTypedArray>(o: HeapObject): JSTypedArray
labels CastError {
if (IsJSTypedArray(o)) return %RawDownCast<JSTypedArray>(o);
@@ -2354,12 +2551,25 @@ Cast<JSRegExp>(o: HeapObject): JSRegExp
goto CastError;
}
+Cast<FastJSRegExpResult>(implicit context: Context)(o: HeapObject):
+ FastJSRegExpResult
+ labels CastError {
+ if (regexp::IsFastRegExpResult(o)) return %RawDownCast<FastJSRegExpResult>(o);
+ goto CastError;
+}
+
Cast<Map>(implicit context: Context)(o: HeapObject): Map
labels CastError {
if (IsMap(o)) return %RawDownCast<Map>(o);
goto CastError;
}
+Cast<FeedbackVector>(implicit context: Context)(o: HeapObject): FeedbackVector
+ labels CastError {
+ if (IsFeedbackVector(o)) return %RawDownCast<FeedbackVector>(o);
+ goto CastError;
+}
+
Cast<JSPrimitiveWrapper>(o: HeapObject): JSPrimitiveWrapper
labels CastError {
if (IsJSPrimitiveWrapper(o)) return %RawDownCast<JSPrimitiveWrapper>(o);
@@ -2513,6 +2723,7 @@ extern macro Signed(RawPtr): intptr;
extern macro TruncateIntPtrToInt32(intptr): int32;
extern macro SmiTag(intptr): Smi;
extern macro SmiFromInt32(int32): Smi;
+extern macro SmiFromUint32(uint32): Smi;
extern macro SmiUntag(Smi): intptr;
extern macro SmiToInt32(Smi): int32;
extern macro RoundIntPtrToFloat64(intptr): float64;
@@ -2556,6 +2767,7 @@ extern macro BitcastWordToTaggedSigned(uintptr): Smi;
extern macro BitcastWordToTagged(intptr): Object;
extern macro BitcastWordToTagged(uintptr): Object;
extern macro BitcastTaggedToWord(Tagged): intptr;
+extern macro BitcastTaggedToWordForTagAndSmiBits(Tagged): intptr;
intrinsic %FromConstexpr<To: type, From: type>(b: From): To;
macro FromConstexpr<To: type, From: type>(o: From): To;
@@ -2674,7 +2886,7 @@ Convert<Number, uint32>(ui: uint32): Number {
return ChangeUint32ToTagged(ui);
}
Convert<Smi, uint32>(ui: uint32): Smi {
- return SmiFromInt32(Signed(ui));
+ return SmiFromUint32(ui);
}
Convert<uintptr, uint32>(ui: uint32): uintptr {
return ChangeUint32ToWord(ui);
@@ -2811,8 +3023,7 @@ extern macro IsMockArrayBufferAllocatorFlag(): bool;
extern macro IsPrototypeTypedArrayPrototype(implicit context: Context)(Map):
bool;
-extern operator '.data_ptr' macro LoadJSTypedArrayBackingStore(JSTypedArray):
- RawPtr;
+extern operator '.data_ptr' macro LoadJSTypedArrayDataPtr(JSTypedArray): RawPtr;
extern operator '.elements_kind' macro LoadMapElementsKind(Map): ElementsKind;
extern operator '.elements_kind' macro LoadElementsKind(JSTypedArray):
@@ -2879,8 +3090,6 @@ extern macro LoadConstructorOrBackPointer(Map): Object;
extern macro BasicLoadNumberDictionaryElement(NumberDictionary, intptr): JSAny
labels NotData, IfHole;
-extern macro BasicStoreNumberDictionaryElement(NumberDictionary, intptr, JSAny)
- labels NotData, IfHole, ReadOnly;
extern macro IsFastElementsKind(ElementsKind): bool;
extern macro IsDoubleElementsKind(ElementsKind): bool;
@@ -3255,9 +3464,11 @@ extern macro PerformStackCheck(implicit context: Context)(): void;
extern macro IsCallable(HeapObject): bool;
extern macro IsConstructor(HeapObject): bool;
+extern macro IsFeedbackVector(HeapObject): bool;
extern macro IsJSArray(HeapObject): bool;
extern macro IsJSProxy(HeapObject): bool;
extern macro IsJSRegExp(HeapObject): bool;
+extern macro IsJSRegExpStringIterator(HeapObject): bool;
extern macro IsMap(HeapObject): bool;
extern macro IsJSFunction(HeapObject): bool;
extern macro IsJSObject(HeapObject): bool;
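The two Cast specializations added in this file (Cast<JSRegExpStringIterator> and Cast<FeedbackVector>) follow base.tq's usual checked-downcast idiom: probe the instance type with an Is* macro, %RawDownCast on success, and otherwise jump to the caller-supplied CastError label. For readers more at home in C++, a rough standalone analogue of that control flow (hypothetical sketch, not V8 code; std::optional plays the role of the CastError label):

    #include <optional>

    struct HeapObject { int instance_type; };
    struct FeedbackVector : HeapObject {};

    constexpr int kFeedbackVectorType = 42;  // invented type tag for the sketch

    // Returns the downcast pointer on success, std::nullopt otherwise --
    // the analogue of Torque branching to CastError.
    std::optional<FeedbackVector*> CastToFeedbackVector(HeapObject* o) {
      if (o->instance_type == kFeedbackVectorType) {
        // The unchecked %RawDownCast: safe because the tag was just checked.
        return static_cast<FeedbackVector*>(o);
      }
      return std::nullopt;  // goto CastError;
    }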
diff --git a/deps/v8/src/builtins/bigint.tq b/deps/v8/src/builtins/bigint.tq
index a1b1cb6780..f0409ad23d 100644
--- a/deps/v8/src/builtins/bigint.tq
+++ b/deps/v8/src/builtins/bigint.tq
@@ -7,7 +7,8 @@
// TODO(nicohartmann): Discuss whether types used by multiple builtins should be
// in the global namespace
@noVerifier
-extern class BigIntBase extends HeapObject generates 'TNode<BigInt>' {
+extern class BigIntBase extends PrimitiveHeapObject
+ generates 'TNode<BigInt>' {
}
type BigInt extends BigIntBase;
@@ -44,9 +45,6 @@ namespace bigint {
MutableBigInt, intptr, uintptr): void;
extern macro CodeStubAssembler::LoadBigIntDigit(BigIntBase, intptr): uintptr;
- @export // Silence unused warning.
- // TODO(szuend): Remove @export once macros that are only used in
- // asserts are no longer detected as unused.
macro IsCanonicalized(bigint: BigIntBase): bool {
const length = ReadBigIntLength(bigint);
diff --git a/deps/v8/src/builtins/builtins-arguments-gen.cc b/deps/v8/src/builtins/builtins-arguments-gen.cc
index c4399175e9..fb6169adf8 100644
--- a/deps/v8/src/builtins/builtins-arguments-gen.cc
+++ b/deps/v8/src/builtins/builtins-arguments-gen.cc
@@ -17,38 +17,34 @@
namespace v8 {
namespace internal {
-using Node = compiler::Node;
-
-std::tuple<Node*, Node*, Node*>
-ArgumentsBuiltinsAssembler::AllocateArgumentsObject(Node* map,
- Node* arguments_count,
- Node* parameter_map_count,
- ParameterMode mode,
- int base_size) {
+ArgumentsBuiltinsAssembler::ArgumentsAllocationResult
+ArgumentsBuiltinsAssembler::AllocateArgumentsObject(
+ TNode<Map> map, TNode<BInt> arguments_count,
+ TNode<BInt> parameter_map_count, int base_size) {
// Allocate the parameter object (either a Rest parameter object, a strict
// arguments object or a sloppy arguments object) and the elements/mapped
// arguments together.
int elements_offset = base_size;
- Node* element_count = arguments_count;
+ TNode<BInt> element_count = arguments_count;
if (parameter_map_count != nullptr) {
base_size += FixedArray::kHeaderSize;
- element_count = IntPtrOrSmiAdd(element_count, parameter_map_count, mode);
+ element_count = IntPtrOrSmiAdd(element_count, parameter_map_count);
}
- bool empty = IsIntPtrOrSmiConstantZero(arguments_count, mode);
+ bool empty = IsIntPtrOrSmiConstantZero(arguments_count);
DCHECK_IMPLIES(empty, parameter_map_count == nullptr);
TNode<IntPtrT> size =
empty ? IntPtrConstant(base_size)
- : ElementOffsetFromIndex(element_count, PACKED_ELEMENTS, mode,
+ : ElementOffsetFromIndex(element_count, PACKED_ELEMENTS,
base_size + FixedArray::kHeaderSize);
TNode<HeapObject> result = Allocate(size);
Comment("Initialize arguments object");
StoreMapNoWriteBarrier(result, map);
TNode<FixedArray> empty_fixed_array = EmptyFixedArrayConstant();
StoreObjectField(result, JSArray::kPropertiesOrHashOffset, empty_fixed_array);
- TNode<Smi> smi_arguments_count = ParameterToTagged(arguments_count, mode);
+ TNode<Smi> smi_arguments_count = BIntToSmi(arguments_count);
StoreObjectFieldNoWriteBarrier(result, JSArray::kLengthOffset,
smi_arguments_count);
- Node* arguments = nullptr;
+ TNode<HeapObject> arguments;
if (!empty) {
arguments = InnerAllocate(result, elements_offset);
StoreObjectFieldNoWriteBarrier(arguments, FixedArray::kLengthOffset,
@@ -56,18 +52,17 @@ ArgumentsBuiltinsAssembler::AllocateArgumentsObject(Node* map,
TNode<Map> fixed_array_map = FixedArrayMapConstant();
StoreMapNoWriteBarrier(arguments, fixed_array_map);
}
- Node* parameter_map = nullptr;
- if (parameter_map_count != nullptr) {
+ TNode<HeapObject> parameter_map;
+ if (!parameter_map_count.is_null()) {
TNode<IntPtrT> parameter_map_offset = ElementOffsetFromIndex(
- arguments_count, PACKED_ELEMENTS, mode, FixedArray::kHeaderSize);
- parameter_map = InnerAllocate(CAST(arguments), parameter_map_offset);
+ arguments_count, PACKED_ELEMENTS, FixedArray::kHeaderSize);
+ parameter_map = InnerAllocate(arguments, parameter_map_offset);
StoreObjectFieldNoWriteBarrier(result, JSArray::kElementsOffset,
parameter_map);
TNode<Map> sloppy_elements_map = SloppyArgumentsElementsMapConstant();
StoreMapNoWriteBarrier(parameter_map, sloppy_elements_map);
- parameter_map_count = ParameterToTagged(parameter_map_count, mode);
StoreObjectFieldNoWriteBarrier(parameter_map, FixedArray::kLengthOffset,
- parameter_map_count);
+ BIntToSmi(parameter_map_count));
} else {
if (empty) {
StoreObjectFieldNoWriteBarrier(result, JSArray::kElementsOffset,
@@ -77,80 +72,73 @@ ArgumentsBuiltinsAssembler::AllocateArgumentsObject(Node* map,
arguments);
}
}
- return std::tuple<Node*, Node*, Node*>(result, arguments, parameter_map);
+ return {CAST(result), UncheckedCast<FixedArray>(arguments),
+ UncheckedCast<FixedArray>(parameter_map)};
}
-Node* ArgumentsBuiltinsAssembler::ConstructParametersObjectFromArgs(
- Node* map, Node* frame_ptr, Node* arg_count, Node* first_arg,
- Node* rest_count, ParameterMode param_mode, int base_size) {
+TNode<JSObject> ArgumentsBuiltinsAssembler::ConstructParametersObjectFromArgs(
+ TNode<Map> map, TNode<RawPtrT> frame_ptr, TNode<BInt> arg_count,
+ TNode<BInt> first_arg, TNode<BInt> rest_count, int base_size) {
// Allocate the parameter object (either a Rest parameter object, a strict
// arguments object or a sloppy arguments object) and the elements together and
// fill in the contents with the arguments above |formal_parameter_count|.
- Node* result;
- Node* elements;
- Node* unused;
- std::tie(result, elements, unused) =
- AllocateArgumentsObject(map, rest_count, nullptr, param_mode, base_size);
- DCHECK_NULL(unused);
- CodeStubArguments arguments(this, arg_count, frame_ptr, param_mode);
- VARIABLE(offset, MachineType::PointerRepresentation());
- offset.Bind(IntPtrConstant(FixedArrayBase::kHeaderSize - kHeapObjectTag));
+ ArgumentsAllocationResult alloc_result =
+ AllocateArgumentsObject(map, rest_count, {}, base_size);
+ DCHECK(alloc_result.parameter_map.is_null());
+ CodeStubArguments arguments(this, arg_count, frame_ptr);
+ TVARIABLE(IntPtrT, offset,
+ IntPtrConstant(FixedArrayBase::kHeaderSize - kHeapObjectTag));
VariableList list({&offset}, zone());
arguments.ForEach(
list,
- [this, elements, &offset](Node* arg) {
- StoreNoWriteBarrier(MachineRepresentation::kTagged, elements,
- offset.value(), arg);
+ [&](TNode<Object> arg) {
+ StoreNoWriteBarrier(MachineRepresentation::kTagged,
+ alloc_result.elements, offset.value(), arg);
Increment(&offset, kTaggedSize);
},
- first_arg, nullptr, param_mode);
- return result;
+ first_arg);
+ return alloc_result.arguments_object;
}
-Node* ArgumentsBuiltinsAssembler::EmitFastNewRestParameter(Node* context,
- Node* function) {
+TNode<JSObject> ArgumentsBuiltinsAssembler::EmitFastNewRestParameter(
+ TNode<Context> context, TNode<JSFunction> function) {
ParameterMode mode = OptimalParameterMode();
- Node* zero = IntPtrOrSmiConstant(0, mode);
+ TNode<BInt> zero = BIntConstant(0);
- TorqueStructArgumentsInfo info = GetArgumentsFrameAndCount(
- CAST(context), UncheckedCast<JSFunction>(function));
+ TorqueStructArgumentsInfo info = GetArgumentsFrameAndCount(context, function);
- VARIABLE(result, MachineRepresentation::kTagged);
+ TVARIABLE(JSObject, result);
Label no_rest_parameters(this), runtime(this, Label::kDeferred),
done(this, &result);
- Node* rest_count =
- IntPtrOrSmiSub(info.argument_count, info.formal_parameter_count, mode);
+ TNode<BInt> rest_count =
+ IntPtrOrSmiSub(info.argument_count, info.formal_parameter_count);
TNode<NativeContext> const native_context = LoadNativeContext(context);
TNode<Map> const array_map =
LoadJSArrayElementsMap(PACKED_ELEMENTS, native_context);
- GotoIf(IntPtrOrSmiLessThanOrEqual(rest_count, zero, mode),
- &no_rest_parameters);
+ GotoIf(IntPtrOrSmiLessThanOrEqual(rest_count, zero), &no_rest_parameters);
GotoIfFixedArraySizeDoesntFitInNewSpace(
rest_count, &runtime, JSArray::kSize + FixedArray::kHeaderSize, mode);
// Allocate the Rest JSArray and the elements together and fill in the
// contents with the arguments above |formal_parameter_count|.
- result.Bind(ConstructParametersObjectFromArgs(
+ result = ConstructParametersObjectFromArgs(
array_map, info.frame, info.argument_count, info.formal_parameter_count,
- rest_count, mode, JSArray::kSize));
+ rest_count, JSArray::kSize);
Goto(&done);
BIND(&no_rest_parameters);
{
- Node* arguments;
- Node* elements;
- Node* unused;
- std::tie(arguments, elements, unused) =
- AllocateArgumentsObject(array_map, zero, nullptr, mode, JSArray::kSize);
- result.Bind(arguments);
+ ArgumentsAllocationResult alloc_result =
+ AllocateArgumentsObject(array_map, zero, {}, JSArray::kSize);
+ result = alloc_result.arguments_object;
Goto(&done);
}
BIND(&runtime);
{
- result.Bind(CallRuntime(Runtime::kNewRestParameter, context, function));
+ result = CAST(CallRuntime(Runtime::kNewRestParameter, context, function));
Goto(&done);
}
@@ -158,45 +146,41 @@ Node* ArgumentsBuiltinsAssembler::EmitFastNewRestParameter(Node* context,
return result.value();
}
-Node* ArgumentsBuiltinsAssembler::EmitFastNewStrictArguments(Node* context,
- Node* function) {
- VARIABLE(result, MachineRepresentation::kTagged);
+TNode<JSObject> ArgumentsBuiltinsAssembler::EmitFastNewStrictArguments(
+ TNode<Context> context, TNode<JSFunction> function) {
+ TVARIABLE(JSObject, result);
Label done(this, &result), empty(this), runtime(this, Label::kDeferred);
ParameterMode mode = OptimalParameterMode();
TNode<BInt> zero = BIntConstant(0);
- TorqueStructArgumentsInfo info = GetArgumentsFrameAndCount(
- CAST(context), UncheckedCast<JSFunction>(function));
+ TorqueStructArgumentsInfo info = GetArgumentsFrameAndCount(context, function);
GotoIfFixedArraySizeDoesntFitInNewSpace(
info.argument_count, &runtime,
JSStrictArgumentsObject::kSize + FixedArray::kHeaderSize, mode);
TNode<NativeContext> const native_context = LoadNativeContext(context);
- TNode<Object> const map =
- LoadContextElement(native_context, Context::STRICT_ARGUMENTS_MAP_INDEX);
+ TNode<Map> map = CAST(
+ LoadContextElement(native_context, Context::STRICT_ARGUMENTS_MAP_INDEX));
GotoIf(BIntEqual(info.argument_count, zero), &empty);
- result.Bind(ConstructParametersObjectFromArgs(
- map, info.frame, info.argument_count, zero, info.argument_count, mode,
- JSStrictArgumentsObject::kSize));
+ result = ConstructParametersObjectFromArgs(
+ map, info.frame, info.argument_count, zero, info.argument_count,
+ JSStrictArgumentsObject::kSize);
Goto(&done);
BIND(&empty);
{
- Node* arguments;
- Node* elements;
- Node* unused;
- std::tie(arguments, elements, unused) = AllocateArgumentsObject(
- map, zero, nullptr, mode, JSStrictArgumentsObject::kSize);
- result.Bind(arguments);
+ ArgumentsAllocationResult alloc_result =
+ AllocateArgumentsObject(map, zero, {}, JSStrictArgumentsObject::kSize);
+ result = alloc_result.arguments_object;
Goto(&done);
}
BIND(&runtime);
{
- result.Bind(CallRuntime(Runtime::kNewStrictArguments, context, function));
+ result = CAST(CallRuntime(Runtime::kNewStrictArguments, context, function));
Goto(&done);
}
@@ -204,9 +188,9 @@ Node* ArgumentsBuiltinsAssembler::EmitFastNewStrictArguments(Node* context,
return result.value();
}
-Node* ArgumentsBuiltinsAssembler::EmitFastNewSloppyArguments(Node* context,
- Node* function) {
- VARIABLE(result, MachineRepresentation::kTagged);
+TNode<JSObject> ArgumentsBuiltinsAssembler::EmitFastNewSloppyArguments(
+ TNode<Context> context, TNode<JSFunction> function) {
+ TVARIABLE(JSObject, result);
ParameterMode mode = OptimalParameterMode();
TNode<BInt> zero = BIntConstant(0);
@@ -214,8 +198,7 @@ Node* ArgumentsBuiltinsAssembler::EmitFastNewSloppyArguments(Node* context,
Label done(this, &result), empty(this), no_parameters(this),
runtime(this, Label::kDeferred);
- TorqueStructArgumentsInfo info = GetArgumentsFrameAndCount(
- CAST(context), UncheckedCast<JSFunction>(function));
+ TorqueStructArgumentsInfo info = GetArgumentsFrameAndCount(context, function);
GotoIf(BIntEqual(info.argument_count, zero), &empty);
@@ -224,54 +207,55 @@ Node* ArgumentsBuiltinsAssembler::EmitFastNewSloppyArguments(Node* context,
{
Comment("Mapped parameter JSSloppyArgumentsObject");
- Node* mapped_count =
- IntPtrOrSmiMin(info.argument_count, info.formal_parameter_count, mode);
+ TNode<BInt> mapped_count =
+ IntPtrOrSmiMin(info.argument_count, info.formal_parameter_count);
- Node* parameter_map_size =
- IntPtrOrSmiAdd(mapped_count, IntPtrOrSmiConstant(2, mode), mode);
+ TNode<BInt> parameter_map_size =
+ IntPtrOrSmiAdd(mapped_count, BIntConstant(2));
// Verify that the overall allocation will fit in new space.
- Node* elements_allocated =
- IntPtrOrSmiAdd(info.argument_count, parameter_map_size, mode);
+ TNode<BInt> elements_allocated =
+ IntPtrOrSmiAdd(info.argument_count, parameter_map_size);
GotoIfFixedArraySizeDoesntFitInNewSpace(
elements_allocated, &runtime,
JSSloppyArgumentsObject::kSize + FixedArray::kHeaderSize * 2, mode);
TNode<NativeContext> const native_context = LoadNativeContext(context);
- TNode<Object> const map = LoadContextElement(
- native_context, Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX);
- Node* argument_object;
- Node* elements;
- Node* map_array;
- std::tie(argument_object, elements, map_array) =
+ TNode<Map> const map = CAST(LoadContextElement(
+ native_context, Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX));
+ ArgumentsAllocationResult alloc_result =
AllocateArgumentsObject(map, info.argument_count, parameter_map_size,
- mode, JSSloppyArgumentsObject::kSize);
- StoreObjectFieldNoWriteBarrier(
- argument_object, JSSloppyArgumentsObject::kCalleeOffset, function);
- StoreFixedArrayElement(CAST(map_array), 0, context, SKIP_WRITE_BARRIER);
- StoreFixedArrayElement(CAST(map_array), 1, elements, SKIP_WRITE_BARRIER);
+ JSSloppyArgumentsObject::kSize);
+ StoreObjectFieldNoWriteBarrier(alloc_result.arguments_object,
+ JSSloppyArgumentsObject::kCalleeOffset,
+ function);
+ StoreFixedArrayElement(alloc_result.parameter_map, 0, context,
+ SKIP_WRITE_BARRIER);
+ StoreFixedArrayElement(alloc_result.parameter_map, 1, alloc_result.elements,
+ SKIP_WRITE_BARRIER);
Comment("Fill in non-mapped parameters");
TNode<IntPtrT> argument_offset =
- ElementOffsetFromIndex(info.argument_count, PACKED_ELEMENTS, mode,
+ ElementOffsetFromIndex(info.argument_count, PACKED_ELEMENTS,
FixedArray::kHeaderSize - kHeapObjectTag);
TNode<IntPtrT> mapped_offset =
- ElementOffsetFromIndex(mapped_count, PACKED_ELEMENTS, mode,
+ ElementOffsetFromIndex(mapped_count, PACKED_ELEMENTS,
FixedArray::kHeaderSize - kHeapObjectTag);
- CodeStubArguments arguments(this, info.argument_count, info.frame, mode);
- VARIABLE(current_argument, MachineType::PointerRepresentation());
- current_argument.Bind(arguments.AtIndexPtr(info.argument_count, mode));
+ CodeStubArguments arguments(this, info.argument_count, info.frame);
+ TVARIABLE(RawPtrT, current_argument,
+ arguments.AtIndexPtr(info.argument_count));
VariableList var_list1({&current_argument}, zone());
- mapped_offset = UncheckedCast<IntPtrT>(BuildFastLoop(
+ mapped_offset = BuildFastLoop<IntPtrT>(
var_list1, argument_offset, mapped_offset,
- [this, elements, &current_argument](Node* offset) {
+ [&](TNode<IntPtrT> offset) {
Increment(&current_argument, kSystemPointerSize);
TNode<Object> arg = LoadBufferObject(
- UncheckedCast<RawPtrT>(current_argument.value()), 0);
- StoreNoWriteBarrier(MachineRepresentation::kTagged, elements, offset,
- arg);
+ ReinterpretCast<RawPtrT>(current_argument.value()), 0);
+ StoreNoWriteBarrier(MachineRepresentation::kTagged,
+ alloc_result.elements, offset, arg);
+ return;
},
- -kTaggedSize, INTPTR_PARAMETERS));
+ -kTaggedSize);
// Copy the parameter slots and the holes in the arguments.
// We need to fill in mapped_count slots. They index the context,
@@ -282,32 +266,32 @@ Node* ArgumentsBuiltinsAssembler::EmitFastNewSloppyArguments(Node* context,
// MIN_CONTEXT_SLOTS+argument_count-mapped_count
// We loop from right to left.
Comment("Fill in mapped parameters");
- VARIABLE(context_index, OptimalParameterRepresentation());
- context_index.Bind(IntPtrOrSmiSub(
- IntPtrOrSmiAdd(IntPtrOrSmiConstant(Context::MIN_CONTEXT_SLOTS, mode),
- info.formal_parameter_count, mode),
- mapped_count, mode));
+ TVARIABLE(
+ BInt, context_index,
+ IntPtrOrSmiSub(IntPtrOrSmiAdd(BIntConstant(Context::MIN_CONTEXT_SLOTS),
+ info.formal_parameter_count),
+ mapped_count));
TNode<Oddball> the_hole = TheHoleConstant();
VariableList var_list2({&context_index}, zone());
const int kParameterMapHeaderSize = FixedArray::OffsetOfElementAt(2);
TNode<IntPtrT> adjusted_map_array = IntPtrAdd(
- BitcastTaggedToWord(map_array),
+ BitcastTaggedToWord(alloc_result.parameter_map),
IntPtrConstant(kParameterMapHeaderSize - FixedArray::kHeaderSize));
TNode<IntPtrT> zero_offset = ElementOffsetFromIndex(
zero, PACKED_ELEMENTS, mode, FixedArray::kHeaderSize - kHeapObjectTag);
- BuildFastLoop(
+ BuildFastLoop<IntPtrT>(
var_list2, mapped_offset, zero_offset,
- [=, &context_index](Node* offset) {
- StoreNoWriteBarrier(MachineRepresentation::kTagged, elements, offset,
- the_hole);
+ [&](TNode<IntPtrT> offset) {
+ StoreNoWriteBarrier(MachineRepresentation::kTagged,
+ alloc_result.elements, offset, the_hole);
StoreNoWriteBarrier(MachineRepresentation::kTagged,
adjusted_map_array, offset,
- ParameterToTagged(context_index.value(), mode));
- Increment(&context_index, 1, mode);
+ BIntToSmi(context_index.value()));
+ Increment(&context_index);
},
- -kTaggedSize, INTPTR_PARAMETERS);
+ -kTaggedSize);
- result.Bind(argument_object);
+ result = alloc_result.arguments_object;
Goto(&done);
}
@@ -318,11 +302,11 @@ Node* ArgumentsBuiltinsAssembler::EmitFastNewSloppyArguments(Node* context,
info.argument_count, &runtime,
JSSloppyArgumentsObject::kSize + FixedArray::kHeaderSize, mode);
TNode<NativeContext> const native_context = LoadNativeContext(context);
- TNode<Object> const map =
- LoadContextElement(native_context, Context::SLOPPY_ARGUMENTS_MAP_INDEX);
- result.Bind(ConstructParametersObjectFromArgs(
- map, info.frame, info.argument_count, zero, info.argument_count, mode,
- JSSloppyArgumentsObject::kSize));
+ TNode<Map> map = CAST(LoadContextElement(
+ native_context, Context::SLOPPY_ARGUMENTS_MAP_INDEX));
+ result = ConstructParametersObjectFromArgs(
+ map, info.frame, info.argument_count, zero, info.argument_count,
+ JSSloppyArgumentsObject::kSize);
StoreObjectFieldNoWriteBarrier(
result.value(), JSSloppyArgumentsObject::kCalleeOffset, function);
Goto(&done);
@@ -332,14 +316,11 @@ Node* ArgumentsBuiltinsAssembler::EmitFastNewSloppyArguments(Node* context,
{
Comment("Empty JSSloppyArgumentsObject");
TNode<NativeContext> const native_context = LoadNativeContext(context);
- TNode<Object> const map =
- LoadContextElement(native_context, Context::SLOPPY_ARGUMENTS_MAP_INDEX);
- Node* arguments;
- Node* elements;
- Node* unused;
- std::tie(arguments, elements, unused) = AllocateArgumentsObject(
- map, zero, nullptr, mode, JSSloppyArgumentsObject::kSize);
- result.Bind(arguments);
+ TNode<Map> const map = CAST(LoadContextElement(
+ native_context, Context::SLOPPY_ARGUMENTS_MAP_INDEX));
+ ArgumentsAllocationResult alloc_result =
+ AllocateArgumentsObject(map, zero, {}, JSSloppyArgumentsObject::kSize);
+ result = alloc_result.arguments_object;
StoreObjectFieldNoWriteBarrier(
result.value(), JSSloppyArgumentsObject::kCalleeOffset, function);
Goto(&done);
@@ -347,7 +328,7 @@ Node* ArgumentsBuiltinsAssembler::EmitFastNewSloppyArguments(Node* context,
BIND(&runtime);
{
- result.Bind(CallRuntime(Runtime::kNewSloppyArguments, context, function));
+ result = CAST(CallRuntime(Runtime::kNewSloppyArguments, context, function));
Goto(&done);
}
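The rewritten mapped-parameters section above preserves the old algorithm while moving it to typed nodes: elements[0..mapped_count) are filled with the hole, the matching parameter-map slots receive context indices, and the loop walks right to left while context_index counts up from MIN_CONTEXT_SLOTS + formal_parameter_count - mapped_count. A standalone C++ simulation of just that index computation (the constant and the flat vector are stand-ins; real sloppy-arguments objects live on the V8 heap):

    #include <algorithm>
    #include <cstdio>
    #include <vector>

    constexpr int kMinContextSlots = 4;  // stand-in for Context::MIN_CONTEXT_SLOTS

    // For each mapped argument index, compute the context slot it aliases,
    // mirroring the right-to-left loop in EmitFastNewSloppyArguments.
    std::vector<int> BuildParameterMap(int argument_count, int formal_count) {
      int mapped_count = std::min(argument_count, formal_count);
      std::vector<int> map(mapped_count);
      int context_index = kMinContextSlots + formal_count - mapped_count;
      for (int i = mapped_count - 1; i >= 0; --i) map[i] = context_index++;
      return map;
    }

    int main() {
      // function f(a, b, c) {} called as f(1, 2): two mapped parameters.
      for (int slot : BuildParameterMap(2, 3)) std::printf("%d ", slot);
      // Prints "6 5": argument 0 aliases context slot 6, argument 1 slot 5.
    }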
diff --git a/deps/v8/src/builtins/builtins-arguments-gen.h b/deps/v8/src/builtins/builtins-arguments-gen.h
index 4eeae4bf86..2565c3e81f 100644
--- a/deps/v8/src/builtins/builtins-arguments-gen.h
+++ b/deps/v8/src/builtins/builtins-arguments-gen.h
@@ -10,7 +10,7 @@
namespace v8 {
namespace internal {
-using Node = compiler::Node;
+// TODO(v8:9396): these declarations pollute the v8::internal scope.
using CodeAssemblerState = compiler::CodeAssemblerState;
using CodeAssemblerLabel = compiler::CodeAssemblerLabel;
@@ -19,19 +19,25 @@ class ArgumentsBuiltinsAssembler : public CodeStubAssembler {
explicit ArgumentsBuiltinsAssembler(CodeAssemblerState* state)
: CodeStubAssembler(state) {}
- Node* EmitFastNewStrictArguments(Node* context, Node* function);
- Node* EmitFastNewSloppyArguments(Node* context, Node* function);
- Node* EmitFastNewRestParameter(Node* context, Node* function);
+ TNode<JSObject> EmitFastNewStrictArguments(TNode<Context> context,
+ TNode<JSFunction> function);
+ TNode<JSObject> EmitFastNewSloppyArguments(TNode<Context> context,
+ TNode<JSFunction> function);
+ TNode<JSObject> EmitFastNewRestParameter(TNode<Context> context,
+ TNode<JSFunction> function);
private:
+ struct ArgumentsAllocationResult {
+ TNode<JSObject> arguments_object;
+ TNode<FixedArray> elements;
+ TNode<FixedArray> parameter_map;
+ };
// Allocates an arguments object (either rest, strict or sloppy) together with the
// FixedArray elements for the arguments and a parameter map (for sloppy
- // arguments only). A tuple is returned with pointers to the arguments object,
- // the elements and parameter map in the form:
- // <argument object, arguments FixedArray, parameter map or nullptr>
- std::tuple<Node*, Node*, Node*> AllocateArgumentsObject(
- Node* map, Node* arguments, Node* mapped_arguments,
- ParameterMode param_mode, int base_size);
+ // arguments only, or empty TNode<> otherwise).
+ ArgumentsAllocationResult AllocateArgumentsObject(
+ TNode<Map> map, TNode<BInt> arguments, TNode<BInt> mapped_arguments,
+ int base_size);
// For Rest parameters and Strict arguments, the copying of parameters from
// the stack into the arguments object is straightforward and shares much of
@@ -40,11 +46,9 @@ class ArgumentsBuiltinsAssembler : public CodeStubAssembler {
// and then copies |rest_count| arguments from the stack frame pointed to by
// |frame_ptr| starting from |first_arg|. |arg_count| == |first_arg| +
// |rest_count|.
- Node* ConstructParametersObjectFromArgs(Node* map, Node* frame_ptr,
- Node* arg_count, Node* first_arg,
- Node* rest_count,
- ParameterMode param_mode,
- int base_size);
+ TNode<JSObject> ConstructParametersObjectFromArgs(
+ TNode<Map> map, TNode<RawPtrT> frame_ptr, TNode<BInt> arg_count,
+ TNode<BInt> first_arg, TNode<BInt> rest_count, int base_size);
};
} // namespace internal
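The header change above sums up this file's refactor: AllocateArgumentsObject used to hand back a positional std::tuple<Node*, Node*, Node*> whose third slot could be nullptr, and now returns the named ArgumentsAllocationResult with typed TNode<> fields. A minimal standalone sketch of the before/after shape (plain pointers stand in for TNode<>; every name here is illustrative):

    #include <tuple>

    struct JSObject {};
    struct FixedArray {};

    static JSObject the_object;
    static FixedArray the_elements;

    // Before: positional results; callers must remember which slot is which
    // and that the third may be nullptr.
    std::tuple<JSObject*, FixedArray*, FixedArray*> AllocateOld() {
      return {&the_object, &the_elements, nullptr};
    }

    // After: each result is named and the optional map is explicit.
    struct ArgumentsAllocationResult {
      JSObject* arguments_object;
      FixedArray* elements;
      FixedArray* parameter_map;  // null unless allocating mapped sloppy args
    };

    ArgumentsAllocationResult AllocateNew() {
      return {&the_object, &the_elements, /*parameter_map=*/nullptr};
    }

    int main() {
      // r.elements is self-documenting where std::get<1>(AllocateOld()) is not.
      ArgumentsAllocationResult r = AllocateNew();
      return r.parameter_map == nullptr ? 0 : 1;
    }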
diff --git a/deps/v8/src/builtins/builtins-array-gen.cc b/deps/v8/src/builtins/builtins-array-gen.cc
index c7d8eb0091..f176924ae5 100644
--- a/deps/v8/src/builtins/builtins-array-gen.cc
+++ b/deps/v8/src/builtins/builtins-array-gen.cc
@@ -25,10 +25,9 @@ using IteratorRecord = TorqueStructIteratorRecord;
ArrayBuiltinsAssembler::ArrayBuiltinsAssembler(
compiler::CodeAssemblerState* state)
: CodeStubAssembler(state),
- k_(this, MachineRepresentation::kTagged),
- a_(this, MachineRepresentation::kTagged),
- to_(this, MachineRepresentation::kTagged, SmiConstant(0)),
- fully_spec_compliant_(this, {&k_, &a_, &to_}) {}
+ k_(this),
+ a_(this),
+ fully_spec_compliant_(this, {&k_, &a_}) {}
void ArrayBuiltinsAssembler::TypedArrayMapResultGenerator() {
// 6. Let A be ? TypedArraySpeciesCreate(O, len).
@@ -44,14 +43,16 @@ void ArrayBuiltinsAssembler::TypedArrayMapResultGenerator() {
LoadJSTypedArrayLength(a)));
fast_typed_array_target_ =
Word32Equal(LoadElementsKind(original_array), LoadElementsKind(a));
- a_.Bind(a);
+ a_ = a;
}
// See tc39.github.io/ecma262/#sec-%typedarray%.prototype.map.
-Node* ArrayBuiltinsAssembler::TypedArrayMapProcessor(Node* k_value, Node* k) {
+TNode<Object> ArrayBuiltinsAssembler::TypedArrayMapProcessor(
+ TNode<Object> k_value, TNode<Object> k) {
// 8. c. Let mapped_value be ? Call(callbackfn, T, « kValue, k, O »).
- Node* mapped_value = CallJS(CodeFactory::Call(isolate()), context(),
- callbackfn(), this_arg(), k_value, k, o());
+ TNode<Object> mapped_value =
+ CallJS(CodeFactory::Call(isolate()), context(), callbackfn(), this_arg(),
+ k_value, k, o());
Label fast(this), slow(this), done(this), detached(this, Label::kDeferred);
// 8. d. Perform ? Set(A, Pk, mapped_value, true).
@@ -65,7 +66,7 @@ Node* ArrayBuiltinsAssembler::TypedArrayMapProcessor(Node* k_value, Node* k) {
// 5. If arrayTypeName is "BigUint64Array" or "BigInt64Array", let
// numValue be ? ToBigInt(v).
// 6. Otherwise, let numValue be ? ToNumber(value).
- Node* num_value;
+ TNode<Object> num_value;
if (source_elements_kind_ == BIGINT64_ELEMENTS ||
source_elements_kind_ == BIGUINT64_ELEMENTS) {
num_value = ToBigInt(context(), mapped_value);
@@ -78,7 +79,7 @@ Node* ArrayBuiltinsAssembler::TypedArrayMapProcessor(Node* k_value, Node* k) {
Goto(&done);
BIND(&slow);
- SetPropertyStrict(context(), CAST(a()), CAST(k), CAST(mapped_value));
+ SetPropertyStrict(context(), a(), k, mapped_value);
Goto(&done);
BIND(&detached);
@@ -90,32 +91,7 @@ Node* ArrayBuiltinsAssembler::TypedArrayMapProcessor(Node* k_value, Node* k) {
return a();
}
-void ArrayBuiltinsAssembler::NullPostLoopAction() {}
-
-void ArrayBuiltinsAssembler::FillFixedArrayWithSmiZero(TNode<FixedArray> array,
- TNode<Smi> smi_length) {
- CSA_ASSERT(this, Word32BinaryNot(IsFixedDoubleArray(array)));
-
- TNode<IntPtrT> length = SmiToIntPtr(smi_length);
- TNode<IntPtrT> byte_length = TimesTaggedSize(length);
- CSA_ASSERT(this, UintPtrLessThan(length, byte_length));
-
- static const int32_t fa_base_data_offset =
- FixedArray::kHeaderSize - kHeapObjectTag;
- TNode<IntPtrT> backing_store = IntPtrAdd(BitcastTaggedToWord(array),
- IntPtrConstant(fa_base_data_offset));
-
- // Call out to memset to perform initialization.
- TNode<ExternalReference> memset =
- ExternalConstant(ExternalReference::libc_memset_function());
- STATIC_ASSERT(kSizetSize == kIntptrSize);
- CallCFunction(memset, MachineType::Pointer(),
- std::make_pair(MachineType::Pointer(), backing_store),
- std::make_pair(MachineType::IntPtr(), IntPtrConstant(0)),
- std::make_pair(MachineType::UintPtr(), byte_length));
-}
-
-void ArrayBuiltinsAssembler::ReturnFromBuiltin(Node* value) {
+void ArrayBuiltinsAssembler::ReturnFromBuiltin(TNode<Object> value) {
if (argc_ == nullptr) {
Return(value);
} else {
@@ -126,8 +102,8 @@ void ArrayBuiltinsAssembler::ReturnFromBuiltin(Node* value) {
}
void ArrayBuiltinsAssembler::InitIteratingArrayBuiltinBody(
- TNode<Context> context, TNode<Object> receiver, Node* callbackfn,
- Node* this_arg, TNode<IntPtrT> argc) {
+ TNode<Context> context, TNode<Object> receiver, TNode<Object> callbackfn,
+ TNode<Object> this_arg, TNode<IntPtrT> argc) {
context_ = context;
receiver_ = receiver;
callbackfn_ = callbackfn;
@@ -137,8 +113,7 @@ void ArrayBuiltinsAssembler::InitIteratingArrayBuiltinBody(
void ArrayBuiltinsAssembler::GenerateIteratingTypedArrayBuiltinBody(
const char* name, const BuiltinResultGenerator& generator,
- const CallResultProcessor& processor, const PostLoopAction& action,
- ForEachDirection direction) {
+ const CallResultProcessor& processor, ForEachDirection direction) {
name_ = name;
// ValidateTypedArray: tc39.github.io/ecma262/#sec-validatetypedarray
@@ -160,7 +135,7 @@ void ArrayBuiltinsAssembler::GenerateIteratingTypedArrayBuiltinBody(
Label throw_not_callable(this, Label::kDeferred);
Label distinguish_types(this);
GotoIf(TaggedIsSmi(callbackfn_), &throw_not_callable);
- Branch(IsCallableMap(LoadMap(callbackfn_)), &distinguish_types,
+ Branch(IsCallableMap(LoadMap(CAST(callbackfn_))), &distinguish_types,
&throw_not_callable);
BIND(&throw_not_typed_array);
@@ -192,9 +167,9 @@ void ArrayBuiltinsAssembler::GenerateIteratingTypedArrayBuiltinBody(
generator(this);
if (direction == ForEachDirection::kForward) {
- k_.Bind(SmiConstant(0));
+ k_ = SmiConstant(0);
} else {
- k_.Bind(NumberDec(len()));
+ k_ = NumberDec(len());
}
CSA_ASSERT(this, IsSafeInteger(k()));
TNode<Int32T> elements_kind = LoadMapElementsKind(typed_array_map);
@@ -214,26 +189,18 @@ void ArrayBuiltinsAssembler::GenerateIteratingTypedArrayBuiltinBody(
Goto(&done);
// No exception, return success
BIND(&done);
- action(this);
ReturnFromBuiltin(a_.value());
}
}
void ArrayBuiltinsAssembler::VisitAllTypedArrayElements(
- Node* array_buffer, const CallResultProcessor& processor, Label* detached,
- ForEachDirection direction, TNode<JSTypedArray> typed_array) {
- VariableList list({&a_, &k_, &to_}, zone());
-
- FastLoopBody body = [&](Node* index) {
- GotoIf(IsDetachedBuffer(CAST(array_buffer)), detached);
- TNode<RawPtrT> data_ptr = LoadJSTypedArrayBackingStore(typed_array);
- auto value = LoadFixedTypedArrayElementAsTagged(
- data_ptr, index, source_elements_kind_, SMI_PARAMETERS);
- k_.Bind(index);
- a_.Bind(processor(this, value, index));
- };
- Node* start = SmiConstant(0);
- Node* end = len_;
+ TNode<JSArrayBuffer> array_buffer, const CallResultProcessor& processor,
+ Label* detached, ForEachDirection direction,
+ TNode<JSTypedArray> typed_array) {
+ VariableList list({&a_, &k_}, zone());
+
+ TNode<Smi> start = SmiConstant(0);
+ TNode<Smi> end = CAST(len_);
IndexAdvanceMode advance_mode = IndexAdvanceMode::kPost;
int incr = 1;
if (direction == ForEachDirection::kReverse) {
@@ -241,54 +208,17 @@ void ArrayBuiltinsAssembler::VisitAllTypedArrayElements(
advance_mode = IndexAdvanceMode::kPre;
incr = -1;
}
- BuildFastLoop(list, start, end, body, incr, ParameterMode::SMI_PARAMETERS,
- advance_mode);
-}
-
-// Perform ArraySpeciesCreate (ES6 #sec-arrayspeciescreate).
-void ArrayBuiltinsAssembler::GenerateArraySpeciesCreate(TNode<Number> len) {
- Label runtime(this, Label::kDeferred), done(this);
-
- TNode<Map> const original_map = LoadMap(o());
- GotoIfNot(InstanceTypeEqual(LoadMapInstanceType(original_map), JS_ARRAY_TYPE),
- &runtime);
-
- GotoIfNot(IsPrototypeInitialArrayPrototype(context(), original_map),
- &runtime);
-
- TNode<PropertyCell> species_protector = ArraySpeciesProtectorConstant();
- TNode<Object> value =
- LoadObjectField(species_protector, PropertyCell::kValueOffset);
- TNode<Smi> const protector_invalid = SmiConstant(Isolate::kProtectorInvalid);
- GotoIf(TaggedEqual(value, protector_invalid), &runtime);
-
- GotoIfNot(TaggedIsPositiveSmi(len), &runtime);
- GotoIfNot(IsValidFastJSArrayCapacity(len, CodeStubAssembler::SMI_PARAMETERS),
- &runtime);
-
- // We need to be conservative and start with holey because the builtins
- // that create output arrays aren't guaranteed to be called for every
- // element in the input array (maybe the callback deletes an element).
- const ElementsKind elements_kind =
- GetHoleyElementsKind(GetInitialFastElementsKind());
- TNode<NativeContext> native_context = LoadNativeContext(context());
- TNode<Map> array_map = LoadJSArrayElementsMap(elements_kind, native_context);
- a_.Bind(AllocateJSArray(PACKED_SMI_ELEMENTS, array_map, len, CAST(len),
- nullptr, CodeStubAssembler::SMI_PARAMETERS,
- kAllowLargeObjectAllocation));
-
- Goto(&done);
-
- BIND(&runtime);
- {
- // 5. Let A be ? ArraySpeciesCreate(O, len).
- TNode<JSReceiver> constructor =
- CAST(CallRuntime(Runtime::kArraySpeciesConstructor, context(), o()));
- a_.Bind(Construct(context(), constructor, len));
- Goto(&fully_spec_compliant_);
- }
-
- BIND(&done);
+ BuildFastLoop<Smi>(
+ list, start, end,
+ [&](TNode<Smi> index) {
+ GotoIf(IsDetachedBuffer(array_buffer), detached);
+ TNode<RawPtrT> data_ptr = LoadJSTypedArrayDataPtr(typed_array);
+ TNode<Numeric> value = LoadFixedTypedArrayElementAsTagged(
+ data_ptr, index, source_elements_kind_, SMI_PARAMETERS);
+ k_ = index;
+ a_ = processor(this, value, index);
+ },
+ incr, advance_mode);
}
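The VisitAllTypedArrayElements rewrite just above shows the BuildFastLoop shape used throughout this update: the index type becomes a template parameter (BuildFastLoop<Smi> here) and the body becomes a lambda over a typed index instead of a raw Node* callback. A self-contained sketch of that calling convention in ordinary C++ (a hypothetical helper, not the CSA API):

    #include <cstdio>

    // A typed fast-loop helper in the spirit of BuildFastLoop<IndexT>: runs
    // body(i) for i = start, start + incr, ... until i reaches end, then
    // returns the final index.
    template <typename IndexT, typename Body>
    IndexT BuildFastLoop(IndexT start, IndexT end, Body body, IndexT incr) {
      for (IndexT i = start; i != end; i += incr) body(i);
      return end;
    }

    int main() {
      const int data[] = {10, 20, 30};
      // Forward iteration with post-advance, like ForEachDirection::kForward.
      BuildFastLoop<int>(0, 3, [&](int i) { std::printf("%d\n", data[i]); }, 1);
    }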
TF_BUILTIN(ArrayPrototypePop, CodeStubAssembler) {
@@ -297,7 +227,7 @@ TF_BUILTIN(ArrayPrototypePop, CodeStubAssembler) {
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
CSA_ASSERT(this, IsUndefined(Parameter(Descriptor::kJSNewTarget)));
- CodeStubArguments args(this, ChangeInt32ToIntPtr(argc));
+ CodeStubArguments args(this, argc);
TNode<Object> receiver = args.GetReceiver();
Label runtime(this, Label::kDeferred);
@@ -315,9 +245,7 @@ TF_BUILTIN(ArrayPrototypePop, CodeStubAssembler) {
BIND(&fast);
{
TNode<JSArray> array_receiver = CAST(receiver);
- CSA_ASSERT(this, TaggedIsPositiveSmi(LoadJSArrayLength(array_receiver)));
- TNode<IntPtrT> length =
- LoadAndUntagObjectField(array_receiver, JSArray::kLengthOffset);
+ TNode<IntPtrT> length = SmiUntag(LoadFastJSArrayLength(array_receiver));
Label return_undefined(this), fast_elements(this);
GotoIf(IntPtrEqual(length, IntPtrConstant(0)), &return_undefined);
@@ -394,14 +322,12 @@ TF_BUILTIN(ArrayPrototypePush, CodeStubAssembler) {
Label double_transition(this);
Label runtime(this, Label::kDeferred);
- // TODO(ishell): use constants from Descriptor once the JSFunction linkage
- // arguments are reordered.
TNode<Int32T> argc =
UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
CSA_ASSERT(this, IsUndefined(Parameter(Descriptor::kJSNewTarget)));
- CodeStubArguments args(this, ChangeInt32ToIntPtr(argc));
+ CodeStubArguments args(this, argc);
TNode<Object> receiver = args.GetReceiver();
TNode<JSArray> array_receiver;
TNode<Int32T> kind;
@@ -493,9 +419,9 @@ TF_BUILTIN(ArrayPrototypePush, CodeStubAssembler) {
BIND(&default_label);
{
args.ForEach(
- [this, array_receiver, context](Node* arg) {
+ [=](TNode<Object> arg) {
TNode<Number> length = LoadJSArrayLength(array_receiver);
- SetPropertyStrict(context, array_receiver, length, CAST(arg));
+ SetPropertyStrict(context, array_receiver, length, arg);
},
arg_index.value());
args.PopAndReturn(LoadJSArrayLength(array_receiver));
@@ -515,11 +441,10 @@ TF_BUILTIN(ArrayPrototypePush, CodeStubAssembler) {
TF_BUILTIN(ExtractFastJSArray, ArrayBuiltinsAssembler) {
ParameterMode mode = OptimalParameterMode();
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- Node* array = Parameter(Descriptor::kSource);
+ TNode<JSArray> array = CAST(Parameter(Descriptor::kSource));
Node* begin = TaggedToParameter(Parameter(Descriptor::kBegin), mode);
Node* count = TaggedToParameter(Parameter(Descriptor::kCount), mode);
- CSA_ASSERT(this, IsJSArray(array));
CSA_ASSERT(this, Word32BinaryNot(IsNoElementsProtectorCellInvalid()));
Return(ExtractFastJSArray(context, array, begin, count, mode));
@@ -555,7 +480,7 @@ TF_BUILTIN(CloneFastJSArrayFillingHoles, ArrayBuiltinsAssembler) {
Word32BinaryNot(IsNoElementsProtectorCellInvalid())));
ParameterMode mode = OptimalParameterMode();
- Return(CloneFastJSArray(context, array, mode, nullptr,
+ Return(CloneFastJSArray(context, array, mode, {},
HoleConversionMode::kConvertToUndefined));
}
@@ -584,9 +509,9 @@ class ArrayPopulatorAssembler : public CodeStubAssembler {
TNode<Map> array_map = CAST(LoadContextElement(
context, Context::JS_ARRAY_PACKED_SMI_ELEMENTS_MAP_INDEX));
- array = AllocateJSArray(PACKED_SMI_ELEMENTS, array_map, SmiConstant(0),
- SmiConstant(0), nullptr,
- ParameterMode::SMI_PARAMETERS);
+ array =
+ AllocateJSArray(PACKED_SMI_ELEMENTS, array_map, SmiConstant(0),
+ SmiConstant(0), {}, ParameterMode::SMI_PARAMETERS);
Goto(&done);
}
@@ -626,7 +551,7 @@ TF_BUILTIN(ArrayFrom, ArrayPopulatorAssembler) {
TNode<Int32T> argc =
UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount));
- CodeStubArguments args(this, ChangeInt32ToIntPtr(argc));
+ CodeStubArguments args(this, argc);
TNode<Object> items = args.GetOptionalArgumentValue(0);
TNode<Object> receiver = args.GetReceiver();
@@ -810,8 +735,8 @@ TF_BUILTIN(ArrayFrom, ArrayPopulatorAssembler) {
GotoIf(IsUndefined(map_function), &next);
CSA_ASSERT(this, IsCallable(CAST(map_function)));
- value = CAST(CallJS(CodeFactory::Call(isolate()), context, map_function,
- this_arg, value.value(), index.value()));
+ value = CallJS(CodeFactory::Call(isolate()), context, map_function,
+ this_arg, value.value(), index.value());
Goto(&next);
BIND(&next);
}
@@ -846,8 +771,7 @@ TF_BUILTIN(TypedArrayPrototypeMap, ArrayBuiltinsAssembler) {
GenerateIteratingTypedArrayBuiltinBody(
"%TypedArray%.prototype.map",
&ArrayBuiltinsAssembler::TypedArrayMapResultGenerator,
- &ArrayBuiltinsAssembler::TypedArrayMapProcessor,
- &ArrayBuiltinsAssembler::NullPostLoopAction);
+ &ArrayBuiltinsAssembler::TypedArrayMapProcessor);
}
TF_BUILTIN(ArrayIsArray, CodeStubAssembler) {
@@ -884,15 +808,25 @@ class ArrayIncludesIndexofAssembler : public CodeStubAssembler {
void Generate(SearchVariant variant, TNode<IntPtrT> argc,
TNode<Context> context);
- void GenerateSmiOrObject(SearchVariant variant, Node* context, Node* elements,
- TNode<Object> search_element, Node* array_length,
- Node* from_index);
- void GeneratePackedDoubles(SearchVariant variant, Node* elements,
- Node* search_element, Node* array_length,
- Node* from_index);
- void GenerateHoleyDoubles(SearchVariant variant, Node* elements,
- Node* search_element, Node* array_length,
- Node* from_index);
+ void GenerateSmiOrObject(SearchVariant variant, TNode<Context> context,
+ TNode<FixedArray> elements,
+ TNode<Object> search_element,
+ TNode<Smi> array_length, TNode<Smi> from_index);
+ void GeneratePackedDoubles(SearchVariant variant,
+ TNode<FixedDoubleArray> elements,
+ TNode<Object> search_element,
+ TNode<Smi> array_length, TNode<Smi> from_index);
+ void GenerateHoleyDoubles(SearchVariant variant,
+ TNode<FixedDoubleArray> elements,
+ TNode<Object> search_element,
+ TNode<Smi> array_length, TNode<Smi> from_index);
+
+ void ReturnIfEmpty(TNode<Smi> length, TNode<Object> value) {
+ Label done(this);
+ GotoIf(SmiGreaterThan(length, SmiConstant(0)), &done);
+ Return(value);
+ BIND(&done);
+ }
};
void ArrayIncludesIndexofAssembler::Generate(SearchVariant variant,
@@ -916,7 +850,7 @@ void ArrayIncludesIndexofAssembler::Generate(SearchVariant variant,
BranchIfFastJSArrayForRead(receiver, context, &init_index, &call_runtime);
BIND(&init_index);
- VARIABLE(index_var, MachineType::PointerRepresentation(), intptr_zero);
+ TVARIABLE(IntPtrT, index_var, intptr_zero);
TNode<JSArray> array = CAST(receiver);
// JSArray length is always a positive Smi for fast arrays.
@@ -946,14 +880,14 @@ void ArrayIncludesIndexofAssembler::Generate(SearchVariant variant,
BIND(&is_smi);
{
TNode<IntPtrT> intptr_start_from = SmiUntag(CAST(start_from));
- index_var.Bind(intptr_start_from);
+ index_var = intptr_start_from;
GotoIf(IntPtrGreaterThanOrEqual(index_var.value(), intptr_zero), &done);
// The fromIndex is negative: add it to the array's length.
- index_var.Bind(IntPtrAdd(array_length_untagged, index_var.value()));
+ index_var = IntPtrAdd(array_length_untagged, index_var.value());
// Clamp negative results at zero.
GotoIf(IntPtrGreaterThanOrEqual(index_var.value(), intptr_zero), &done);
- index_var.Bind(intptr_zero);
+ index_var = intptr_zero;
Goto(&done);
}
BIND(&done);
@@ -1031,8 +965,7 @@ void ArrayIncludesIndexofAssembler::Generate(SearchVariant variant,
BIND(&call_runtime);
{
- TNode<Object> start_from =
- args.GetOptionalArgumentValue(kFromIndexArg, UndefinedConstant());
+ TNode<Object> start_from = args.GetOptionalArgumentValue(kFromIndexArg);
Runtime::FunctionId function = variant == kIncludes
? Runtime::kArrayIncludes_Slow
: Runtime::kArrayIndexOf;
@@ -1042,8 +975,9 @@ void ArrayIncludesIndexofAssembler::Generate(SearchVariant variant,
}
void ArrayIncludesIndexofAssembler::GenerateSmiOrObject(
- SearchVariant variant, Node* context, Node* elements,
- TNode<Object> search_element, Node* array_length, Node* from_index) {
+ SearchVariant variant, TNode<Context> context, TNode<FixedArray> elements,
+ TNode<Object> search_element, TNode<Smi> array_length,
+ TNode<Smi> from_index) {
TVARIABLE(IntPtrT, index_var, SmiUntag(from_index));
TVARIABLE(Float64T, search_num);
TNode<IntPtrT> array_length_untagged = SmiUntag(array_length);
@@ -1077,7 +1011,7 @@ void ArrayIncludesIndexofAssembler::GenerateSmiOrObject(
GotoIfNot(UintPtrLessThan(index_var.value(), array_length_untagged),
&return_not_found);
TNode<Object> element_k =
- UnsafeLoadFixedArrayElement(CAST(elements), index_var.value());
+ UnsafeLoadFixedArrayElement(elements, index_var.value());
GotoIf(TaggedEqual(element_k, search_element), &return_found);
Increment(&index_var);
@@ -1090,7 +1024,7 @@ void ArrayIncludesIndexofAssembler::GenerateSmiOrObject(
GotoIfNot(UintPtrLessThan(index_var.value(), array_length_untagged),
&return_not_found);
TNode<Object> element_k =
- UnsafeLoadFixedArrayElement(CAST(elements), index_var.value());
+ UnsafeLoadFixedArrayElement(elements, index_var.value());
GotoIf(IsUndefined(element_k), &return_found);
GotoIf(IsTheHole(element_k), &return_found);
@@ -1110,7 +1044,7 @@ void ArrayIncludesIndexofAssembler::GenerateSmiOrObject(
GotoIfNot(UintPtrLessThan(index_var.value(), array_length_untagged),
&return_not_found);
TNode<Object> element_k =
- UnsafeLoadFixedArrayElement(CAST(elements), index_var.value());
+ UnsafeLoadFixedArrayElement(elements, index_var.value());
GotoIfNot(TaggedIsSmi(element_k), &not_smi);
Branch(Float64Equal(search_num.value(), SmiToFloat64(CAST(element_k))),
&return_found, &continue_loop);
@@ -1133,7 +1067,7 @@ void ArrayIncludesIndexofAssembler::GenerateSmiOrObject(
GotoIfNot(UintPtrLessThan(index_var.value(), array_length_untagged),
&return_not_found);
TNode<Object> element_k =
- UnsafeLoadFixedArrayElement(CAST(elements), index_var.value());
+ UnsafeLoadFixedArrayElement(elements, index_var.value());
GotoIf(TaggedIsSmi(element_k), &continue_loop);
GotoIfNot(IsHeapNumber(CAST(element_k)), &continue_loop);
BranchIfFloat64IsNaN(LoadHeapNumberValue(CAST(element_k)), &return_found,
@@ -1157,7 +1091,7 @@ void ArrayIncludesIndexofAssembler::GenerateSmiOrObject(
GotoIfNot(UintPtrLessThan(index_var.value(), array_length_untagged),
&return_not_found);
TNode<Object> element_k =
- UnsafeLoadFixedArrayElement(CAST(elements), index_var.value());
+ UnsafeLoadFixedArrayElement(elements, index_var.value());
GotoIf(TaggedIsSmi(element_k), &continue_loop);
GotoIf(TaggedEqual(search_element_string, element_k), &return_found);
TNode<Uint16T> element_k_type = LoadInstanceType(CAST(element_k));
@@ -1186,7 +1120,7 @@ void ArrayIncludesIndexofAssembler::GenerateSmiOrObject(
&return_not_found);
TNode<Object> element_k =
- UnsafeLoadFixedArrayElement(CAST(elements), index_var.value());
+ UnsafeLoadFixedArrayElement(elements, index_var.value());
Label continue_loop(this);
GotoIf(TaggedIsSmi(element_k), &continue_loop);
GotoIfNot(IsBigInt(CAST(element_k)), &continue_loop);
@@ -1213,11 +1147,10 @@ void ArrayIncludesIndexofAssembler::GenerateSmiOrObject(
}
}
-void ArrayIncludesIndexofAssembler::GeneratePackedDoubles(SearchVariant variant,
- Node* elements,
- Node* search_element,
- Node* array_length,
- Node* from_index) {
+void ArrayIncludesIndexofAssembler::GeneratePackedDoubles(
+ SearchVariant variant, TNode<FixedDoubleArray> elements,
+ TNode<Object> search_element, TNode<Smi> array_length,
+ TNode<Smi> from_index) {
TVARIABLE(IntPtrT, index_var, SmiUntag(from_index));
TNode<IntPtrT> array_length_untagged = SmiUntag(array_length);
@@ -1228,13 +1161,13 @@ void ArrayIncludesIndexofAssembler::GeneratePackedDoubles(SearchVariant variant,
search_num = Float64Constant(0);
GotoIfNot(TaggedIsSmi(search_element), &search_notnan);
- search_num = SmiToFloat64(search_element);
+ search_num = SmiToFloat64(CAST(search_element));
Goto(&not_nan_loop);
BIND(&search_notnan);
- GotoIfNot(IsHeapNumber(search_element), &return_not_found);
+ GotoIfNot(IsHeapNumber(CAST(search_element)), &return_not_found);
- search_num = LoadHeapNumberValue(search_element);
+ search_num = LoadHeapNumberValue(CAST(search_element));
Label* nan_handling = variant == kIncludes ? &nan_loop : &return_not_found;
BranchIfFloat64IsNaN(search_num.value(), nan_handling, &not_nan_loop);
@@ -1282,11 +1215,10 @@ void ArrayIncludesIndexofAssembler::GeneratePackedDoubles(SearchVariant variant,
}
}
-void ArrayIncludesIndexofAssembler::GenerateHoleyDoubles(SearchVariant variant,
- Node* elements,
- Node* search_element,
- Node* array_length,
- Node* from_index) {
+void ArrayIncludesIndexofAssembler::GenerateHoleyDoubles(
+ SearchVariant variant, TNode<FixedDoubleArray> elements,
+ TNode<Object> search_element, TNode<Smi> array_length,
+ TNode<Smi> from_index) {
TVARIABLE(IntPtrT, index_var, SmiUntag(from_index));
TNode<IntPtrT> array_length_untagged = SmiUntag(array_length);
@@ -1297,16 +1229,16 @@ void ArrayIncludesIndexofAssembler::GenerateHoleyDoubles(SearchVariant variant,
search_num = Float64Constant(0);
GotoIfNot(TaggedIsSmi(search_element), &search_notnan);
- search_num = SmiToFloat64(search_element);
+ search_num = SmiToFloat64(CAST(search_element));
Goto(&not_nan_loop);
BIND(&search_notnan);
if (variant == kIncludes) {
GotoIf(IsUndefined(search_element), &hole_loop);
}
- GotoIfNot(IsHeapNumber(search_element), &return_not_found);
+ GotoIfNot(IsHeapNumber(CAST(search_element)), &return_not_found);
- search_num = LoadHeapNumberValue(search_element);
+ search_num = LoadHeapNumberValue(CAST(search_element));
Label* nan_handling = variant == kIncludes ? &nan_loop : &return_not_found;
BranchIfFloat64IsNaN(search_num.value(), nan_handling, &not_nan_loop);
@@ -1387,32 +1319,34 @@ TF_BUILTIN(ArrayIncludes, ArrayIncludesIndexofAssembler) {
TF_BUILTIN(ArrayIncludesSmiOrObject, ArrayIncludesIndexofAssembler) {
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- Node* elements = Parameter(Descriptor::kElements);
+ TNode<FixedArray> elements = CAST(Parameter(Descriptor::kElements));
TNode<Object> search_element = CAST(Parameter(Descriptor::kSearchElement));
- Node* array_length = Parameter(Descriptor::kLength);
- Node* from_index = Parameter(Descriptor::kFromIndex);
+ TNode<Smi> array_length = CAST(Parameter(Descriptor::kLength));
+ TNode<Smi> from_index = CAST(Parameter(Descriptor::kFromIndex));
GenerateSmiOrObject(kIncludes, context, elements, search_element,
array_length, from_index);
}
TF_BUILTIN(ArrayIncludesPackedDoubles, ArrayIncludesIndexofAssembler) {
- Node* elements = Parameter(Descriptor::kElements);
- Node* search_element = Parameter(Descriptor::kSearchElement);
- Node* array_length = Parameter(Descriptor::kLength);
- Node* from_index = Parameter(Descriptor::kFromIndex);
+ TNode<FixedArrayBase> elements = CAST(Parameter(Descriptor::kElements));
+ TNode<Object> search_element = CAST(Parameter(Descriptor::kSearchElement));
+ TNode<Smi> array_length = CAST(Parameter(Descriptor::kLength));
+ TNode<Smi> from_index = CAST(Parameter(Descriptor::kFromIndex));
- GeneratePackedDoubles(kIncludes, elements, search_element, array_length,
+ ReturnIfEmpty(array_length, FalseConstant());
+ GeneratePackedDoubles(kIncludes, CAST(elements), search_element, array_length,
from_index);
}
TF_BUILTIN(ArrayIncludesHoleyDoubles, ArrayIncludesIndexofAssembler) {
- Node* elements = Parameter(Descriptor::kElements);
- Node* search_element = Parameter(Descriptor::kSearchElement);
- Node* array_length = Parameter(Descriptor::kLength);
- Node* from_index = Parameter(Descriptor::kFromIndex);
+ TNode<FixedArrayBase> elements = CAST(Parameter(Descriptor::kElements));
+ TNode<Object> search_element = CAST(Parameter(Descriptor::kSearchElement));
+ TNode<Smi> array_length = CAST(Parameter(Descriptor::kLength));
+ TNode<Smi> from_index = CAST(Parameter(Descriptor::kFromIndex));
- GenerateHoleyDoubles(kIncludes, elements, search_element, array_length,
+ ReturnIfEmpty(array_length, FalseConstant());
+ GenerateHoleyDoubles(kIncludes, CAST(elements), search_element, array_length,
from_index);
}
@@ -1426,32 +1360,34 @@ TF_BUILTIN(ArrayIndexOf, ArrayIncludesIndexofAssembler) {
TF_BUILTIN(ArrayIndexOfSmiOrObject, ArrayIncludesIndexofAssembler) {
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- Node* elements = Parameter(Descriptor::kElements);
+ TNode<FixedArray> elements = CAST(Parameter(Descriptor::kElements));
TNode<Object> search_element = CAST(Parameter(Descriptor::kSearchElement));
- Node* array_length = Parameter(Descriptor::kLength);
- Node* from_index = Parameter(Descriptor::kFromIndex);
+ TNode<Smi> array_length = CAST(Parameter(Descriptor::kLength));
+ TNode<Smi> from_index = CAST(Parameter(Descriptor::kFromIndex));
GenerateSmiOrObject(kIndexOf, context, elements, search_element, array_length,
from_index);
}
TF_BUILTIN(ArrayIndexOfPackedDoubles, ArrayIncludesIndexofAssembler) {
- Node* elements = Parameter(Descriptor::kElements);
- Node* search_element = Parameter(Descriptor::kSearchElement);
- Node* array_length = Parameter(Descriptor::kLength);
- Node* from_index = Parameter(Descriptor::kFromIndex);
+ TNode<FixedArrayBase> elements = CAST(Parameter(Descriptor::kElements));
+ TNode<Object> search_element = CAST(Parameter(Descriptor::kSearchElement));
+ TNode<Smi> array_length = CAST(Parameter(Descriptor::kLength));
+ TNode<Smi> from_index = CAST(Parameter(Descriptor::kFromIndex));
- GeneratePackedDoubles(kIndexOf, elements, search_element, array_length,
+ ReturnIfEmpty(array_length, NumberConstant(-1));
+ GeneratePackedDoubles(kIndexOf, CAST(elements), search_element, array_length,
from_index);
}
TF_BUILTIN(ArrayIndexOfHoleyDoubles, ArrayIncludesIndexofAssembler) {
- Node* elements = Parameter(Descriptor::kElements);
- Node* search_element = Parameter(Descriptor::kSearchElement);
- Node* array_length = Parameter(Descriptor::kLength);
- Node* from_index = Parameter(Descriptor::kFromIndex);
+ TNode<FixedArrayBase> elements = CAST(Parameter(Descriptor::kElements));
+ TNode<Object> search_element = CAST(Parameter(Descriptor::kSearchElement));
+ TNode<Smi> array_length = CAST(Parameter(Descriptor::kLength));
+ TNode<Smi> from_index = CAST(Parameter(Descriptor::kFromIndex));
- GenerateHoleyDoubles(kIndexOf, elements, search_element, array_length,
+ ReturnIfEmpty(array_length, NumberConstant(-1));
+ GenerateHoleyDoubles(kIndexOf, CAST(elements), search_element, array_length,
from_index);
}
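The ReturnIfEmpty guards threaded through the four double-array hunks above answer the length-0 case up front (false for includes, -1 for indexOf) before the elements are CAST to FixedDoubleArray, presumably because an empty JSArray shares the canonical empty FixedArray regardless of elements kind, so that cast is only valid for a positive length. The same guard-clause shape in ordinary C++ (illustrative types only):

    #include <cstdio>
    #include <vector>

    // Guard-clause version of the length-0 fast path: answer the query up
    // front so the remainder may assume a non-empty double array.
    int IndexOfDouble(const std::vector<double>& elements, double needle) {
      if (elements.empty()) return -1;  // ReturnIfEmpty(length, NumberConstant(-1))
      for (size_t i = 0; i < elements.size(); ++i) {
        if (elements[i] == needle) return static_cast<int>(i);
      }
      return -1;
    }

    int main() {
      std::printf("%d\n", IndexOfDouble({}, 1.0));          // -1
      std::printf("%d\n", IndexOfDouble({0.5, 1.0}, 1.0));  // 1
    }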
@@ -1484,10 +1420,10 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) {
const char* method_name = "Array Iterator.prototype.next";
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- Node* iterator = Parameter(Descriptor::kReceiver);
+ TNode<Object> maybe_iterator = CAST(Parameter(Descriptor::kReceiver));
- VARIABLE(var_done, MachineRepresentation::kTagged, TrueConstant());
- VARIABLE(var_value, MachineRepresentation::kTagged, UndefinedConstant());
+ TVARIABLE(Oddball, var_done, TrueConstant());
+ TVARIABLE(Object, var_value, UndefinedConstant());
Label allocate_entry_if_needed(this);
Label allocate_iterator_result(this);
@@ -1497,9 +1433,11 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) {
// If O does not have all of the internal slots of an Array Iterator Instance
// (22.1.5.3), throw a TypeError exception
- ThrowIfNotInstanceType(context, iterator, JS_ARRAY_ITERATOR_TYPE,
+ ThrowIfNotInstanceType(context, maybe_iterator, JS_ARRAY_ITERATOR_TYPE,
method_name);
+ TNode<JSArrayIterator> iterator = CAST(maybe_iterator);
+
// Let a be O.[[IteratedObject]].
TNode<JSReceiver> array =
CAST(LoadObjectField(iterator, JSArrayIterator::kIteratedObjectOffset));
@@ -1531,8 +1469,8 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) {
iterator, JSArrayIterator::kNextIndexOffset,
ChangeUint32ToTagged(Unsigned(Int32Add(index32, Int32Constant(1)))));
- var_done.Bind(FalseConstant());
- var_value.Bind(index);
+ var_done = FalseConstant();
+ var_value = index;
GotoIf(Word32Equal(LoadAndUntagToWord32ObjectField(
iterator, JSArrayIterator::kKindOffset),
@@ -1543,9 +1481,9 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) {
TNode<Int32T> elements_kind = LoadMapElementsKind(array_map);
TNode<FixedArrayBase> elements = LoadElements(CAST(array));
GotoIfForceSlowPath(&if_generic);
- var_value.Bind(LoadFixedArrayBaseElementAsTagged(
+ var_value = LoadFixedArrayBaseElementAsTagged(
elements, Signed(ChangeUint32ToWord(index32)), elements_kind,
- &if_generic, &if_hole));
+ &if_generic, &if_hole);
Goto(&allocate_entry_if_needed);
BIND(&if_hole);
@@ -1553,7 +1491,7 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) {
GotoIf(IsNoElementsProtectorCellInvalid(), &if_generic);
GotoIfNot(IsPrototypeInitialArrayPrototype(context, array_map),
&if_generic);
- var_value.Bind(UndefinedConstant());
+ var_value = UndefinedConstant();
Goto(&allocate_entry_if_needed);
}
}
@@ -1572,8 +1510,8 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) {
StoreObjectField(iterator, JSArrayIterator::kNextIndexOffset,
NumberInc(index));
- var_done.Bind(FalseConstant());
- var_value.Bind(index);
+ var_done = FalseConstant();
+ var_value = index;
Branch(Word32Equal(LoadAndUntagToWord32ObjectField(
iterator, JSArrayIterator::kKindOffset),
@@ -1609,7 +1547,7 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) {
BIND(&if_generic);
{
- var_value.Bind(GetProperty(context, array, index));
+ var_value = GetProperty(context, array, index);
Goto(&allocate_entry_if_needed);
}
@@ -1632,8 +1570,8 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) {
StoreObjectFieldNoWriteBarrier(iterator, JSArrayIterator::kNextIndexOffset,
SmiInc(CAST(index)));
- var_done.Bind(FalseConstant());
- var_value.Bind(index);
+ var_done = FalseConstant();
+ var_value = index;
GotoIf(Word32Equal(LoadAndUntagToWord32ObjectField(
iterator, JSArrayIterator::kKindOffset),
@@ -1641,9 +1579,9 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) {
&allocate_iterator_result);
TNode<Int32T> elements_kind = LoadMapElementsKind(array_map);
- TNode<RawPtrT> data_ptr = LoadJSTypedArrayBackingStore(CAST(array));
- var_value.Bind(LoadFixedTypedArrayElementAsTagged(data_ptr, CAST(index),
- elements_kind));
+ TNode<RawPtrT> data_ptr = LoadJSTypedArrayDataPtr(CAST(array));
+ var_value = LoadFixedTypedArrayElementAsTagged(data_ptr, CAST(index),
+ elements_kind);
Goto(&allocate_entry_if_needed);
}
@@ -1654,7 +1592,7 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) {
Int32Constant(static_cast<int>(IterationKind::kValues))),
&allocate_iterator_result);
- Node* result =
+ TNode<JSObject> result =
AllocateJSIteratorResultForEntry(context, index, var_value.value());
Return(result);
}
@@ -1673,29 +1611,28 @@ class ArrayFlattenAssembler : public CodeStubAssembler {
: CodeStubAssembler(state) {}
// https://tc39.github.io/proposal-flatMap/#sec-FlattenIntoArray
- Node* FlattenIntoArray(Node* context, Node* target, Node* source,
- Node* source_length, Node* start, Node* depth,
- Node* mapper_function = nullptr,
- Node* this_arg = nullptr) {
- CSA_ASSERT(this, IsJSReceiver(target));
- CSA_ASSERT(this, IsJSReceiver(source));
+ TNode<Number> FlattenIntoArray(
+ TNode<Context> context, TNode<JSReceiver> target,
+ TNode<JSReceiver> source, TNode<Number> source_length,
+ TNode<Number> start, TNode<Number> depth,
+ base::Optional<TNode<HeapObject>> mapper_function = base::nullopt,
+ base::Optional<TNode<Object>> this_arg = base::nullopt) {
CSA_ASSERT(this, IsNumberPositive(source_length));
CSA_ASSERT(this, IsNumberPositive(start));
- CSA_ASSERT(this, IsNumber(depth));
// 1. Let targetIndex be start.
- VARIABLE(var_target_index, MachineRepresentation::kTagged, start);
+ TVARIABLE(Number, var_target_index, start);
// 2. Let sourceIndex be 0.
- VARIABLE(var_source_index, MachineRepresentation::kTagged, SmiConstant(0));
+ TVARIABLE(Number, var_source_index, SmiConstant(0));
// 3. Repeat...
Label loop(this, {&var_target_index, &var_source_index}), done_loop(this);
Goto(&loop);
BIND(&loop);
{
- Node* const source_index = var_source_index.value();
- Node* const target_index = var_target_index.value();
+ TNode<Number> source_index = var_source_index.value();
+ TNode<Number> target_index = var_target_index.value();
// ...while sourceIndex < sourceLen
GotoIfNumberGreaterThanOrEqual(source_index, source_length, &done_loop);
@@ -1716,16 +1653,16 @@ class ArrayFlattenAssembler : public CodeStubAssembler {
GetProperty(context, source, source_index);
// ii. If mapperFunction is present, then
- if (mapper_function != nullptr) {
- CSA_ASSERT(this, Word32Or(IsUndefined(mapper_function),
- IsCallable(mapper_function)));
- DCHECK_NOT_NULL(this_arg);
+ if (mapper_function) {
+ CSA_ASSERT(this, Word32Or(IsUndefined(mapper_function.value()),
+ IsCallable(mapper_function.value())));
+ DCHECK(this_arg.has_value());
// 1. Set element to ? Call(mapperFunction, thisArg , « element,
// sourceIndex, source »).
- element_maybe_smi = CAST(
- CallJS(CodeFactory::Call(isolate()), context, mapper_function,
- this_arg, element_maybe_smi, source_index, source));
+ element_maybe_smi = CallJS(CodeFactory::Call(isolate()), context,
+ mapper_function.value(), this_arg.value(),
+ element_maybe_smi, source_index, source);
}
// iii. Let shouldFlatten be false.
@@ -1752,7 +1689,7 @@ class ArrayFlattenAssembler : public CodeStubAssembler {
// 2. Set targetIndex to ? FlattenIntoArray(target, element,
// elementLen, targetIndex,
// depth - 1).
- var_target_index.Bind(
+ var_target_index = CAST(
CallBuiltin(Builtins::kFlattenIntoArray, context, target, element,
element_length, target_index, NumberDec(depth)));
Goto(&next);
@@ -1769,7 +1706,7 @@ class ArrayFlattenAssembler : public CodeStubAssembler {
// 2. Set targetIndex to ? FlattenIntoArray(target, element,
// elementLen, targetIndex,
// depth - 1).
- var_target_index.Bind(
+ var_target_index = CAST(
CallBuiltin(Builtins::kFlattenIntoArray, context, target, element,
element_length, target_index, NumberDec(depth)));
Goto(&next);
@@ -1789,7 +1726,7 @@ class ArrayFlattenAssembler : public CodeStubAssembler {
target_index, element);
// 3. Increase targetIndex by 1.
- var_target_index.Bind(NumberInc(target_index));
+ var_target_index = NumberInc(target_index);
Goto(&next);
BIND(&throw_error);
@@ -1800,7 +1737,7 @@ class ArrayFlattenAssembler : public CodeStubAssembler {
BIND(&next);
// d. Increase sourceIndex by 1.
- var_source_index.Bind(NumberInc(source_index));
+ var_source_index = NumberInc(source_index);
Goto(&loop);
}
@@ -1811,16 +1748,16 @@ class ArrayFlattenAssembler : public CodeStubAssembler {
// https://tc39.github.io/proposal-flatMap/#sec-FlattenIntoArray
TF_BUILTIN(FlattenIntoArray, ArrayFlattenAssembler) {
- Node* const context = Parameter(Descriptor::kContext);
- Node* const target = Parameter(Descriptor::kTarget);
- Node* const source = Parameter(Descriptor::kSource);
- Node* const source_length = Parameter(Descriptor::kSourceLength);
- Node* const start = Parameter(Descriptor::kStart);
- Node* const depth = Parameter(Descriptor::kDepth);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<JSReceiver> target = CAST(Parameter(Descriptor::kTarget));
+ TNode<JSReceiver> source = CAST(Parameter(Descriptor::kSource));
+ TNode<Number> source_length = CAST(Parameter(Descriptor::kSourceLength));
+ TNode<Number> start = CAST(Parameter(Descriptor::kStart));
+ TNode<Number> depth = CAST(Parameter(Descriptor::kDepth));
// FlattenIntoArray might get called recursively; check the stack for
// overflow manually, as it has stub linkage.
- PerformStackCheck(CAST(context));
+ PerformStackCheck(context);
Return(
FlattenIntoArray(context, target, source, source_length, start, depth));
@@ -1828,14 +1765,15 @@ TF_BUILTIN(FlattenIntoArray, ArrayFlattenAssembler) {
// https://tc39.github.io/proposal-flatMap/#sec-FlattenIntoArray
TF_BUILTIN(FlatMapIntoArray, ArrayFlattenAssembler) {
- Node* const context = Parameter(Descriptor::kContext);
- Node* const target = Parameter(Descriptor::kTarget);
- Node* const source = Parameter(Descriptor::kSource);
- Node* const source_length = Parameter(Descriptor::kSourceLength);
- Node* const start = Parameter(Descriptor::kStart);
- Node* const depth = Parameter(Descriptor::kDepth);
- Node* const mapper_function = Parameter(Descriptor::kMapperFunction);
- Node* const this_arg = Parameter(Descriptor::kThisArg);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<JSReceiver> target = CAST(Parameter(Descriptor::kTarget));
+ TNode<JSReceiver> source = CAST(Parameter(Descriptor::kSource));
+ TNode<Number> source_length = CAST(Parameter(Descriptor::kSourceLength));
+ TNode<Number> start = CAST(Parameter(Descriptor::kStart));
+ TNode<Number> depth = CAST(Parameter(Descriptor::kDepth));
+ TNode<HeapObject> mapper_function =
+ CAST(Parameter(Descriptor::kMapperFunction));
+ TNode<Object> this_arg = CAST(Parameter(Descriptor::kThisArg));
Return(FlattenIntoArray(context, target, source, source_length, start, depth,
mapper_function, this_arg));
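
FlattenIntoArray previously signalled "no mapper" with a Node* defaulting to nullptr; the typed signature uses base::Optional so absence is explicit in the type, and FlatMapIntoArray supplies the mapper while FlattenIntoArray omits it. A sketch of the same pattern with std::optional standing in for base::Optional and plain ints for tagged values:

    #include <functional>
    #include <iostream>
    #include <optional>
    #include <vector>

    // flatten_into: copies `source` into `target`, applying `mapper` first
    // when one is supplied. Plain ints stand in for tagged JS values.
    void flatten_into(
        std::vector<int>& target, const std::vector<int>& source,
        std::optional<std::function<int(int, size_t)>> mapper = std::nullopt) {
      for (size_t i = 0; i < source.size(); ++i) {
        int element = source[i];
        if (mapper) {                       // replaces `if (mapper != nullptr)`
          element = (*mapper)(element, i);  // only dereferenced when present
        }
        target.push_back(element);
      }
    }

    int main() {
      std::vector<int> target;
      flatten_into(target, {1, 2, 3});  // no mapper: plain flatten
      flatten_into(target, {1, 2, 3},
                   [](int v, size_t) { return v * 10; });  // mapped flatten
      for (int v : target) std::cout << v << ' ';
      std::cout << '\n';  // 1 2 3 10 20 30
    }
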
@@ -2127,8 +2065,9 @@ TF_BUILTIN(ArrayConstructorImpl, ArrayBuiltinsAssembler) {
}
void ArrayBuiltinsAssembler::GenerateConstructor(
- Node* context, Node* array_function, Node* array_map, Node* array_size,
- Node* allocation_site, ElementsKind elements_kind,
+ TNode<Context> context, TNode<HeapObject> array_function,
+ TNode<Map> array_map, TNode<Object> array_size,
+ TNode<HeapObject> allocation_site, ElementsKind elements_kind,
AllocationSiteMode mode) {
Label ok(this);
Label smi_size(this);
@@ -2138,33 +2077,37 @@ void ArrayBuiltinsAssembler::GenerateConstructor(
Branch(TaggedIsSmi(array_size), &smi_size, &call_runtime);
BIND(&smi_size);
-
- if (IsFastPackedElementsKind(elements_kind)) {
- Label abort(this, Label::kDeferred);
- Branch(SmiEqual(CAST(array_size), SmiConstant(0)), &small_smi_size, &abort);
-
- BIND(&abort);
- TNode<Smi> reason =
- SmiConstant(AbortReason::kAllocatingNonEmptyPackedArray);
- TailCallRuntime(Runtime::kAbort, context, reason);
- } else {
- int element_size =
- IsDoubleElementsKind(elements_kind) ? kDoubleSize : kTaggedSize;
- int max_fast_elements =
- (kMaxRegularHeapObjectSize - FixedArray::kHeaderSize - JSArray::kSize -
- AllocationMemento::kSize) /
- element_size;
- Branch(SmiAboveOrEqual(CAST(array_size), SmiConstant(max_fast_elements)),
- &call_runtime, &small_smi_size);
- }
-
- BIND(&small_smi_size);
{
- TNode<JSArray> array = AllocateJSArray(
- elements_kind, CAST(array_map), array_size, CAST(array_size),
- mode == DONT_TRACK_ALLOCATION_SITE ? nullptr : allocation_site,
- CodeStubAssembler::SMI_PARAMETERS);
- Return(array);
+ TNode<Smi> array_size_smi = CAST(array_size);
+
+ if (IsFastPackedElementsKind(elements_kind)) {
+ Label abort(this, Label::kDeferred);
+ Branch(SmiEqual(array_size_smi, SmiConstant(0)), &small_smi_size, &abort);
+
+ BIND(&abort);
+ TNode<Smi> reason =
+ SmiConstant(AbortReason::kAllocatingNonEmptyPackedArray);
+ TailCallRuntime(Runtime::kAbort, context, reason);
+ } else {
+ int element_size =
+ IsDoubleElementsKind(elements_kind) ? kDoubleSize : kTaggedSize;
+ int max_fast_elements =
+ (kMaxRegularHeapObjectSize - FixedArray::kHeaderSize -
+ JSArray::kSize - AllocationMemento::kSize) /
+ element_size;
+ Branch(SmiAboveOrEqual(array_size_smi, SmiConstant(max_fast_elements)),
+ &call_runtime, &small_smi_size);
+ }
+
+ BIND(&small_smi_size);
+ {
+ TNode<JSArray> array = AllocateJSArray(
+ elements_kind, array_map, array_size_smi, array_size_smi,
+ mode == DONT_TRACK_ALLOCATION_SITE ? TNode<AllocationSite>()
+ : CAST(allocation_site),
+ CodeStubAssembler::SMI_PARAMETERS);
+ Return(array);
+ }
}
BIND(&call_runtime);
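
The max_fast_elements bound above caps inline allocation at whatever fits in one regular-sized heap object after subtracting the JSArray header, the FixedArray header, and the AllocationMemento. A worked version of that division, with hypothetical byte sizes (the real constants differ by build and V8 version):

    #include <iostream>

    int main() {
      // Placeholder sizes in bytes -- illustrative only, not V8's constants.
      const int kMaxRegularHeapObjectSize = 128 * 1024;  // hypothetical
      const int kFixedArrayHeaderSize = 16;              // hypothetical
      const int kJSArraySize = 32;                       // hypothetical
      const int kAllocationMementoSize = 16;             // hypothetical

      for (int element_size : {8 /* tagged or double */, 4 /* compressed */}) {
        int max_fast_elements =
            (kMaxRegularHeapObjectSize - kFixedArrayHeaderSize - kJSArraySize -
             kAllocationMementoSize) /
            element_size;
        std::cout << "element_size=" << element_size
                  << " -> max_fast_elements=" << max_fast_elements << "\n";
      }
      // Requests at or above the bound branch to &call_runtime instead.
    }
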
@@ -2181,8 +2124,9 @@ void ArrayBuiltinsAssembler::GenerateArrayNoArgumentConstructor(
Parameter(Descriptor::kFunction), JSFunction::kContextOffset));
bool track_allocation_site =
AllocationSite::ShouldTrack(kind) && mode != DISABLE_ALLOCATION_SITES;
- Node* allocation_site =
- track_allocation_site ? Parameter(Descriptor::kAllocationSite) : nullptr;
+ TNode<AllocationSite> allocation_site =
+ track_allocation_site ? CAST(Parameter(Descriptor::kAllocationSite))
+ : TNode<AllocationSite>();
TNode<Map> array_map = LoadJSArrayElementsMap(kind, native_context);
TNode<JSArray> array = AllocateJSArray(
kind, array_map, IntPtrConstant(JSArray::kPreallocatedArrayElements),
@@ -2194,7 +2138,7 @@ void ArrayBuiltinsAssembler::GenerateArraySingleArgumentConstructor(
ElementsKind kind, AllocationSiteOverrideMode mode) {
using Descriptor = ArraySingleArgumentConstructorDescriptor;
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- Node* function = Parameter(Descriptor::kFunction);
+ TNode<HeapObject> function = CAST(Parameter(Descriptor::kFunction));
TNode<NativeContext> native_context =
CAST(LoadObjectField(function, JSFunction::kContextOffset));
TNode<Map> array_map = LoadJSArrayElementsMap(kind, native_context);
@@ -2206,8 +2150,11 @@ void ArrayBuiltinsAssembler::GenerateArraySingleArgumentConstructor(
: DONT_TRACK_ALLOCATION_SITE;
}
- Node* array_size = Parameter(Descriptor::kArraySizeSmiParameter);
- Node* allocation_site = Parameter(Descriptor::kAllocationSite);
+ TNode<Object> array_size =
+ CAST(Parameter(Descriptor::kArraySizeSmiParameter));
+  // allocation_site can be Undefined or an AllocationSite.
+ TNode<HeapObject> allocation_site =
+ CAST(Parameter(Descriptor::kAllocationSite));
GenerateConstructor(context, function, array_map, array_size, allocation_site,
kind, allocation_site_mode);
@@ -2219,7 +2166,7 @@ void ArrayBuiltinsAssembler::GenerateArrayNArgumentsConstructor(
// Replace incoming JS receiver argument with the target.
// TODO(ishell): Avoid replacing the target on the stack and just add it
// as an additional parameter for Runtime::kNewArray.
- CodeStubArguments args(this, ChangeInt32ToIntPtr(argc));
+ CodeStubArguments args(this, argc);
args.SetReceiver(target);
// Adjust arguments count for the runtime call: +1 for implicit receiver
diff --git a/deps/v8/src/builtins/builtins-array-gen.h b/deps/v8/src/builtins/builtins-array-gen.h
index 6b8c704038..a19ba1a5da 100644
--- a/deps/v8/src/builtins/builtins-array-gen.h
+++ b/deps/v8/src/builtins/builtins-array-gen.h
@@ -17,51 +17,13 @@ class ArrayBuiltinsAssembler : public CodeStubAssembler {
using BuiltinResultGenerator =
std::function<void(ArrayBuiltinsAssembler* masm)>;
- using CallResultProcessor = std::function<Node*(ArrayBuiltinsAssembler* masm,
- Node* k_value, Node* k)>;
-
- using PostLoopAction = std::function<void(ArrayBuiltinsAssembler* masm)>;
-
- void FindResultGenerator();
-
- Node* FindProcessor(Node* k_value, Node* k);
-
- void FindIndexResultGenerator();
-
- Node* FindIndexProcessor(Node* k_value, Node* k);
-
- void ForEachResultGenerator();
-
- Node* ForEachProcessor(Node* k_value, Node* k);
-
- void SomeResultGenerator();
-
- Node* SomeProcessor(Node* k_value, Node* k);
-
- void EveryResultGenerator();
-
- Node* EveryProcessor(Node* k_value, Node* k);
-
- void ReduceResultGenerator();
-
- Node* ReduceProcessor(Node* k_value, Node* k);
-
- void ReducePostLoopAction();
+ using CallResultProcessor = std::function<TNode<Object>(
+ ArrayBuiltinsAssembler* masm, TNode<Object> k_value, TNode<Object> k)>;
void TypedArrayMapResultGenerator();
- Node* SpecCompliantMapProcessor(Node* k_value, Node* k);
-
- Node* FastMapProcessor(Node* k_value, Node* k);
-
// See tc39.github.io/ecma262/#sec-%typedarray%.prototype.map.
- Node* TypedArrayMapProcessor(Node* k_value, Node* k);
-
- void NullPostLoopAction();
-
- // Uses memset to effectively initialize the given FixedArray with Smi zeroes.
- void FillFixedArrayWithSmiZero(TNode<FixedArray> array,
- TNode<Smi> smi_length);
+ TNode<Object> TypedArrayMapProcessor(TNode<Object> k_value, TNode<Object> k);
TNode<String> CallJSArrayArrayJoinConcatToSequentialString(
TNode<FixedArray> fixed_array, TNode<IntPtrT> length, TNode<String> sep,
@@ -86,20 +48,22 @@ class ArrayBuiltinsAssembler : public CodeStubAssembler {
TNode<IntPtrT> argc() { return argc_; }
TNode<JSReceiver> o() { return o_; }
TNode<Number> len() { return len_; }
- Node* callbackfn() { return callbackfn_; }
- Node* this_arg() { return this_arg_; }
- TNode<Number> k() { return CAST(k_.value()); }
- Node* a() { return a_.value(); }
+ TNode<Object> callbackfn() { return callbackfn_; }
+ TNode<Object> this_arg() { return this_arg_; }
+ TNode<Number> k() { return k_.value(); }
+ TNode<Object> a() { return a_.value(); }
- void ReturnFromBuiltin(Node* value);
+ void ReturnFromBuiltin(TNode<Object> value);
void InitIteratingArrayBuiltinBody(TNode<Context> context,
- TNode<Object> receiver, Node* callbackfn,
- Node* this_arg, TNode<IntPtrT> argc);
+ TNode<Object> receiver,
+ TNode<Object> callbackfn,
+ TNode<Object> this_arg,
+ TNode<IntPtrT> argc);
void GenerateIteratingTypedArrayBuiltinBody(
const char* name, const BuiltinResultGenerator& generator,
- const CallResultProcessor& processor, const PostLoopAction& action,
+ const CallResultProcessor& processor,
ForEachDirection direction = ForEachDirection::kForward);
void TailCallArrayConstructorStub(
@@ -107,23 +71,25 @@ class ArrayBuiltinsAssembler : public CodeStubAssembler {
TNode<JSFunction> target, TNode<HeapObject> allocation_site_or_undefined,
TNode<Int32T> argc);
- void GenerateDispatchToArrayStub(
- TNode<Context> context, TNode<JSFunction> target, TNode<Int32T> argc,
- AllocationSiteOverrideMode mode,
- TNode<AllocationSite> allocation_site = TNode<AllocationSite>());
+ void GenerateDispatchToArrayStub(TNode<Context> context,
+ TNode<JSFunction> target, TNode<Int32T> argc,
+ AllocationSiteOverrideMode mode,
+ TNode<AllocationSite> allocation_site = {});
void CreateArrayDispatchNoArgument(
TNode<Context> context, TNode<JSFunction> target, TNode<Int32T> argc,
AllocationSiteOverrideMode mode,
- TNode<AllocationSite> allocation_site = TNode<AllocationSite>());
+ TNode<AllocationSite> allocation_site = {});
void CreateArrayDispatchSingleArgument(
TNode<Context> context, TNode<JSFunction> target, TNode<Int32T> argc,
AllocationSiteOverrideMode mode,
- TNode<AllocationSite> allocation_site = TNode<AllocationSite>());
+ TNode<AllocationSite> allocation_site = {});
- void GenerateConstructor(Node* context, Node* array_function, Node* array_map,
- Node* array_size, Node* allocation_site,
+ void GenerateConstructor(TNode<Context> context,
+ TNode<HeapObject> array_function,
+ TNode<Map> array_map, TNode<Object> array_size,
+ TNode<HeapObject> allocation_site,
ElementsKind elements_kind, AllocationSiteMode mode);
void GenerateArrayNoArgumentConstructor(ElementsKind kind,
AllocationSiteOverrideMode mode);
@@ -135,33 +101,22 @@ class ArrayBuiltinsAssembler : public CodeStubAssembler {
TNode<HeapObject> maybe_allocation_site);
private:
- static ElementsKind ElementsKindForInstanceType(InstanceType type);
-
- void VisitAllTypedArrayElements(Node* array_buffer,
+ void VisitAllTypedArrayElements(TNode<JSArrayBuffer> array_buffer,
const CallResultProcessor& processor,
Label* detached, ForEachDirection direction,
TNode<JSTypedArray> typed_array);
- // Perform ArraySpeciesCreate (ES6 #sec-arrayspeciescreate).
- // This version is specialized to create a zero length array
- // of the elements kind of the input array.
- void GenerateArraySpeciesCreate();
-
- // Perform ArraySpeciesCreate (ES6 #sec-arrayspeciescreate).
- void GenerateArraySpeciesCreate(TNode<Number> len);
-
- Node* callbackfn_ = nullptr;
+ TNode<Object> callbackfn_;
TNode<JSReceiver> o_;
- Node* this_arg_ = nullptr;
+ TNode<Object> this_arg_;
TNode<Number> len_;
TNode<Context> context_;
TNode<Object> receiver_;
TNode<IntPtrT> argc_;
- Node* fast_typed_array_target_ = nullptr;
+ TNode<BoolT> fast_typed_array_target_;
const char* name_ = nullptr;
- Variable k_;
- Variable a_;
- Variable to_;
+ TVariable<Number> k_;
+ TVariable<Object> a_;
Label fully_spec_compliant_;
ElementsKind source_elements_kind_ = ElementsKind::NO_ELEMENTS;
};
diff --git a/deps/v8/src/builtins/builtins-array.cc b/deps/v8/src/builtins/builtins-array.cc
index 6c3e724649..8002c06996 100644
--- a/deps/v8/src/builtins/builtins-array.cc
+++ b/deps/v8/src/builtins/builtins-array.cc
@@ -1189,7 +1189,8 @@ bool IterateElements(Isolate* isolate, Handle<JSReceiver> receiver,
static Maybe<bool> IsConcatSpreadable(Isolate* isolate, Handle<Object> obj) {
HandleScope handle_scope(isolate);
if (!obj->IsJSReceiver()) return Just(false);
- if (!isolate->IsIsConcatSpreadableLookupChainIntact(JSReceiver::cast(*obj))) {
+ if (!Protectors::IsIsConcatSpreadableLookupChainIntact(isolate) ||
+ JSReceiver::cast(*obj).HasProxyInPrototype(isolate)) {
// Slow path if @@isConcatSpreadable has been used.
Handle<Symbol> key(isolate->factory()->is_concat_spreadable_symbol());
Handle<Object> value;
@@ -1258,7 +1259,7 @@ Object Slow_ArrayConcat(BuiltinArguments* args, Handle<Object> species,
// dictionary.
bool fast_case = is_array_species &&
(estimate_nof * 2) >= estimate_result_length &&
- isolate->IsIsConcatSpreadableLookupChainIntact();
+ Protectors::IsIsConcatSpreadableLookupChainIntact(isolate);
if (fast_case && kind == PACKED_DOUBLE_ELEMENTS) {
Handle<FixedArrayBase> storage =
@@ -1406,7 +1407,7 @@ bool IsSimpleArray(Isolate* isolate, Handle<JSArray> obj) {
MaybeHandle<JSArray> Fast_ArrayConcat(Isolate* isolate,
BuiltinArguments* args) {
- if (!isolate->IsIsConcatSpreadableLookupChainIntact()) {
+ if (!Protectors::IsIsConcatSpreadableLookupChainIntact(isolate)) {
return MaybeHandle<JSArray>();
}
// We shouldn't overflow when adding another len.
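
These call sites swap isolate->IsIsConcatSpreadableLookupChainIntact() for the centralized Protectors:: check. A protector is a one-way flag: fast paths may rely on it while intact, and the first operation that could break the invariant (here, any use of @@isConcatSpreadable) invalidates it permanently. A simplified sketch of the pattern, not V8's implementation:

    #include <iostream>

    // A protector starts intact; anything that might break the guarded
    // invariant flips it, and it never becomes intact again.
    class Protector {
     public:
      bool intact() const { return intact_; }
      void Invalidate() { intact_ = false; }
     private:
      bool intact_ = true;
    };

    Protector is_concat_spreadable_protector;

    int ConcatLength(int a_len, int b_len) {
      if (is_concat_spreadable_protector.intact()) {
        // Fast path: no object can observe the concat, so just add lengths.
        return a_len + b_len;
      }
      // Slow path would re-check @@isConcatSpreadable per element here.
      std::cout << "slow path\n";
      return a_len + b_len;
    }

    int main() {
      std::cout << ConcatLength(2, 3) << "\n";      // fast path
      is_concat_spreadable_protector.Invalidate();  // symbol was used somewhere
      std::cout << ConcatLength(2, 3) << "\n";      // slow path from now on
    }
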
diff --git a/deps/v8/src/builtins/builtins-arraybuffer.cc b/deps/v8/src/builtins/builtins-arraybuffer.cc
index 9ecb1815bc..b062b9ca3c 100644
--- a/deps/v8/src/builtins/builtins-arraybuffer.cc
+++ b/deps/v8/src/builtins/builtins-arraybuffer.cc
@@ -30,29 +30,38 @@ namespace {
Object ConstructBuffer(Isolate* isolate, Handle<JSFunction> target,
Handle<JSReceiver> new_target, Handle<Object> length,
- bool initialize) {
+ InitializedFlag initialized) {
+ SharedFlag shared = (*target != target->native_context().array_buffer_fun())
+ ? SharedFlag::kShared
+ : SharedFlag::kNotShared;
Handle<JSObject> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, result,
JSObject::New(target, new_target, Handle<AllocationSite>::null()));
+ auto array_buffer = Handle<JSArrayBuffer>::cast(result);
+ // Ensure that all fields are initialized because BackingStore::Allocate is
+ // allowed to GC. Note that we cannot move the allocation of the ArrayBuffer
+ // after BackingStore::Allocate because of the spec.
+ array_buffer->Setup(shared, nullptr);
+
size_t byte_length;
if (!TryNumberToSize(*length, &byte_length) ||
byte_length > JSArrayBuffer::kMaxByteLength) {
- JSArrayBuffer::SetupAsEmpty(Handle<JSArrayBuffer>::cast(result), isolate);
+ // ToNumber failed.
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewRangeError(MessageTemplate::kInvalidArrayBufferLength));
}
- SharedFlag shared_flag =
- (*target == target->native_context().array_buffer_fun())
- ? SharedFlag::kNotShared
- : SharedFlag::kShared;
- if (!JSArrayBuffer::SetupAllocatingData(Handle<JSArrayBuffer>::cast(result),
- isolate, byte_length, initialize,
- shared_flag)) {
+
+ auto backing_store =
+ BackingStore::Allocate(isolate, byte_length, shared, initialized);
+ if (!backing_store) {
+ // Allocation of backing store failed.
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewRangeError(MessageTemplate::kArrayBufferAllocationFailed));
}
- return *result;
+
+ array_buffer->Attach(std::move(backing_store));
+ return *array_buffer;
}
} // namespace
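
The rewritten ConstructBuffer has to respect two ordering constraints at once: the spec fixes when the JSArrayBuffer object is created, and BackingStore::Allocate may trigger GC, so every field must already hold a safe value before that call. A sketch of that create/setup/allocate/attach sequence with stand-in types, illustrative only:

    #include <iostream>
    #include <memory>

    struct BackingStore {
      size_t byte_length = 0;
    };

    struct ArrayBufferLike {
      // Every field gets a safe default before any GC-capable call runs, so
      // a collector walking the heap mid-construction sees a valid buffer.
      std::unique_ptr<BackingStore> store;  // nullptr == "empty"
      void Setup() { store = nullptr; }
      void Attach(std::unique_ptr<BackingStore> s) { store = std::move(s); }
    };

    std::unique_ptr<BackingStore> Allocate(size_t byte_length) {
      // In V8 this may GC; in this sketch it may simply fail.
      if (byte_length > (1u << 30)) return nullptr;
      auto s = std::make_unique<BackingStore>();
      s->byte_length = byte_length;
      return s;
    }

    int main() {
      ArrayBufferLike buffer;  // 1. create the object first (spec order)
      buffer.Setup();          // 2. zero all fields before the GC-capable call
      auto store = Allocate(1024);
      if (!store) { std::cerr << "RangeError\n"; return 1; }
      buffer.Attach(std::move(store));  // 3. attach only on success
      std::cout << buffer.store->byte_length << "\n";
    }
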
@@ -80,7 +89,8 @@ BUILTIN(ArrayBufferConstructor) {
isolate, NewRangeError(MessageTemplate::kInvalidArrayBufferLength));
}
- return ConstructBuffer(isolate, target, new_target, number_length, true);
+ return ConstructBuffer(isolate, target, new_target, number_length,
+ InitializedFlag::kZeroInitialized);
}
// This is a helper to construct an ArrayBuffer with uninitialized memory.
@@ -91,7 +101,8 @@ BUILTIN(ArrayBufferConstructor_DoNotInitialize) {
Handle<JSFunction> target(isolate->native_context()->array_buffer_fun(),
isolate);
Handle<Object> length = args.atOrUndefined(isolate, 1);
- return ConstructBuffer(isolate, target, target, length, false);
+ return ConstructBuffer(isolate, target, target, length,
+ InitializedFlag::kUninitialized);
}
// ES6 section 24.1.4.1 get ArrayBuffer.prototype.byteLength
diff --git a/deps/v8/src/builtins/builtins-async-function-gen.cc b/deps/v8/src/builtins/builtins-async-function-gen.cc
index 6ac37da3f6..cfd355724e 100644
--- a/deps/v8/src/builtins/builtins-async-function-gen.cc
+++ b/deps/v8/src/builtins/builtins-async-function-gen.cc
@@ -263,7 +263,7 @@ void AsyncFunctionBuiltinsAssembler::AsyncFunctionAwait(
TNode<Object> value = CAST(Parameter(Descriptor::kValue));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> outer_promise = LoadObjectField(
+ TNode<JSPromise> outer_promise = LoadObjectField<JSPromise>(
async_function_object, JSAsyncFunctionObject::kPromiseOffset);
Label after_debug_hook(this), call_debug_hook(this, Label::kDeferred);
diff --git a/deps/v8/src/builtins/builtins-async-gen.cc b/deps/v8/src/builtins/builtins-async-gen.cc
index 70d4eac9c8..edcb027226 100644
--- a/deps/v8/src/builtins/builtins-async-gen.cc
+++ b/deps/v8/src/builtins/builtins-async-gen.cc
@@ -6,6 +6,7 @@
#include "src/builtins/builtins-utils-gen.h"
#include "src/heap/factory-inl.h"
+#include "src/objects/js-generator.h"
#include "src/objects/js-promise.h"
#include "src/objects/shared-function-info.h"
@@ -23,11 +24,12 @@ class ValueUnwrapContext {
} // namespace
-Node* AsyncBuiltinsAssembler::AwaitOld(Node* context, Node* generator,
- Node* value, Node* outer_promise,
- Node* on_resolve_context_index,
- Node* on_reject_context_index,
- Node* is_predicted_as_caught) {
+TNode<Object> AsyncBuiltinsAssembler::AwaitOld(
+ TNode<Context> context, TNode<JSGeneratorObject> generator,
+ TNode<Object> value, TNode<JSPromise> outer_promise,
+ TNode<IntPtrT> on_resolve_context_index,
+ TNode<IntPtrT> on_reject_context_index,
+ TNode<Oddball> is_predicted_as_caught) {
TNode<NativeContext> const native_context = LoadNativeContext(context);
static const int kWrappedPromiseOffset =
@@ -91,8 +93,7 @@ Node* AsyncBuiltinsAssembler::AwaitOld(Node* context, Node* generator,
InitializeNativeClosure(closure_context, native_context, on_reject,
on_reject_context_index);
- VARIABLE(var_throwaway, MachineRepresentation::kTaggedPointer,
- UndefinedConstant());
+ TVARIABLE(HeapObject, var_throwaway, UndefinedConstant());
// Deal with PromiseHooks and debug support in the runtime. This
// also allocates the throwaway promise, which is only needed in
@@ -101,9 +102,9 @@ Node* AsyncBuiltinsAssembler::AwaitOld(Node* context, Node* generator,
Branch(IsPromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate(),
&if_debugging, &do_resolve_promise);
BIND(&if_debugging);
- var_throwaway.Bind(CallRuntime(Runtime::kAwaitPromisesInitOld, context, value,
- wrapped_value, outer_promise, on_reject,
- is_predicted_as_caught));
+ var_throwaway = CAST(CallRuntime(Runtime::kAwaitPromisesInitOld, context,
+ value, wrapped_value, outer_promise,
+ on_reject, is_predicted_as_caught));
Goto(&do_resolve_promise);
BIND(&do_resolve_promise);
@@ -114,13 +115,13 @@ Node* AsyncBuiltinsAssembler::AwaitOld(Node* context, Node* generator,
on_resolve, on_reject, var_throwaway.value());
}
-Node* AsyncBuiltinsAssembler::AwaitOptimized(Node* context, Node* generator,
- Node* promise, Node* outer_promise,
- Node* on_resolve_context_index,
- Node* on_reject_context_index,
- Node* is_predicted_as_caught) {
+TNode<Object> AsyncBuiltinsAssembler::AwaitOptimized(
+ TNode<Context> context, TNode<JSGeneratorObject> generator,
+ TNode<JSPromise> promise, TNode<JSPromise> outer_promise,
+ TNode<IntPtrT> on_resolve_context_index,
+ TNode<IntPtrT> on_reject_context_index,
+ TNode<Oddball> is_predicted_as_caught) {
TNode<NativeContext> const native_context = LoadNativeContext(context);
- CSA_ASSERT(this, IsJSPromise(promise));
static const int kResolveClosureOffset =
FixedArray::SizeFor(Context::MIN_CONTEXT_SLOTS);
@@ -130,8 +131,8 @@ Node* AsyncBuiltinsAssembler::AwaitOptimized(Node* context, Node* generator,
kRejectClosureOffset + JSFunction::kSizeWithoutPrototype;
// 2. Let promise be ? PromiseResolve(« promise »).
- // Node* const promise =
- // CallBuiltin(Builtins::kPromiseResolve, context, promise_fun, value);
+ // We skip this step, because promise is already guaranteed to be a
+  // JSPromise at this point.
TNode<HeapObject> base = AllocateInNewSpace(kTotalSize);
TNode<Context> closure_context = UncheckedCast<Context>(base);
@@ -162,8 +163,7 @@ Node* AsyncBuiltinsAssembler::AwaitOptimized(Node* context, Node* generator,
InitializeNativeClosure(closure_context, native_context, on_reject,
on_reject_context_index);
- VARIABLE(var_throwaway, MachineRepresentation::kTaggedPointer,
- UndefinedConstant());
+ TVARIABLE(HeapObject, var_throwaway, UndefinedConstant());
// Deal with PromiseHooks and debug support in the runtime. This
// also allocates the throwaway promise, which is only needed in
@@ -172,9 +172,9 @@ Node* AsyncBuiltinsAssembler::AwaitOptimized(Node* context, Node* generator,
Branch(IsPromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate(),
&if_debugging, &do_perform_promise_then);
BIND(&if_debugging);
- var_throwaway.Bind(CallRuntime(Runtime::kAwaitPromisesInit, context, promise,
- promise, outer_promise, on_reject,
- is_predicted_as_caught));
+ var_throwaway =
+ CAST(CallRuntime(Runtime::kAwaitPromisesInit, context, promise, promise,
+ outer_promise, on_reject, is_predicted_as_caught));
Goto(&do_perform_promise_then);
BIND(&do_perform_promise_then);
@@ -182,12 +182,13 @@ Node* AsyncBuiltinsAssembler::AwaitOptimized(Node* context, Node* generator,
on_resolve, on_reject, var_throwaway.value());
}
-Node* AsyncBuiltinsAssembler::Await(Node* context, Node* generator, Node* value,
- Node* outer_promise,
- Node* on_resolve_context_index,
- Node* on_reject_context_index,
- Node* is_predicted_as_caught) {
- VARIABLE(result, MachineRepresentation::kTagged);
+TNode<Object> AsyncBuiltinsAssembler::Await(
+ TNode<Context> context, TNode<JSGeneratorObject> generator,
+ TNode<Object> value, TNode<JSPromise> outer_promise,
+ TNode<IntPtrT> on_resolve_context_index,
+ TNode<IntPtrT> on_reject_context_index,
+ TNode<Oddball> is_predicted_as_caught) {
+ TVARIABLE(Object, result);
Label if_old(this), if_new(this), done(this),
if_slow_constructor(this, Label::kDeferred);
@@ -197,7 +198,8 @@ Node* AsyncBuiltinsAssembler::Await(Node* context, Node* generator, Node* value,
// to allocate the wrapper promise and can just use the `AwaitOptimized`
// logic.
GotoIf(TaggedIsSmi(value), &if_old);
- TNode<Map> const value_map = LoadMap(value);
+ TNode<HeapObject> value_object = CAST(value);
+ TNode<Map> const value_map = LoadMap(value_object);
GotoIfNot(IsJSPromiseMap(value_map), &if_old);
// We can skip the "constructor" lookup on {value} if it's [[Prototype]]
// is the (initial) Promise.prototype and the @@species protector is
@@ -223,25 +225,24 @@ Node* AsyncBuiltinsAssembler::Await(Node* context, Node* generator, Node* value,
}
BIND(&if_old);
- result.Bind(AwaitOld(context, generator, value, outer_promise,
- on_resolve_context_index, on_reject_context_index,
- is_predicted_as_caught));
+ result = AwaitOld(context, generator, value, outer_promise,
+ on_resolve_context_index, on_reject_context_index,
+ is_predicted_as_caught);
Goto(&done);
BIND(&if_new);
- result.Bind(AwaitOptimized(context, generator, value, outer_promise,
- on_resolve_context_index, on_reject_context_index,
- is_predicted_as_caught));
+ result = AwaitOptimized(context, generator, CAST(value), outer_promise,
+ on_resolve_context_index, on_reject_context_index,
+ is_predicted_as_caught);
Goto(&done);
BIND(&done);
return result.value();
}
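
Await chooses between AwaitOld, which allocates a wrapper promise for arbitrary values, and AwaitOptimized, which reuses the value when it is already a native JSPromise with the unmodified prototype (the species and constructor checks are elided here). A sketch of that dispatch shape only, with stand-in types:

    #include <iostream>
    #include <variant>

    // Illustrative stand-ins: a native promise vs. any other awaited value.
    struct JSPromiseLike { const char* tag = "native promise"; };
    using Value = std::variant<int, JSPromiseLike>;

    JSPromiseLike WrapInPromise(const Value&) {
      std::cout << "allocated wrapper promise (old path)\n";
      return JSPromiseLike{};
    }

    JSPromiseLike Await(const Value& value) {
      // Fast path: already a native promise => skip the wrapper allocation,
      // mirroring the TaggedIsSmi/IsJSPromiseMap checks above.
      if (auto* p = std::get_if<JSPromiseLike>(&value)) {
        std::cout << "reused value directly (optimized path)\n";
        return *p;
      }
      return WrapInPromise(value);  // anything else takes the old path
    }

    int main() {
      Await(Value{42});               // old path
      Await(Value{JSPromiseLike{}});  // optimized path
    }
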
-void AsyncBuiltinsAssembler::InitializeNativeClosure(Node* context,
- Node* native_context,
- Node* function,
- Node* context_index) {
+void AsyncBuiltinsAssembler::InitializeNativeClosure(
+ TNode<Context> context, TNode<NativeContext> native_context,
+ TNode<HeapObject> function, TNode<IntPtrT> context_index) {
TNode<Map> function_map = CAST(LoadContextElement(
native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX));
// Ensure that we don't have to initialize prototype_or_initial_map field of
@@ -276,24 +277,23 @@ void AsyncBuiltinsAssembler::InitializeNativeClosure(Node* context,
StoreObjectFieldNoWriteBarrier(function, JSFunction::kCodeOffset, code);
}
-Node* AsyncBuiltinsAssembler::CreateUnwrapClosure(Node* native_context,
- Node* done) {
- TNode<Object> const map = LoadContextElement(
- native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX);
- TNode<SharedFunctionInfo> const on_fulfilled_shared = CAST(LoadContextElement(
+TNode<JSFunction> AsyncBuiltinsAssembler::CreateUnwrapClosure(
+ TNode<NativeContext> native_context, TNode<Oddball> done) {
+ const TNode<Map> map = CAST(LoadContextElement(
+ native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX));
+ const TNode<SharedFunctionInfo> on_fulfilled_shared = CAST(LoadContextElement(
native_context, Context::ASYNC_ITERATOR_VALUE_UNWRAP_SHARED_FUN));
- Node* const closure_context =
+ const TNode<Context> closure_context =
AllocateAsyncIteratorValueUnwrapContext(native_context, done);
return AllocateFunctionWithMapAndContext(map, on_fulfilled_shared,
closure_context);
}
-Node* AsyncBuiltinsAssembler::AllocateAsyncIteratorValueUnwrapContext(
- Node* native_context, Node* done) {
- CSA_ASSERT(this, IsNativeContext(native_context));
+TNode<Context> AsyncBuiltinsAssembler::AllocateAsyncIteratorValueUnwrapContext(
+ TNode<NativeContext> native_context, TNode<Oddball> done) {
CSA_ASSERT(this, IsBoolean(done));
- Node* const context =
+ TNode<Context> context =
CreatePromiseContext(native_context, ValueUnwrapContext::kLength);
StoreContextElementNoWriteBarrier(context, ValueUnwrapContext::kDoneSlot,
done);
@@ -301,8 +301,8 @@ Node* AsyncBuiltinsAssembler::AllocateAsyncIteratorValueUnwrapContext(
}
TF_BUILTIN(AsyncIteratorValueUnwrap, AsyncBuiltinsAssembler) {
- Node* const value = Parameter(Descriptor::kValue);
- Node* const context = Parameter(Descriptor::kContext);
+ TNode<Object> value = CAST(Parameter(Descriptor::kValue));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TNode<Object> const done =
LoadContextElement(context, ValueUnwrapContext::kDoneSlot);
diff --git a/deps/v8/src/builtins/builtins-async-gen.h b/deps/v8/src/builtins/builtins-async-gen.h
index 9dafddef21..7b9c944f4a 100644
--- a/deps/v8/src/builtins/builtins-async-gen.h
+++ b/deps/v8/src/builtins/builtins-async-gen.h
@@ -21,20 +21,27 @@ class AsyncBuiltinsAssembler : public PromiseBuiltinsAssembler {
// point to a SharedFunctionInfo instance used to create the closure. The
// value following the reject index should be a similar value for the resolve
// closure. Returns the Promise-wrapped `value`.
- Node* Await(Node* context, Node* generator, Node* value, Node* outer_promise,
- Node* on_resolve_context_index, Node* on_reject_context_index,
- Node* is_predicted_as_caught);
- Node* Await(Node* context, Node* generator, Node* value, Node* outer_promise,
- int on_resolve_context_index, int on_reject_context_index,
- Node* is_predicted_as_caught) {
+ TNode<Object> Await(TNode<Context> context,
+ TNode<JSGeneratorObject> generator, TNode<Object> value,
+ TNode<JSPromise> outer_promise,
+ TNode<IntPtrT> on_resolve_context_index,
+ TNode<IntPtrT> on_reject_context_index,
+ TNode<Oddball> is_predicted_as_caught);
+ TNode<Object> Await(TNode<Context> context,
+ TNode<JSGeneratorObject> generator, TNode<Object> value,
+ TNode<JSPromise> outer_promise,
+ int on_resolve_context_index, int on_reject_context_index,
+ TNode<Oddball> is_predicted_as_caught) {
return Await(context, generator, value, outer_promise,
IntPtrConstant(on_resolve_context_index),
IntPtrConstant(on_reject_context_index),
is_predicted_as_caught);
}
- Node* Await(Node* context, Node* generator, Node* value, Node* outer_promise,
- int on_resolve_context_index, int on_reject_context_index,
- bool is_predicted_as_caught) {
+ TNode<Object> Await(TNode<Context> context,
+ TNode<JSGeneratorObject> generator, TNode<Object> value,
+ TNode<JSPromise> outer_promise,
+ int on_resolve_context_index, int on_reject_context_index,
+ bool is_predicted_as_caught) {
return Await(context, generator, value, outer_promise,
on_resolve_context_index, on_reject_context_index,
BooleanConstant(is_predicted_as_caught));
@@ -42,21 +49,30 @@ class AsyncBuiltinsAssembler : public PromiseBuiltinsAssembler {
// Return a new built-in function object as defined in
// Async Iterator Value Unwrap Functions
- Node* CreateUnwrapClosure(Node* const native_context, Node* const done);
+ TNode<JSFunction> CreateUnwrapClosure(TNode<NativeContext> native_context,
+ TNode<Oddball> done);
private:
- void InitializeNativeClosure(Node* context, Node* native_context,
- Node* function, Node* context_index);
- Node* AllocateAsyncIteratorValueUnwrapContext(Node* native_context,
- Node* done);
+ void InitializeNativeClosure(TNode<Context> context,
+ TNode<NativeContext> native_context,
+ TNode<HeapObject> function,
+ TNode<IntPtrT> context_index);
+ TNode<Context> AllocateAsyncIteratorValueUnwrapContext(
+ TNode<NativeContext> native_context, TNode<Oddball> done);
- Node* AwaitOld(Node* context, Node* generator, Node* value,
- Node* outer_promise, Node* on_resolve_context_index,
- Node* on_reject_context_index, Node* is_predicted_as_caught);
- Node* AwaitOptimized(Node* context, Node* generator, Node* value,
- Node* outer_promise, Node* on_resolve_context_index,
- Node* on_reject_context_index,
- Node* is_predicted_as_caught);
+ TNode<Object> AwaitOld(TNode<Context> context,
+ TNode<JSGeneratorObject> generator,
+ TNode<Object> value, TNode<JSPromise> outer_promise,
+ TNode<IntPtrT> on_resolve_context_index,
+ TNode<IntPtrT> on_reject_context_index,
+ TNode<Oddball> is_predicted_as_caught);
+ TNode<Object> AwaitOptimized(TNode<Context> context,
+ TNode<JSGeneratorObject> generator,
+ TNode<JSPromise> promise,
+ TNode<JSPromise> outer_promise,
+ TNode<IntPtrT> on_resolve_context_index,
+ TNode<IntPtrT> on_reject_context_index,
+ TNode<Oddball> is_predicted_as_caught);
};
} // namespace internal
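
The header keeps three Await overloads so call sites can pass raw context-slot ints or a C++ bool and have them lifted to IntPtrConstant/BooleanConstant nodes before reaching the core implementation. A sketch of that convenience-overload layering with stand-in node types:

    #include <iostream>

    // Stand-ins for CSA constants: "lifting" plain C++ values into nodes.
    struct IntPtrNode { long value; };
    struct BoolNode { bool value; };

    IntPtrNode IntPtrConstant(long v) { return {v}; }
    BoolNode BooleanConstant(bool v) { return {v}; }

    // The core entry point takes fully lifted operands...
    void Await(IntPtrNode on_resolve, IntPtrNode on_reject, BoolNode caught) {
      std::cout << on_resolve.value << " " << on_reject.value << " "
                << caught.value << "\n";
    }

    // ...and thin overloads lift convenient C++ literals at the call site,
    // mirroring the int/bool Await overloads in the header above.
    void Await(int on_resolve, int on_reject, BoolNode caught) {
      Await(IntPtrConstant(on_resolve), IntPtrConstant(on_reject), caught);
    }
    void Await(int on_resolve, int on_reject, bool caught) {
      Await(on_resolve, on_reject, BooleanConstant(caught));
    }

    int main() {
      Await(3, 4, true);  // resolves through both convenience overloads
    }
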
diff --git a/deps/v8/src/builtins/builtins-async-generator-gen.cc b/deps/v8/src/builtins/builtins-async-generator-gen.cc
index 8053cf0dc8..2ed7e8c83e 100644
--- a/deps/v8/src/builtins/builtins-async-generator-gen.cc
+++ b/deps/v8/src/builtins/builtins-async-generator-gen.cc
@@ -23,146 +23,142 @@ class AsyncGeneratorBuiltinsAssembler : public AsyncBuiltinsAssembler {
explicit AsyncGeneratorBuiltinsAssembler(CodeAssemblerState* state)
: AsyncBuiltinsAssembler(state) {}
- inline Node* TaggedIsAsyncGenerator(Node* tagged_object) {
- TNode<BoolT> if_notsmi = TaggedIsNotSmi(tagged_object);
- return Select<BoolT>(
- if_notsmi,
- [=] {
- return HasInstanceType(tagged_object, JS_ASYNC_GENERATOR_OBJECT_TYPE);
- },
- [=] { return if_notsmi; });
- }
- inline Node* LoadGeneratorState(Node* const generator) {
- return LoadObjectField(generator, JSGeneratorObject::kContinuationOffset);
+ inline TNode<Smi> LoadGeneratorState(
+ const TNode<JSGeneratorObject> generator) {
+ return LoadObjectField<Smi>(generator,
+ JSGeneratorObject::kContinuationOffset);
}
- inline TNode<BoolT> IsGeneratorStateClosed(SloppyTNode<Smi> const state) {
+ inline TNode<BoolT> IsGeneratorStateClosed(const TNode<Smi> state) {
return SmiEqual(state, SmiConstant(JSGeneratorObject::kGeneratorClosed));
}
- inline TNode<BoolT> IsGeneratorClosed(Node* const generator) {
+ inline TNode<BoolT> IsGeneratorClosed(
+ const TNode<JSGeneratorObject> generator) {
return IsGeneratorStateClosed(LoadGeneratorState(generator));
}
- inline TNode<BoolT> IsGeneratorStateSuspended(SloppyTNode<Smi> const state) {
+ inline TNode<BoolT> IsGeneratorStateSuspended(const TNode<Smi> state) {
return SmiGreaterThanOrEqual(state, SmiConstant(0));
}
- inline TNode<BoolT> IsGeneratorSuspended(Node* const generator) {
+ inline TNode<BoolT> IsGeneratorSuspended(
+ const TNode<JSGeneratorObject> generator) {
return IsGeneratorStateSuspended(LoadGeneratorState(generator));
}
- inline TNode<BoolT> IsGeneratorStateSuspendedAtStart(
- SloppyTNode<Smi> const state) {
+ inline TNode<BoolT> IsGeneratorStateSuspendedAtStart(const TNode<Smi> state) {
return SmiEqual(state, SmiConstant(0));
}
- inline TNode<BoolT> IsGeneratorStateNotExecuting(
- SloppyTNode<Smi> const state) {
+ inline TNode<BoolT> IsGeneratorStateNotExecuting(const TNode<Smi> state) {
return SmiNotEqual(state,
SmiConstant(JSGeneratorObject::kGeneratorExecuting));
}
- inline TNode<BoolT> IsGeneratorNotExecuting(Node* const generator) {
+ inline TNode<BoolT> IsGeneratorNotExecuting(
+ const TNode<JSGeneratorObject> generator) {
return IsGeneratorStateNotExecuting(LoadGeneratorState(generator));
}
- inline TNode<BoolT> IsGeneratorAwaiting(Node* const generator) {
+ inline TNode<BoolT> IsGeneratorAwaiting(
+ const TNode<JSGeneratorObject> generator) {
TNode<Object> is_generator_awaiting =
LoadObjectField(generator, JSAsyncGeneratorObject::kIsAwaitingOffset);
return TaggedEqual(is_generator_awaiting, SmiConstant(1));
}
- inline void SetGeneratorAwaiting(Node* const generator) {
+ inline void SetGeneratorAwaiting(const TNode<JSGeneratorObject> generator) {
CSA_ASSERT(this, Word32BinaryNot(IsGeneratorAwaiting(generator)));
StoreObjectFieldNoWriteBarrier(
generator, JSAsyncGeneratorObject::kIsAwaitingOffset, SmiConstant(1));
CSA_ASSERT(this, IsGeneratorAwaiting(generator));
}
- inline void SetGeneratorNotAwaiting(Node* const generator) {
+ inline void SetGeneratorNotAwaiting(
+ const TNode<JSGeneratorObject> generator) {
CSA_ASSERT(this, IsGeneratorAwaiting(generator));
StoreObjectFieldNoWriteBarrier(
generator, JSAsyncGeneratorObject::kIsAwaitingOffset, SmiConstant(0));
CSA_ASSERT(this, Word32BinaryNot(IsGeneratorAwaiting(generator)));
}
- inline void CloseGenerator(Node* const generator) {
+ inline void CloseGenerator(const TNode<JSGeneratorObject> generator) {
StoreObjectFieldNoWriteBarrier(
generator, JSGeneratorObject::kContinuationOffset,
SmiConstant(JSGeneratorObject::kGeneratorClosed));
}
- inline Node* IsFastJSIterResult(Node* const value, Node* const context) {
- CSA_ASSERT(this, TaggedIsNotSmi(value));
- TNode<NativeContext> const native_context = LoadNativeContext(context);
- return TaggedEqual(
- LoadMap(value),
- LoadContextElement(native_context, Context::ITERATOR_RESULT_MAP_INDEX));
- }
-
- inline Node* LoadFirstAsyncGeneratorRequestFromQueue(Node* const generator) {
- return LoadObjectField(generator, JSAsyncGeneratorObject::kQueueOffset);
+ inline TNode<HeapObject> LoadFirstAsyncGeneratorRequestFromQueue(
+ const TNode<JSGeneratorObject> generator) {
+ return LoadObjectField<HeapObject>(generator,
+ JSAsyncGeneratorObject::kQueueOffset);
}
- inline Node* LoadResumeTypeFromAsyncGeneratorRequest(Node* const request) {
- return LoadObjectField(request, AsyncGeneratorRequest::kResumeModeOffset);
+ inline TNode<Smi> LoadResumeTypeFromAsyncGeneratorRequest(
+ const TNode<AsyncGeneratorRequest> request) {
+ return LoadObjectField<Smi>(request,
+ AsyncGeneratorRequest::kResumeModeOffset);
}
- inline Node* LoadPromiseFromAsyncGeneratorRequest(Node* const request) {
- return LoadObjectField(request, AsyncGeneratorRequest::kPromiseOffset);
+ inline TNode<JSPromise> LoadPromiseFromAsyncGeneratorRequest(
+ const TNode<AsyncGeneratorRequest> request) {
+ return LoadObjectField<JSPromise>(request,
+ AsyncGeneratorRequest::kPromiseOffset);
}
- inline Node* LoadValueFromAsyncGeneratorRequest(Node* const request) {
+ inline TNode<Object> LoadValueFromAsyncGeneratorRequest(
+ const TNode<AsyncGeneratorRequest> request) {
return LoadObjectField(request, AsyncGeneratorRequest::kValueOffset);
}
- inline TNode<BoolT> IsAbruptResumeType(SloppyTNode<Smi> const resume_type) {
+ inline TNode<BoolT> IsAbruptResumeType(const TNode<Smi> resume_type) {
return SmiNotEqual(resume_type, SmiConstant(JSGeneratorObject::kNext));
}
- void AsyncGeneratorEnqueue(CodeStubArguments* args, Node* context,
- Node* generator, Node* value,
+ void AsyncGeneratorEnqueue(CodeStubArguments* args, TNode<Context> context,
+ TNode<Object> receiver, TNode<Object> value,
JSAsyncGeneratorObject::ResumeMode resume_mode,
const char* method_name);
- Node* TakeFirstAsyncGeneratorRequestFromQueue(Node* generator);
- Node* TakeFirstAsyncGeneratorRequestFromQueueIfPresent(Node* generator,
- Label* if_not_present);
- void AddAsyncGeneratorRequestToQueue(Node* generator, Node* request);
+ TNode<AsyncGeneratorRequest> TakeFirstAsyncGeneratorRequestFromQueue(
+ TNode<JSAsyncGeneratorObject> generator);
+ void AddAsyncGeneratorRequestToQueue(TNode<JSAsyncGeneratorObject> generator,
+ TNode<AsyncGeneratorRequest> request);
- Node* AllocateAsyncGeneratorRequest(
- JSAsyncGeneratorObject::ResumeMode resume_mode, Node* resume_value,
- Node* promise);
+ TNode<AsyncGeneratorRequest> AllocateAsyncGeneratorRequest(
+ JSAsyncGeneratorObject::ResumeMode resume_mode,
+ TNode<Object> resume_value, TNode<JSPromise> promise);
// Shared implementation of the catchable and uncatchable variations of Await
// for AsyncGenerators.
template <typename Descriptor>
void AsyncGeneratorAwait(bool is_catchable);
void AsyncGeneratorAwaitResumeClosure(
- Node* context, Node* value,
+ TNode<Context> context, TNode<Object> value,
JSAsyncGeneratorObject::ResumeMode resume_mode);
};
// Shared implementation for the 3 Async Iterator protocol methods of Async
// Generators.
void AsyncGeneratorBuiltinsAssembler::AsyncGeneratorEnqueue(
- CodeStubArguments* args, Node* context, Node* generator, Node* value,
- JSAsyncGeneratorObject::ResumeMode resume_mode, const char* method_name) {
+ CodeStubArguments* args, TNode<Context> context, TNode<Object> receiver,
+ TNode<Object> value, JSAsyncGeneratorObject::ResumeMode resume_mode,
+ const char* method_name) {
// AsyncGeneratorEnqueue produces a new Promise, and appends it to the list
// of async generator requests to be executed. If the generator is not
// presently executing, then this method will loop through, processing each
// request from front to back.
// This loop resides in AsyncGeneratorResumeNext.
- Node* promise = AllocateAndInitJSPromise(context);
-
- Label enqueue(this), if_receiverisincompatible(this, Label::kDeferred);
+ TNode<JSPromise> promise = AllocateAndInitJSPromise(context);
- Branch(TaggedIsAsyncGenerator(generator), &enqueue,
- &if_receiverisincompatible);
+ Label if_receiverisincompatible(this, Label::kDeferred);
+ GotoIf(TaggedIsSmi(receiver), &if_receiverisincompatible);
+ GotoIfNot(HasInstanceType(CAST(receiver), JS_ASYNC_GENERATOR_OBJECT_TYPE),
+ &if_receiverisincompatible);
- BIND(&enqueue);
{
Label done(this);
- Node* const req =
+ const TNode<JSAsyncGeneratorObject> generator = CAST(receiver);
+ const TNode<AsyncGeneratorRequest> req =
AllocateAsyncGeneratorRequest(resume_mode, value, promise);
AddAsyncGeneratorRequestToQueue(generator, req);
@@ -171,7 +167,7 @@ void AsyncGeneratorBuiltinsAssembler::AsyncGeneratorEnqueue(
// If state is not "executing", then
// Perform AsyncGeneratorResumeNext(Generator)
// Check if the {receiver} is running or already closed.
- TNode<Smi> continuation = CAST(LoadGeneratorState(generator));
+ TNode<Smi> continuation = LoadGeneratorState(generator);
GotoIf(SmiEqual(continuation,
SmiConstant(JSAsyncGeneratorObject::kGeneratorExecuting)),
@@ -186,20 +182,18 @@ void AsyncGeneratorBuiltinsAssembler::AsyncGeneratorEnqueue(
BIND(&if_receiverisincompatible);
{
- Node* const error =
- MakeTypeError(MessageTemplate::kIncompatibleMethodReceiver, context,
- StringConstant(method_name), generator);
-
- CallBuiltin(Builtins::kRejectPromise, context, promise, error,
+ CallBuiltin(Builtins::kRejectPromise, context, promise,
+ MakeTypeError(MessageTemplate::kIncompatibleMethodReceiver,
+ context, StringConstant(method_name), receiver),
TrueConstant());
args->PopAndReturn(promise);
}
}
-Node* AsyncGeneratorBuiltinsAssembler::AllocateAsyncGeneratorRequest(
- JSAsyncGeneratorObject::ResumeMode resume_mode, Node* resume_value,
- Node* promise) {
- CSA_SLOW_ASSERT(this, HasInstanceType(promise, JS_PROMISE_TYPE));
+TNode<AsyncGeneratorRequest>
+AsyncGeneratorBuiltinsAssembler::AllocateAsyncGeneratorRequest(
+ JSAsyncGeneratorObject::ResumeMode resume_mode, TNode<Object> resume_value,
+ TNode<JSPromise> promise) {
TNode<HeapObject> request = Allocate(AsyncGeneratorRequest::kSize);
StoreMapNoWriteBarrier(request, RootIndex::kAsyncGeneratorRequestMap);
StoreObjectFieldNoWriteBarrier(request, AsyncGeneratorRequest::kNextOffset,
@@ -213,15 +207,14 @@ Node* AsyncGeneratorBuiltinsAssembler::AllocateAsyncGeneratorRequest(
promise);
StoreObjectFieldRoot(request, AsyncGeneratorRequest::kNextOffset,
RootIndex::kUndefinedValue);
- return request;
+ return CAST(request);
}
void AsyncGeneratorBuiltinsAssembler::AsyncGeneratorAwaitResumeClosure(
- Node* context, Node* value,
+ TNode<Context> context, TNode<Object> value,
JSAsyncGeneratorObject::ResumeMode resume_mode) {
- TNode<Object> const generator =
- LoadContextElement(context, Context::EXTENSION_INDEX);
- CSA_SLOW_ASSERT(this, TaggedIsAsyncGenerator(generator));
+ const TNode<JSAsyncGeneratorObject> generator =
+ CAST(LoadContextElement(context, Context::EXTENSION_INDEX));
SetGeneratorNotAwaiting(generator);
@@ -259,12 +252,13 @@ void AsyncGeneratorBuiltinsAssembler::AsyncGeneratorAwait(bool is_catchable) {
}
void AsyncGeneratorBuiltinsAssembler::AddAsyncGeneratorRequestToQueue(
- Node* generator, Node* request) {
- VARIABLE(var_current, MachineRepresentation::kTagged);
+ TNode<JSAsyncGeneratorObject> generator,
+ TNode<AsyncGeneratorRequest> request) {
+ TVARIABLE(HeapObject, var_current);
Label empty(this), loop(this, &var_current), done(this);
- var_current.Bind(
- LoadObjectField(generator, JSAsyncGeneratorObject::kQueueOffset));
+ var_current = LoadObjectField<HeapObject>(
+ generator, JSAsyncGeneratorObject::kQueueOffset);
Branch(IsUndefined(var_current.value()), &empty, &loop);
BIND(&empty);
@@ -276,9 +270,9 @@ void AsyncGeneratorBuiltinsAssembler::AddAsyncGeneratorRequestToQueue(
BIND(&loop);
{
Label loop_next(this), next_empty(this);
- Node* current = var_current.value();
- TNode<Object> next =
- LoadObjectField(current, AsyncGeneratorRequest::kNextOffset);
+ TNode<AsyncGeneratorRequest> current = CAST(var_current.value());
+ TNode<HeapObject> next = LoadObjectField<HeapObject>(
+ current, AsyncGeneratorRequest::kNextOffset);
Branch(IsUndefined(next), &next_empty, &loop_next);
BIND(&next_empty);
@@ -289,20 +283,20 @@ void AsyncGeneratorBuiltinsAssembler::AddAsyncGeneratorRequestToQueue(
BIND(&loop_next);
{
- var_current.Bind(next);
+ var_current = next;
Goto(&loop);
}
}
BIND(&done);
}
-Node* AsyncGeneratorBuiltinsAssembler::TakeFirstAsyncGeneratorRequestFromQueue(
- Node* generator) {
+TNode<AsyncGeneratorRequest>
+AsyncGeneratorBuiltinsAssembler::TakeFirstAsyncGeneratorRequestFromQueue(
+ TNode<JSAsyncGeneratorObject> generator) {
// Removes and returns the first AsyncGeneratorRequest from a
// JSAsyncGeneratorObject's queue. Asserts that the queue is not empty.
- CSA_ASSERT(this, TaggedIsAsyncGenerator(generator));
- TNode<AsyncGeneratorRequest> request =
- CAST(LoadObjectField(generator, JSAsyncGeneratorObject::kQueueOffset));
+ TNode<AsyncGeneratorRequest> request = LoadObjectField<AsyncGeneratorRequest>(
+ generator, JSAsyncGeneratorObject::kQueueOffset);
TNode<Object> next =
LoadObjectField(request, AsyncGeneratorRequest::kNextOffset);
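
The request queue these helpers manage is an intrusive singly linked list threaded through AsyncGeneratorRequest::kNextOffset, with the head stored in the generator's kQueueOffset slot: append walks the next pointers to the tail, take-first pops the head. A compact sketch of the same enqueue/take-first behaviour over plain heap nodes instead of tagged objects:

    #include <cassert>
    #include <iostream>
    #include <string>

    // Intrusive queue node, mirroring AsyncGeneratorRequest::kNextOffset.
    struct Request {
      std::string value;
      Request* next = nullptr;  // undefined in V8 is nullptr here
    };

    struct GeneratorLike {
      Request* queue = nullptr;  // JSAsyncGeneratorObject::kQueueOffset
    };

    // AddAsyncGeneratorRequestToQueue: walk to the tail, then link.
    void Enqueue(GeneratorLike* gen, Request* request) {
      if (gen->queue == nullptr) { gen->queue = request; return; }
      Request* current = gen->queue;
      while (current->next != nullptr) current = current->next;
      current->next = request;
    }

    // TakeFirstAsyncGeneratorRequestFromQueue: queue must be non-empty.
    Request* TakeFirst(GeneratorLike* gen) {
      Request* head = gen->queue;
      assert(head != nullptr);
      gen->queue = head->next;
      head->next = nullptr;
      return head;
    }

    int main() {
      GeneratorLike gen;
      Request a{"next(1)"}, b{"return(2)"};
      Enqueue(&gen, &a);
      Enqueue(&gen, &b);
      std::cout << TakeFirst(&gen)->value << "\n";  // next(1)
      std::cout << TakeFirst(&gen)->value << "\n";  // return(2)
    }
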
@@ -323,7 +317,7 @@ TF_BUILTIN(AsyncGeneratorPrototypeNext, AsyncGeneratorBuiltinsAssembler) {
TNode<Object> generator = args.GetReceiver();
TNode<Object> value = args.GetOptionalArgumentValue(kValueArg);
- Node* context = Parameter(Descriptor::kContext);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
AsyncGeneratorEnqueue(&args, context, generator, value,
JSAsyncGeneratorObject::kNext,
@@ -341,7 +335,7 @@ TF_BUILTIN(AsyncGeneratorPrototypeReturn, AsyncGeneratorBuiltinsAssembler) {
TNode<Object> generator = args.GetReceiver();
TNode<Object> value = args.GetOptionalArgumentValue(kValueArg);
- Node* context = Parameter(Descriptor::kContext);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
AsyncGeneratorEnqueue(&args, context, generator, value,
JSAsyncGeneratorObject::kReturn,
@@ -359,7 +353,7 @@ TF_BUILTIN(AsyncGeneratorPrototypeThrow, AsyncGeneratorBuiltinsAssembler) {
TNode<Object> generator = args.GetReceiver();
TNode<Object> value = args.GetOptionalArgumentValue(kValueArg);
- Node* context = Parameter(Descriptor::kContext);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
AsyncGeneratorEnqueue(&args, context, generator, value,
JSAsyncGeneratorObject::kThrow,
@@ -367,15 +361,15 @@ TF_BUILTIN(AsyncGeneratorPrototypeThrow, AsyncGeneratorBuiltinsAssembler) {
}
TF_BUILTIN(AsyncGeneratorAwaitResolveClosure, AsyncGeneratorBuiltinsAssembler) {
- Node* value = Parameter(Descriptor::kValue);
- Node* context = Parameter(Descriptor::kContext);
+ TNode<Object> value = CAST(Parameter(Descriptor::kValue));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
AsyncGeneratorAwaitResumeClosure(context, value,
JSAsyncGeneratorObject::kNext);
}
TF_BUILTIN(AsyncGeneratorAwaitRejectClosure, AsyncGeneratorBuiltinsAssembler) {
- Node* value = Parameter(Descriptor::kValue);
- Node* context = Parameter(Descriptor::kContext);
+ TNode<Object> value = CAST(Parameter(Descriptor::kValue));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
AsyncGeneratorAwaitResumeClosure(context, value,
JSAsyncGeneratorObject::kThrow);
}
@@ -392,8 +386,9 @@ TF_BUILTIN(AsyncGeneratorAwaitCaught, AsyncGeneratorBuiltinsAssembler) {
TF_BUILTIN(AsyncGeneratorResumeNext, AsyncGeneratorBuiltinsAssembler) {
using Descriptor = AsyncGeneratorResumeNextDescriptor;
- Node* const generator = Parameter(Descriptor::kGenerator);
- Node* const context = Parameter(Descriptor::kContext);
+ const TNode<JSAsyncGeneratorObject> generator =
+ CAST(Parameter(Descriptor::kGenerator));
+ const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
// The penultimate step of proposal-async-iteration/#sec-asyncgeneratorresolve
// and proposal-async-iteration/#sec-asyncgeneratorreject both recursively
@@ -403,12 +398,10 @@ TF_BUILTIN(AsyncGeneratorResumeNext, AsyncGeneratorBuiltinsAssembler) {
// performs a loop in AsyncGeneratorResumeNext, which continues as long as
// there is an AsyncGeneratorRequest in the queue, and as long as the
// generator is not suspended due to an AwaitExpression.
- VARIABLE(var_state, MachineRepresentation::kTaggedSigned,
- LoadGeneratorState(generator));
- VARIABLE(var_next, MachineRepresentation::kTagged,
- LoadFirstAsyncGeneratorRequestFromQueue(generator));
- Variable* loop_variables[] = {&var_state, &var_next};
- Label start(this, 2, loop_variables);
+ TVARIABLE(Smi, var_state, LoadGeneratorState(generator));
+ TVARIABLE(HeapObject, var_next,
+ LoadFirstAsyncGeneratorRequestFromQueue(generator));
+ Label start(this, {&var_state, &var_next});
Goto(&start);
BIND(&start);
@@ -420,9 +413,8 @@ TF_BUILTIN(AsyncGeneratorResumeNext, AsyncGeneratorBuiltinsAssembler) {
// Stop resuming if request queue is empty.
ReturnIf(IsUndefined(var_next.value()), UndefinedConstant());
- Node* const next = var_next.value();
- TNode<Smi> const resume_type =
- CAST(LoadResumeTypeFromAsyncGeneratorRequest(next));
+ const TNode<AsyncGeneratorRequest> next = CAST(var_next.value());
+ const TNode<Smi> resume_type = LoadResumeTypeFromAsyncGeneratorRequest(next);
Label if_abrupt(this), if_normal(this), resume_generator(this);
Branch(IsAbruptResumeType(resume_type), &if_abrupt, &if_normal);
@@ -432,11 +424,11 @@ TF_BUILTIN(AsyncGeneratorResumeNext, AsyncGeneratorBuiltinsAssembler) {
GotoIfNot(IsGeneratorStateSuspendedAtStart(var_state.value()),
&settle_promise);
CloseGenerator(generator);
- var_state.Bind(SmiConstant(JSGeneratorObject::kGeneratorClosed));
+ var_state = SmiConstant(JSGeneratorObject::kGeneratorClosed);
Goto(&settle_promise);
BIND(&settle_promise);
- Node* next_value = LoadValueFromAsyncGeneratorRequest(next);
+ TNode<Object> next_value = LoadValueFromAsyncGeneratorRequest(next);
Branch(SmiEqual(resume_type, SmiConstant(JSGeneratorObject::kReturn)),
&if_return, &if_throw);
@@ -457,7 +449,7 @@ TF_BUILTIN(AsyncGeneratorResumeNext, AsyncGeneratorBuiltinsAssembler) {
GotoIfNot(IsGeneratorStateClosed(var_state.value()), &resume_generator);
CallBuiltin(Builtins::kAsyncGeneratorReject, context, generator,
next_value);
- var_next.Bind(LoadFirstAsyncGeneratorRequestFromQueue(generator));
+ var_next = LoadFirstAsyncGeneratorRequestFromQueue(generator);
Goto(&start);
}
@@ -466,8 +458,8 @@ TF_BUILTIN(AsyncGeneratorResumeNext, AsyncGeneratorBuiltinsAssembler) {
GotoIfNot(IsGeneratorStateClosed(var_state.value()), &resume_generator);
CallBuiltin(Builtins::kAsyncGeneratorResolve, context, generator,
UndefinedConstant(), TrueConstant());
- var_state.Bind(LoadGeneratorState(generator));
- var_next.Bind(LoadFirstAsyncGeneratorRequestFromQueue(generator));
+ var_state = LoadGeneratorState(generator);
+ var_next = LoadFirstAsyncGeneratorRequestFromQueue(generator);
Goto(&start);
}
@@ -478,19 +470,19 @@ TF_BUILTIN(AsyncGeneratorResumeNext, AsyncGeneratorBuiltinsAssembler) {
generator, JSGeneratorObject::kResumeModeOffset, resume_type);
CallStub(CodeFactory::ResumeGenerator(isolate()), context,
LoadValueFromAsyncGeneratorRequest(next), generator);
- var_state.Bind(LoadGeneratorState(generator));
- var_next.Bind(LoadFirstAsyncGeneratorRequestFromQueue(generator));
+ var_state = LoadGeneratorState(generator);
+ var_next = LoadFirstAsyncGeneratorRequestFromQueue(generator);
Goto(&start);
}
}
TF_BUILTIN(AsyncGeneratorResolve, AsyncGeneratorBuiltinsAssembler) {
- Node* const generator = Parameter(Descriptor::kGenerator);
- Node* const value = Parameter(Descriptor::kValue);
- Node* const done = Parameter(Descriptor::kDone);
- Node* const context = Parameter(Descriptor::kContext);
+ const TNode<JSAsyncGeneratorObject> generator =
+ CAST(Parameter(Descriptor::kGenerator));
+ const TNode<Object> value = CAST(Parameter(Descriptor::kValue));
+ const TNode<Object> done = CAST(Parameter(Descriptor::kDone));
+ const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- CSA_SLOW_ASSERT(this, TaggedIsAsyncGenerator(generator));
CSA_ASSERT(this, Word32BinaryNot(IsGeneratorAwaiting(generator)));
// This operation should be called only when the `value` parameter has been
@@ -499,11 +491,12 @@ TF_BUILTIN(AsyncGeneratorResolve, AsyncGeneratorBuiltinsAssembler) {
// non-callable value. This can't be checked with assertions due to being
// observable, but keep it in mind.
- Node* const next = TakeFirstAsyncGeneratorRequestFromQueue(generator);
- Node* const promise = LoadPromiseFromAsyncGeneratorRequest(next);
+ const TNode<AsyncGeneratorRequest> next =
+ TakeFirstAsyncGeneratorRequestFromQueue(generator);
+ const TNode<JSPromise> promise = LoadPromiseFromAsyncGeneratorRequest(next);
// Let iteratorResult be CreateIterResultObject(value, done).
- TNode<HeapObject> const iter_result = Allocate(JSIteratorResult::kSize);
+ const TNode<HeapObject> iter_result = Allocate(JSIteratorResult::kSize);
{
TNode<Object> map = LoadContextElement(LoadNativeContext(context),
Context::ITERATOR_RESULT_MAP_INDEX);
@@ -555,25 +548,30 @@ TF_BUILTIN(AsyncGeneratorResolve, AsyncGeneratorBuiltinsAssembler) {
TF_BUILTIN(AsyncGeneratorReject, AsyncGeneratorBuiltinsAssembler) {
using Descriptor = AsyncGeneratorRejectDescriptor;
- Node* const generator = Parameter(Descriptor::kGenerator);
- Node* const value = Parameter(Descriptor::kValue);
- Node* const context = Parameter(Descriptor::kContext);
+ const TNode<JSAsyncGeneratorObject> generator =
+ CAST(Parameter(Descriptor::kGenerator));
+ const TNode<Object> value = CAST(Parameter(Descriptor::kValue));
+ const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- Node* const next = TakeFirstAsyncGeneratorRequestFromQueue(generator);
- Node* const promise = LoadPromiseFromAsyncGeneratorRequest(next);
+ TNode<AsyncGeneratorRequest> next =
+ TakeFirstAsyncGeneratorRequestFromQueue(generator);
+ TNode<JSPromise> promise = LoadPromiseFromAsyncGeneratorRequest(next);
Return(CallBuiltin(Builtins::kRejectPromise, context, promise, value,
TrueConstant()));
}
TF_BUILTIN(AsyncGeneratorYield, AsyncGeneratorBuiltinsAssembler) {
- Node* const generator = Parameter(Descriptor::kGenerator);
- Node* const value = Parameter(Descriptor::kValue);
- Node* const is_caught = Parameter(Descriptor::kIsCaught);
- Node* const context = Parameter(Descriptor::kContext);
+ const TNode<JSGeneratorObject> generator =
+ CAST(Parameter(Descriptor::kGenerator));
+ const TNode<Object> value = CAST(Parameter(Descriptor::kValue));
+ const TNode<Oddball> is_caught = CAST(Parameter(Descriptor::kIsCaught));
+ const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- Node* const request = LoadFirstAsyncGeneratorRequestFromQueue(generator);
- Node* const outer_promise = LoadPromiseFromAsyncGeneratorRequest(request);
+ const TNode<AsyncGeneratorRequest> request =
+ CAST(LoadFirstAsyncGeneratorRequestFromQueue(generator));
+ const TNode<JSPromise> outer_promise =
+ LoadPromiseFromAsyncGeneratorRequest(request);
const int on_resolve = Context::ASYNC_GENERATOR_YIELD_RESOLVE_SHARED_FUN;
const int on_reject = Context::ASYNC_GENERATOR_AWAIT_REJECT_SHARED_FUN;
@@ -585,10 +583,10 @@ TF_BUILTIN(AsyncGeneratorYield, AsyncGeneratorBuiltinsAssembler) {
}
TF_BUILTIN(AsyncGeneratorYieldResolveClosure, AsyncGeneratorBuiltinsAssembler) {
- Node* const context = Parameter(Descriptor::kContext);
- Node* const value = Parameter(Descriptor::kValue);
- TNode<Object> const generator =
- LoadContextElement(context, Context::EXTENSION_INDEX);
+ const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ const TNode<Object> value = CAST(Parameter(Descriptor::kValue));
+ const TNode<JSAsyncGeneratorObject> generator =
+ CAST(LoadContextElement(context, Context::EXTENSION_INDEX));
SetGeneratorNotAwaiting(generator);
@@ -617,33 +615,35 @@ TF_BUILTIN(AsyncGeneratorReturn, AsyncGeneratorBuiltinsAssembler) {
// (per proposal-async-iteration/#sec-asyncgeneratorresumenext step 10.b.i)
//
// In all cases, the final step is to jump back to AsyncGeneratorResumeNext.
- Node* const generator = Parameter(Descriptor::kGenerator);
- Node* const value = Parameter(Descriptor::kValue);
- Node* const is_caught = Parameter(Descriptor::kIsCaught);
- Node* const req = LoadFirstAsyncGeneratorRequestFromQueue(generator);
- CSA_ASSERT(this, IsNotUndefined(req));
+ const TNode<JSGeneratorObject> generator =
+ CAST(Parameter(Descriptor::kGenerator));
+ const TNode<Object> value = CAST(Parameter(Descriptor::kValue));
+ const TNode<Oddball> is_caught = CAST(Parameter(Descriptor::kIsCaught));
+ const TNode<AsyncGeneratorRequest> req =
+ CAST(LoadFirstAsyncGeneratorRequestFromQueue(generator));
Label perform_await(this);
- VARIABLE(var_on_resolve, MachineType::PointerRepresentation(),
- IntPtrConstant(
- Context::ASYNC_GENERATOR_RETURN_CLOSED_RESOLVE_SHARED_FUN));
- VARIABLE(
- var_on_reject, MachineType::PointerRepresentation(),
+ TVARIABLE(IntPtrT, var_on_resolve,
+ IntPtrConstant(
+ Context::ASYNC_GENERATOR_RETURN_CLOSED_RESOLVE_SHARED_FUN));
+ TVARIABLE(
+ IntPtrT, var_on_reject,
IntPtrConstant(Context::ASYNC_GENERATOR_RETURN_CLOSED_REJECT_SHARED_FUN));
- Node* const state = LoadGeneratorState(generator);
+ const TNode<Smi> state = LoadGeneratorState(generator);
GotoIf(IsGeneratorStateClosed(state), &perform_await);
- var_on_resolve.Bind(
- IntPtrConstant(Context::ASYNC_GENERATOR_RETURN_RESOLVE_SHARED_FUN));
- var_on_reject.Bind(
- IntPtrConstant(Context::ASYNC_GENERATOR_AWAIT_REJECT_SHARED_FUN));
+ var_on_resolve =
+ IntPtrConstant(Context::ASYNC_GENERATOR_RETURN_RESOLVE_SHARED_FUN);
+ var_on_reject =
+ IntPtrConstant(Context::ASYNC_GENERATOR_AWAIT_REJECT_SHARED_FUN);
Goto(&perform_await);
BIND(&perform_await);
SetGeneratorAwaiting(generator);
- Node* const context = Parameter(Descriptor::kContext);
- Node* const outer_promise = LoadPromiseFromAsyncGeneratorRequest(req);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ const TNode<JSPromise> outer_promise =
+ LoadPromiseFromAsyncGeneratorRequest(req);
Await(context, generator, value, outer_promise, var_on_resolve.value(),
var_on_reject.value(), is_caught);
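
The variable plumbing migrates the same way: `VARIABLE` plus `Bind()` becomes `TVARIABLE` plus ordinary assignment, with the machine representation implied by the element type. A fragment of the converted pattern, as used in `AsyncGeneratorReturn` above:

    TVARIABLE(IntPtrT, var_on_resolve,
              IntPtrConstant(
                  Context::ASYNC_GENERATOR_RETURN_CLOSED_RESOLVE_SHARED_FUN));
    // Rebinding is assignment now, not var_on_resolve.Bind(...):
    var_on_resolve =
        IntPtrConstant(Context::ASYNC_GENERATOR_RETURN_RESOLVE_SHARED_FUN);
    // Reads are unchanged: var_on_resolve.value() still yields TNode<IntPtrT>.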
@@ -656,8 +656,8 @@ TF_BUILTIN(AsyncGeneratorReturn, AsyncGeneratorBuiltinsAssembler) {
// proposal-async-iteration/#sec-asyncgeneratoryield step 8.e
TF_BUILTIN(AsyncGeneratorReturnResolveClosure,
AsyncGeneratorBuiltinsAssembler) {
- Node* const context = Parameter(Descriptor::kContext);
- Node* const value = Parameter(Descriptor::kValue);
+ const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ const TNode<Object> value = CAST(Parameter(Descriptor::kValue));
AsyncGeneratorAwaitResumeClosure(context, value, JSGeneratorObject::kReturn);
}
@@ -666,10 +666,10 @@ TF_BUILTIN(AsyncGeneratorReturnResolveClosure,
// AsyncGeneratorResumeNext.
TF_BUILTIN(AsyncGeneratorReturnClosedResolveClosure,
AsyncGeneratorBuiltinsAssembler) {
- Node* const context = Parameter(Descriptor::kContext);
- Node* const value = Parameter(Descriptor::kValue);
- TNode<Object> const generator =
- LoadContextElement(context, Context::EXTENSION_INDEX);
+ const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ const TNode<Object> value = CAST(Parameter(Descriptor::kValue));
+ const TNode<JSAsyncGeneratorObject> generator =
+ CAST(LoadContextElement(context, Context::EXTENSION_INDEX));
SetGeneratorNotAwaiting(generator);
@@ -684,10 +684,10 @@ TF_BUILTIN(AsyncGeneratorReturnClosedResolveClosure,
TF_BUILTIN(AsyncGeneratorReturnClosedRejectClosure,
AsyncGeneratorBuiltinsAssembler) {
- Node* const context = Parameter(Descriptor::kContext);
- Node* const value = Parameter(Descriptor::kValue);
- TNode<Object> const generator =
- LoadContextElement(context, Context::EXTENSION_INDEX);
+ const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ const TNode<Object> value = CAST(Parameter(Descriptor::kValue));
+ const TNode<JSAsyncGeneratorObject> generator =
+ CAST(LoadContextElement(context, Context::EXTENSION_INDEX));
SetGeneratorNotAwaiting(generator);
diff --git a/deps/v8/src/builtins/builtins-async-iterator-gen.cc b/deps/v8/src/builtins/builtins-async-iterator-gen.cc
index 0b5c5ef8b9..39ff8c9217 100644
--- a/deps/v8/src/builtins/builtins-async-iterator-gen.cc
+++ b/deps/v8/src/builtins/builtins-async-iterator-gen.cc
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/base/optional.h"
#include "src/builtins/builtins-async-gen.h"
#include "src/builtins/builtins-utils-gen.h"
#include "src/builtins/builtins.h"
@@ -20,29 +21,34 @@ class AsyncFromSyncBuiltinsAssembler : public AsyncBuiltinsAssembler {
explicit AsyncFromSyncBuiltinsAssembler(compiler::CodeAssemblerState* state)
: AsyncBuiltinsAssembler(state) {}
- void ThrowIfNotAsyncFromSyncIterator(Node* const context, Node* const object,
+ void ThrowIfNotAsyncFromSyncIterator(const TNode<Context> context,
+ const TNode<Object> object,
Label* if_exception,
- Variable* var_exception,
+ TVariable<Object>* var_exception,
const char* method_name);
- using UndefinedMethodHandler = std::function<void(
- Node* const context, Node* const promise, Label* if_exception)>;
- using SyncIteratorNodeGenerator = std::function<Node*(Node*)>;
+ using UndefinedMethodHandler =
+ std::function<void(const TNode<NativeContext> native_context,
+ const TNode<JSPromise> promise, Label* if_exception)>;
+ using SyncIteratorNodeGenerator =
+ std::function<TNode<Object>(TNode<JSReceiver>)>;
void Generate_AsyncFromSyncIteratorMethod(
- Node* const context, Node* const iterator, Node* const sent_value,
+ const TNode<Context> context, const TNode<Object> iterator,
+ const TNode<Object> sent_value,
const SyncIteratorNodeGenerator& get_method,
const UndefinedMethodHandler& if_method_undefined,
const char* operation_name,
Label::Type reject_label_type = Label::kDeferred,
- Node* const initial_exception_value = nullptr);
+ base::Optional<TNode<Object>> initial_exception_value = base::nullopt);
void Generate_AsyncFromSyncIteratorMethod(
- Node* const context, Node* const iterator, Node* const sent_value,
- Handle<String> name, const UndefinedMethodHandler& if_method_undefined,
+ const TNode<Context> context, const TNode<Object> iterator,
+ const TNode<Object> sent_value, Handle<String> name,
+ const UndefinedMethodHandler& if_method_undefined,
const char* operation_name,
Label::Type reject_label_type = Label::kDeferred,
- Node* const initial_exception_value = nullptr) {
- auto get_method = [=](Node* const sync_iterator) {
+ base::Optional<TNode<Object>> initial_exception_value = base::nullopt) {
+ auto get_method = [=](const TNode<JSReceiver> sync_iterator) {
return GetProperty(context, sync_iterator, name);
};
return Generate_AsyncFromSyncIteratorMethod(
@@ -51,26 +57,26 @@ class AsyncFromSyncBuiltinsAssembler : public AsyncBuiltinsAssembler {
}
// Load "value" and "done" from an iterator result object. If an exception
- // is thrown at any point, jumps to te `if_exception` label with exception
+ // is thrown at any point, jumps to the `if_exception` label with exception
// stored in `var_exception`.
//
// Returns a Pair of Nodes, whose first element is the value of the "value"
// property, and whose second element is the value of the "done" property,
// converted to a Boolean if needed.
- std::pair<Node*, Node*> LoadIteratorResult(Node* const context,
- Node* const native_context,
- Node* const iter_result,
- Label* if_exception,
- Variable* var_exception);
+ std::pair<TNode<Object>, TNode<Oddball>> LoadIteratorResult(
+ const TNode<Context> context, const TNode<NativeContext> native_context,
+ const TNode<Object> iter_result, Label* if_exception,
+ TVariable<Object>* var_exception);
};
void AsyncFromSyncBuiltinsAssembler::ThrowIfNotAsyncFromSyncIterator(
- Node* const context, Node* const object, Label* if_exception,
- Variable* var_exception, const char* method_name) {
+ const TNode<Context> context, const TNode<Object> object,
+ Label* if_exception, TVariable<Object>* var_exception,
+ const char* method_name) {
Label if_receiverisincompatible(this, Label::kDeferred), done(this);
GotoIf(TaggedIsSmi(object), &if_receiverisincompatible);
- Branch(HasInstanceType(object, JS_ASYNC_FROM_SYNC_ITERATOR_TYPE), &done,
+ Branch(HasInstanceType(CAST(object), JS_ASYNC_FROM_SYNC_ITERATOR_TYPE), &done,
&if_receiverisincompatible);
BIND(&if_receiverisincompatible);
@@ -79,13 +85,13 @@ void AsyncFromSyncBuiltinsAssembler::ThrowIfNotAsyncFromSyncIterator(
// internal slot, then
// Let badIteratorError be a new TypeError exception.
- Node* const error =
- MakeTypeError(MessageTemplate::kIncompatibleMethodReceiver, context,
- StringConstant(method_name), object);
+ TNode<HeapObject> error =
+ CAST(MakeTypeError(MessageTemplate::kIncompatibleMethodReceiver,
+ context, StringConstant(method_name), object));
// Perform ! Call(promiseCapability.[[Reject]], undefined,
// « badIteratorError »).
- var_exception->Bind(error);
+ *var_exception = error;
Goto(if_exception);
}
@@ -93,26 +99,27 @@ void AsyncFromSyncBuiltinsAssembler::ThrowIfNotAsyncFromSyncIterator(
}
void AsyncFromSyncBuiltinsAssembler::Generate_AsyncFromSyncIteratorMethod(
- Node* const context, Node* const iterator, Node* const sent_value,
- const SyncIteratorNodeGenerator& get_method,
+ const TNode<Context> context, const TNode<Object> iterator,
+ const TNode<Object> sent_value, const SyncIteratorNodeGenerator& get_method,
const UndefinedMethodHandler& if_method_undefined,
const char* operation_name, Label::Type reject_label_type,
- Node* const initial_exception_value) {
- TNode<NativeContext> const native_context = LoadNativeContext(context);
- Node* const promise = AllocateAndInitJSPromise(context);
+ base::Optional<TNode<Object>> initial_exception_value) {
+ const TNode<NativeContext> native_context = LoadNativeContext(context);
+ const TNode<JSPromise> promise = AllocateAndInitJSPromise(context);
- VARIABLE(var_exception, MachineRepresentation::kTagged,
- initial_exception_value == nullptr ? UndefinedConstant()
- : initial_exception_value);
+ TVARIABLE(
+ Object, var_exception,
+ initial_exception_value ? *initial_exception_value : UndefinedConstant());
Label reject_promise(this, reject_label_type);
ThrowIfNotAsyncFromSyncIterator(context, iterator, &reject_promise,
&var_exception, operation_name);
- TNode<Object> const sync_iterator =
- LoadObjectField(iterator, JSAsyncFromSyncIterator::kSyncIteratorOffset);
+ TNode<JSAsyncFromSyncIterator> async_iterator = CAST(iterator);
+ const TNode<JSReceiver> sync_iterator = LoadObjectField<JSReceiver>(
+ async_iterator, JSAsyncFromSyncIterator::kSyncIteratorOffset);
- Node* const method = get_method(sync_iterator);
+ TNode<Object> method = get_method(sync_iterator);
if (if_method_undefined) {
Label if_isnotundefined(this);
@@ -123,21 +130,21 @@ void AsyncFromSyncBuiltinsAssembler::Generate_AsyncFromSyncIteratorMethod(
BIND(&if_isnotundefined);
}
- Node* const iter_result = CallJS(CodeFactory::Call(isolate()), context,
- method, sync_iterator, sent_value);
+ const TNode<Object> iter_result = CallJS(
+ CodeFactory::Call(isolate()), context, method, sync_iterator, sent_value);
GotoIfException(iter_result, &reject_promise, &var_exception);
- Node* value;
- Node* done;
+ TNode<Object> value;
+ TNode<Oddball> done;
std::tie(value, done) = LoadIteratorResult(
context, native_context, iter_result, &reject_promise, &var_exception);
- TNode<JSFunction> const promise_fun =
+ const TNode<JSFunction> promise_fun =
CAST(LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX));
CSA_ASSERT(this, IsConstructor(promise_fun));
// Let valueWrapper be PromiseResolve(%Promise%, « value »).
- TNode<Object> const value_wrapper = CallBuiltin(
+ const TNode<Object> value_wrapper = CallBuiltin(
Builtins::kPromiseResolve, native_context, promise_fun, value);
// IfAbruptRejectPromise(valueWrapper, promiseCapability).
GotoIfException(value_wrapper, &reject_promise, &var_exception);
@@ -145,7 +152,8 @@ void AsyncFromSyncBuiltinsAssembler::Generate_AsyncFromSyncIteratorMethod(
// Let onFulfilled be a new built-in function object as defined in
// Async Iterator Value Unwrap Functions.
// Set onFulfilled.[[Done]] to throwDone.
- Node* const on_fulfilled = CreateUnwrapClosure(native_context, done);
+ const TNode<JSFunction> on_fulfilled =
+ CreateUnwrapClosure(native_context, done);
// Perform ! PerformPromiseThen(valueWrapper,
// onFulfilled, undefined, promiseCapability).
@@ -154,35 +162,39 @@ void AsyncFromSyncBuiltinsAssembler::Generate_AsyncFromSyncIteratorMethod(
BIND(&reject_promise);
{
- Node* const exception = var_exception.value();
+ const TNode<Object> exception = var_exception.value();
CallBuiltin(Builtins::kRejectPromise, context, promise, exception,
TrueConstant());
Return(promise);
}
}
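
Optional default arguments move from a sentinel `nullptr` to `base::Optional`, which composes with `TNode` (a `TNode` has no natural null state). A fragment of the shape, with a hypothetical helper name standing in for the long signature above:

    // Hypothetical helper signature showing the default-argument shape:
    void EmitWithOptionalException(
        base::Optional<TNode<Object>> initial_exception_value = base::nullopt);

    // At the use site, presence is a plain bool test and *opt dereferences:
    TVARIABLE(Object, var_exception,
              initial_exception_value ? *initial_exception_value
                                      : UndefinedConstant());

`base::Optional` mirrors `std::optional` (V8 did not yet require C++17 here), so `operator bool` and `operator*` behave as expected.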
-std::pair<Node*, Node*> AsyncFromSyncBuiltinsAssembler::LoadIteratorResult(
- Node* const context, Node* const native_context, Node* const iter_result,
- Label* if_exception, Variable* var_exception) {
+
+std::pair<TNode<Object>, TNode<Oddball>>
+AsyncFromSyncBuiltinsAssembler::LoadIteratorResult(
+ const TNode<Context> context, const TNode<NativeContext> native_context,
+ const TNode<Object> iter_result, Label* if_exception,
+ TVariable<Object>* var_exception) {
Label if_fastpath(this), if_slowpath(this), merge(this), to_boolean(this),
done(this), if_notanobject(this, Label::kDeferred);
GotoIf(TaggedIsSmi(iter_result), &if_notanobject);
- TNode<Map> const iter_result_map = LoadMap(iter_result);
+ const TNode<Map> iter_result_map = LoadMap(CAST(iter_result));
GotoIfNot(IsJSReceiverMap(iter_result_map), &if_notanobject);
- TNode<Object> const fast_iter_result_map =
+ const TNode<Object> fast_iter_result_map =
LoadContextElement(native_context, Context::ITERATOR_RESULT_MAP_INDEX);
- VARIABLE(var_value, MachineRepresentation::kTagged);
- VARIABLE(var_done, MachineRepresentation::kTagged);
+ TVARIABLE(Object, var_value);
+ TVARIABLE(Object, var_done);
Branch(TaggedEqual(iter_result_map, fast_iter_result_map), &if_fastpath,
&if_slowpath);
BIND(&if_fastpath);
{
- var_done.Bind(LoadObjectField(iter_result, JSIteratorResult::kDoneOffset));
- var_value.Bind(
- LoadObjectField(iter_result, JSIteratorResult::kValueOffset));
+ TNode<JSObject> fast_iter_result = CAST(iter_result);
+ var_done = LoadObjectField(fast_iter_result, JSIteratorResult::kDoneOffset);
+ var_value =
+ LoadObjectField(fast_iter_result, JSIteratorResult::kValueOffset);
Goto(&merge);
}
@@ -190,18 +202,18 @@ std::pair<Node*, Node*> AsyncFromSyncBuiltinsAssembler::LoadIteratorResult(
{
// Let nextDone be IteratorComplete(nextResult).
// IfAbruptRejectPromise(nextDone, promiseCapability).
- TNode<Object> const done =
+ const TNode<Object> done =
GetProperty(context, iter_result, factory()->done_string());
GotoIfException(done, if_exception, var_exception);
// Let nextValue be IteratorValue(nextResult).
// IfAbruptRejectPromise(nextValue, promiseCapability).
- TNode<Object> const value =
+ const TNode<Object> value =
GetProperty(context, iter_result, factory()->value_string());
GotoIfException(value, if_exception, var_exception);
- var_value.Bind(value);
- var_done.Bind(done);
+ var_value = value;
+ var_done = done;
Goto(&merge);
}
@@ -209,27 +221,27 @@ std::pair<Node*, Node*> AsyncFromSyncBuiltinsAssembler::LoadIteratorResult(
{
// Sync iterator result is not an object --- Produce a TypeError and jump
// to the `if_exception` path.
- Node* const error = MakeTypeError(
- MessageTemplate::kIteratorResultNotAnObject, context, iter_result);
- var_exception->Bind(error);
+ const TNode<Object> error = CAST(MakeTypeError(
+ MessageTemplate::kIteratorResultNotAnObject, context, iter_result));
+ *var_exception = error;
Goto(if_exception);
}
BIND(&merge);
// Ensure `iterResult.done` is a Boolean.
GotoIf(TaggedIsSmi(var_done.value()), &to_boolean);
- Branch(IsBoolean(var_done.value()), &done, &to_boolean);
+ Branch(IsBoolean(CAST(var_done.value())), &done, &to_boolean);
BIND(&to_boolean);
{
- TNode<Object> const result =
+ const TNode<Object> result =
CallBuiltin(Builtins::kToBoolean, context, var_done.value());
- var_done.Bind(result);
+ var_done = result;
Goto(&done);
}
BIND(&done);
- return std::make_pair(var_value.value(), var_done.value());
+ return std::make_pair(var_value.value(), CAST(var_done.value()));
}
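
`LoadIteratorResult` keeps its two-tier strategy, now with typed nodes: if the result object still carries the native context's initial iterator-result map, `value` and `done` sit at fixed offsets and no user getters can run; otherwise it falls back to observable `GetProperty` calls. The discriminating check is a single map comparison (fragment from the code above):

    const TNode<Object> fast_iter_result_map =
        LoadContextElement(native_context, Context::ITERATOR_RESULT_MAP_INDEX);
    Branch(TaggedEqual(iter_result_map, fast_iter_result_map), &if_fastpath,
           &if_slowpath);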
} // namespace
@@ -237,12 +249,13 @@ std::pair<Node*, Node*> AsyncFromSyncBuiltinsAssembler::LoadIteratorResult(
// https://tc39.github.io/proposal-async-iteration/
// Section #sec-%asyncfromsynciteratorprototype%.next
TF_BUILTIN(AsyncFromSyncIteratorPrototypeNext, AsyncFromSyncBuiltinsAssembler) {
- Node* const iterator = Parameter(Descriptor::kReceiver);
- Node* const value = Parameter(Descriptor::kValue);
- Node* const context = Parameter(Descriptor::kContext);
+ const TNode<Object> iterator = CAST(Parameter(Descriptor::kReceiver));
+ const TNode<Object> value = CAST(Parameter(Descriptor::kValue));
+ const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- auto get_method = [=](Node* const unused) {
- return LoadObjectField(iterator, JSAsyncFromSyncIterator::kNextOffset);
+ auto get_method = [=](const TNode<JSReceiver> unused) {
+ return LoadObjectField(CAST(iterator),
+ JSAsyncFromSyncIterator::kNextOffset);
};
Generate_AsyncFromSyncIteratorMethod(
context, iterator, value, get_method, UndefinedMethodHandler(),
@@ -253,15 +266,16 @@ TF_BUILTIN(AsyncFromSyncIteratorPrototypeNext, AsyncFromSyncBuiltinsAssembler) {
// Section #sec-%asyncfromsynciteratorprototype%.return
TF_BUILTIN(AsyncFromSyncIteratorPrototypeReturn,
AsyncFromSyncBuiltinsAssembler) {
- Node* const iterator = Parameter(Descriptor::kReceiver);
- Node* const value = Parameter(Descriptor::kValue);
- Node* const context = Parameter(Descriptor::kContext);
+ const TNode<Object> iterator = CAST(Parameter(Descriptor::kReceiver));
+ const TNode<Object> value = CAST(Parameter(Descriptor::kValue));
+ const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- auto if_return_undefined = [=](Node* const native_context,
- Node* const promise, Label* if_exception) {
+ auto if_return_undefined = [=](const TNode<NativeContext> native_context,
+ const TNode<JSPromise> promise,
+ Label* if_exception) {
// If return is undefined, then
// Let iterResult be ! CreateIterResultObject(value, true)
- TNode<Object> const iter_result = CallBuiltin(
+ const TNode<Object> iter_result = CallBuiltin(
Builtins::kCreateIterResultObject, context, value, TrueConstant());
// Perform ! Call(promiseCapability.[[Resolve]], undefined, « iterResult »).
@@ -280,11 +294,12 @@ TF_BUILTIN(AsyncFromSyncIteratorPrototypeReturn,
// Section #sec-%asyncfromsynciteratorprototype%.throw
TF_BUILTIN(AsyncFromSyncIteratorPrototypeThrow,
AsyncFromSyncBuiltinsAssembler) {
- Node* const iterator = Parameter(Descriptor::kReceiver);
- Node* const reason = Parameter(Descriptor::kReason);
- Node* const context = Parameter(Descriptor::kContext);
+ const TNode<Object> iterator = CAST(Parameter(Descriptor::kReceiver));
+ const TNode<Object> reason = CAST(Parameter(Descriptor::kReason));
+ const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- auto if_throw_undefined = [=](Node* const native_context, Node* const promise,
+ auto if_throw_undefined = [=](const TNode<NativeContext> native_context,
+ const TNode<JSPromise> promise,
Label* if_exception) { Goto(if_exception); };
Generate_AsyncFromSyncIteratorMethod(
diff --git a/deps/v8/src/builtins/builtins-async-module.cc b/deps/v8/src/builtins/builtins-async-module.cc
new file mode 100644
index 0000000000..fecdb31cf3
--- /dev/null
+++ b/deps/v8/src/builtins/builtins-async-module.cc
@@ -0,0 +1,33 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/builtins-utils-inl.h"
+#include "src/objects/module-inl.h"
+#include "src/objects/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+
+BUILTIN(CallAsyncModuleFulfilled) {
+ HandleScope handle_scope(isolate);
+ Handle<SourceTextModule> module(
+ isolate->global_handles()->Create(*args.at<SourceTextModule>(0)));
+ SourceTextModule::AsyncModuleExecutionFulfilled(isolate, module);
+ return ReadOnlyRoots(isolate).undefined_value();
+}
+
+BUILTIN(CallAsyncModuleRejected) {
+ HandleScope handle_scope(isolate);
+
+ // Arguments should be a SourceTextModule and an exception object.
+ DCHECK_EQ(args.length(), 2);
+ Handle<SourceTextModule> module(
+ isolate->global_handles()->Create(*args.at<SourceTextModule>(0)));
+ Handle<Object> exception(args.at(1));
+ SourceTextModule::AsyncModuleExecutionRejected(isolate, module, exception);
+ return ReadOnlyRoots(isolate).undefined_value();
+}
+
+} // namespace internal
+} // namespace v8
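
The new builtins-async-module.cc routes async-module execution continuations through plain C++ builtins. One detail worth flagging: the module argument arrives as `args.at<SourceTextModule>(0)` and is immediately re-wrapped via `isolate->global_handles()->Create(...)`, producing a handle that is not tied to the current `HandleScope`. A minimal sketch of that convention (builtin name hypothetical):

    // Hypothetical builtin following the convention used above.
    BUILTIN(ExampleAsyncModuleCallback) {
      HandleScope handle_scope(isolate);
      // Promote the argument to a global handle so the reference stays
      // valid independent of this HandleScope's lifetime.
      Handle<SourceTextModule> module(
          isolate->global_handles()->Create(*args.at<SourceTextModule>(0)));
      return ReadOnlyRoots(isolate).undefined_value();
    }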
diff --git a/deps/v8/src/builtins/builtins-bigint.cc b/deps/v8/src/builtins/builtins-bigint.cc
index 1201ce9730..30da5207f9 100644
--- a/deps/v8/src/builtins/builtins-bigint.cc
+++ b/deps/v8/src/builtins/builtins-bigint.cc
@@ -125,26 +125,21 @@ Object BigIntToStringImpl(Handle<Object> receiver, Handle<Object> radix,
BUILTIN(BigIntPrototypeToLocaleString) {
HandleScope scope(isolate);
+ const char* method = "BigInt.prototype.toLocaleString";
#ifdef V8_INTL_SUPPORT
- if (FLAG_harmony_intl_bigint) {
- // 1. Let x be ? thisBigIntValue(this value).
- Handle<BigInt> x;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, x,
- ThisBigIntValue(isolate, args.receiver(),
- "BigInt.prototype.toLocaleString"));
-
- RETURN_RESULT_OR_FAILURE(
- isolate,
- Intl::NumberToLocaleString(isolate, x, args.atOrUndefined(isolate, 1),
- args.atOrUndefined(isolate, 2)));
- }
- // Fallbacks to old toString implemention if flag is off or no
- // V8_INTL_SUPPORT
+ // 1. Let x be ? thisBigIntValue(this value).
+ Handle<BigInt> x;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, x, ThisBigIntValue(isolate, args.receiver(), method));
+
+ RETURN_RESULT_OR_FAILURE(
+ isolate,
+ Intl::NumberToLocaleString(isolate, x, args.atOrUndefined(isolate, 1),
+ args.atOrUndefined(isolate, 2), method));
+  // Falls back to the old toString implementation if there is no V8_INTL_SUPPORT
#endif // V8_INTL_SUPPORT
Handle<Object> radix = isolate->factory()->undefined_value();
- return BigIntToStringImpl(args.receiver(), radix, isolate,
- "BigInt.prototype.toLocaleString");
+ return BigIntToStringImpl(args.receiver(), radix, isolate, method);
}
BUILTIN(BigIntPrototypeToString) {
diff --git a/deps/v8/src/builtins/builtins-call-gen.cc b/deps/v8/src/builtins/builtins-call-gen.cc
index 91370b0896..fd1ad5bb67 100644
--- a/deps/v8/src/builtins/builtins-call-gen.cc
+++ b/deps/v8/src/builtins/builtins-call-gen.cc
@@ -9,6 +9,7 @@
#include "src/codegen/macro-assembler.h"
#include "src/common/globals.h"
#include "src/execution/isolate.h"
+#include "src/execution/protectors.h"
#include "src/objects/api-callbacks.h"
#include "src/objects/arguments.h"
#include "src/objects/property-cell.h"
@@ -17,9 +18,6 @@
namespace v8 {
namespace internal {
-template <typename T>
-using TNode = compiler::TNode<T>;
-
void Builtins::Generate_CallFunction_ReceiverIsNullOrUndefined(
MacroAssembler* masm) {
Generate_CallFunction(masm, ConvertReceiverMode::kNullOrUndefined);
@@ -297,7 +295,7 @@ void CallOrConstructBuiltinsAssembler::CallOrConstructWithSpread(
TNode<PropertyCell> protector_cell = ArrayIteratorProtectorConstant();
GotoIf(
TaggedEqual(LoadObjectField(protector_cell, PropertyCell::kValueOffset),
- SmiConstant(Isolate::kProtectorInvalid)),
+ SmiConstant(Protectors::kProtectorInvalid)),
&if_generic);
{
// The fast-path accesses the {spread} elements directly.
diff --git a/deps/v8/src/builtins/builtins-collections-gen.cc b/deps/v8/src/builtins/builtins-collections-gen.cc
index dec4142c65..c0ca74a577 100644
--- a/deps/v8/src/builtins/builtins-collections-gen.cc
+++ b/deps/v8/src/builtins/builtins-collections-gen.cc
@@ -8,6 +8,7 @@
#include "src/builtins/builtins-iterator-gen.h"
#include "src/builtins/builtins-utils-gen.h"
#include "src/codegen/code-stub-assembler.h"
+#include "src/execution/protectors.h"
#include "src/heap/factory-inl.h"
#include "src/heap/heap-inl.h"
#include "src/objects/hash-table-inl.h"
@@ -19,8 +20,6 @@ namespace internal {
using compiler::Node;
template <class T>
-using TNode = compiler::TNode<T>;
-template <class T>
using TVariable = compiler::TypedCodeAssemblerVariable<T>;
class BaseCollectionsAssembler : public CodeStubAssembler {
@@ -81,8 +80,8 @@ class BaseCollectionsAssembler : public CodeStubAssembler {
TNode<JSReceiver> new_target);
// Allocates the backing store for a collection.
- virtual TNode<Object> AllocateTable(Variant variant, TNode<Context> context,
- TNode<IntPtrT> at_least_space_for) = 0;
+ virtual TNode<HeapObject> AllocateTable(
+ Variant variant, TNode<IntPtrT> at_least_space_for) = 0;
// Main entry point for a collection constructor builtin.
void GenerateConstructor(Variant variant,
@@ -124,7 +123,7 @@ class BaseCollectionsAssembler : public CodeStubAssembler {
TNode<IntPtrT> EstimatedInitialSize(TNode<Object> initial_entries,
TNode<BoolT> is_fast_jsarray);
- void GotoIfNotJSReceiver(Node* const obj, Label* if_not_receiver);
+ void GotoIfNotJSReceiver(TNode<Object> const obj, Label* if_not_receiver);
// Determines whether the collection's prototype has been modified.
TNode<BoolT> HasInitialCollectionPrototype(Variant variant,
@@ -160,8 +159,8 @@ void BaseCollectionsAssembler::AddConstructorEntry(
? LoadKeyValuePairNoSideEffects(context, key_value,
if_may_have_side_effects)
: LoadKeyValuePair(context, key_value);
- Node* key_n = pair.key;
- Node* value_n = pair.value;
+ TNode<Object> key_n = pair.key;
+ TNode<Object> value_n = pair.value;
CallJS(CodeFactory::Call(isolate()), context, add_function, collection,
key_n, value_n);
} else {
@@ -183,7 +182,7 @@ void BaseCollectionsAssembler::AddConstructorEntries(
Goto(&allocate_table);
BIND(&allocate_table);
{
- TNode<Object> table = AllocateTable(variant, context, at_least_space_for);
+ TNode<HeapObject> table = AllocateTable(variant, at_least_space_for);
StoreObjectField(collection, GetTableOffset(variant), table);
GotoIf(IsNullOrUndefined(initial_entries), &exit);
GotoIfInitialAddFunctionModified(variant, CAST(native_context),
@@ -261,7 +260,7 @@ void BaseCollectionsAssembler::AddConstructorEntriesFromFastJSArray(
&if_doubles);
BIND(&if_smiorobjects);
{
- auto set_entry = [&](Node* index) {
+ auto set_entry = [&](TNode<IntPtrT> index) {
TNode<Object> element = LoadAndNormalizeFixedArrayElement(
CAST(elements), UncheckedCast<IntPtrT>(index));
AddConstructorEntry(variant, context, collection, add_func, element,
@@ -272,8 +271,8 @@ void BaseCollectionsAssembler::AddConstructorEntriesFromFastJSArray(
// elements, a fast loop is used. This assumes that adding an element
// to the collection does not call user code that could mutate the elements
// or collection.
- BuildFastLoop(IntPtrConstant(0), length, set_entry, 1,
- ParameterMode::INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
+ BuildFastLoop<IntPtrT>(IntPtrConstant(0), length, set_entry, 1,
+ IndexAdvanceMode::kPost);
Goto(&exit);
}
BIND(&if_doubles);
@@ -288,13 +287,13 @@ void BaseCollectionsAssembler::AddConstructorEntriesFromFastJSArray(
element);
} else {
DCHECK(variant == kSet || variant == kWeakSet);
- auto set_entry = [&](Node* index) {
+ auto set_entry = [&](TNode<IntPtrT> index) {
TNode<Object> entry = LoadAndNormalizeFixedDoubleArrayElement(
elements, UncheckedCast<IntPtrT>(index));
AddConstructorEntry(variant, context, collection, add_func, entry);
};
- BuildFastLoop(IntPtrConstant(0), length, set_entry, 1,
- ParameterMode::INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
+ BuildFastLoop<IntPtrT>(IntPtrConstant(0), length, set_entry, 1,
+ IndexAdvanceMode::kPost);
Goto(&exit);
}
}
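
`BuildFastLoop` becomes templated on the index type, so the loop body receives a `TNode<IntPtrT>` and the old `ParameterMode::INTPTR_PARAMETERS` argument disappears: the mode is now implied by the template parameter. A fragment of the converted call, as used above:

    auto set_entry = [&](TNode<IntPtrT> index) {
      // body runs once per index in [0, length)
    };
    BuildFastLoop<IntPtrT>(IntPtrConstant(0), length, set_entry, 1,
                           IndexAdvanceMode::kPost);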
@@ -523,10 +522,10 @@ TNode<IntPtrT> BaseCollectionsAssembler::EstimatedInitialSize(
[=] { return IntPtrConstant(0); });
}
-void BaseCollectionsAssembler::GotoIfNotJSReceiver(Node* const obj,
+void BaseCollectionsAssembler::GotoIfNotJSReceiver(TNode<Object> const obj,
Label* if_not_receiver) {
GotoIf(TaggedIsSmi(obj), if_not_receiver);
- GotoIfNot(IsJSReceiver(obj), if_not_receiver);
+ GotoIfNot(IsJSReceiver(CAST(obj)), if_not_receiver);
}
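
This also shows the recurring Smi-check-then-CAST idiom: typed predicates like `IsJSReceiver` take a `TNode<HeapObject>`, so the code must exclude Smis first, after which the `CAST` is both safe and required (fragment from the code above):

    GotoIf(TaggedIsSmi(obj), if_not_receiver);           // a Smi is never a JSReceiver
    GotoIfNot(IsJSReceiver(CAST(obj)), if_not_receiver); // CAST is safe past the Smi check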
TNode<Map> BaseCollectionsAssembler::GetInitialCollectionPrototype(
@@ -608,22 +607,24 @@ class CollectionsBuiltinsAssembler : public BaseCollectionsAssembler {
protected:
template <typename IteratorType>
- Node* AllocateJSCollectionIterator(SloppyTNode<Context> context,
- int map_index, Node* collection);
- TNode<Object> AllocateTable(Variant variant, TNode<Context> context,
- TNode<IntPtrT> at_least_space_for) override;
- TNode<IntPtrT> GetHash(SloppyTNode<HeapObject> const key);
- TNode<IntPtrT> CallGetHashRaw(SloppyTNode<HeapObject> const key);
- TNode<Smi> CallGetOrCreateHashRaw(SloppyTNode<HeapObject> const key);
+ TNode<HeapObject> AllocateJSCollectionIterator(
+ const TNode<Context> context, int map_index,
+ const TNode<HeapObject> collection);
+ TNode<HeapObject> AllocateTable(Variant variant,
+ TNode<IntPtrT> at_least_space_for) override;
+ TNode<IntPtrT> GetHash(const TNode<HeapObject> key);
+ TNode<IntPtrT> CallGetHashRaw(const TNode<HeapObject> key);
+ TNode<Smi> CallGetOrCreateHashRaw(const TNode<HeapObject> key);
  // Transitions the iterator to the non-obsolete backing store.
// This is a NOP if the [table] is not obsolete.
- using UpdateInTransition =
- std::function<void(Node* const table, Node* const index)>;
+ template <typename TableType>
+ using UpdateInTransition = std::function<void(const TNode<TableType> table,
+ const TNode<IntPtrT> index)>;
template <typename TableType>
std::pair<TNode<TableType>, TNode<IntPtrT>> Transition(
TNode<TableType> const table, TNode<IntPtrT> const index,
- UpdateInTransition const& update_in_transition);
+ UpdateInTransition<TableType> const& update_in_transition);
template <typename IteratorType, typename TableType>
std::pair<TNode<TableType>, TNode<IntPtrT>> TransitionAndUpdate(
TNode<IteratorType> const iterator);
@@ -635,35 +636,33 @@ class CollectionsBuiltinsAssembler : public BaseCollectionsAssembler {
// The {result} variable will contain the entry index if the key was found,
// or the hash code otherwise.
template <typename CollectionType>
- void FindOrderedHashTableEntryForSmiKey(Node* table,
- SloppyTNode<Smi> key_tagged,
- Variable* result, Label* entry_found,
- Label* not_found);
- void SameValueZeroSmi(SloppyTNode<Smi> key_smi,
- SloppyTNode<Object> candidate_key, Label* if_same,
- Label* if_not_same);
+ void FindOrderedHashTableEntryForSmiKey(TNode<CollectionType> table,
+ TNode<Smi> key_tagged,
+ TVariable<IntPtrT>* result,
+ Label* entry_found, Label* not_found);
+ void SameValueZeroSmi(TNode<Smi> key_smi, TNode<Object> candidate_key,
+ Label* if_same, Label* if_not_same);
// Specialization for heap numbers.
// The {result} variable will contain the entry index if the key was found,
// or the hash code otherwise.
- void SameValueZeroHeapNumber(SloppyTNode<Float64T> key_float,
- SloppyTNode<Object> candidate_key,
- Label* if_same, Label* if_not_same);
+ void SameValueZeroHeapNumber(TNode<Float64T> key_float,
+ TNode<Object> candidate_key, Label* if_same,
+ Label* if_not_same);
template <typename CollectionType>
void FindOrderedHashTableEntryForHeapNumberKey(
- SloppyTNode<Context> context, Node* table,
- SloppyTNode<HeapNumber> key_heap_number, Variable* result,
- Label* entry_found, Label* not_found);
+ TNode<CollectionType> table, TNode<HeapNumber> key_heap_number,
+ TVariable<IntPtrT>* result, Label* entry_found, Label* not_found);
// Specialization for bigints.
// The {result} variable will contain the entry index if the key was found,
// or the hash code otherwise.
- void SameValueZeroBigInt(Node* key, Node* candidate_key, Label* if_same,
- Label* if_not_same);
+ void SameValueZeroBigInt(TNode<BigInt> key, TNode<Object> candidate_key,
+ Label* if_same, Label* if_not_same);
template <typename CollectionType>
- void FindOrderedHashTableEntryForBigIntKey(SloppyTNode<Context> context,
- Node* table, Node* key,
- Variable* result,
+ void FindOrderedHashTableEntryForBigIntKey(TNode<CollectionType> table,
+ TNode<BigInt> key_big_int,
+ TVariable<IntPtrT>* result,
Label* entry_found,
Label* not_found);
@@ -671,14 +670,14 @@ class CollectionsBuiltinsAssembler : public BaseCollectionsAssembler {
// The {result} variable will contain the entry index if the key was found,
// or the hash code otherwise.
template <typename CollectionType>
- void FindOrderedHashTableEntryForStringKey(
- SloppyTNode<Context> context, Node* table, SloppyTNode<String> key_tagged,
- Variable* result, Label* entry_found, Label* not_found);
- TNode<IntPtrT> ComputeStringHash(TNode<Context> context,
- TNode<String> string_key);
- void SameValueZeroString(SloppyTNode<Context> context,
- SloppyTNode<String> key_string,
- SloppyTNode<Object> candidate_key, Label* if_same,
+ void FindOrderedHashTableEntryForStringKey(TNode<CollectionType> table,
+ TNode<String> key_tagged,
+ TVariable<IntPtrT>* result,
+ Label* entry_found,
+ Label* not_found);
+ TNode<IntPtrT> ComputeStringHash(TNode<String> string_key);
+ void SameValueZeroString(TNode<String> key_string,
+ TNode<Object> candidate_key, Label* if_same,
Label* if_not_same);
// Specialization for non-strings, non-numbers. For those we only need
@@ -687,26 +686,32 @@ class CollectionsBuiltinsAssembler : public BaseCollectionsAssembler {
// or the hash code otherwise. If the hash-code has not been computed, it
// should be Smi -1.
template <typename CollectionType>
- void FindOrderedHashTableEntryForOtherKey(
- SloppyTNode<Context> context, Node* table, SloppyTNode<HeapObject> key,
- Variable* result, Label* entry_found, Label* not_found);
+ void FindOrderedHashTableEntryForOtherKey(TNode<CollectionType> table,
+ TNode<HeapObject> key_heap_object,
+ TVariable<IntPtrT>* result,
+ Label* entry_found,
+ Label* not_found);
template <typename CollectionType>
- void TryLookupOrderedHashTableIndex(Node* const table, Node* const key,
- Node* const context, Variable* result,
+ void TryLookupOrderedHashTableIndex(const TNode<CollectionType> table,
+ const TNode<Object> key,
+ TVariable<IntPtrT>* result,
Label* if_entry_found,
Label* if_not_found);
- Node* NormalizeNumberKey(Node* key);
+ const TNode<Object> NormalizeNumberKey(const TNode<Object> key);
void StoreOrderedHashMapNewEntry(TNode<OrderedHashMap> const table,
- Node* const key, Node* const value,
- Node* const hash,
- Node* const number_of_buckets,
- Node* const occupancy);
+ const TNode<Object> key,
+ const TNode<Object> value,
+ const TNode<IntPtrT> hash,
+ const TNode<IntPtrT> number_of_buckets,
+ const TNode<IntPtrT> occupancy);
+
void StoreOrderedHashSetNewEntry(TNode<OrderedHashSet> const table,
- Node* const key, Node* const hash,
- Node* const number_of_buckets,
- Node* const occupancy);
+ const TNode<Object> key,
+ const TNode<IntPtrT> hash,
+ const TNode<IntPtrT> number_of_buckets,
+ const TNode<IntPtrT> occupancy);
// Create a JSArray with PACKED_ELEMENTS kind from a Map.prototype.keys() or
// Map.prototype.values() iterator. The iterator is assumed to satisfy
@@ -727,11 +732,97 @@ class CollectionsBuiltinsAssembler : public BaseCollectionsAssembler {
void BranchIfMapIteratorProtectorValid(Label* if_true, Label* if_false);
void BranchIfSetIteratorProtectorValid(Label* if_true, Label* if_false);
+
+  // Builds code that finds the OrderedHashTable entry for a key with hash code
+  // {hash}, using the comparison code generated by {key_compare}. The code
+ // jumps to {entry_found} if the key is found, or to {not_found} if the key
+ // was not found. In the {entry_found} branch, the variable
+ // entry_start_position will be bound to the index of the entry (relative to
+ // OrderedHashTable::kHashTableStartIndex).
+ //
+ // The {CollectionType} template parameter stands for the particular instance
+ // of OrderedHashTable, it should be OrderedHashMap or OrderedHashSet.
+ template <typename CollectionType>
+ void FindOrderedHashTableEntry(
+ Node* table, Node* hash,
+ const std::function<void(TNode<Object>, Label*, Label*)>& key_compare,
+ Variable* entry_start_position, Label* entry_found, Label* not_found);
};
+template <typename CollectionType>
+void CollectionsBuiltinsAssembler::FindOrderedHashTableEntry(
+ Node* table, Node* hash,
+ const std::function<void(TNode<Object>, Label*, Label*)>& key_compare,
+ Variable* entry_start_position, Label* entry_found, Label* not_found) {
+ // Get the index of the bucket.
+ TNode<IntPtrT> const number_of_buckets =
+ SmiUntag(CAST(UnsafeLoadFixedArrayElement(
+ CAST(table), CollectionType::NumberOfBucketsIndex())));
+ TNode<WordT> const bucket =
+ WordAnd(hash, IntPtrSub(number_of_buckets, IntPtrConstant(1)));
+ TNode<IntPtrT> const first_entry = SmiUntag(CAST(UnsafeLoadFixedArrayElement(
+ CAST(table), bucket,
+ CollectionType::HashTableStartIndex() * kTaggedSize)));
+
+ // Walk the bucket chain.
+ TNode<IntPtrT> entry_start;
+ Label if_key_found(this);
+ {
+ TVARIABLE(IntPtrT, var_entry, first_entry);
+ Label loop(this, {&var_entry, entry_start_position}),
+ continue_next_entry(this);
+ Goto(&loop);
+ BIND(&loop);
+
+ // If the entry index is the not-found sentinel, we are done.
+ GotoIf(IntPtrEqual(var_entry.value(),
+ IntPtrConstant(CollectionType::kNotFound)),
+ not_found);
+
+ // Make sure the entry index is within range.
+ CSA_ASSERT(
+ this,
+ UintPtrLessThan(
+ var_entry.value(),
+ SmiUntag(SmiAdd(
+ CAST(UnsafeLoadFixedArrayElement(
+ CAST(table), CollectionType::NumberOfElementsIndex())),
+ CAST(UnsafeLoadFixedArrayElement(
+ CAST(table),
+ CollectionType::NumberOfDeletedElementsIndex()))))));
+
+ // Compute the index of the entry relative to kHashTableStartIndex.
+ entry_start =
+ IntPtrAdd(IntPtrMul(var_entry.value(),
+ IntPtrConstant(CollectionType::kEntrySize)),
+ number_of_buckets);
+
+ // Load the key from the entry.
+ TNode<Object> const candidate_key = UnsafeLoadFixedArrayElement(
+ CAST(table), entry_start,
+ CollectionType::HashTableStartIndex() * kTaggedSize);
+
+ key_compare(candidate_key, &if_key_found, &continue_next_entry);
+
+ BIND(&continue_next_entry);
+ // Load the index of the next entry in the bucket chain.
+ var_entry = SmiUntag(CAST(UnsafeLoadFixedArrayElement(
+ CAST(table), entry_start,
+ (CollectionType::HashTableStartIndex() + CollectionType::kChainOffset) *
+ kTaggedSize)));
+
+ Goto(&loop);
+ }
+
+ BIND(&if_key_found);
+ entry_start_position->Bind(entry_start);
+ Goto(entry_found);
+}
+
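
To make the bucket-chain walk above easier to follow, here is a standalone C++ model with integer keys standing in for tagged values. The layout assumption mirrors the constants the CSA code uses: the slots are [bucket heads (num_buckets) | entries], each entry occupying entry_size slots with its key first and its chain link at chain_offset.

    #include <cstdint>
    #include <vector>

    constexpr intptr_t kNotFound = -1;

    intptr_t FindEntry(const std::vector<intptr_t>& slots,
                       intptr_t num_buckets, intptr_t entry_size,
                       intptr_t chain_offset, intptr_t hash, intptr_t key) {
      intptr_t bucket = hash & (num_buckets - 1);  // num_buckets is a power of 2
      intptr_t entry = slots[bucket];              // head of this bucket's chain
      while (entry != kNotFound) {
        intptr_t entry_start = num_buckets + entry * entry_size;
        if (slots[entry_start] == key) return entry_start;  // key is slot 0
        entry = slots[entry_start + chain_offset];  // follow the chain link
      }
      return kNotFound;
    }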
template <typename IteratorType>
-Node* CollectionsBuiltinsAssembler::AllocateJSCollectionIterator(
- SloppyTNode<Context> context, int map_index, Node* collection) {
+TNode<HeapObject> CollectionsBuiltinsAssembler::AllocateJSCollectionIterator(
+ TNode<Context> const context, int map_index,
+ TNode<HeapObject> const collection) {
TNode<Object> const table =
LoadObjectField(collection, JSCollection::kTableOffset);
TNode<NativeContext> const native_context = LoadNativeContext(context);
@@ -749,9 +840,8 @@ Node* CollectionsBuiltinsAssembler::AllocateJSCollectionIterator(
return iterator;
}
-TNode<Object> CollectionsBuiltinsAssembler::AllocateTable(
- Variant variant, TNode<Context> context,
- TNode<IntPtrT> at_least_space_for) {
+TNode<HeapObject> CollectionsBuiltinsAssembler::AllocateTable(
+ Variant variant, TNode<IntPtrT> at_least_space_for) {
return CAST((variant == kMap || variant == kWeakMap)
? AllocateOrderedHashTable<OrderedHashMap>()
: AllocateOrderedHashTable<OrderedHashSet>());
@@ -778,7 +868,7 @@ TF_BUILTIN(SetConstructor, CollectionsBuiltinsAssembler) {
}
TNode<Smi> CollectionsBuiltinsAssembler::CallGetOrCreateHashRaw(
- SloppyTNode<HeapObject> const key) {
+ const TNode<HeapObject> key) {
TNode<ExternalReference> const function_addr =
ExternalConstant(ExternalReference::get_or_create_hash_raw());
TNode<ExternalReference> const isolate_ptr =
@@ -787,15 +877,15 @@ TNode<Smi> CollectionsBuiltinsAssembler::CallGetOrCreateHashRaw(
MachineType type_ptr = MachineType::Pointer();
MachineType type_tagged = MachineType::AnyTagged();
- Node* const result = CallCFunction(function_addr, type_tagged,
- std::make_pair(type_ptr, isolate_ptr),
- std::make_pair(type_tagged, key));
+ TNode<Smi> result = CAST(CallCFunction(function_addr, type_tagged,
+ std::make_pair(type_ptr, isolate_ptr),
+ std::make_pair(type_tagged, key)));
- return CAST(result);
+ return result;
}
TNode<IntPtrT> CollectionsBuiltinsAssembler::CallGetHashRaw(
- SloppyTNode<HeapObject> const key) {
+ const TNode<HeapObject> key) {
TNode<ExternalReference> const function_addr =
ExternalConstant(ExternalReference::orderedhashmap_gethash_raw());
TNode<ExternalReference> const isolate_ptr =
@@ -804,15 +894,15 @@ TNode<IntPtrT> CollectionsBuiltinsAssembler::CallGetHashRaw(
MachineType type_ptr = MachineType::Pointer();
MachineType type_tagged = MachineType::AnyTagged();
- Node* const result = CallCFunction(function_addr, type_tagged,
- std::make_pair(type_ptr, isolate_ptr),
- std::make_pair(type_tagged, key));
+ TNode<Smi> result = CAST(CallCFunction(function_addr, type_tagged,
+ std::make_pair(type_ptr, isolate_ptr),
+ std::make_pair(type_tagged, key)));
return SmiUntag(result);
}
TNode<IntPtrT> CollectionsBuiltinsAssembler::GetHash(
- SloppyTNode<HeapObject> const key) {
+ const TNode<HeapObject> key) {
TVARIABLE(IntPtrT, var_hash);
Label if_receiver(this), if_other(this), done(this);
Branch(IsJSReceiver(key), &if_receiver, &if_other);
@@ -833,9 +923,10 @@ TNode<IntPtrT> CollectionsBuiltinsAssembler::GetHash(
return var_hash.value();
}
-void CollectionsBuiltinsAssembler::SameValueZeroSmi(
- SloppyTNode<Smi> key_smi, SloppyTNode<Object> candidate_key, Label* if_same,
- Label* if_not_same) {
+void CollectionsBuiltinsAssembler::SameValueZeroSmi(TNode<Smi> key_smi,
+ TNode<Object> candidate_key,
+ Label* if_same,
+ Label* if_not_same) {
// If the key is the same, we are done.
GotoIf(TaggedEqual(candidate_key, key_smi), if_same);
@@ -862,7 +953,7 @@ void CollectionsBuiltinsAssembler::BranchIfMapIteratorProtectorValid(
DCHECK(isolate()->heap()->map_iterator_protector().IsPropertyCell());
Branch(
TaggedEqual(LoadObjectField(protector_cell, PropertyCell::kValueOffset),
- SmiConstant(Isolate::kProtectorValid)),
+ SmiConstant(Protectors::kProtectorValid)),
if_true, if_false);
}
@@ -921,7 +1012,7 @@ void CollectionsBuiltinsAssembler::BranchIfSetIteratorProtectorValid(
DCHECK(isolate()->heap()->set_iterator_protector().IsPropertyCell());
Branch(
TaggedEqual(LoadObjectField(protector_cell, PropertyCell::kValueOffset),
- SmiConstant(Isolate::kProtectorValid)),
+ SmiConstant(Protectors::kProtectorValid)),
if_true, if_false);
}
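
These protector checks also pick up the relocated sentinel constants: `Isolate::kProtectorValid`/`kProtectorInvalid` become `Protectors::kProtectorValid`/`kProtectorInvalid`, matching the `src/execution/protectors.h` includes added earlier in this diff. The check pattern itself is unchanged (fragment, assuming a protector cell already loaded as above):

    Branch(
        TaggedEqual(LoadObjectField(protector_cell, PropertyCell::kValueOffset),
                    SmiConstant(Protectors::kProtectorValid)),
        if_true, if_false);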
@@ -998,15 +1089,15 @@ TNode<JSArray> CollectionsBuiltinsAssembler::MapIteratorToList(
TNode<Map> array_map =
LoadJSArrayElementsMap(kind, LoadNativeContext(context));
TNode<JSArray> array =
- AllocateJSArray(kind, array_map, size, SmiTag(size), nullptr,
+ AllocateJSArray(kind, array_map, size, SmiTag(size), {},
INTPTR_PARAMETERS, kAllowLargeObjectAllocation);
TNode<FixedArray> elements = CAST(LoadElements(array));
const int first_element_offset = FixedArray::kHeaderSize - kHeapObjectTag;
TNode<IntPtrT> first_to_element_offset =
- ElementOffsetFromIndex(IntPtrConstant(0), kind, INTPTR_PARAMETERS, 0);
- VARIABLE(
- var_offset, MachineType::PointerRepresentation(),
+ ElementOffsetFromIndex(IntPtrConstant(0), kind, 0);
+ TVARIABLE(
+ IntPtrT, var_offset,
IntPtrAdd(first_to_element_offset, IntPtrConstant(first_element_offset)));
TVARIABLE(IntPtrT, var_index, index);
VariableList vars({&var_index, &var_offset}, zone());
@@ -1053,8 +1144,7 @@ TNode<JSArray> CollectionsBuiltinsAssembler::MapIteratorToList(
{
// Increment the array offset and continue the loop to the next entry.
var_index = cur_index;
- var_offset.Bind(
- IntPtrAdd(var_offset.value(), IntPtrConstant(kTaggedSize)));
+ var_offset = IntPtrAdd(var_offset.value(), IntPtrConstant(kTaggedSize));
Goto(&loop);
}
}
@@ -1111,15 +1201,15 @@ TNode<JSArray> CollectionsBuiltinsAssembler::SetOrSetIteratorToList(
TNode<Map> array_map =
LoadJSArrayElementsMap(kind, LoadNativeContext(context));
TNode<JSArray> array =
- AllocateJSArray(kind, array_map, size, SmiTag(size), nullptr,
+ AllocateJSArray(kind, array_map, size, SmiTag(size), {},
INTPTR_PARAMETERS, kAllowLargeObjectAllocation);
TNode<FixedArray> elements = CAST(LoadElements(array));
const int first_element_offset = FixedArray::kHeaderSize - kHeapObjectTag;
TNode<IntPtrT> first_to_element_offset =
- ElementOffsetFromIndex(IntPtrConstant(0), kind, INTPTR_PARAMETERS, 0);
- VARIABLE(
- var_offset, MachineType::PointerRepresentation(),
+ ElementOffsetFromIndex(IntPtrConstant(0), kind, 0);
+ TVARIABLE(
+ IntPtrT, var_offset,
IntPtrAdd(first_to_element_offset, IntPtrConstant(first_element_offset)));
TVARIABLE(IntPtrT, var_index, IntPtrConstant(0));
Label done(this), finalize(this, {&var_index}),
@@ -1139,7 +1229,7 @@ TNode<JSArray> CollectionsBuiltinsAssembler::SetOrSetIteratorToList(
Store(elements, var_offset.value(), entry_key);
var_index = cur_index;
- var_offset.Bind(IntPtrAdd(var_offset.value(), IntPtrConstant(kTaggedSize)));
+ var_offset = IntPtrAdd(var_offset.value(), IntPtrConstant(kTaggedSize));
Goto(&loop);
}
@@ -1164,13 +1254,13 @@ TF_BUILTIN(SetOrSetIteratorToList, CollectionsBuiltinsAssembler) {
template <typename CollectionType>
void CollectionsBuiltinsAssembler::FindOrderedHashTableEntryForSmiKey(
- Node* table, SloppyTNode<Smi> smi_key, Variable* result, Label* entry_found,
- Label* not_found) {
+ TNode<CollectionType> table, TNode<Smi> smi_key, TVariable<IntPtrT>* result,
+ Label* entry_found, Label* not_found) {
TNode<IntPtrT> const key_untagged = SmiUntag(smi_key);
TNode<IntPtrT> const hash =
ChangeInt32ToIntPtr(ComputeUnseededHash(key_untagged));
CSA_ASSERT(this, IntPtrGreaterThanOrEqual(hash, IntPtrConstant(0)));
- result->Bind(hash);
+ *result = hash;
FindOrderedHashTableEntry<CollectionType>(
table, hash,
[&](TNode<Object> other_key, Label* if_same, Label* if_not_same) {
@@ -1181,28 +1271,26 @@ void CollectionsBuiltinsAssembler::FindOrderedHashTableEntryForSmiKey(
template <typename CollectionType>
void CollectionsBuiltinsAssembler::FindOrderedHashTableEntryForStringKey(
- SloppyTNode<Context> context, Node* table, SloppyTNode<String> key_tagged,
- Variable* result, Label* entry_found, Label* not_found) {
- TNode<IntPtrT> const hash = ComputeStringHash(context, key_tagged);
+ TNode<CollectionType> table, TNode<String> key_tagged,
+ TVariable<IntPtrT>* result, Label* entry_found, Label* not_found) {
+ TNode<IntPtrT> const hash = ComputeStringHash(key_tagged);
CSA_ASSERT(this, IntPtrGreaterThanOrEqual(hash, IntPtrConstant(0)));
- result->Bind(hash);
+ *result = hash;
FindOrderedHashTableEntry<CollectionType>(
table, hash,
[&](TNode<Object> other_key, Label* if_same, Label* if_not_same) {
- SameValueZeroString(context, key_tagged, other_key, if_same,
- if_not_same);
+ SameValueZeroString(key_tagged, other_key, if_same, if_not_same);
},
result, entry_found, not_found);
}
template <typename CollectionType>
void CollectionsBuiltinsAssembler::FindOrderedHashTableEntryForHeapNumberKey(
- SloppyTNode<Context> context, Node* table,
- SloppyTNode<HeapNumber> key_heap_number, Variable* result,
- Label* entry_found, Label* not_found) {
+ TNode<CollectionType> table, TNode<HeapNumber> key_heap_number,
+ TVariable<IntPtrT>* result, Label* entry_found, Label* not_found) {
TNode<IntPtrT> const hash = CallGetHashRaw(key_heap_number);
CSA_ASSERT(this, IntPtrGreaterThanOrEqual(hash, IntPtrConstant(0)));
- result->Bind(hash);
+ *result = hash;
TNode<Float64T> const key_float = LoadHeapNumberValue(key_heap_number);
FindOrderedHashTableEntry<CollectionType>(
table, hash,
@@ -1214,36 +1302,36 @@ void CollectionsBuiltinsAssembler::FindOrderedHashTableEntryForHeapNumberKey(
template <typename CollectionType>
void CollectionsBuiltinsAssembler::FindOrderedHashTableEntryForBigIntKey(
- SloppyTNode<Context> context, Node* table, Node* key, Variable* result,
- Label* entry_found, Label* not_found) {
- TNode<IntPtrT> const hash = CallGetHashRaw(key);
+ TNode<CollectionType> table, TNode<BigInt> key_big_int,
+ TVariable<IntPtrT>* result, Label* entry_found, Label* not_found) {
+ TNode<IntPtrT> const hash = CallGetHashRaw(key_big_int);
CSA_ASSERT(this, IntPtrGreaterThanOrEqual(hash, IntPtrConstant(0)));
- result->Bind(hash);
+ *result = hash;
FindOrderedHashTableEntry<CollectionType>(
table, hash,
[&](TNode<Object> other_key, Label* if_same, Label* if_not_same) {
- SameValueZeroBigInt(key, other_key, if_same, if_not_same);
+ SameValueZeroBigInt(key_big_int, other_key, if_same, if_not_same);
},
result, entry_found, not_found);
}
template <typename CollectionType>
void CollectionsBuiltinsAssembler::FindOrderedHashTableEntryForOtherKey(
- SloppyTNode<Context> context, Node* table, SloppyTNode<HeapObject> key,
- Variable* result, Label* entry_found, Label* not_found) {
- TNode<IntPtrT> const hash = GetHash(key);
+ TNode<CollectionType> table, TNode<HeapObject> key_heap_object,
+ TVariable<IntPtrT>* result, Label* entry_found, Label* not_found) {
+ TNode<IntPtrT> const hash = GetHash(key_heap_object);
CSA_ASSERT(this, IntPtrGreaterThanOrEqual(hash, IntPtrConstant(0)));
- result->Bind(hash);
+ *result = hash;
FindOrderedHashTableEntry<CollectionType>(
table, hash,
[&](TNode<Object> other_key, Label* if_same, Label* if_not_same) {
- Branch(TaggedEqual(key, other_key), if_same, if_not_same);
+ Branch(TaggedEqual(key_heap_object, other_key), if_same, if_not_same);
},
result, entry_found, not_found);
}
TNode<IntPtrT> CollectionsBuiltinsAssembler::ComputeStringHash(
- TNode<Context> context, TNode<String> string_key) {
+ TNode<String> string_key) {
TVARIABLE(IntPtrT, var_result);
Label hash_not_computed(this), done(this, &var_result);
@@ -1261,25 +1349,23 @@ TNode<IntPtrT> CollectionsBuiltinsAssembler::ComputeStringHash(
}
void CollectionsBuiltinsAssembler::SameValueZeroString(
- SloppyTNode<Context> context, SloppyTNode<String> key_string,
- SloppyTNode<Object> candidate_key, Label* if_same, Label* if_not_same) {
+ TNode<String> key_string, TNode<Object> candidate_key, Label* if_same,
+ Label* if_not_same) {
// If the candidate is not a string, the keys are not equal.
GotoIf(TaggedIsSmi(candidate_key), if_not_same);
GotoIfNot(IsString(CAST(candidate_key)), if_not_same);
- Branch(TaggedEqual(CallBuiltin(Builtins::kStringEqual, context, key_string,
- candidate_key),
+ Branch(TaggedEqual(CallBuiltin(Builtins::kStringEqual, NoContextConstant(),
+ key_string, candidate_key),
TrueConstant()),
if_same, if_not_same);
}
-void CollectionsBuiltinsAssembler::SameValueZeroBigInt(Node* key,
- Node* candidate_key,
- Label* if_same,
- Label* if_not_same) {
- CSA_ASSERT(this, IsBigInt(key));
+void CollectionsBuiltinsAssembler::SameValueZeroBigInt(
+ TNode<BigInt> key, TNode<Object> candidate_key, Label* if_same,
+ Label* if_not_same) {
GotoIf(TaggedIsSmi(candidate_key), if_not_same);
- GotoIfNot(IsBigInt(candidate_key), if_not_same);
+ GotoIfNot(IsBigInt(CAST(candidate_key)), if_not_same);
Branch(TaggedEqual(CallRuntime(Runtime::kBigIntEqualToBigInt,
NoContextConstant(), key, candidate_key),
@@ -1288,8 +1374,8 @@ void CollectionsBuiltinsAssembler::SameValueZeroBigInt(Node* key,
}
void CollectionsBuiltinsAssembler::SameValueZeroHeapNumber(
- SloppyTNode<Float64T> key_float, SloppyTNode<Object> candidate_key,
- Label* if_same, Label* if_not_same) {
+ TNode<Float64T> key_float, TNode<Object> candidate_key, Label* if_same,
+ Label* if_not_same) {
Label if_smi(this), if_keyisnan(this);
GotoIf(TaggedIsSmi(candidate_key), &if_smi);
@@ -1339,20 +1425,20 @@ TF_BUILTIN(OrderedHashTableHealIndex, CollectionsBuiltinsAssembler) {
IntPtrConstant(OrderedHashMap::kClearedTableSentinel)),
&return_zero);
- VARIABLE(var_i, MachineType::PointerRepresentation(), IntPtrConstant(0));
- VARIABLE(var_index, MachineRepresentation::kTagged, index);
+ TVARIABLE(IntPtrT, var_i, IntPtrConstant(0));
+ TVARIABLE(Smi, var_index, index);
Label loop(this, {&var_i, &var_index});
Goto(&loop);
BIND(&loop);
{
- Node* i = var_i.value();
+ TNode<IntPtrT> i = var_i.value();
GotoIfNot(IntPtrLessThan(i, number_of_deleted_elements), &return_index);
STATIC_ASSERT(OrderedHashMap::RemovedHolesIndex() ==
OrderedHashSet::RemovedHolesIndex());
TNode<Smi> removed_index = CAST(LoadFixedArrayElement(
CAST(table), i, OrderedHashMap::RemovedHolesIndex() * kTaggedSize));
GotoIf(SmiGreaterThanOrEqual(removed_index, index), &return_index);
- Decrement(&var_index, 1, SMI_PARAMETERS);
+ Decrement(&var_index);
Increment(&var_i);
Goto(&loop);
}
@@ -1368,7 +1454,7 @@ template <typename TableType>
std::pair<TNode<TableType>, TNode<IntPtrT>>
CollectionsBuiltinsAssembler::Transition(
TNode<TableType> const table, TNode<IntPtrT> const index,
- UpdateInTransition const& update_in_transition) {
+ UpdateInTransition<TableType> const& update_in_transition) {
TVARIABLE(IntPtrT, var_index, index);
TVARIABLE(TableType, var_table, table);
Label if_done(this), if_transition(this, Label::kDeferred);
@@ -1413,7 +1499,8 @@ CollectionsBuiltinsAssembler::TransitionAndUpdate(
return Transition<TableType>(
CAST(LoadObjectField(iterator, IteratorType::kTableOffset)),
LoadAndUntagObjectField(iterator, IteratorType::kIndexOffset),
- [this, iterator](Node* const table, Node* const index) {
+ [this, iterator](const TNode<TableType> table,
+ const TNode<IntPtrT> index) {
// Update the {iterator} with the new state.
StoreObjectField(iterator, IteratorType::kTableOffset, table);
StoreObjectFieldNoWriteBarrier(iterator, IteratorType::kIndexOffset,
@@ -1460,13 +1547,14 @@ CollectionsBuiltinsAssembler::NextSkipHoles(TNode<TableType> table,
}
TF_BUILTIN(MapPrototypeGet, CollectionsBuiltinsAssembler) {
- Node* const receiver = Parameter(Descriptor::kReceiver);
- Node* const key = Parameter(Descriptor::kKey);
- Node* const context = Parameter(Descriptor::kContext);
+ const TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ const TNode<Object> key = CAST(Parameter(Descriptor::kKey));
+ const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
ThrowIfNotInstanceType(context, receiver, JS_MAP_TYPE, "Map.prototype.get");
- TNode<Object> const table = LoadObjectField(receiver, JSMap::kTableOffset);
+ TNode<Object> const table =
+ LoadObjectField<Object>(CAST(receiver), JSMap::kTableOffset);
TNode<Smi> index = CAST(
CallBuiltin(Builtins::kFindOrderedHashMapEntry, context, table, key));
@@ -1485,13 +1573,14 @@ TF_BUILTIN(MapPrototypeGet, CollectionsBuiltinsAssembler) {
}
TF_BUILTIN(MapPrototypeHas, CollectionsBuiltinsAssembler) {
- Node* const receiver = Parameter(Descriptor::kReceiver);
- Node* const key = Parameter(Descriptor::kKey);
- Node* const context = Parameter(Descriptor::kContext);
+ const TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ const TNode<Object> key = CAST(Parameter(Descriptor::kKey));
+ const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
ThrowIfNotInstanceType(context, receiver, JS_MAP_TYPE, "Map.prototype.has");
- TNode<Object> const table = LoadObjectField(receiver, JSMap::kTableOffset);
+ TNode<Object> const table =
+ LoadObjectField(CAST(receiver), JSMap::kTableOffset);
TNode<Smi> index = CAST(
CallBuiltin(Builtins::kFindOrderedHashMapEntry, context, table, key));
@@ -1506,17 +1595,18 @@ TF_BUILTIN(MapPrototypeHas, CollectionsBuiltinsAssembler) {
Return(FalseConstant());
}
-Node* CollectionsBuiltinsAssembler::NormalizeNumberKey(Node* const key) {
- VARIABLE(result, MachineRepresentation::kTagged, key);
+const TNode<Object> CollectionsBuiltinsAssembler::NormalizeNumberKey(
+ const TNode<Object> key) {
+ TVARIABLE(Object, result, key);
Label done(this);
GotoIf(TaggedIsSmi(key), &done);
- GotoIfNot(IsHeapNumber(key), &done);
- TNode<Float64T> const number = LoadHeapNumberValue(key);
+ GotoIfNot(IsHeapNumber(CAST(key)), &done);
+ TNode<Float64T> const number = LoadHeapNumberValue(CAST(key));
GotoIfNot(Float64Equal(number, Float64Constant(0.0)), &done);
// We know the value is zero, so we take the key to be Smi 0.
// Another option would be to normalize to Smi here.
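  // Note: this also canonicalizes a -0.0 HeapNumber to Smi 0, which gives
  // Map and Set the SameValueZero key semantics required for +0 and -0.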
- result.Bind(SmiConstant(0));
+ result = SmiConstant(0);
Goto(&done);
BIND(&done);
@@ -1524,25 +1614,23 @@ Node* CollectionsBuiltinsAssembler::NormalizeNumberKey(Node* const key) {
}
TF_BUILTIN(MapPrototypeSet, CollectionsBuiltinsAssembler) {
- Node* const receiver = Parameter(Descriptor::kReceiver);
- Node* key = Parameter(Descriptor::kKey);
- Node* const value = Parameter(Descriptor::kValue);
- Node* const context = Parameter(Descriptor::kContext);
+ const TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ TNode<Object> key = CAST(Parameter(Descriptor::kKey));
+ const TNode<Object> value = CAST(Parameter(Descriptor::kValue));
+ const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
ThrowIfNotInstanceType(context, receiver, JS_MAP_TYPE, "Map.prototype.set");
key = NormalizeNumberKey(key);
TNode<OrderedHashMap> const table =
- CAST(LoadObjectField(receiver, JSMap::kTableOffset));
+ LoadObjectField<OrderedHashMap>(CAST(receiver), JSMap::kTableOffset);
- VARIABLE(entry_start_position_or_hash, MachineType::PointerRepresentation(),
- IntPtrConstant(0));
+ TVARIABLE(IntPtrT, entry_start_position_or_hash, IntPtrConstant(0));
Label entry_found(this), not_found(this);
- TryLookupOrderedHashTableIndex<OrderedHashMap>(table, key, context,
- &entry_start_position_or_hash,
- &entry_found, &not_found);
+ TryLookupOrderedHashTableIndex<OrderedHashMap>(
+ table, key, &entry_start_position_or_hash, &entry_found, &not_found);
BIND(&entry_found);
// If we found the entry, we just store the value there.
@@ -1561,18 +1649,18 @@ TF_BUILTIN(MapPrototypeSet, CollectionsBuiltinsAssembler) {
&add_entry);
// Otherwise, go to runtime to compute the hash code.
- entry_start_position_or_hash.Bind(SmiUntag(CallGetOrCreateHashRaw(key)));
+ entry_start_position_or_hash = SmiUntag(CallGetOrCreateHashRaw(CAST(key)));
Goto(&add_entry);
}
BIND(&add_entry);
- VARIABLE(number_of_buckets, MachineType::PointerRepresentation());
- VARIABLE(occupancy, MachineType::PointerRepresentation());
+ TVARIABLE(IntPtrT, number_of_buckets);
+ TVARIABLE(IntPtrT, occupancy);
TVARIABLE(OrderedHashMap, table_var, table);
{
  // Check that we have enough space for the entry.
- number_of_buckets.Bind(SmiUntag(CAST(UnsafeLoadFixedArrayElement(
- table, OrderedHashMap::NumberOfBucketsIndex()))));
+ number_of_buckets = SmiUntag(CAST(UnsafeLoadFixedArrayElement(
+ table, OrderedHashMap::NumberOfBucketsIndex())));
STATIC_ASSERT(OrderedHashMap::kLoadFactor == 2);
TNode<WordT> const capacity = WordShl(number_of_buckets.value(), 1);
@@ -1580,20 +1668,21 @@ TF_BUILTIN(MapPrototypeSet, CollectionsBuiltinsAssembler) {
CAST(LoadObjectField(table, OrderedHashMap::NumberOfElementsOffset())));
TNode<IntPtrT> const number_of_deleted = SmiUntag(CAST(LoadObjectField(
table, OrderedHashMap::NumberOfDeletedElementsOffset())));
- occupancy.Bind(IntPtrAdd(number_of_elements, number_of_deleted));
+ occupancy = IntPtrAdd(number_of_elements, number_of_deleted);
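  // Note: occupancy counts live and deleted entries alike, since deleted
  // entries remain as holes that occupy slots until the table is rehashed.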
GotoIf(IntPtrLessThan(occupancy.value(), capacity), &store_new_entry);
  // We do not have enough space; grow the table and reload the relevant
  // fields.
CallRuntime(Runtime::kMapGrow, context, receiver);
- table_var = CAST(LoadObjectField(receiver, JSMap::kTableOffset));
- number_of_buckets.Bind(SmiUntag(CAST(UnsafeLoadFixedArrayElement(
- table_var.value(), OrderedHashMap::NumberOfBucketsIndex()))));
+ table_var =
+ LoadObjectField<OrderedHashMap>(CAST(receiver), JSMap::kTableOffset);
+ number_of_buckets = SmiUntag(CAST(UnsafeLoadFixedArrayElement(
+ table_var.value(), OrderedHashMap::NumberOfBucketsIndex())));
TNode<IntPtrT> const new_number_of_elements = SmiUntag(CAST(LoadObjectField(
table_var.value(), OrderedHashMap::NumberOfElementsOffset())));
TNode<IntPtrT> const new_number_of_deleted = SmiUntag(CAST(LoadObjectField(
table_var.value(), OrderedHashMap::NumberOfDeletedElementsOffset())));
- occupancy.Bind(IntPtrAdd(new_number_of_elements, new_number_of_deleted));
+ occupancy = IntPtrAdd(new_number_of_elements, new_number_of_deleted);
Goto(&store_new_entry);
}
BIND(&store_new_entry);
@@ -1605,15 +1694,16 @@ TF_BUILTIN(MapPrototypeSet, CollectionsBuiltinsAssembler) {
}
void CollectionsBuiltinsAssembler::StoreOrderedHashMapNewEntry(
- TNode<OrderedHashMap> const table, Node* const key, Node* const value,
- Node* const hash, Node* const number_of_buckets, Node* const occupancy) {
- TNode<WordT> const bucket =
+ TNode<OrderedHashMap> const table, const TNode<Object> key,
+ const TNode<Object> value, const TNode<IntPtrT> hash,
+ const TNode<IntPtrT> number_of_buckets, const TNode<IntPtrT> occupancy) {
+ TNode<IntPtrT> const bucket =
WordAnd(hash, IntPtrSub(number_of_buckets, IntPtrConstant(1)));
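  // Note: the bucket count is a power of two, so masking with
  // (number_of_buckets - 1) is equivalent to hash % number_of_buckets.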
TNode<Smi> bucket_entry = CAST(UnsafeLoadFixedArrayElement(
table, bucket, OrderedHashMap::HashTableStartIndex() * kTaggedSize));
// Store the entry elements.
- TNode<WordT> const entry_start = IntPtrAdd(
+ TNode<IntPtrT> const entry_start = IntPtrAdd(
IntPtrMul(occupancy, IntPtrConstant(OrderedHashMap::kEntrySize)),
number_of_buckets);
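  // Note: the backing store lays out the bucket heads first and the entry
  // records after them, so entry i starts at number_of_buckets +
  // i * kEntrySize slots past HashTableStartIndex() (key, value and chain
  // link for a map).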
UnsafeStoreFixedArrayElement(
@@ -1642,23 +1732,21 @@ void CollectionsBuiltinsAssembler::StoreOrderedHashMapNewEntry(
}
TF_BUILTIN(MapPrototypeDelete, CollectionsBuiltinsAssembler) {
- Node* const receiver = Parameter(Descriptor::kReceiver);
- Node* key = Parameter(Descriptor::kKey);
- Node* const context = Parameter(Descriptor::kContext);
+ const TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ const TNode<Object> key = CAST(Parameter(Descriptor::kKey));
+ const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
ThrowIfNotInstanceType(context, receiver, JS_MAP_TYPE,
"Map.prototype.delete");
TNode<OrderedHashMap> const table =
- CAST(LoadObjectField(receiver, JSMap::kTableOffset));
+ LoadObjectField<OrderedHashMap>(CAST(receiver), JSMap::kTableOffset);
- VARIABLE(entry_start_position_or_hash, MachineType::PointerRepresentation(),
- IntPtrConstant(0));
+ TVARIABLE(IntPtrT, entry_start_position_or_hash, IntPtrConstant(0));
Label entry_found(this), not_found(this);
- TryLookupOrderedHashTableIndex<OrderedHashMap>(table, key, context,
- &entry_start_position_or_hash,
- &entry_found, &not_found);
+ TryLookupOrderedHashTableIndex<OrderedHashMap>(
+ table, key, &entry_start_position_or_hash, &entry_found, &not_found);
BIND(&not_found);
Return(FalseConstant());
@@ -1703,24 +1791,22 @@ TF_BUILTIN(MapPrototypeDelete, CollectionsBuiltinsAssembler) {
}
TF_BUILTIN(SetPrototypeAdd, CollectionsBuiltinsAssembler) {
- Node* const receiver = Parameter(Descriptor::kReceiver);
- Node* key = Parameter(Descriptor::kKey);
- Node* const context = Parameter(Descriptor::kContext);
+ const TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ TNode<Object> key = CAST(Parameter(Descriptor::kKey));
+ const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
ThrowIfNotInstanceType(context, receiver, JS_SET_TYPE, "Set.prototype.add");
key = NormalizeNumberKey(key);
TNode<OrderedHashSet> const table =
- CAST(LoadObjectField(receiver, JSMap::kTableOffset));
+ LoadObjectField<OrderedHashSet>(CAST(receiver), JSMap::kTableOffset);
- VARIABLE(entry_start_position_or_hash, MachineType::PointerRepresentation(),
- IntPtrConstant(0));
+ TVARIABLE(IntPtrT, entry_start_position_or_hash, IntPtrConstant(0));
Label entry_found(this), not_found(this);
- TryLookupOrderedHashTableIndex<OrderedHashSet>(table, key, context,
- &entry_start_position_or_hash,
- &entry_found, &not_found);
+ TryLookupOrderedHashTableIndex<OrderedHashSet>(
+ table, key, &entry_start_position_or_hash, &entry_found, &not_found);
BIND(&entry_found);
// The entry was found, there is nothing to do.
@@ -1735,18 +1821,18 @@ TF_BUILTIN(SetPrototypeAdd, CollectionsBuiltinsAssembler) {
&add_entry);
// Otherwise, go to runtime to compute the hash code.
- entry_start_position_or_hash.Bind(SmiUntag(CallGetOrCreateHashRaw(key)));
+ entry_start_position_or_hash = SmiUntag(CallGetOrCreateHashRaw(CAST(key)));
Goto(&add_entry);
}
BIND(&add_entry);
- VARIABLE(number_of_buckets, MachineType::PointerRepresentation());
- VARIABLE(occupancy, MachineType::PointerRepresentation());
+ TVARIABLE(IntPtrT, number_of_buckets);
+ TVARIABLE(IntPtrT, occupancy);
TVARIABLE(OrderedHashSet, table_var, table);
{
  // Check that we have enough space for the entry.
- number_of_buckets.Bind(SmiUntag(CAST(UnsafeLoadFixedArrayElement(
- table, OrderedHashSet::NumberOfBucketsIndex()))));
+ number_of_buckets = SmiUntag(CAST(UnsafeLoadFixedArrayElement(
+ table, OrderedHashSet::NumberOfBucketsIndex())));
STATIC_ASSERT(OrderedHashSet::kLoadFactor == 2);
TNode<WordT> const capacity = WordShl(number_of_buckets.value(), 1);
@@ -1754,20 +1840,21 @@ TF_BUILTIN(SetPrototypeAdd, CollectionsBuiltinsAssembler) {
CAST(LoadObjectField(table, OrderedHashSet::NumberOfElementsOffset())));
TNode<IntPtrT> const number_of_deleted = SmiUntag(CAST(LoadObjectField(
table, OrderedHashSet::NumberOfDeletedElementsOffset())));
- occupancy.Bind(IntPtrAdd(number_of_elements, number_of_deleted));
+ occupancy = IntPtrAdd(number_of_elements, number_of_deleted);
GotoIf(IntPtrLessThan(occupancy.value(), capacity), &store_new_entry);
  // We do not have enough space; grow the table and reload the relevant
  // fields.
CallRuntime(Runtime::kSetGrow, context, receiver);
- table_var = CAST(LoadObjectField(receiver, JSMap::kTableOffset));
- number_of_buckets.Bind(SmiUntag(CAST(UnsafeLoadFixedArrayElement(
- table_var.value(), OrderedHashSet::NumberOfBucketsIndex()))));
+ table_var =
+ LoadObjectField<OrderedHashSet>(CAST(receiver), JSMap::kTableOffset);
+ number_of_buckets = SmiUntag(CAST(UnsafeLoadFixedArrayElement(
+ table_var.value(), OrderedHashSet::NumberOfBucketsIndex())));
TNode<IntPtrT> const new_number_of_elements = SmiUntag(CAST(LoadObjectField(
table_var.value(), OrderedHashSet::NumberOfElementsOffset())));
TNode<IntPtrT> const new_number_of_deleted = SmiUntag(CAST(LoadObjectField(
table_var.value(), OrderedHashSet::NumberOfDeletedElementsOffset())));
- occupancy.Bind(IntPtrAdd(new_number_of_elements, new_number_of_deleted));
+ occupancy = IntPtrAdd(new_number_of_elements, new_number_of_deleted);
Goto(&store_new_entry);
}
BIND(&store_new_entry);
@@ -1779,15 +1866,16 @@ TF_BUILTIN(SetPrototypeAdd, CollectionsBuiltinsAssembler) {
}
void CollectionsBuiltinsAssembler::StoreOrderedHashSetNewEntry(
- TNode<OrderedHashSet> const table, Node* const key, Node* const hash,
- Node* const number_of_buckets, Node* const occupancy) {
- TNode<WordT> const bucket =
+ TNode<OrderedHashSet> const table, const TNode<Object> key,
+ const TNode<IntPtrT> hash, const TNode<IntPtrT> number_of_buckets,
+ const TNode<IntPtrT> occupancy) {
+ TNode<IntPtrT> const bucket =
WordAnd(hash, IntPtrSub(number_of_buckets, IntPtrConstant(1)));
TNode<Smi> bucket_entry = CAST(UnsafeLoadFixedArrayElement(
table, bucket, OrderedHashSet::HashTableStartIndex() * kTaggedSize));
// Store the entry elements.
- TNode<WordT> const entry_start = IntPtrAdd(
+ TNode<IntPtrT> const entry_start = IntPtrAdd(
IntPtrMul(occupancy, IntPtrConstant(OrderedHashSet::kEntrySize)),
number_of_buckets);
UnsafeStoreFixedArrayElement(
@@ -1812,23 +1900,21 @@ void CollectionsBuiltinsAssembler::StoreOrderedHashSetNewEntry(
}
TF_BUILTIN(SetPrototypeDelete, CollectionsBuiltinsAssembler) {
- Node* const receiver = Parameter(Descriptor::kReceiver);
- Node* key = Parameter(Descriptor::kKey);
- Node* const context = Parameter(Descriptor::kContext);
+ const TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ const TNode<Object> key = CAST(Parameter(Descriptor::kKey));
+ const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
ThrowIfNotInstanceType(context, receiver, JS_SET_TYPE,
"Set.prototype.delete");
TNode<OrderedHashSet> const table =
- CAST(LoadObjectField(receiver, JSMap::kTableOffset));
+ LoadObjectField<OrderedHashSet>(CAST(receiver), JSMap::kTableOffset);
- VARIABLE(entry_start_position_or_hash, MachineType::PointerRepresentation(),
- IntPtrConstant(0));
+ TVARIABLE(IntPtrT, entry_start_position_or_hash, IntPtrConstant(0));
Label entry_found(this), not_found(this);
- TryLookupOrderedHashTableIndex<OrderedHashSet>(table, key, context,
- &entry_start_position_or_hash,
- &entry_found, &not_found);
+ TryLookupOrderedHashTableIndex<OrderedHashSet>(
+ table, key, &entry_start_position_or_hash, &entry_found, &not_found);
BIND(&not_found);
Return(FalseConstant());
@@ -1869,29 +1955,30 @@ TF_BUILTIN(SetPrototypeDelete, CollectionsBuiltinsAssembler) {
}
TF_BUILTIN(MapPrototypeEntries, CollectionsBuiltinsAssembler) {
- Node* const receiver = Parameter(Descriptor::kReceiver);
- Node* const context = Parameter(Descriptor::kContext);
+ const TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
ThrowIfNotInstanceType(context, receiver, JS_MAP_TYPE,
"Map.prototype.entries");
Return(AllocateJSCollectionIterator<JSMapIterator>(
- context, Context::MAP_KEY_VALUE_ITERATOR_MAP_INDEX, receiver));
+ context, Context::MAP_KEY_VALUE_ITERATOR_MAP_INDEX, CAST(receiver)));
}
TF_BUILTIN(MapPrototypeGetSize, CollectionsBuiltinsAssembler) {
- Node* const receiver = Parameter(Descriptor::kReceiver);
- Node* const context = Parameter(Descriptor::kContext);
+ const TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
ThrowIfNotInstanceType(context, receiver, JS_MAP_TYPE,
"get Map.prototype.size");
TNode<OrderedHashMap> const table =
- CAST(LoadObjectField(receiver, JSMap::kTableOffset));
+ LoadObjectField<OrderedHashMap>(CAST(receiver), JSMap::kTableOffset);
Return(LoadObjectField(table, OrderedHashMap::NumberOfElementsOffset()));
}
TF_BUILTIN(MapPrototypeForEach, CollectionsBuiltinsAssembler) {
const char* const kMethodName = "Map.prototype.forEach";
- Node* const argc = Parameter(Descriptor::kJSActualArgumentsCount);
- Node* const context = Parameter(Descriptor::kContext);
- CodeStubArguments args(this, ChangeInt32ToIntPtr(argc));
+ TNode<Int32T> argc =
+ UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount));
+ const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ CodeStubArguments args(this, argc);
TNode<Object> const receiver = args.GetReceiver();
TNode<Object> const callback = args.GetOptionalArgumentValue(0);
TNode<Object> const this_arg = args.GetOptionalArgumentValue(1);
@@ -1914,8 +2001,8 @@ TF_BUILTIN(MapPrototypeForEach, CollectionsBuiltinsAssembler) {
// the {receiver} while we're iterating.
TNode<IntPtrT> index = var_index.value();
TNode<OrderedHashMap> table = var_table.value();
- std::tie(table, index) =
- Transition<OrderedHashMap>(table, index, [](Node*, Node*) {});
+ std::tie(table, index) = Transition<OrderedHashMap>(
+ table, index, [](const TNode<OrderedHashMap>, const TNode<IntPtrT>) {});
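  // Note: the empty callback is deliberate: forEach keeps (table, index) in
  // local variables, so a table transition has no iterator state to persist.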
// Read the next entry from the {table}, skipping holes.
TNode<Object> entry_key;
@@ -1951,31 +2038,32 @@ TF_BUILTIN(MapPrototypeForEach, CollectionsBuiltinsAssembler) {
}
TF_BUILTIN(MapPrototypeKeys, CollectionsBuiltinsAssembler) {
- Node* const receiver = Parameter(Descriptor::kReceiver);
- Node* const context = Parameter(Descriptor::kContext);
+ const TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
ThrowIfNotInstanceType(context, receiver, JS_MAP_TYPE, "Map.prototype.keys");
Return(AllocateJSCollectionIterator<JSMapIterator>(
- context, Context::MAP_KEY_ITERATOR_MAP_INDEX, receiver));
+ context, Context::MAP_KEY_ITERATOR_MAP_INDEX, CAST(receiver)));
}
TF_BUILTIN(MapPrototypeValues, CollectionsBuiltinsAssembler) {
- Node* const receiver = Parameter(Descriptor::kReceiver);
- Node* const context = Parameter(Descriptor::kContext);
+ const TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
ThrowIfNotInstanceType(context, receiver, JS_MAP_TYPE,
"Map.prototype.values");
Return(AllocateJSCollectionIterator<JSMapIterator>(
- context, Context::MAP_VALUE_ITERATOR_MAP_INDEX, receiver));
+ context, Context::MAP_VALUE_ITERATOR_MAP_INDEX, CAST(receiver)));
}
TF_BUILTIN(MapIteratorPrototypeNext, CollectionsBuiltinsAssembler) {
const char* const kMethodName = "Map Iterator.prototype.next";
- Node* const receiver = Parameter(Descriptor::kReceiver);
- Node* const context = Parameter(Descriptor::kContext);
+ const TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
// Ensure that the {receiver} is actually a JSMapIterator.
Label if_receiver_valid(this), if_receiver_invalid(this, Label::kDeferred);
GotoIf(TaggedIsSmi(receiver), &if_receiver_invalid);
- TNode<Uint16T> const receiver_instance_type = LoadInstanceType(receiver);
+ TNode<Uint16T> const receiver_instance_type =
+ LoadInstanceType(CAST(receiver));
GotoIf(
InstanceTypeEqual(receiver_instance_type, JS_MAP_KEY_VALUE_ITERATOR_TYPE),
&if_receiver_valid);
@@ -1989,8 +2077,8 @@ TF_BUILTIN(MapIteratorPrototypeNext, CollectionsBuiltinsAssembler) {
BIND(&if_receiver_valid);
// Check if the {receiver} is exhausted.
- VARIABLE(var_done, MachineRepresentation::kTagged, TrueConstant());
- VARIABLE(var_value, MachineRepresentation::kTagged, UndefinedConstant());
+ TVARIABLE(Oddball, var_done, TrueConstant());
+ TVARIABLE(Object, var_value, UndefinedConstant());
Label return_value(this, {&var_done, &var_value}), return_entry(this),
return_end(this, Label::kDeferred);
@@ -2007,22 +2095,22 @@ TF_BUILTIN(MapIteratorPrototypeNext, CollectionsBuiltinsAssembler) {
NextSkipHoles<OrderedHashMap>(table, index, &return_end);
StoreObjectFieldNoWriteBarrier(receiver, JSMapIterator::kIndexOffset,
SmiTag(index));
- var_value.Bind(entry_key);
- var_done.Bind(FalseConstant());
+ var_value = entry_key;
+ var_done = FalseConstant();
// Check how to return the {key} (depending on {receiver} type).
GotoIf(InstanceTypeEqual(receiver_instance_type, JS_MAP_KEY_ITERATOR_TYPE),
&return_value);
- var_value.Bind(LoadFixedArrayElement(
+ var_value = LoadFixedArrayElement(
table, entry_start_position,
(OrderedHashMap::HashTableStartIndex() + OrderedHashMap::kValueOffset) *
- kTaggedSize));
+ kTaggedSize);
Branch(InstanceTypeEqual(receiver_instance_type, JS_MAP_VALUE_ITERATOR_TYPE),
&return_value, &return_entry);
BIND(&return_entry);
{
- Node* result =
+ TNode<JSObject> result =
AllocateJSIteratorResultForEntry(context, entry_key, var_value.value());
Return(result);
}
@@ -2043,23 +2131,22 @@ TF_BUILTIN(MapIteratorPrototypeNext, CollectionsBuiltinsAssembler) {
}
TF_BUILTIN(SetPrototypeHas, CollectionsBuiltinsAssembler) {
- Node* const receiver = Parameter(Descriptor::kReceiver);
- Node* const key = Parameter(Descriptor::kKey);
- Node* const context = Parameter(Descriptor::kContext);
+ const TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ const TNode<Object> key = CAST(Parameter(Descriptor::kKey));
+ const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
ThrowIfNotInstanceType(context, receiver, JS_SET_TYPE, "Set.prototype.has");
- TNode<Object> const table = LoadObjectField(receiver, JSMap::kTableOffset);
+ TNode<Object> const table =
+ LoadObjectField(CAST(receiver), JSMap::kTableOffset);
- VARIABLE(entry_start_position, MachineType::PointerRepresentation(),
- IntPtrConstant(0));
- VARIABLE(result, MachineRepresentation::kTaggedSigned, IntPtrConstant(0));
+ TVARIABLE(IntPtrT, entry_start_position, IntPtrConstant(0));
Label if_key_smi(this), if_key_string(this), if_key_heap_number(this),
if_key_bigint(this), entry_found(this), not_found(this), done(this);
GotoIf(TaggedIsSmi(key), &if_key_smi);
- TNode<Map> key_map = LoadMap(key);
+ TNode<Map> key_map = LoadMap(CAST(key));
TNode<Uint16T> key_instance_type = LoadMapInstanceType(key_map);
GotoIf(IsStringInstanceType(key_instance_type), &if_key_string);
@@ -2067,30 +2154,34 @@ TF_BUILTIN(SetPrototypeHas, CollectionsBuiltinsAssembler) {
GotoIf(IsBigIntInstanceType(key_instance_type), &if_key_bigint);
FindOrderedHashTableEntryForOtherKey<OrderedHashSet>(
- context, table, key, &entry_start_position, &entry_found, &not_found);
+ CAST(table), CAST(key), &entry_start_position, &entry_found, &not_found);
BIND(&if_key_smi);
{
FindOrderedHashTableEntryForSmiKey<OrderedHashSet>(
- table, key, &entry_start_position, &entry_found, &not_found);
+ CAST(table), CAST(key), &entry_start_position, &entry_found,
+ &not_found);
}
BIND(&if_key_string);
{
FindOrderedHashTableEntryForStringKey<OrderedHashSet>(
- context, table, key, &entry_start_position, &entry_found, &not_found);
+ CAST(table), CAST(key), &entry_start_position, &entry_found,
+ &not_found);
}
BIND(&if_key_heap_number);
{
FindOrderedHashTableEntryForHeapNumberKey<OrderedHashSet>(
- context, table, key, &entry_start_position, &entry_found, &not_found);
+ CAST(table), CAST(key), &entry_start_position, &entry_found,
+ &not_found);
}
BIND(&if_key_bigint);
{
FindOrderedHashTableEntryForBigIntKey<OrderedHashSet>(
- context, table, key, &entry_start_position, &entry_found, &not_found);
+ CAST(table), CAST(key), &entry_start_position, &entry_found,
+ &not_found);
}
BIND(&entry_found);
@@ -2101,29 +2192,30 @@ TF_BUILTIN(SetPrototypeHas, CollectionsBuiltinsAssembler) {
}
TF_BUILTIN(SetPrototypeEntries, CollectionsBuiltinsAssembler) {
- Node* const receiver = Parameter(Descriptor::kReceiver);
- Node* const context = Parameter(Descriptor::kContext);
+ const TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
ThrowIfNotInstanceType(context, receiver, JS_SET_TYPE,
"Set.prototype.entries");
Return(AllocateJSCollectionIterator<JSSetIterator>(
- context, Context::SET_KEY_VALUE_ITERATOR_MAP_INDEX, receiver));
+ context, Context::SET_KEY_VALUE_ITERATOR_MAP_INDEX, CAST(receiver)));
}
TF_BUILTIN(SetPrototypeGetSize, CollectionsBuiltinsAssembler) {
- Node* const receiver = Parameter(Descriptor::kReceiver);
- Node* const context = Parameter(Descriptor::kContext);
+ const TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
ThrowIfNotInstanceType(context, receiver, JS_SET_TYPE,
"get Set.prototype.size");
TNode<OrderedHashSet> const table =
- CAST(LoadObjectField(receiver, JSSet::kTableOffset));
+ LoadObjectField<OrderedHashSet>(CAST(receiver), JSSet::kTableOffset);
Return(LoadObjectField(table, OrderedHashSet::NumberOfElementsOffset()));
}
TF_BUILTIN(SetPrototypeForEach, CollectionsBuiltinsAssembler) {
const char* const kMethodName = "Set.prototype.forEach";
- Node* const argc = Parameter(Descriptor::kJSActualArgumentsCount);
- Node* const context = Parameter(Descriptor::kContext);
- CodeStubArguments args(this, ChangeInt32ToIntPtr(argc));
+ TNode<Int32T> argc =
+ UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount));
+ const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ CodeStubArguments args(this, argc);
TNode<Object> const receiver = args.GetReceiver();
TNode<Object> const callback = args.GetOptionalArgumentValue(0);
TNode<Object> const this_arg = args.GetOptionalArgumentValue(1);
@@ -2146,12 +2238,12 @@ TF_BUILTIN(SetPrototypeForEach, CollectionsBuiltinsAssembler) {
// the {receiver} while we're iterating.
TNode<IntPtrT> index = var_index.value();
TNode<OrderedHashSet> table = var_table.value();
- std::tie(table, index) =
- Transition<OrderedHashSet>(table, index, [](Node*, Node*) {});
+ std::tie(table, index) = Transition<OrderedHashSet>(
+ table, index, [](const TNode<OrderedHashSet>, const TNode<IntPtrT>) {});
// Read the next entry from the {table}, skipping holes.
- Node* entry_key;
- Node* entry_start_position;
+ TNode<Object> entry_key;
+ TNode<IntPtrT> entry_start_position;
std::tie(entry_key, entry_start_position, index) =
NextSkipHoles<OrderedHashSet>(table, index, &done_loop);
@@ -2176,23 +2268,24 @@ TF_BUILTIN(SetPrototypeForEach, CollectionsBuiltinsAssembler) {
}
TF_BUILTIN(SetPrototypeValues, CollectionsBuiltinsAssembler) {
- Node* const receiver = Parameter(Descriptor::kReceiver);
- Node* const context = Parameter(Descriptor::kContext);
+ const TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
ThrowIfNotInstanceType(context, receiver, JS_SET_TYPE,
"Set.prototype.values");
Return(AllocateJSCollectionIterator<JSSetIterator>(
- context, Context::SET_VALUE_ITERATOR_MAP_INDEX, receiver));
+ context, Context::SET_VALUE_ITERATOR_MAP_INDEX, CAST(receiver)));
}
TF_BUILTIN(SetIteratorPrototypeNext, CollectionsBuiltinsAssembler) {
const char* const kMethodName = "Set Iterator.prototype.next";
- Node* const receiver = Parameter(Descriptor::kReceiver);
- Node* const context = Parameter(Descriptor::kContext);
+ const TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
// Ensure that the {receiver} is actually a JSSetIterator.
Label if_receiver_valid(this), if_receiver_invalid(this, Label::kDeferred);
GotoIf(TaggedIsSmi(receiver), &if_receiver_invalid);
- TNode<Uint16T> const receiver_instance_type = LoadInstanceType(receiver);
+ TNode<Uint16T> const receiver_instance_type =
+ LoadInstanceType(CAST(receiver));
GotoIf(InstanceTypeEqual(receiver_instance_type, JS_SET_VALUE_ITERATOR_TYPE),
&if_receiver_valid);
Branch(
@@ -2204,8 +2297,8 @@ TF_BUILTIN(SetIteratorPrototypeNext, CollectionsBuiltinsAssembler) {
BIND(&if_receiver_valid);
// Check if the {receiver} is exhausted.
- VARIABLE(var_done, MachineRepresentation::kTagged, TrueConstant());
- VARIABLE(var_value, MachineRepresentation::kTagged, UndefinedConstant());
+ TVARIABLE(Oddball, var_done, TrueConstant());
+ TVARIABLE(Object, var_value, UndefinedConstant());
Label return_value(this, {&var_done, &var_value}), return_entry(this),
return_end(this, Label::kDeferred);
@@ -2216,14 +2309,14 @@ TF_BUILTIN(SetIteratorPrototypeNext, CollectionsBuiltinsAssembler) {
TransitionAndUpdate<JSSetIterator, OrderedHashSet>(CAST(receiver));
// Read the next entry from the {table}, skipping holes.
- Node* entry_key;
- Node* entry_start_position;
+ TNode<Object> entry_key;
+ TNode<IntPtrT> entry_start_position;
std::tie(entry_key, entry_start_position, index) =
NextSkipHoles<OrderedHashSet>(table, index, &return_end);
StoreObjectFieldNoWriteBarrier(receiver, JSSetIterator::kIndexOffset,
SmiTag(index));
- var_value.Bind(entry_key);
- var_done.Bind(FalseConstant());
+ var_value = entry_key;
+ var_done = FalseConstant();
// Check how to return the {key} (depending on {receiver} type).
Branch(InstanceTypeEqual(receiver_instance_type, JS_SET_VALUE_ITERATOR_TYPE),
@@ -2231,8 +2324,8 @@ TF_BUILTIN(SetIteratorPrototypeNext, CollectionsBuiltinsAssembler) {
BIND(&return_entry);
{
- Node* result = AllocateJSIteratorResultForEntry(context, var_value.value(),
- var_value.value());
+ TNode<JSObject> result = AllocateJSIteratorResultForEntry(
+ context, var_value.value(), var_value.value());
Return(result);
}
@@ -2253,14 +2346,14 @@ TF_BUILTIN(SetIteratorPrototypeNext, CollectionsBuiltinsAssembler) {
template <typename CollectionType>
void CollectionsBuiltinsAssembler::TryLookupOrderedHashTableIndex(
- Node* const table, Node* const key, Node* const context, Variable* result,
- Label* if_entry_found, Label* if_not_found) {
+ const TNode<CollectionType> table, const TNode<Object> key,
+ TVariable<IntPtrT>* result, Label* if_entry_found, Label* if_not_found) {
Label if_key_smi(this), if_key_string(this), if_key_heap_number(this),
if_key_bigint(this);
GotoIf(TaggedIsSmi(key), &if_key_smi);
- TNode<Map> key_map = LoadMap(key);
+ TNode<Map> key_map = LoadMap(CAST(key));
TNode<Uint16T> key_instance_type = LoadMapInstanceType(key_map);
GotoIf(IsStringInstanceType(key_instance_type), &if_key_string);
@@ -2268,44 +2361,42 @@ void CollectionsBuiltinsAssembler::TryLookupOrderedHashTableIndex(
GotoIf(IsBigIntInstanceType(key_instance_type), &if_key_bigint);
FindOrderedHashTableEntryForOtherKey<CollectionType>(
- context, table, key, result, if_entry_found, if_not_found);
+ table, CAST(key), result, if_entry_found, if_not_found);
BIND(&if_key_smi);
{
FindOrderedHashTableEntryForSmiKey<CollectionType>(
- table, key, result, if_entry_found, if_not_found);
+ table, CAST(key), result, if_entry_found, if_not_found);
}
BIND(&if_key_string);
{
FindOrderedHashTableEntryForStringKey<CollectionType>(
- context, table, key, result, if_entry_found, if_not_found);
+ table, CAST(key), result, if_entry_found, if_not_found);
}
BIND(&if_key_heap_number);
{
FindOrderedHashTableEntryForHeapNumberKey<CollectionType>(
- context, table, key, result, if_entry_found, if_not_found);
+ table, CAST(key), result, if_entry_found, if_not_found);
}
BIND(&if_key_bigint);
{
FindOrderedHashTableEntryForBigIntKey<CollectionType>(
- context, table, key, result, if_entry_found, if_not_found);
+ table, CAST(key), result, if_entry_found, if_not_found);
}
}
TF_BUILTIN(FindOrderedHashMapEntry, CollectionsBuiltinsAssembler) {
- Node* const table = Parameter(Descriptor::kTable);
- Node* const key = Parameter(Descriptor::kKey);
- Node* const context = Parameter(Descriptor::kContext);
+ const TNode<OrderedHashMap> table = CAST(Parameter(Descriptor::kTable));
+ const TNode<Object> key = CAST(Parameter(Descriptor::kKey));
- VARIABLE(entry_start_position, MachineType::PointerRepresentation(),
- IntPtrConstant(0));
+ TVARIABLE(IntPtrT, entry_start_position, IntPtrConstant(0));
Label entry_found(this), not_found(this);
TryLookupOrderedHashTableIndex<OrderedHashMap>(
- table, key, context, &entry_start_position, &entry_found, &not_found);
+ table, key, &entry_start_position, &entry_found, &not_found);
BIND(&entry_found);
Return(SmiTag(entry_start_position.value()));
@@ -2324,8 +2415,8 @@ class WeakCollectionsBuiltinsAssembler : public BaseCollectionsAssembler {
TNode<Object> key, TNode<Object> value,
TNode<IntPtrT> number_of_elements);
- TNode<Object> AllocateTable(Variant variant, TNode<Context> context,
- TNode<IntPtrT> at_least_space_for) override;
+ TNode<HeapObject> AllocateTable(Variant variant,
+ TNode<IntPtrT> at_least_space_for) override;
  // Generates and sets the identity hash for a JSReceiver.
TNode<Smi> CreateIdentityHash(TNode<Object> receiver);
@@ -2390,9 +2481,8 @@ void WeakCollectionsBuiltinsAssembler::AddEntry(
SmiFromIntPtr(number_of_elements), SKIP_WRITE_BARRIER);
}
-TNode<Object> WeakCollectionsBuiltinsAssembler::AllocateTable(
- Variant variant, TNode<Context> context,
- TNode<IntPtrT> at_least_space_for) {
+TNode<HeapObject> WeakCollectionsBuiltinsAssembler::AllocateTable(
+ Variant variant, TNode<IntPtrT> at_least_space_for) {
// See HashTable::New().
CSA_ASSERT(this,
IntPtrLessThanOrEqual(IntPtrConstant(0), at_least_space_for));
@@ -2446,8 +2536,7 @@ TNode<IntPtrT> WeakCollectionsBuiltinsAssembler::FindKeyIndex(
TVARIABLE(IntPtrT, var_entry, WordAnd(key_hash, entry_mask));
TVARIABLE(IntPtrT, var_count, IntPtrConstant(0));
- Variable* loop_vars[] = {&var_count, &var_entry};
- Label loop(this, arraysize(loop_vars), loop_vars), if_found(this);
+ Label loop(this, {&var_count, &var_entry}), if_found(this);
Goto(&loop);
BIND(&loop);
TNode<IntPtrT> key_index;
@@ -2631,9 +2720,9 @@ TF_BUILTIN(WeakMapLookupHashIndex, WeakCollectionsBuiltinsAssembler) {
}
TF_BUILTIN(WeakMapGet, WeakCollectionsBuiltinsAssembler) {
- Node* const receiver = Parameter(Descriptor::kReceiver);
- Node* const key = Parameter(Descriptor::kKey);
- Node* const context = Parameter(Descriptor::kContext);
+ const TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ const TNode<Object> key = CAST(Parameter(Descriptor::kKey));
+ const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
Label return_undefined(this);
@@ -2653,9 +2742,9 @@ TF_BUILTIN(WeakMapGet, WeakCollectionsBuiltinsAssembler) {
}
TF_BUILTIN(WeakMapPrototypeHas, WeakCollectionsBuiltinsAssembler) {
- Node* const receiver = Parameter(Descriptor::kReceiver);
- Node* const key = Parameter(Descriptor::kKey);
- Node* const context = Parameter(Descriptor::kContext);
+ const TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ const TNode<Object> key = CAST(Parameter(Descriptor::kKey));
+ const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
Label return_false(this);
@@ -2817,9 +2906,9 @@ TF_BUILTIN(WeakSetPrototypeDelete, CodeStubAssembler) {
}
TF_BUILTIN(WeakSetPrototypeHas, WeakCollectionsBuiltinsAssembler) {
- Node* const receiver = Parameter(Descriptor::kReceiver);
- Node* const key = Parameter(Descriptor::kKey);
- Node* const context = Parameter(Descriptor::kContext);
+ const TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ const TNode<Object> key = CAST(Parameter(Descriptor::kKey));
+ const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
Label return_false(this);
diff --git a/deps/v8/src/builtins/builtins-collections-gen.h b/deps/v8/src/builtins/builtins-collections-gen.h
index 2bde108e9a..a132557e3c 100644
--- a/deps/v8/src/builtins/builtins-collections-gen.h
+++ b/deps/v8/src/builtins/builtins-collections-gen.h
@@ -11,13 +11,13 @@ namespace v8 {
namespace internal {
void BranchIfIterableWithOriginalKeyOrValueMapIterator(
- compiler::CodeAssemblerState* state, compiler::TNode<Object> iterable,
- compiler::TNode<Context> context, compiler::CodeAssemblerLabel* if_true,
+ compiler::CodeAssemblerState* state, TNode<Object> iterable,
+ TNode<Context> context, compiler::CodeAssemblerLabel* if_true,
compiler::CodeAssemblerLabel* if_false);
void BranchIfIterableWithOriginalValueSetIterator(
- compiler::CodeAssemblerState* state, compiler::TNode<Object> iterable,
- compiler::TNode<Context> context, compiler::CodeAssemblerLabel* if_true,
+ compiler::CodeAssemblerState* state, TNode<Object> iterable,
+ TNode<Context> context, compiler::CodeAssemblerLabel* if_true,
compiler::CodeAssemblerLabel* if_false);
} // namespace internal
diff --git a/deps/v8/src/builtins/builtins-console-gen.cc b/deps/v8/src/builtins/builtins-console-gen.cc
index 1d6a22f611..8a4c8b83da 100644
--- a/deps/v8/src/builtins/builtins-console-gen.cc
+++ b/deps/v8/src/builtins/builtins-console-gen.cc
@@ -15,15 +15,13 @@ TF_BUILTIN(FastConsoleAssert, CodeStubAssembler) {
Label runtime(this);
Label out(this);
- // TODO(ishell): use constants from Descriptor once the JSFunction linkage
- // arguments are reordered.
TNode<Int32T> argc =
UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount));
- Node* context = Parameter(Descriptor::kContext);
- Node* new_target = Parameter(Descriptor::kJSNewTarget);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> new_target = CAST(Parameter(Descriptor::kJSNewTarget));
GotoIf(Word32Equal(argc, Int32Constant(0)), &runtime);
- CodeStubArguments args(this, ChangeInt32ToIntPtr(argc));
+ CodeStubArguments args(this, argc);
BranchIfToBooleanIsTrue(args.AtIndex(0), &out, &runtime);
BIND(&out);
args.PopAndReturn(UndefinedConstant());
diff --git a/deps/v8/src/builtins/builtins-console.cc b/deps/v8/src/builtins/builtins-console.cc
index 28c9261ed4..bc743b6e70 100644
--- a/deps/v8/src/builtins/builtins-console.cc
+++ b/deps/v8/src/builtins/builtins-console.cc
@@ -39,8 +39,7 @@ namespace internal {
namespace {
void ConsoleCall(
- Isolate* isolate,
- internal::BuiltinArguments& args, // NOLINT(runtime/references)
+ Isolate* isolate, const internal::BuiltinArguments& args,
void (debug::ConsoleDelegate::*func)(const v8::debug::ConsoleCallArguments&,
const v8::debug::ConsoleContext&)) {
CHECK(!isolate->has_pending_exception());
diff --git a/deps/v8/src/builtins/builtins-constructor-gen.cc b/deps/v8/src/builtins/builtins-constructor-gen.cc
index 856718cedf..bc03e86f79 100644
--- a/deps/v8/src/builtins/builtins-constructor-gen.cc
+++ b/deps/v8/src/builtins/builtins-constructor-gen.cc
@@ -57,12 +57,11 @@ TF_BUILTIN(ConstructWithSpread, CallOrConstructBuiltinsAssembler) {
using Node = compiler::Node;
TF_BUILTIN(FastNewClosure, ConstructorBuiltinsAssembler) {
- Node* shared_function_info = Parameter(Descriptor::kSharedFunctionInfo);
- Node* feedback_cell = Parameter(Descriptor::kFeedbackCell);
- Node* context = Parameter(Descriptor::kContext);
-
- CSA_ASSERT(this, IsFeedbackCell(feedback_cell));
- CSA_ASSERT(this, IsSharedFunctionInfo(shared_function_info));
+ TNode<SharedFunctionInfo> shared_function_info =
+ CAST(Parameter(Descriptor::kSharedFunctionInfo));
+ TNode<FeedbackCell> feedback_cell =
+ CAST(Parameter(Descriptor::kFeedbackCell));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
IncrementCounter(isolate()->counters()->fast_new_closure_total(), 1);
@@ -90,9 +89,8 @@ TF_BUILTIN(FastNewClosure, ConstructorBuiltinsAssembler) {
// The calculation of |function_map_index| must be in sync with
// SharedFunctionInfo::function_map_index().
- Node* const flags =
- LoadObjectField(shared_function_info, SharedFunctionInfo::kFlagsOffset,
- MachineType::Uint32());
+ TNode<Uint32T> flags = LoadObjectField<Uint32T>(
+ shared_function_info, SharedFunctionInfo::kFlagsOffset);
TNode<IntPtrT> const function_map_index = Signed(IntPtrAdd(
DecodeWordFromWord32<SharedFunctionInfo::FunctionMapIndexBits>(flags),
IntPtrConstant(Context::FIRST_FUNCTION_MAP_INDEX)));
@@ -161,7 +159,7 @@ TF_BUILTIN(FastNewObject, ConstructorBuiltinsAssembler) {
TailCallRuntime(Runtime::kNewObject, context, target, new_target);
}
-compiler::TNode<JSObject> ConstructorBuiltinsAssembler::EmitFastNewObject(
+TNode<JSObject> ConstructorBuiltinsAssembler::EmitFastNewObject(
SloppyTNode<Context> context, SloppyTNode<JSFunction> target,
SloppyTNode<JSReceiver> new_target) {
TVARIABLE(JSObject, var_obj);
@@ -178,7 +176,7 @@ compiler::TNode<JSObject> ConstructorBuiltinsAssembler::EmitFastNewObject(
return var_obj.value();
}
-compiler::TNode<JSObject> ConstructorBuiltinsAssembler::EmitFastNewObject(
+TNode<JSObject> ConstructorBuiltinsAssembler::EmitFastNewObject(
SloppyTNode<Context> context, SloppyTNode<JSFunction> target,
SloppyTNode<JSReceiver> new_target, Label* call_runtime) {
// Verify that the new target is a JSFunction.
@@ -202,17 +200,17 @@ compiler::TNode<JSObject> ConstructorBuiltinsAssembler::EmitFastNewObject(
LoadObjectField(initial_map, Map::kConstructorOrBackPointerOffset);
GotoIf(TaggedNotEqual(target, new_target_constructor), call_runtime);
- VARIABLE(properties, MachineRepresentation::kTagged);
+ TVARIABLE(HeapObject, properties);
Label instantiate_map(this), allocate_properties(this);
GotoIf(IsDictionaryMap(initial_map), &allocate_properties);
{
- properties.Bind(EmptyFixedArrayConstant());
+ properties = EmptyFixedArrayConstant();
Goto(&instantiate_map);
}
BIND(&allocate_properties);
{
- properties.Bind(AllocateNameDictionary(NameDictionary::kInitialCapacity));
+ properties = AllocateNameDictionary(NameDictionary::kInitialCapacity);
Goto(&instantiate_map);
}
@@ -221,11 +219,12 @@ compiler::TNode<JSObject> ConstructorBuiltinsAssembler::EmitFastNewObject(
kNone, kWithSlackTracking);
}
-Node* ConstructorBuiltinsAssembler::EmitFastNewFunctionContext(
- Node* scope_info, Node* slots_uint32, Node* context, ScopeType scope_type) {
- TNode<IntPtrT> slots = Signed(ChangeUint32ToWord(slots_uint32));
- TNode<IntPtrT> size = ElementOffsetFromIndex(
- slots, PACKED_ELEMENTS, INTPTR_PARAMETERS, Context::kTodoHeaderSize);
+TNode<Context> ConstructorBuiltinsAssembler::EmitFastNewFunctionContext(
+ TNode<ScopeInfo> scope_info, TNode<Uint32T> slots, TNode<Context> context,
+ ScopeType scope_type) {
+ TNode<IntPtrT> slots_intptr = Signed(ChangeUint32ToWord(slots));
+ TNode<IntPtrT> size = ElementOffsetFromIndex(slots_intptr, PACKED_ELEMENTS,
+ Context::kTodoHeaderSize);
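  // Note: size is Context::kTodoHeaderSize plus one tagged slot per context
  // variable, i.e. the header followed by slots_intptr * kTaggedSize bytes.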
// Create a new closure from the given function info in new space
TNode<Context> function_context =
@@ -246,7 +245,7 @@ Node* ConstructorBuiltinsAssembler::EmitFastNewFunctionContext(
StoreMapNoWriteBarrier(function_context, context_type);
TNode<IntPtrT> min_context_slots = IntPtrConstant(Context::MIN_CONTEXT_SLOTS);
// TODO(ishell): for now, length also includes MIN_CONTEXT_SLOTS.
- TNode<IntPtrT> length = IntPtrAdd(slots, min_context_slots);
+ TNode<IntPtrT> length = IntPtrAdd(slots_intptr, min_context_slots);
StoreObjectFieldNoWriteBarrier(function_context, Context::kLengthOffset,
SmiTag(length));
StoreObjectFieldNoWriteBarrier(function_context, Context::kScopeInfoOffset,
@@ -263,60 +262,60 @@ Node* ConstructorBuiltinsAssembler::EmitFastNewFunctionContext(
TNode<Oddball> undefined = UndefinedConstant();
TNode<IntPtrT> start_offset = IntPtrConstant(Context::kTodoHeaderSize);
CodeStubAssembler::VariableList vars(0, zone());
- BuildFastLoop(
+ BuildFastLoop<IntPtrT>(
vars, start_offset, size,
- [=](Node* offset) {
- StoreObjectFieldNoWriteBarrier(
- function_context, UncheckedCast<IntPtrT>(offset), undefined);
+ [=](TNode<IntPtrT> offset) {
+ StoreObjectFieldNoWriteBarrier(function_context, offset, undefined);
},
- kTaggedSize, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
+ kTaggedSize, IndexAdvanceMode::kPost);
return function_context;
}
TF_BUILTIN(FastNewFunctionContextEval, ConstructorBuiltinsAssembler) {
- Node* scope_info = Parameter(Descriptor::kScopeInfo);
- Node* slots = Parameter(Descriptor::kSlots);
- Node* context = Parameter(Descriptor::kContext);
+ TNode<ScopeInfo> scope_info = CAST(Parameter(Descriptor::kScopeInfo));
+ TNode<Uint32T> slots = UncheckedCast<Uint32T>(Parameter(Descriptor::kSlots));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
Return(EmitFastNewFunctionContext(scope_info, slots, context,
ScopeType::EVAL_SCOPE));
}
TF_BUILTIN(FastNewFunctionContextFunction, ConstructorBuiltinsAssembler) {
- Node* scope_info = Parameter(Descriptor::kScopeInfo);
- Node* slots = Parameter(Descriptor::kSlots);
- Node* context = Parameter(Descriptor::kContext);
+ TNode<ScopeInfo> scope_info = CAST(Parameter(Descriptor::kScopeInfo));
+ TNode<Uint32T> slots = UncheckedCast<Uint32T>(Parameter(Descriptor::kSlots));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
Return(EmitFastNewFunctionContext(scope_info, slots, context,
ScopeType::FUNCTION_SCOPE));
}
-Node* ConstructorBuiltinsAssembler::EmitCreateRegExpLiteral(
- Node* feedback_vector, Node* slot, Node* pattern, Node* flags,
- Node* context) {
+TNode<JSRegExp> ConstructorBuiltinsAssembler::EmitCreateRegExpLiteral(
+ TNode<HeapObject> maybe_feedback_vector, TNode<UintPtrT> slot,
+ TNode<Object> pattern, TNode<Smi> flags, TNode<Context> context) {
Label call_runtime(this, Label::kDeferred), end(this);
- GotoIf(IsUndefined(feedback_vector), &call_runtime);
+ GotoIf(IsUndefined(maybe_feedback_vector), &call_runtime);
- VARIABLE(result, MachineRepresentation::kTagged);
+ TVARIABLE(JSRegExp, result);
+ TNode<FeedbackVector> feedback_vector = CAST(maybe_feedback_vector);
TNode<Object> literal_site =
- CAST(LoadFeedbackVectorSlot(feedback_vector, slot, 0, INTPTR_PARAMETERS));
+ CAST(LoadFeedbackVectorSlot(feedback_vector, slot));
GotoIf(NotHasBoilerplate(literal_site), &call_runtime);
{
- Node* boilerplate = literal_site;
- CSA_ASSERT(this, IsJSRegExp(boilerplate));
+ TNode<JSRegExp> boilerplate = CAST(literal_site);
int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kTaggedSize;
TNode<HeapObject> copy = Allocate(size);
for (int offset = 0; offset < size; offset += kTaggedSize) {
TNode<Object> value = LoadObjectField(boilerplate, offset);
StoreObjectFieldNoWriteBarrier(copy, offset, value);
}
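    // Note: this is a shallow, field-by-field clone; the copy still shares
    // the boilerplate's backing data, such as the compiled pattern.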
- result.Bind(copy);
+ result = CAST(copy);
Goto(&end);
}
BIND(&call_runtime);
{
- result.Bind(CallRuntime(Runtime::kCreateRegExpLiteral, context,
- feedback_vector, SmiTag(slot), pattern, flags));
+ result = CAST(CallRuntime(Runtime::kCreateRegExpLiteral, context,
+ maybe_feedback_vector, SmiTag(Signed(slot)),
+ pattern, flags));
Goto(&end);
}
@@ -325,25 +324,26 @@ Node* ConstructorBuiltinsAssembler::EmitCreateRegExpLiteral(
}
TF_BUILTIN(CreateRegExpLiteral, ConstructorBuiltinsAssembler) {
- Node* feedback_vector = Parameter(Descriptor::kFeedbackVector);
- TNode<IntPtrT> slot = SmiUntag(Parameter(Descriptor::kSlot));
- Node* pattern = Parameter(Descriptor::kPattern);
- Node* flags = Parameter(Descriptor::kFlags);
- Node* context = Parameter(Descriptor::kContext);
- Node* result =
- EmitCreateRegExpLiteral(feedback_vector, slot, pattern, flags, context);
+ TNode<HeapObject> maybe_feedback_vector =
+ CAST(Parameter(Descriptor::kFeedbackVector));
+ TNode<UintPtrT> slot = Unsigned(SmiUntag(Parameter(Descriptor::kSlot)));
+ TNode<Object> pattern = CAST(Parameter(Descriptor::kPattern));
+ TNode<Smi> flags = CAST(Parameter(Descriptor::kFlags));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<JSRegExp> result = EmitCreateRegExpLiteral(maybe_feedback_vector, slot,
+ pattern, flags, context);
Return(result);
}
-Node* ConstructorBuiltinsAssembler::EmitCreateShallowArrayLiteral(
- Node* feedback_vector, Node* slot, Node* context, Label* call_runtime,
+TNode<JSArray> ConstructorBuiltinsAssembler::EmitCreateShallowArrayLiteral(
+ TNode<FeedbackVector> feedback_vector, TNode<UintPtrT> slot,
+ TNode<Context> context, Label* call_runtime,
AllocationSiteMode allocation_site_mode) {
Label zero_capacity(this), cow_elements(this), fast_elements(this),
return_result(this);
- VARIABLE(result, MachineRepresentation::kTagged);
TNode<Object> maybe_allocation_site =
- CAST(LoadFeedbackVectorSlot(feedback_vector, slot, 0, INTPTR_PARAMETERS));
+ CAST(LoadFeedbackVectorSlot(feedback_vector, slot));
GotoIf(NotHasBoilerplate(maybe_allocation_site), call_runtime);
TNode<AllocationSite> allocation_site = CAST(maybe_allocation_site);
@@ -358,10 +358,12 @@ Node* ConstructorBuiltinsAssembler::EmitCreateShallowArrayLiteral(
}
TF_BUILTIN(CreateShallowArrayLiteral, ConstructorBuiltinsAssembler) {
- Node* feedback_vector = Parameter(Descriptor::kFeedbackVector);
- TNode<IntPtrT> slot = SmiUntag(Parameter(Descriptor::kSlot));
- Node* constant_elements = Parameter(Descriptor::kConstantElements);
- Node* context = Parameter(Descriptor::kContext);
+ TNode<FeedbackVector> feedback_vector =
+ CAST(Parameter(Descriptor::kFeedbackVector));
+ TNode<UintPtrT> slot = Unsigned(SmiUntag(Parameter(Descriptor::kSlot)));
+ TNode<ArrayBoilerplateDescription> constant_elements =
+ CAST(Parameter(Descriptor::kConstantElements));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
Label call_runtime(this, Label::kDeferred);
Return(EmitCreateShallowArrayLiteral(feedback_vector, slot, context,
&call_runtime,
@@ -373,16 +375,18 @@ TF_BUILTIN(CreateShallowArrayLiteral, ConstructorBuiltinsAssembler) {
int const flags =
AggregateLiteral::kDisableMementos | AggregateLiteral::kIsShallow;
Return(CallRuntime(Runtime::kCreateArrayLiteral, context, feedback_vector,
- SmiTag(slot), constant_elements, SmiConstant(flags)));
+ SmiTag(Signed(slot)), constant_elements,
+ SmiConstant(flags)));
}
}
-Node* ConstructorBuiltinsAssembler::EmitCreateEmptyArrayLiteral(
- Node* feedback_vector, Node* slot, Node* context) {
+TNode<JSArray> ConstructorBuiltinsAssembler::EmitCreateEmptyArrayLiteral(
+ TNode<FeedbackVector> feedback_vector, TNode<UintPtrT> slot,
+ TNode<Context> context) {
// Array literals always have a valid AllocationSite to properly track
// elements transitions.
TNode<Object> maybe_allocation_site =
- CAST(LoadFeedbackVectorSlot(feedback_vector, slot, 0, INTPTR_PARAMETERS));
+ CAST(LoadFeedbackVectorSlot(feedback_vector, slot));
TVARIABLE(AllocationSite, allocation_site);
Label create_empty_array(this),
@@ -396,7 +400,7 @@ Node* ConstructorBuiltinsAssembler::EmitCreateEmptyArrayLiteral(
BIND(&initialize_allocation_site);
{
allocation_site =
- CreateAllocationSiteInFeedbackVector(feedback_vector, SmiTag(slot));
+ CreateAllocationSiteInFeedbackVector(feedback_vector, slot);
Goto(&create_empty_array);
}
@@ -418,17 +422,20 @@ Node* ConstructorBuiltinsAssembler::EmitCreateEmptyArrayLiteral(
}
TF_BUILTIN(CreateEmptyArrayLiteral, ConstructorBuiltinsAssembler) {
- Node* feedback_vector = Parameter(Descriptor::kFeedbackVector);
- TNode<IntPtrT> slot = SmiUntag(Parameter(Descriptor::kSlot));
- Node* context = Parameter(Descriptor::kContext);
- Node* result = EmitCreateEmptyArrayLiteral(feedback_vector, slot, context);
+ TNode<FeedbackVector> feedback_vector =
+ CAST(Parameter(Descriptor::kFeedbackVector));
+ TNode<UintPtrT> slot = Unsigned(SmiUntag(Parameter(Descriptor::kSlot)));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<JSArray> result =
+ EmitCreateEmptyArrayLiteral(feedback_vector, slot, context);
Return(result);
}
-Node* ConstructorBuiltinsAssembler::EmitCreateShallowObjectLiteral(
- Node* feedback_vector, Node* slot, Label* call_runtime) {
+TNode<HeapObject> ConstructorBuiltinsAssembler::EmitCreateShallowObjectLiteral(
+ TNode<FeedbackVector> feedback_vector, TNode<UintPtrT> slot,
+ Label* call_runtime) {
TNode<Object> maybe_allocation_site =
- CAST(LoadFeedbackVectorSlot(feedback_vector, slot, 0, INTPTR_PARAMETERS));
+ CAST(LoadFeedbackVectorSlot(feedback_vector, slot));
GotoIf(NotHasBoilerplate(maybe_allocation_site), call_runtime);
TNode<AllocationSite> allocation_site = CAST(maybe_allocation_site);
@@ -436,7 +443,7 @@ Node* ConstructorBuiltinsAssembler::EmitCreateShallowObjectLiteral(
TNode<Map> boilerplate_map = LoadMap(boilerplate);
CSA_ASSERT(this, IsJSObjectMap(boilerplate_map));
- VARIABLE(var_properties, MachineRepresentation::kTagged);
+ TVARIABLE(FixedArray, var_properties);
{
TNode<Uint32T> bit_field_3 = LoadMapBitField3(boilerplate_map);
GotoIf(IsSetWord32<Map::IsDeprecatedBit>(bit_field_3), call_runtime);
@@ -447,8 +454,8 @@ Node* ConstructorBuiltinsAssembler::EmitCreateShallowObjectLiteral(
BIND(&if_dictionary);
{
Comment("Copy dictionary properties");
- var_properties.Bind(CopyNameDictionary(
- CAST(LoadSlowProperties(boilerplate)), call_runtime));
+ var_properties = CopyNameDictionary(CAST(LoadSlowProperties(boilerplate)),
+ call_runtime);
// Slow objects have no in-object properties.
Goto(&done);
}
@@ -458,13 +465,13 @@ Node* ConstructorBuiltinsAssembler::EmitCreateShallowObjectLiteral(
TNode<HeapObject> boilerplate_properties =
LoadFastProperties(boilerplate);
GotoIfNot(IsEmptyFixedArray(boilerplate_properties), call_runtime);
- var_properties.Bind(EmptyFixedArrayConstant());
+ var_properties = EmptyFixedArrayConstant();
Goto(&done);
}
BIND(&done);
}
- VARIABLE(var_elements, MachineRepresentation::kTagged);
+ TVARIABLE(FixedArrayBase, var_elements);
{
// Copy the elements backing store, assuming that it's flat.
Label if_empty_fixed_array(this), if_copy_elements(this), done(this);
@@ -473,7 +480,7 @@ Node* ConstructorBuiltinsAssembler::EmitCreateShallowObjectLiteral(
&if_copy_elements);
BIND(&if_empty_fixed_array);
- var_elements.Bind(boilerplate_elements);
+ var_elements = boilerplate_elements;
Goto(&done);
BIND(&if_copy_elements);
@@ -483,7 +490,7 @@ Node* ConstructorBuiltinsAssembler::EmitCreateShallowObjectLiteral(
flags |= ExtractFixedArrayFlag::kAllFixedArrays;
flags |= ExtractFixedArrayFlag::kNewSpaceAllocationOnly;
flags |= ExtractFixedArrayFlag::kDontCopyCOW;
- var_elements.Bind(CloneFixedArray(boilerplate_elements, flags));
+ var_elements = CloneFixedArray(boilerplate_elements, flags);
Goto(&done);
BIND(&done);
}
@@ -563,18 +570,18 @@ Node* ConstructorBuiltinsAssembler::EmitCreateShallowObjectLiteral(
BIND(&continue_with_write_barrier);
{
Comment("Copy in-object properties slow");
- BuildFastLoop(
+ BuildFastLoop<IntPtrT>(
offset.value(), instance_size,
- [=](Node* offset) {
+ [=](TNode<IntPtrT> offset) {
// TODO(ishell): value decompression is not necessary here.
TNode<Object> field = LoadObjectField(boilerplate, offset);
StoreObjectFieldNoWriteBarrier(copy, offset, field);
},
- kTaggedSize, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
+ kTaggedSize, IndexAdvanceMode::kPost);
Comment("Copy mutable HeapNumber values");
- BuildFastLoop(
+ BuildFastLoop<IntPtrT>(
offset.value(), instance_size,
- [=](Node* offset) {
+ [=](TNode<IntPtrT> offset) {
TNode<Object> field = LoadObjectField(copy, offset);
Label copy_heap_number(this, Label::kDeferred), continue_loop(this);
// We only have to clone complex field values.
@@ -593,7 +600,7 @@ Node* ConstructorBuiltinsAssembler::EmitCreateShallowObjectLiteral(
}
BIND(&continue_loop);
},
- kTaggedSize, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
+ kTaggedSize, IndexAdvanceMode::kPost);
Goto(&done_init);
}
BIND(&done_init);
@@ -603,29 +610,30 @@ Node* ConstructorBuiltinsAssembler::EmitCreateShallowObjectLiteral(
TF_BUILTIN(CreateShallowObjectLiteral, ConstructorBuiltinsAssembler) {
Label call_runtime(this);
- Node* feedback_vector = Parameter(Descriptor::kFeedbackVector);
- TNode<IntPtrT> slot = SmiUntag(Parameter(Descriptor::kSlot));
- Node* copy =
+ TNode<FeedbackVector> feedback_vector =
+ CAST(Parameter(Descriptor::kFeedbackVector));
+ TNode<UintPtrT> slot = Unsigned(SmiUntag(Parameter(Descriptor::kSlot)));
+ TNode<HeapObject> copy =
EmitCreateShallowObjectLiteral(feedback_vector, slot, &call_runtime);
Return(copy);
BIND(&call_runtime);
- Node* object_boilerplate_description =
- Parameter(Descriptor::kObjectBoilerplateDescription);
- Node* flags = Parameter(Descriptor::kFlags);
- Node* context = Parameter(Descriptor::kContext);
+ TNode<ObjectBoilerplateDescription> object_boilerplate_description =
+ CAST(Parameter(Descriptor::kObjectBoilerplateDescription));
+ TNode<Smi> flags = CAST(Parameter(Descriptor::kFlags));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TailCallRuntime(Runtime::kCreateObjectLiteral, context, feedback_vector,
- SmiTag(slot), object_boilerplate_description, flags);
+ SmiTag(Signed(slot)), object_boilerplate_description, flags);
}
// Used by the CreateEmptyObjectLiteral bytecode and the Object constructor.
-Node* ConstructorBuiltinsAssembler::EmitCreateEmptyObjectLiteral(
- Node* context) {
+TNode<JSObject> ConstructorBuiltinsAssembler::EmitCreateEmptyObjectLiteral(
+ TNode<Context> context) {
TNode<NativeContext> native_context = LoadNativeContext(context);
TNode<JSFunction> object_function =
CAST(LoadContextElement(native_context, Context::OBJECT_FUNCTION_INDEX));
- TNode<Map> map = CAST(LoadObjectField(
- object_function, JSFunction::kPrototypeOrInitialMapOffset));
+ TNode<Map> map = LoadObjectField<Map>(
+ object_function, JSFunction::kPrototypeOrInitialMapOffset);
// Ensure that slack tracking is disabled for the map.
STATIC_ASSERT(Map::kNoSlackTracking == 0);
CSA_ASSERT(
@@ -642,10 +650,10 @@ TF_BUILTIN(ObjectConstructor, ConstructorBuiltinsAssembler) {
TNode<IntPtrT> argc =
ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
CodeStubArguments args(this, argc);
- Node* context = Parameter(Descriptor::kContext);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TNode<Object> new_target = CAST(Parameter(Descriptor::kJSNewTarget));
- VARIABLE(var_result, MachineRepresentation::kTagged);
+ TVARIABLE(Object, var_result);
Label if_subclass(this, Label::kDeferred), if_notsubclass(this),
return_result(this);
GotoIf(IsUndefined(new_target), &if_notsubclass);
@@ -654,9 +662,8 @@ TF_BUILTIN(ObjectConstructor, ConstructorBuiltinsAssembler) {
BIND(&if_subclass);
{
- TNode<Object> result =
+ var_result =
CallBuiltin(Builtins::kFastNewObject, context, target, new_target);
- var_result.Bind(result);
Goto(&return_result);
}
@@ -672,15 +679,13 @@ TF_BUILTIN(ObjectConstructor, ConstructorBuiltinsAssembler) {
BIND(&if_newobject);
{
- Node* result = EmitCreateEmptyObjectLiteral(context);
- var_result.Bind(result);
+ var_result = EmitCreateEmptyObjectLiteral(context);
Goto(&return_result);
}
BIND(&if_toobject);
{
- TNode<Object> result = CallBuiltin(Builtins::kToObject, context, value);
- var_result.Bind(result);
+ var_result = CallBuiltin(Builtins::kToObject, context, value);
Goto(&return_result);
}
}
@@ -691,13 +696,13 @@ TF_BUILTIN(ObjectConstructor, ConstructorBuiltinsAssembler) {
// ES #sec-number-constructor
TF_BUILTIN(NumberConstructor, ConstructorBuiltinsAssembler) {
- Node* context = Parameter(Descriptor::kContext);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TNode<IntPtrT> argc =
ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
CodeStubArguments args(this, argc);
// 1. If no arguments were passed to this function invocation, let n be +0.
- VARIABLE(var_n, MachineRepresentation::kTagged, SmiConstant(0));
+ TVARIABLE(Number, var_n, SmiConstant(0));
Label if_nloaded(this, &var_n);
GotoIf(IntPtrEqual(argc, IntPtrConstant(0)), &if_nloaded);
@@ -706,14 +711,14 @@ TF_BUILTIN(NumberConstructor, ConstructorBuiltinsAssembler) {
// b. If Type(prim) is BigInt, let n be the Number value for prim.
// c. Otherwise, let n be prim.
TNode<Object> value = args.AtIndex(0);
- var_n.Bind(ToNumber(context, value, BigIntHandling::kConvertToNumber));
+ var_n = ToNumber(context, value, BigIntHandling::kConvertToNumber);
Goto(&if_nloaded);
BIND(&if_nloaded);
{
// 3. If NewTarget is undefined, return n.
- Node* n_value = var_n.value();
- Node* new_target = Parameter(Descriptor::kJSNewTarget);
+ TNode<Number> n_value = var_n.value();
+ TNode<Object> new_target = CAST(Parameter(Descriptor::kJSNewTarget));
Label return_n(this), constructnumber(this, Label::kDeferred);
Branch(IsUndefined(new_target), &return_n, &constructnumber);
@@ -740,7 +745,7 @@ TF_BUILTIN(NumberConstructor, ConstructorBuiltinsAssembler) {
}
TF_BUILTIN(GenericLazyDeoptContinuation, ConstructorBuiltinsAssembler) {
- Node* result = Parameter(Descriptor::kResult);
+ TNode<Object> result = CAST(Parameter(Descriptor::kResult));
Return(result);
}
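
A note on the pattern running through this file: every untyped Node* is being
replaced by a TNode<T> obtained via CAST, turning type confusion into a
compile-time (or, in debug builds, a checked) error instead of a latent
miscompile. A minimal standalone sketch of the idea, with hypothetical names
and none of V8's actual machinery:

    #include <cassert>
    #include <cstdio>

    // Untyped IR node, as a polymorphic base class.
    struct Node { virtual ~Node() = default; };
    struct Smi : Node { int value; explicit Smi(int v) : value(v) {} };
    struct HeapObject : Node {};

    // TNode<T>: a Node* that carries its static type. It is only
    // constructible through Cast(), so a TNode<Smi> provably holds a Smi.
    template <typename T>
    class TNode {
     public:
      T* operator->() const { return ptr_; }

     private:
      template <typename U> friend TNode<U> Cast(Node* node);
      explicit TNode(T* ptr) : ptr_(ptr) {}
      T* ptr_;
    };

    // Cast(): checked here with an assert; the real CAST is likewise a
    // debug-checked downcast that costs nothing in release builds.
    template <typename T>
    TNode<T> Cast(Node* node) {
      T* typed = dynamic_cast<T*>(node);
      assert(typed != nullptr && "CAST failed: node has the wrong type");
      return TNode<T>(typed);
    }

    int main() {
      Smi forty_two(42);
      Node* untyped = &forty_two;             // what Parameter() returns
      TNode<Smi> typed = Cast<Smi>(untyped);  // what CAST(...) produces
      std::printf("%d\n", typed->value);      // typed access, no raw Node*
    }

Once parameters are typed this way, plain assignments to a TVARIABLE replace
the old var_result.Bind(...) calls seen removed throughout this diff.
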
diff --git a/deps/v8/src/builtins/builtins-constructor-gen.h b/deps/v8/src/builtins/builtins-constructor-gen.h
index 9208506c79..761a6c7adb 100644
--- a/deps/v8/src/builtins/builtins-constructor-gen.h
+++ b/deps/v8/src/builtins/builtins-constructor-gen.h
@@ -15,21 +15,28 @@ class ConstructorBuiltinsAssembler : public CodeStubAssembler {
explicit ConstructorBuiltinsAssembler(compiler::CodeAssemblerState* state)
: CodeStubAssembler(state) {}
- Node* EmitFastNewFunctionContext(Node* closure, Node* slots, Node* context,
- ScopeType scope_type);
-
- Node* EmitCreateRegExpLiteral(Node* feedback_vector, Node* slot,
- Node* pattern, Node* flags, Node* context);
- Node* EmitCreateShallowArrayLiteral(Node* feedback_vector, Node* slot,
- Node* context, Label* call_runtime,
- AllocationSiteMode allocation_site_mode);
-
- Node* EmitCreateEmptyArrayLiteral(Node* feedback_vector, Node* slot,
- Node* context);
-
- Node* EmitCreateShallowObjectLiteral(Node* feedback_vector, Node* slot,
- Label* call_runtime);
- Node* EmitCreateEmptyObjectLiteral(Node* context);
+ TNode<Context> EmitFastNewFunctionContext(TNode<ScopeInfo> scope_info,
+ TNode<Uint32T> slots,
+ TNode<Context> context,
+ ScopeType scope_type);
+
+ TNode<JSRegExp> EmitCreateRegExpLiteral(
+ TNode<HeapObject> maybe_feedback_vector, TNode<UintPtrT> slot,
+ TNode<Object> pattern, TNode<Smi> flags, TNode<Context> context);
+
+ TNode<JSArray> EmitCreateShallowArrayLiteral(
+ TNode<FeedbackVector> feedback_vector, TNode<UintPtrT> slot,
+ TNode<Context> context, Label* call_runtime,
+ AllocationSiteMode allocation_site_mode);
+
+ TNode<JSArray> EmitCreateEmptyArrayLiteral(
+ TNode<FeedbackVector> feedback_vector, TNode<UintPtrT> slot,
+ TNode<Context> context);
+
+ TNode<HeapObject> EmitCreateShallowObjectLiteral(
+ TNode<FeedbackVector> feedback_vector, TNode<UintPtrT> slot,
+ Label* call_runtime);
+ TNode<JSObject> EmitCreateEmptyObjectLiteral(TNode<Context> context);
TNode<JSObject> EmitFastNewObject(SloppyTNode<Context> context,
SloppyTNode<JSFunction> target,
diff --git a/deps/v8/src/builtins/builtins-conversion-gen.cc b/deps/v8/src/builtins/builtins-conversion-gen.cc
index 8a0c73b292..1666cbf6ac 100644
--- a/deps/v8/src/builtins/builtins-conversion-gen.cc
+++ b/deps/v8/src/builtins/builtins-conversion-gen.cc
@@ -18,16 +18,17 @@ class ConversionBuiltinsAssembler : public CodeStubAssembler {
: CodeStubAssembler(state) {}
protected:
- void Generate_NonPrimitiveToPrimitive(Node* context, Node* input,
+ void Generate_NonPrimitiveToPrimitive(TNode<Context> context,
+ TNode<Object> input,
ToPrimitiveHint hint);
- void Generate_OrdinaryToPrimitive(Node* context, Node* input,
+ void Generate_OrdinaryToPrimitive(TNode<Context> context, TNode<Object> input,
OrdinaryToPrimitiveHint hint);
};
// ES6 section 7.1.1 ToPrimitive ( input [ , PreferredType ] )
void ConversionBuiltinsAssembler::Generate_NonPrimitiveToPrimitive(
- Node* context, Node* input, ToPrimitiveHint hint) {
+ TNode<Context> context, TNode<Object> input, ToPrimitiveHint hint) {
// Lookup the @@toPrimitive property on the {input}.
TNode<Object> exotic_to_prim =
GetProperty(context, input, factory()->to_primitive_symbol());
@@ -42,14 +43,14 @@ void ConversionBuiltinsAssembler::Generate_NonPrimitiveToPrimitive(
CodeFactory::Call(isolate(), ConvertReceiverMode::kNotNullOrUndefined);
TNode<String> hint_string =
HeapConstant(factory()->ToPrimitiveHintString(hint));
- Node* result =
+ TNode<Object> result =
CallJS(callable, context, exotic_to_prim, input, hint_string);
// Verify that the {result} is actually a primitive.
Label if_resultisprimitive(this),
if_resultisnotprimitive(this, Label::kDeferred);
GotoIf(TaggedIsSmi(result), &if_resultisprimitive);
- TNode<Uint16T> result_instance_type = LoadInstanceType(result);
+ TNode<Uint16T> result_instance_type = LoadInstanceType(CAST(result));
Branch(IsPrimitiveInstanceType(result_instance_type), &if_resultisprimitive,
&if_resultisnotprimitive);
@@ -78,22 +79,22 @@ void ConversionBuiltinsAssembler::Generate_NonPrimitiveToPrimitive(
}
TF_BUILTIN(NonPrimitiveToPrimitive_Default, ConversionBuiltinsAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* input = Parameter(Descriptor::kArgument);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> input = CAST(Parameter(Descriptor::kArgument));
Generate_NonPrimitiveToPrimitive(context, input, ToPrimitiveHint::kDefault);
}
TF_BUILTIN(NonPrimitiveToPrimitive_Number, ConversionBuiltinsAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* input = Parameter(Descriptor::kArgument);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> input = CAST(Parameter(Descriptor::kArgument));
Generate_NonPrimitiveToPrimitive(context, input, ToPrimitiveHint::kNumber);
}
TF_BUILTIN(NonPrimitiveToPrimitive_String, ConversionBuiltinsAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* input = Parameter(Descriptor::kArgument);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> input = CAST(Parameter(Descriptor::kArgument));
Generate_NonPrimitiveToPrimitive(context, input, ToPrimitiveHint::kString);
}
@@ -105,22 +106,22 @@ TF_BUILTIN(StringToNumber, CodeStubAssembler) {
}
TF_BUILTIN(ToName, CodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* input = Parameter(Descriptor::kArgument);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> input = CAST(Parameter(Descriptor::kArgument));
- VARIABLE(var_input, MachineRepresentation::kTagged, input);
+ TVARIABLE(Object, var_input, input);
Label loop(this, &var_input);
Goto(&loop);
BIND(&loop);
{
// Load the current {input} value.
- Node* input = var_input.value();
+ TNode<Object> input = var_input.value();
    // Dispatch based on the type of the {input}.
Label if_inputisbigint(this), if_inputisname(this), if_inputisnumber(this),
if_inputisoddball(this), if_inputisreceiver(this, Label::kDeferred);
GotoIf(TaggedIsSmi(input), &if_inputisnumber);
- TNode<Uint16T> input_instance_type = LoadInstanceType(input);
+ TNode<Uint16T> input_instance_type = LoadInstanceType(CAST(input));
STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE);
GotoIf(IsNameInstanceType(input_instance_type), &if_inputisname);
GotoIf(IsJSReceiverInstanceType(input_instance_type), &if_inputisreceiver);
@@ -151,7 +152,7 @@ TF_BUILTIN(ToName, CodeStubAssembler) {
{
// Just return the {input}'s string representation.
CSA_ASSERT(this, IsOddballInstanceType(input_instance_type));
- Return(LoadObjectField(input, Oddball::kToStringOffset));
+ Return(LoadObjectField(CAST(input), Oddball::kToStringOffset));
}
BIND(&if_inputisreceiver);
@@ -159,23 +160,23 @@ TF_BUILTIN(ToName, CodeStubAssembler) {
// Convert the JSReceiver {input} to a primitive first,
// and then run the loop again with the new {input},
// which is then a primitive value.
- var_input.Bind(CallBuiltin(Builtins::kNonPrimitiveToPrimitive_String,
- context, input));
+ var_input = CallBuiltin(Builtins::kNonPrimitiveToPrimitive_String,
+ context, input);
Goto(&loop);
}
}
}
TF_BUILTIN(NonNumberToNumber, CodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* input = Parameter(Descriptor::kArgument);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<HeapObject> input = CAST(Parameter(Descriptor::kArgument));
Return(NonNumberToNumber(context, input));
}
TF_BUILTIN(NonNumberToNumeric, CodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* input = Parameter(Descriptor::kArgument);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<HeapObject> input = CAST(Parameter(Descriptor::kArgument));
Return(NonNumberToNumeric(context, input));
}
@@ -191,16 +192,19 @@ TF_BUILTIN(ToNumeric, CodeStubAssembler) {
// ES6 section 7.1.3 ToNumber ( argument )
TF_BUILTIN(ToNumber, CodeStubAssembler) {
+ // TODO(solanes, v8:6949): Changing this to a TNode<Context> crashes with the
+ // empty context. Context might not be needed, but it is propagated all over
+ // the place and hard to pull out.
Node* context = Parameter(Descriptor::kContext);
- Node* input = Parameter(Descriptor::kArgument);
+ TNode<Object> input = CAST(Parameter(Descriptor::kArgument));
Return(ToNumber(context, input));
}
// Like ToNumber, but also converts BigInts.
TF_BUILTIN(ToNumberConvertBigInt, CodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* input = Parameter(Descriptor::kArgument);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> input = CAST(Parameter(Descriptor::kArgument));
Return(ToNumber(context, input, BigIntHandling::kConvertToNumber));
}
@@ -214,8 +218,8 @@ TF_BUILTIN(NumberToString, CodeStubAssembler) {
// 7.1.1.1 OrdinaryToPrimitive ( O, hint )
void ConversionBuiltinsAssembler::Generate_OrdinaryToPrimitive(
- Node* context, Node* input, OrdinaryToPrimitiveHint hint) {
- VARIABLE(var_result, MachineRepresentation::kTagged);
+ TNode<Context> context, TNode<Object> input, OrdinaryToPrimitiveHint hint) {
+ TVARIABLE(Object, var_result);
Label return_result(this, &var_result);
Handle<String> method_names[2];
@@ -246,12 +250,12 @@ void ConversionBuiltinsAssembler::Generate_OrdinaryToPrimitive(
// Call the {method} on the {input}.
Callable callable = CodeFactory::Call(
isolate(), ConvertReceiverMode::kNotNullOrUndefined);
- Node* result = CallJS(callable, context, method, input);
- var_result.Bind(result);
+ TNode<Object> result = CallJS(callable, context, method, input);
+ var_result = result;
// Return the {result} if it is a primitive.
GotoIf(TaggedIsSmi(result), &return_result);
- TNode<Uint16T> result_instance_type = LoadInstanceType(result);
+ TNode<Uint16T> result_instance_type = LoadInstanceType(CAST(result));
GotoIf(IsPrimitiveInstanceType(result_instance_type), &return_result);
}
@@ -267,22 +271,22 @@ void ConversionBuiltinsAssembler::Generate_OrdinaryToPrimitive(
}
TF_BUILTIN(OrdinaryToPrimitive_Number, ConversionBuiltinsAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* input = Parameter(Descriptor::kArgument);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> input = CAST(Parameter(Descriptor::kArgument));
Generate_OrdinaryToPrimitive(context, input,
OrdinaryToPrimitiveHint::kNumber);
}
TF_BUILTIN(OrdinaryToPrimitive_String, ConversionBuiltinsAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* input = Parameter(Descriptor::kArgument);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> input = CAST(Parameter(Descriptor::kArgument));
Generate_OrdinaryToPrimitive(context, input,
OrdinaryToPrimitiveHint::kString);
}
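
Generate_OrdinaryToPrimitive above is the CSA rendering of the spec's
OrdinaryToPrimitive: call the hint-ordered methods and return the first
primitive result. A plain C++ sketch of that control flow, with a toy value
model (the types here are illustrative, not V8's):

    #include <functional>
    #include <map>
    #include <optional>
    #include <stdexcept>
    #include <string>
    #include <utility>

    enum class Hint { kNumber, kString };

    // Toy value model: a primitive is just a string; an object carries
    // zero-argument "methods" whose results may or may not be primitive.
    struct MethodResult {
      std::optional<std::string> primitive;  // engaged => primitive result
    };

    struct Object {
      std::map<std::string, std::function<MethodResult()>> methods;
    };

    // 7.1.1.1 OrdinaryToPrimitive(O, hint): hint "number" tries valueOf
    // then toString; hint "string" reverses the order. The first call that
    // yields a primitive wins; otherwise it is a TypeError.
    std::string OrdinaryToPrimitive(const Object& o, Hint hint) {
      const char* order[2] = {"valueOf", "toString"};
      if (hint == Hint::kString) std::swap(order[0], order[1]);
      for (const char* name : order) {
        auto it = o.methods.find(name);
        if (it == o.methods.end()) continue;  // absent method: skip
        MethodResult r = it->second();
        if (r.primitive) return *r.primitive;  // primitive: done
      }
      throw std::runtime_error("TypeError: cannot convert to primitive");
    }

The two TF_BUILTINs above differ only in the hint they pass, exactly like
two callers of this sketch would.
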
// ES6 section 7.1.2 ToBoolean ( argument )
TF_BUILTIN(ToBoolean, CodeStubAssembler) {
- Node* value = Parameter(Descriptor::kArgument);
+ TNode<Object> value = CAST(Parameter(Descriptor::kArgument));
Label return_true(this), return_false(this);
BranchIfToBooleanIsTrue(value, &return_true, &return_false);
@@ -298,7 +302,7 @@ TF_BUILTIN(ToBoolean, CodeStubAssembler) {
// Requires parameter on stack so that it can be used as a continuation from a
// LAZY deopt.
TF_BUILTIN(ToBooleanLazyDeoptContinuation, CodeStubAssembler) {
- Node* value = Parameter(Descriptor::kArgument);
+ TNode<Object> value = CAST(Parameter(Descriptor::kArgument));
Label return_true(this), return_false(this);
BranchIfToBooleanIsTrue(value, &return_true, &return_false);
@@ -311,11 +315,10 @@ TF_BUILTIN(ToBooleanLazyDeoptContinuation, CodeStubAssembler) {
}
TF_BUILTIN(ToLength, CodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
// We might need to loop once for ToNumber conversion.
- VARIABLE(var_len, MachineRepresentation::kTagged,
- Parameter(Descriptor::kArgument));
+ TVARIABLE(Object, var_len, CAST(Parameter(Descriptor::kArgument)));
Label loop(this, &var_len);
Goto(&loop);
BIND(&loop);
@@ -325,7 +328,7 @@ TF_BUILTIN(ToLength, CodeStubAssembler) {
return_zero(this, Label::kDeferred);
// Load the current {len} value.
- Node* len = var_len.value();
+ TNode<Object> len = var_len.value();
// Check if {len} is a positive Smi.
GotoIf(TaggedIsPositiveSmi(len), &return_len);
@@ -334,14 +337,16 @@ TF_BUILTIN(ToLength, CodeStubAssembler) {
GotoIf(TaggedIsSmi(len), &return_zero);
// Check if {len} is a HeapNumber.
+ TNode<HeapObject> len_heap_object = CAST(len);
Label if_lenisheapnumber(this),
if_lenisnotheapnumber(this, Label::kDeferred);
- Branch(IsHeapNumber(len), &if_lenisheapnumber, &if_lenisnotheapnumber);
+ Branch(IsHeapNumber(len_heap_object), &if_lenisheapnumber,
+ &if_lenisnotheapnumber);
BIND(&if_lenisheapnumber);
{
// Load the floating-point value of {len}.
- TNode<Float64T> len_value = LoadHeapNumberValue(len);
+ TNode<Float64T> len_value = LoadHeapNumberValue(len_heap_object);
// Check if {len} is not greater than zero.
GotoIfNot(Float64GreaterThan(len_value, Float64Constant(0.0)),
@@ -361,7 +366,7 @@ TF_BUILTIN(ToLength, CodeStubAssembler) {
BIND(&if_lenisnotheapnumber);
{
// Need to convert {len} to a Number first.
- var_len.Bind(CallBuiltin(Builtins::kNonNumberToNumber, context, len));
+ var_len = CallBuiltin(Builtins::kNonNumberToNumber, context, len);
Goto(&loop);
}
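
The surrounding loop is ES2015 ToLength: coerce to Number if necessary (the
NonNumberToNumber retry above), then clamp into the valid array-length range
[0, 2^53 - 1]. The numeric core reduces to a small pure function; a sketch
assuming the input has already been coerced to a double:

    #include <algorithm>
    #include <cassert>
    #include <cmath>

    // ES2015 7.1.15 ToLength for an argument already converted to Number.
    // NaN and negatives (including -0) clamp to +0; large values clamp to
    // 2^53 - 1, the largest length a double can index exactly.
    double ToLength(double n) {
      constexpr double kMaxSafeInteger = 9007199254740991.0;  // 2^53 - 1
      n = std::trunc(n);  // ToInteger: drop the fractional part
      if (std::isnan(n) || n <= 0.0) return 0.0;
      return std::min(n, kMaxSafeInteger);
    }

    int main() {
      assert(ToLength(4.7) == 4.0);
      assert(ToLength(-1.0) == 0.0);
      assert(ToLength(1e300) == 9007199254740991.0);
    }

The builtin's fast path is exactly the TaggedIsPositiveSmi check: a positive
Smi is already a valid length, so no conversion or clamping is needed.
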
@@ -377,15 +382,15 @@ TF_BUILTIN(ToLength, CodeStubAssembler) {
}
TF_BUILTIN(ToInteger, CodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* input = Parameter(Descriptor::kArgument);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> input = CAST(Parameter(Descriptor::kArgument));
Return(ToInteger(context, input, kNoTruncation));
}
TF_BUILTIN(ToInteger_TruncateMinusZero, CodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* input = Parameter(Descriptor::kArgument);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> input = CAST(Parameter(Descriptor::kArgument));
Return(ToInteger(context, input, kTruncateMinusZero));
}
@@ -396,15 +401,14 @@ TF_BUILTIN(ToObject, CodeStubAssembler) {
if_noconstructor(this, Label::kDeferred),
if_wrapjs_primitive_wrapper(this);
- Node* context = Parameter(Descriptor::kContext);
- Node* object = Parameter(Descriptor::kArgument);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> object = CAST(Parameter(Descriptor::kArgument));
- VARIABLE(constructor_function_index_var,
- MachineType::PointerRepresentation());
+ TVARIABLE(IntPtrT, constructor_function_index_var);
GotoIf(TaggedIsSmi(object), &if_smi);
- TNode<Map> map = LoadMap(object);
+ TNode<Map> map = LoadMap(CAST(object));
TNode<Uint16T> instance_type = LoadMapInstanceType(map);
GotoIf(IsJSReceiverInstanceType(instance_type), &if_jsreceiver);
@@ -413,12 +417,12 @@ TF_BUILTIN(ToObject, CodeStubAssembler) {
GotoIf(WordEqual(constructor_function_index,
IntPtrConstant(Map::kNoConstructorFunctionIndex)),
&if_noconstructor);
- constructor_function_index_var.Bind(constructor_function_index);
+ constructor_function_index_var = constructor_function_index;
Goto(&if_wrapjs_primitive_wrapper);
BIND(&if_smi);
- constructor_function_index_var.Bind(
- IntPtrConstant(Context::NUMBER_FUNCTION_INDEX));
+ constructor_function_index_var =
+ IntPtrConstant(Context::NUMBER_FUNCTION_INDEX);
Goto(&if_wrapjs_primitive_wrapper);
BIND(&if_wrapjs_primitive_wrapper);
@@ -449,7 +453,7 @@ TF_BUILTIN(ToObject, CodeStubAssembler) {
// ES6 section 12.5.5 typeof operator
TF_BUILTIN(Typeof, CodeStubAssembler) {
- Node* object = Parameter(Descriptor::kObject);
+ TNode<Object> object = CAST(Parameter(Descriptor::kObject));
Return(Typeof(object));
}
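
Typeof above takes no context parameter at all: the result is fully
determined by the operand's tag and instance type. The spec mapping it
computes, as a toy dispatcher over a simplified type enum (the enum is
illustrative, not V8's instance-type lattice):

    #include <cassert>
    #include <string>

    enum class Type {
      kSmi, kHeapNumber, kString, kSymbol, kBigInt, kBoolean,
      kUndefined, kNull, kCallable, kOtherObject
    };

    // ES2015 12.5.5: typeof maps every value onto one of eight strings.
    std::string TypeOf(Type t) {
      switch (t) {
        case Type::kSmi:
        case Type::kHeapNumber:  return "number";
        case Type::kString:      return "string";
        case Type::kSymbol:      return "symbol";
        case Type::kBigInt:      return "bigint";
        case Type::kBoolean:     return "boolean";
        case Type::kUndefined:   return "undefined";
        case Type::kNull:        return "object";  // the historical quirk
        case Type::kCallable:    return "function";
        case Type::kOtherObject: return "object";
      }
      return "object";  // unreachable with a well-formed enum
    }

    int main() {
      assert(TypeOf(Type::kNull) == "object");
      assert(TypeOf(Type::kCallable) == "function");
    }
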
diff --git a/deps/v8/src/builtins/builtins-date-gen.cc b/deps/v8/src/builtins/builtins-date-gen.cc
index 97600efaa4..98c1343d2c 100644
--- a/deps/v8/src/builtins/builtins-date-gen.cc
+++ b/deps/v8/src/builtins/builtins-date-gen.cc
@@ -18,23 +18,23 @@ class DateBuiltinsAssembler : public CodeStubAssembler {
: CodeStubAssembler(state) {}
protected:
- void Generate_DatePrototype_GetField(Node* context, Node* receiver,
- int field_index);
+ void Generate_DatePrototype_GetField(TNode<Context> context,
+ TNode<Object> receiver, int field_index);
};
-void DateBuiltinsAssembler::Generate_DatePrototype_GetField(Node* context,
- Node* receiver,
- int field_index) {
+void DateBuiltinsAssembler::Generate_DatePrototype_GetField(
+ TNode<Context> context, TNode<Object> receiver, int field_index) {
Label receiver_not_date(this, Label::kDeferred);
GotoIf(TaggedIsSmi(receiver), &receiver_not_date);
- TNode<Uint16T> receiver_instance_type = LoadInstanceType(receiver);
+ TNode<Uint16T> receiver_instance_type = LoadInstanceType(CAST(receiver));
GotoIfNot(InstanceTypeEqual(receiver_instance_type, JS_DATE_TYPE),
&receiver_not_date);
+ TNode<JSDate> date_receiver = CAST(receiver);
// Load the specified date field, falling back to the runtime as necessary.
if (field_index == JSDate::kDateValue) {
- Return(LoadObjectField(receiver, JSDate::kValueOffset));
+ Return(LoadObjectField(date_receiver, JSDate::kValueOffset));
} else {
if (field_index < JSDate::kFirstUncachedField) {
Label stamp_mismatch(this, Label::kDeferred);
@@ -42,9 +42,9 @@ void DateBuiltinsAssembler::Generate_DatePrototype_GetField(Node* context,
ExternalConstant(ExternalReference::date_cache_stamp(isolate())));
TNode<Object> cache_stamp =
- LoadObjectField(receiver, JSDate::kCacheStampOffset);
+ LoadObjectField(date_receiver, JSDate::kCacheStampOffset);
GotoIf(TaggedNotEqual(date_cache_stamp, cache_stamp), &stamp_mismatch);
- Return(LoadObjectField(receiver,
+ Return(LoadObjectField(date_receiver,
JSDate::kValueOffset + field_index * kTaggedSize));
BIND(&stamp_mismatch);
@@ -53,10 +53,10 @@ void DateBuiltinsAssembler::Generate_DatePrototype_GetField(Node* context,
TNode<Smi> field_index_smi = SmiConstant(field_index);
TNode<ExternalReference> function =
ExternalConstant(ExternalReference::get_date_field_function());
- Node* result = CallCFunction(
+ TNode<Object> result = CAST(CallCFunction(
function, MachineType::AnyTagged(),
- std::make_pair(MachineType::AnyTagged(), receiver),
- std::make_pair(MachineType::AnyTagged(), field_index_smi));
+ std::make_pair(MachineType::AnyTagged(), date_receiver),
+ std::make_pair(MachineType::AnyTagged(), field_index_smi)));
Return(result);
}
@@ -66,128 +66,128 @@ void DateBuiltinsAssembler::Generate_DatePrototype_GetField(Node* context,
}
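
The stamp comparison above is the whole trick behind JSDate's cached fields:
each date object records the isolate-wide cache stamp under which it computed
its local-time components, and the runtime bumps that stamp whenever timezone
or DST data changes, invalidating every cache at once. A standalone model of
the scheme (the field layout and conversion are stand-ins, not JSDate's):

    #include <cstdint>
    #include <cstdio>

    // Isolate-wide stamp, bumped when timezone/DST rules change. Cached
    // per-object fields computed under an older stamp are stale.
    static uint64_t g_date_cache_stamp = 1;
    void NotifyTimezoneChange() { ++g_date_cache_stamp; }

    struct Date {
      double value_ms = 0;       // ms since epoch; can never go stale
      uint64_t cache_stamp = 0;  // stamp the cached fields belong to
      int cached_year = 0;

      void RecomputeCachedFields() {
        // Stand-in for the real UTC-to-local conversion.
        cached_year = 1970 + static_cast<int>(value_ms / 31557600000.0);
        cache_stamp = g_date_cache_stamp;  // mark fields fresh
      }

      int Year() {
        // Fast path: one load and one compare, as in the builtin above.
        if (cache_stamp != g_date_cache_stamp) RecomputeCachedFields();
        return cached_year;
      }
    };

    int main() {
      Date d;
      std::printf("%d\n", d.Year());  // computes and caches
      NotifyTimezoneChange();         // invalidates every Date at once
      std::printf("%d\n", d.Year());  // stamp mismatch, recomputes
    }

This also explains the kDateValue early return in the builtin: the epoch
value itself is never derived from timezone data, so it skips the stamp
check entirely.
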
TF_BUILTIN(DatePrototypeGetDate, DateBuiltinsAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Generate_DatePrototype_GetField(context, receiver, JSDate::kDay);
}
TF_BUILTIN(DatePrototypeGetDay, DateBuiltinsAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Generate_DatePrototype_GetField(context, receiver, JSDate::kWeekday);
}
TF_BUILTIN(DatePrototypeGetFullYear, DateBuiltinsAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Generate_DatePrototype_GetField(context, receiver, JSDate::kYear);
}
TF_BUILTIN(DatePrototypeGetHours, DateBuiltinsAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Generate_DatePrototype_GetField(context, receiver, JSDate::kHour);
}
TF_BUILTIN(DatePrototypeGetMilliseconds, DateBuiltinsAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Generate_DatePrototype_GetField(context, receiver, JSDate::kMillisecond);
}
TF_BUILTIN(DatePrototypeGetMinutes, DateBuiltinsAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Generate_DatePrototype_GetField(context, receiver, JSDate::kMinute);
}
TF_BUILTIN(DatePrototypeGetMonth, DateBuiltinsAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Generate_DatePrototype_GetField(context, receiver, JSDate::kMonth);
}
TF_BUILTIN(DatePrototypeGetSeconds, DateBuiltinsAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Generate_DatePrototype_GetField(context, receiver, JSDate::kSecond);
}
TF_BUILTIN(DatePrototypeGetTime, DateBuiltinsAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Generate_DatePrototype_GetField(context, receiver, JSDate::kDateValue);
}
TF_BUILTIN(DatePrototypeGetTimezoneOffset, DateBuiltinsAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Generate_DatePrototype_GetField(context, receiver, JSDate::kTimezoneOffset);
}
TF_BUILTIN(DatePrototypeGetUTCDate, DateBuiltinsAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Generate_DatePrototype_GetField(context, receiver, JSDate::kDayUTC);
}
TF_BUILTIN(DatePrototypeGetUTCDay, DateBuiltinsAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Generate_DatePrototype_GetField(context, receiver, JSDate::kWeekdayUTC);
}
TF_BUILTIN(DatePrototypeGetUTCFullYear, DateBuiltinsAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Generate_DatePrototype_GetField(context, receiver, JSDate::kYearUTC);
}
TF_BUILTIN(DatePrototypeGetUTCHours, DateBuiltinsAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Generate_DatePrototype_GetField(context, receiver, JSDate::kHourUTC);
}
TF_BUILTIN(DatePrototypeGetUTCMilliseconds, DateBuiltinsAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Generate_DatePrototype_GetField(context, receiver, JSDate::kMillisecondUTC);
}
TF_BUILTIN(DatePrototypeGetUTCMinutes, DateBuiltinsAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Generate_DatePrototype_GetField(context, receiver, JSDate::kMinuteUTC);
}
TF_BUILTIN(DatePrototypeGetUTCMonth, DateBuiltinsAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Generate_DatePrototype_GetField(context, receiver, JSDate::kMonthUTC);
}
TF_BUILTIN(DatePrototypeGetUTCSeconds, DateBuiltinsAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Generate_DatePrototype_GetField(context, receiver, JSDate::kSecondUTC);
}
TF_BUILTIN(DatePrototypeValueOf, DateBuiltinsAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Generate_DatePrototype_GetField(context, receiver, JSDate::kDateValue);
}
TF_BUILTIN(DatePrototypeToPrimitive, CodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
TNode<Object> hint = CAST(Parameter(Descriptor::kHint));
// Check if the {receiver} is actually a JSReceiver.
Label receiver_is_invalid(this, Label::kDeferred);
GotoIf(TaggedIsSmi(receiver), &receiver_is_invalid);
- GotoIfNot(IsJSReceiver(receiver), &receiver_is_invalid);
+ GotoIfNot(IsJSReceiver(CAST(receiver)), &receiver_is_invalid);
// Dispatch to the appropriate OrdinaryToPrimitive builtin.
Label hint_is_number(this), hint_is_string(this),
diff --git a/deps/v8/src/builtins/builtins-date.cc b/deps/v8/src/builtins/builtins-date.cc
index c3e7601832..258b1022da 100644
--- a/deps/v8/src/builtins/builtins-date.cc
+++ b/deps/v8/src/builtins/builtins-date.cc
@@ -854,16 +854,18 @@ BUILTIN(DatePrototypeToLocaleDateString) {
isolate->CountUsage(v8::Isolate::UseCounterFeature::kDateToLocaleDateString);
- CHECK_RECEIVER(JSDate, date, "Date.prototype.toLocaleDateString");
+ const char* method = "Date.prototype.toLocaleDateString";
+ CHECK_RECEIVER(JSDate, date, method);
RETURN_RESULT_OR_FAILURE(
isolate, JSDateTimeFormat::ToLocaleDateTime(
isolate,
- date, // date
- args.atOrUndefined(isolate, 1), // locales
- args.atOrUndefined(isolate, 2), // options
- JSDateTimeFormat::RequiredOption::kDate, // required
- JSDateTimeFormat::DefaultsOption::kDate)); // defaults
+ date, // date
+ args.atOrUndefined(isolate, 1), // locales
+ args.atOrUndefined(isolate, 2), // options
+ JSDateTimeFormat::RequiredOption::kDate, // required
+ JSDateTimeFormat::DefaultsOption::kDate, // defaults
+ method)); // method
}
// ecma402 #sup-date.prototype.tolocalestring
@@ -872,16 +874,18 @@ BUILTIN(DatePrototypeToLocaleString) {
isolate->CountUsage(v8::Isolate::UseCounterFeature::kDateToLocaleString);
- CHECK_RECEIVER(JSDate, date, "Date.prototype.toLocaleString");
+ const char* method = "Date.prototype.toLocaleString";
+ CHECK_RECEIVER(JSDate, date, method);
RETURN_RESULT_OR_FAILURE(
isolate, JSDateTimeFormat::ToLocaleDateTime(
isolate,
- date, // date
- args.atOrUndefined(isolate, 1), // locales
- args.atOrUndefined(isolate, 2), // options
- JSDateTimeFormat::RequiredOption::kAny, // required
- JSDateTimeFormat::DefaultsOption::kAll)); // defaults
+ date, // date
+ args.atOrUndefined(isolate, 1), // locales
+ args.atOrUndefined(isolate, 2), // options
+ JSDateTimeFormat::RequiredOption::kAny, // required
+ JSDateTimeFormat::DefaultsOption::kAll, // defaults
+ method)); // method
}
// ecma402 #sup-date.prototype.tolocaletimestring
@@ -890,16 +894,18 @@ BUILTIN(DatePrototypeToLocaleTimeString) {
isolate->CountUsage(v8::Isolate::UseCounterFeature::kDateToLocaleTimeString);
- CHECK_RECEIVER(JSDate, date, "Date.prototype.toLocaleTimeString");
+ const char* method = "Date.prototype.toLocaleTimeString";
+ CHECK_RECEIVER(JSDate, date, method);
RETURN_RESULT_OR_FAILURE(
isolate, JSDateTimeFormat::ToLocaleDateTime(
isolate,
- date, // date
- args.atOrUndefined(isolate, 1), // locales
- args.atOrUndefined(isolate, 2), // options
- JSDateTimeFormat::RequiredOption::kTime, // required
- JSDateTimeFormat::DefaultsOption::kTime)); // defaults
+ date, // date
+ args.atOrUndefined(isolate, 1), // locales
+ args.atOrUndefined(isolate, 2), // options
+ JSDateTimeFormat::RequiredOption::kTime, // required
+ JSDateTimeFormat::DefaultsOption::kTime, // defaults
+ method)); // method
}
#endif // V8_INTL_SUPPORT
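
The only substantive change in these three builtins is the trailing method
argument: the name used for the receiver check is now also threaded into
JSDateTimeFormat::ToLocaleDateTime, so errors raised deep inside the Intl
code cite the builtin the user actually called. A hedged sketch of the
pattern (the signatures are illustrative, not V8's):

    #include <stdexcept>
    #include <string>

    struct Date { bool is_js_date = true; bool valid = true; };

    // Deep helper: it can fail long after the receiver check, so it takes
    // the public-facing method name and cites it in its own errors.
    std::string ToLocaleDateTime(const Date& date, const char* method) {
      if (!date.valid)
        throw std::runtime_error(std::string("TypeError: ") + method +
                                 " received an invalid Date");
      return "1/1/1970";
    }

    std::string DatePrototypeToLocaleDateString(const Date& receiver) {
      // One definition of the name serves both error paths, so the receiver
      // check and the deep Intl failure can never cite different builtins.
      const char* method = "Date.prototype.toLocaleDateString";
      if (!receiver.is_js_date)
        throw std::runtime_error(std::string("TypeError: ") + method +
                                 " called on incompatible receiver");
      return ToLocaleDateTime(receiver, method);
    }
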
diff --git a/deps/v8/src/builtins/builtins-definitions.h b/deps/v8/src/builtins/builtins-definitions.h
index 95f5273f14..2489538d19 100644
--- a/deps/v8/src/builtins/builtins-definitions.h
+++ b/deps/v8/src/builtins/builtins-definitions.h
@@ -102,7 +102,6 @@ namespace internal {
ASM(ResumeGeneratorTrampoline, ResumeGenerator) \
\
/* String helpers */ \
- TFC(StringCharAt, StringAt) \
TFC(StringCodePointAt, StringAt) \
TFC(StringFromCodePointAt, StringAtAsString) \
TFC(StringEqual, Compare) \
@@ -219,9 +218,7 @@ namespace internal {
TFH(KeyedLoadIC_Slow, LoadWithVector) \
TFH(KeyedStoreIC_Megamorphic, Store) \
TFH(KeyedStoreIC_Slow, StoreWithVector) \
- TFH(LoadGlobalIC_Slow, LoadWithVector) \
TFH(LoadIC_FunctionPrototype, LoadWithVector) \
- TFH(LoadIC_Slow, LoadWithVector) \
TFH(LoadIC_StringLength, LoadWithVector) \
TFH(LoadIC_StringWrapperLength, LoadWithVector) \
TFH(LoadIC_NoFeedback, Load) \
@@ -230,7 +227,6 @@ namespace internal {
TFH(StoreInArrayLiteralIC_Slow, StoreWithVector) \
TFH(KeyedLoadIC_SloppyArguments, LoadWithVector) \
TFH(LoadIndexedInterceptorIC, LoadWithVector) \
- TFH(StoreInterceptorIC, StoreWithVector) \
TFH(KeyedStoreIC_SloppyArguments_Standard, StoreWithVector) \
TFH(KeyedStoreIC_SloppyArguments_GrowNoTransitionHandleCOW, StoreWithVector) \
TFH(KeyedStoreIC_SloppyArguments_NoTransitionIgnoreOOB, StoreWithVector) \
@@ -568,6 +564,9 @@ namespace internal {
SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
CPP(AsyncFunctionConstructor) \
\
+ /* Iterator Protocol */ \
+ TFC(GetIteratorWithFeedbackLazyDeoptContinuation, GetIteratorStackParameter) \
+ \
/* Global object */ \
CPP(GlobalDecodeURI) \
CPP(GlobalDecodeURIComponent) \
@@ -616,6 +615,10 @@ namespace internal {
TFS(IterableToList, kIterable, kIteratorFn) \
TFS(IterableToListWithSymbolLookup, kIterable) \
TFS(IterableToListMayPreserveHoles, kIterable, kIteratorFn) \
+ TFS(IterableToFixedArrayForWasm, kIterable, kExpectedLength) \
+ \
+ /* #sec-createstringlistfromiterable */ \
+ TFS(StringListFromIterable, kIterable) \
\
/* Map */ \
TFS(FindOrderedHashMapEntry, kTable, kKey) \
@@ -845,28 +848,13 @@ namespace internal {
CPP(RegExpLeftContextGetter) \
/* ES #sec-regexp.prototype.compile */ \
TFJ(RegExpPrototypeCompile, 2, kReceiver, kPattern, kFlags) \
- /* ES #sec-regexp.prototype.exec */ \
- TFJ(RegExpPrototypeExec, 1, kReceiver, kString) \
- /* https://tc39.github.io/proposal-string-matchall/ */ \
- TFJ(RegExpPrototypeMatchAll, 1, kReceiver, kString) \
- /* ES #sec-regexp.prototype-@@search */ \
- TFJ(RegExpPrototypeSearch, 1, kReceiver, kString) \
CPP(RegExpPrototypeToString) \
CPP(RegExpRightContextGetter) \
\
- /* ES #sec-regexp.prototype-@@split */ \
- TFJ(RegExpPrototypeSplit, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* RegExp helpers */ \
TFS(RegExpExecAtom, kRegExp, kString, kLastIndex, kMatchInfo) \
TFS(RegExpExecInternal, kRegExp, kString, kLastIndex, kMatchInfo) \
ASM(RegExpInterpreterTrampoline, CCall) \
- TFS(RegExpPrototypeExecSlow, kReceiver, kString) \
- TFS(RegExpSearchFast, kReceiver, kPattern) \
- TFS(RegExpSplit, kRegExp, kString, kLimit) \
- \
- /* RegExp String Iterator */ \
- /* https://tc39.github.io/proposal-string-matchall/ */ \
- TFJ(RegExpStringIteratorPrototypeNext, 0, kReceiver) \
\
/* Set */ \
TFJ(SetConstructor, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
@@ -1117,7 +1105,6 @@ namespace internal {
TFS(SetProperty, kReceiver, kKey, kValue) \
TFS(SetPropertyInLiteral, kReceiver, kKey, kValue) \
ASM(MemCopyUint8Uint8, CCall) \
- ASM(MemCopyUint16Uint8, CCall) \
ASM(MemMove, CCall) \
\
/* Trace */ \
@@ -1131,7 +1118,14 @@ namespace internal {
CPP(FinalizationGroupRegister) \
CPP(FinalizationGroupUnregister) \
CPP(WeakRefConstructor) \
- CPP(WeakRefDeref)
+ CPP(WeakRefDeref) \
+ \
+ /* Async modules */ \
+ TFJ(AsyncModuleEvaluate, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ \
+  /* CallAsyncModule* are spec-anonymous functions */                         \
+ CPP(CallAsyncModuleFulfilled) \
+ CPP(CallAsyncModuleRejected)
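
For orientation, builtins-definitions.h is one large X-macro: BUILTIN_LIST is
invoked with one macro argument per builtin kind (CPP, TFJ, TFS, TFC, TFH,
ASM), so a single added line such as TFJ(AsyncModuleEvaluate, ...) grows the
builtin enum, the declarations, and the setup tables in lockstep. A minimal
standalone illustration of the technique, with hypothetical entries:

    #include <cstdio>

    // The single source of truth: one entry per builtin, tagged by kind.
    #define BUILTIN_LIST(CPP, TFJ)  \
      CPP(GlobalDecodeURI)          \
      TFJ(AsyncModuleEvaluate)      \
      CPP(WeakRefDeref)

    // Expansion 1: an enum with one value per builtin, whatever its kind.
    #define AS_ENUM(Name) k##Name,
    enum Builtin { BUILTIN_LIST(AS_ENUM, AS_ENUM) kBuiltinCount };
    #undef AS_ENUM

    // Expansion 2: a name table, driven by the very same list.
    #define AS_NAME(Name) #Name,
    static const char* kBuiltinNames[] = {BUILTIN_LIST(AS_NAME, AS_NAME)};
    #undef AS_NAME

    int main() {
      for (int i = 0; i < kBuiltinCount; ++i)
        std::printf("%d: %s\n", i, kBuiltinNames[i]);
    }

Deleting a line, as with the RegExp and IC entries removed above, drops the
builtin from every expansion in the same single-edit way.
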
#ifdef V8_INTL_SUPPORT
#define BUILTIN_LIST_INTL(CPP, TFJ, TFS) \
diff --git a/deps/v8/src/builtins/builtins-function-gen.cc b/deps/v8/src/builtins/builtins-function-gen.cc
index ee1f67d434..f0853e9bd9 100644
--- a/deps/v8/src/builtins/builtins-function-gen.cc
+++ b/deps/v8/src/builtins/builtins-function-gen.cc
@@ -15,14 +15,12 @@ namespace internal {
TF_BUILTIN(FastFunctionPrototypeBind, CodeStubAssembler) {
Label slow(this);
- // TODO(ishell): use constants from Descriptor once the JSFunction linkage
- // arguments are reordered.
TNode<Int32T> argc =
UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount));
- Node* context = Parameter(Descriptor::kContext);
- Node* new_target = Parameter(Descriptor::kJSNewTarget);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> new_target = CAST(Parameter(Descriptor::kJSNewTarget));
- CodeStubArguments args(this, ChangeInt32ToIntPtr(argc));
+ CodeStubArguments args(this, argc);
// Check that receiver has instance type of JS_FUNCTION_TYPE
TNode<Object> receiver = args.GetReceiver();
@@ -85,21 +83,20 @@ TF_BUILTIN(FastFunctionPrototypeBind, CodeStubAssembler) {
// Choose the right bound function map based on whether the target is
// constructable.
Comment("Choose the right bound function map");
- VARIABLE(bound_function_map, MachineRepresentation::kTagged);
+ TVARIABLE(Map, bound_function_map);
{
Label with_constructor(this);
- VariableList vars({&bound_function_map}, zone());
TNode<NativeContext> native_context = LoadNativeContext(context);
- Label map_done(this, vars);
+ Label map_done(this, &bound_function_map);
GotoIf(IsConstructorMap(receiver_map), &with_constructor);
- bound_function_map.Bind(LoadContextElement(
+ bound_function_map = CAST(LoadContextElement(
native_context, Context::BOUND_FUNCTION_WITHOUT_CONSTRUCTOR_MAP_INDEX));
Goto(&map_done);
BIND(&with_constructor);
- bound_function_map.Bind(LoadContextElement(
+ bound_function_map = CAST(LoadContextElement(
native_context, Context::BOUND_FUNCTION_WITH_CONSTRUCTOR_MAP_INDEX));
Goto(&map_done);
@@ -115,30 +112,28 @@ TF_BUILTIN(FastFunctionPrototypeBind, CodeStubAssembler) {
// Allocate the arguments array.
Comment("Allocate the arguments array");
- VARIABLE(argument_array, MachineRepresentation::kTagged);
+ TVARIABLE(FixedArray, argument_array);
{
Label empty_arguments(this);
Label arguments_done(this, &argument_array);
GotoIf(Uint32LessThanOrEqual(argc, Int32Constant(1)), &empty_arguments);
TNode<IntPtrT> elements_length =
Signed(ChangeUint32ToWord(Unsigned(Int32Sub(argc, Int32Constant(1)))));
- TNode<FixedArray> elements = CAST(AllocateFixedArray(
- PACKED_ELEMENTS, elements_length, kAllowLargeObjectAllocation));
- VARIABLE(index, MachineType::PointerRepresentation());
- index.Bind(IntPtrConstant(0));
+ argument_array = CAST(AllocateFixedArray(PACKED_ELEMENTS, elements_length,
+ kAllowLargeObjectAllocation));
+ TVARIABLE(IntPtrT, index, IntPtrConstant(0));
VariableList foreach_vars({&index}, zone());
args.ForEach(
foreach_vars,
- [this, elements, &index](Node* arg) {
- StoreFixedArrayElement(elements, index.value(), arg);
+ [&](TNode<Object> arg) {
+ StoreFixedArrayElement(argument_array.value(), index.value(), arg);
Increment(&index);
},
IntPtrConstant(1));
- argument_array.Bind(elements);
Goto(&arguments_done);
BIND(&empty_arguments);
- argument_array.Bind(EmptyFixedArrayConstant());
+ argument_array = EmptyFixedArrayConstant();
Goto(&arguments_done);
BIND(&arguments_done);
@@ -146,16 +141,16 @@ TF_BUILTIN(FastFunctionPrototypeBind, CodeStubAssembler) {
// Determine bound receiver.
Comment("Determine bound receiver");
- VARIABLE(bound_receiver, MachineRepresentation::kTagged);
+ TVARIABLE(Object, bound_receiver);
{
Label has_receiver(this);
Label receiver_done(this, &bound_receiver);
GotoIf(Word32NotEqual(argc, Int32Constant(0)), &has_receiver);
- bound_receiver.Bind(UndefinedConstant());
+ bound_receiver = UndefinedConstant();
Goto(&receiver_done);
BIND(&has_receiver);
- bound_receiver.Bind(args.AtIndex(0));
+ bound_receiver = args.AtIndex(0);
Goto(&receiver_done);
BIND(&receiver_done);
@@ -196,10 +191,10 @@ TF_BUILTIN(FastFunctionPrototypeBind, CodeStubAssembler) {
// ES6 #sec-function.prototype-@@hasinstance
TF_BUILTIN(FunctionPrototypeHasInstance, CodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* f = Parameter(Descriptor::kReceiver);
- Node* v = Parameter(Descriptor::kV);
- Node* result = OrdinaryHasInstance(context, f, v);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> f = CAST(Parameter(Descriptor::kReceiver));
+ TNode<Object> v = CAST(Parameter(Descriptor::kV));
+ TNode<Oddball> result = OrdinaryHasInstance(context, f, v);
Return(result);
}
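
Everything in FastFunctionPrototypeBind above is allocation choreography:
choose the map with or without a construct stub, copy the extra arguments
into a FixedArray, record the bound receiver. The semantics being implemented
are just receiver-plus-argument prepending, which a C++ lambda can model
directly (a behavioral analogue, not V8's object layout):

    #include <cassert>

    // Bind(f, receiver, bound...) returns a callable that invokes f with
    // the stored receiver and the stored arguments prepended to the
    // call-site arguments, mirroring Function.prototype.bind.
    template <typename F, typename Receiver, typename... Bound>
    auto Bind(F f, Receiver receiver, Bound... bound) {
      return [=](auto... call_args) {
        return f(receiver, bound..., call_args...);
      };
    }

    int main() {
      auto weighted_sum = [](int receiver, int a, int b) {
        return receiver + 10 * a + 100 * b;
      };
      // Roughly: weighted_sum.bind(1, 2) in JS terms.
      auto bound = Bind(weighted_sum, 1, 2);
      assert(bound(3) == 1 + 20 + 300);  // receiver=1, a=2 bound, b=3 live
    }
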
diff --git a/deps/v8/src/builtins/builtins-generator-gen.cc b/deps/v8/src/builtins/builtins-generator-gen.cc
index d884c417fc..0a4b3b205b 100644
--- a/deps/v8/src/builtins/builtins-generator-gen.cc
+++ b/deps/v8/src/builtins/builtins-generator-gen.cc
@@ -19,19 +19,25 @@ class GeneratorBuiltinsAssembler : public CodeStubAssembler {
: CodeStubAssembler(state) {}
protected:
+ // Currently, AsyncModules in V8 are built on top of JSAsyncFunctionObjects
+ // with an initial yield. Thus, we need some way to 'resume' the
+  // underlying JSAsyncFunctionObject owned by an AsyncModule. To support
+  // this, the body of resume is factored out below and shared by
+  // JSGeneratorObject prototype methods as well as AsyncModuleEvaluate. The
+  // only difference between AsyncModuleEvaluate and
+  // JSGeneratorObject::PrototypeNext is the expected receiver.
+ void InnerResume(CodeStubArguments* args, Node* receiver, Node* value,
+ Node* context, JSGeneratorObject::ResumeMode resume_mode,
+ char const* const method_name);
void GeneratorPrototypeResume(CodeStubArguments* args, Node* receiver,
Node* value, Node* context,
JSGeneratorObject::ResumeMode resume_mode,
char const* const method_name);
};
-void GeneratorBuiltinsAssembler::GeneratorPrototypeResume(
+void GeneratorBuiltinsAssembler::InnerResume(
CodeStubArguments* args, Node* receiver, Node* value, Node* context,
JSGeneratorObject::ResumeMode resume_mode, char const* const method_name) {
- // Check if the {receiver} is actually a JSGeneratorObject.
- ThrowIfNotInstanceType(context, receiver, JS_GENERATOR_OBJECT_TYPE,
- method_name);
-
// Check if the {receiver} is running or already closed.
TNode<Smi> receiver_continuation =
CAST(LoadObjectField(receiver, JSGeneratorObject::kContinuationOffset));
@@ -111,17 +117,46 @@ void GeneratorBuiltinsAssembler::GeneratorPrototypeResume(
}
}
+void GeneratorBuiltinsAssembler::GeneratorPrototypeResume(
+ CodeStubArguments* args, Node* receiver, Node* value, Node* context,
+ JSGeneratorObject::ResumeMode resume_mode, char const* const method_name) {
+ // Check if the {receiver} is actually a JSGeneratorObject.
+ ThrowIfNotInstanceType(context, receiver, JS_GENERATOR_OBJECT_TYPE,
+ method_name);
+ InnerResume(args, receiver, value, context, resume_mode, method_name);
+}
+
+TF_BUILTIN(AsyncModuleEvaluate, GeneratorBuiltinsAssembler) {
+ const int kValueArg = 0;
+
+ TNode<Int32T> argc =
+ UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount));
+ CodeStubArguments args(this, argc);
+
+ TNode<Object> receiver = args.GetReceiver();
+ TNode<Object> value = args.GetOptionalArgumentValue(kValueArg);
+  TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+
+ // AsyncModules act like JSAsyncFunctions. Thus we check here
+ // that the {receiver} is a JSAsyncFunction.
+ char const* const method_name = "[AsyncModule].evaluate";
+ ThrowIfNotInstanceType(context, receiver, JS_ASYNC_FUNCTION_OBJECT_TYPE,
+ method_name);
+ InnerResume(&args, receiver, value, context, JSGeneratorObject::kNext,
+ method_name);
+}
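
The new builtin is precisely the factoring the comment above promises:
AsyncModuleEvaluate and the generator prototype methods differ only in the
receiver type they accept before delegating to InnerResume. Distilled to
plain C++ (the types and checks are stand-ins):

    #include <stdexcept>
    #include <string>

    struct GeneratorObject { bool is_async = false; int continuation = 0; };

    // Shared body: resumes the underlying object; assumes the receiver was
    // already validated by the caller-specific entry point.
    void InnerResume(GeneratorObject& receiver, const char* method_name) {
      if (receiver.continuation < 0)
        throw std::runtime_error(std::string(method_name) +
                                 ": generator already closed");
      ++receiver.continuation;  // stand-in for the real resume machinery
    }

    // Entry point 1: prototype methods require a generator receiver.
    void GeneratorPrototypeNext(GeneratorObject& receiver) {
      InnerResume(receiver, "[Generator].prototype.next");
    }

    // Entry point 2: async module evaluation requires an async function
    // object; only this type check differs, the resume body is shared.
    void AsyncModuleEvaluate(GeneratorObject& receiver) {
      if (!receiver.is_async)
        throw std::runtime_error("[AsyncModule].evaluate: wrong receiver");
      InnerResume(receiver, "[AsyncModule].evaluate");
    }
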
+
// ES6 #sec-generator.prototype.next
TF_BUILTIN(GeneratorPrototypeNext, GeneratorBuiltinsAssembler) {
const int kValueArg = 0;
- TNode<IntPtrT> argc =
- ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
+ TNode<Int32T> argc =
+ UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount));
CodeStubArguments args(this, argc);
TNode<Object> receiver = args.GetReceiver();
TNode<Object> value = args.GetOptionalArgumentValue(kValueArg);
- Node* context = Parameter(Descriptor::kContext);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
GeneratorPrototypeResume(&args, receiver, value, context,
JSGeneratorObject::kNext,
@@ -132,13 +167,13 @@ TF_BUILTIN(GeneratorPrototypeNext, GeneratorBuiltinsAssembler) {
TF_BUILTIN(GeneratorPrototypeReturn, GeneratorBuiltinsAssembler) {
const int kValueArg = 0;
- TNode<IntPtrT> argc =
- ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
+ TNode<Int32T> argc =
+ UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount));
CodeStubArguments args(this, argc);
TNode<Object> receiver = args.GetReceiver();
TNode<Object> value = args.GetOptionalArgumentValue(kValueArg);
- Node* context = Parameter(Descriptor::kContext);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
GeneratorPrototypeResume(&args, receiver, value, context,
JSGeneratorObject::kReturn,
@@ -149,13 +184,13 @@ TF_BUILTIN(GeneratorPrototypeReturn, GeneratorBuiltinsAssembler) {
TF_BUILTIN(GeneratorPrototypeThrow, GeneratorBuiltinsAssembler) {
const int kExceptionArg = 0;
- TNode<IntPtrT> argc =
- ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
+ TNode<Int32T> argc =
+ UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount));
CodeStubArguments args(this, argc);
TNode<Object> receiver = args.GetReceiver();
TNode<Object> exception = args.GetOptionalArgumentValue(kExceptionArg);
- Node* context = Parameter(Descriptor::kContext);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
GeneratorPrototypeResume(&args, receiver, exception, context,
JSGeneratorObject::kThrow,
diff --git a/deps/v8/src/builtins/builtins-handler-gen.cc b/deps/v8/src/builtins/builtins-handler-gen.cc
index eae8690f1f..ef912eabf1 100644
--- a/deps/v8/src/builtins/builtins-handler-gen.cc
+++ b/deps/v8/src/builtins/builtins-handler-gen.cc
@@ -48,8 +48,8 @@ TF_BUILTIN(LoadIC_StringLength, CodeStubAssembler) {
}
TF_BUILTIN(LoadIC_StringWrapperLength, CodeStubAssembler) {
- Node* value = Parameter(Descriptor::kReceiver);
- Node* string = LoadJSPrimitiveWrapperValue(value);
+ TNode<JSPrimitiveWrapper> value = CAST(Parameter(Descriptor::kReceiver));
+ TNode<String> string = CAST(LoadJSPrimitiveWrapperValue(value));
Return(LoadStringLengthAsSmi(string));
}
@@ -388,15 +388,6 @@ TF_BUILTIN(StoreFastElementIC_NoTransitionHandleCOW, HandlerBuiltinsAssembler) {
Generate_StoreFastElementIC(STORE_HANDLE_COW);
}
-TF_BUILTIN(LoadGlobalIC_Slow, CodeStubAssembler) {
- Node* name = Parameter(Descriptor::kName);
- Node* slot = Parameter(Descriptor::kSlot);
- Node* vector = Parameter(Descriptor::kVector);
- Node* context = Parameter(Descriptor::kContext);
-
- TailCallRuntime(Runtime::kLoadGlobalIC_Slow, context, name, slot, vector);
-}
-
TF_BUILTIN(LoadIC_FunctionPrototype, CodeStubAssembler) {
Node* receiver = Parameter(Descriptor::kReceiver);
Node* name = Parameter(Descriptor::kName);
@@ -411,14 +402,6 @@ TF_BUILTIN(LoadIC_FunctionPrototype, CodeStubAssembler) {
TailCallRuntime(Runtime::kLoadIC_Miss, context, receiver, name, slot, vector);
}
-TF_BUILTIN(LoadIC_Slow, CodeStubAssembler) {
- Node* receiver = Parameter(Descriptor::kReceiver);
- Node* name = Parameter(Descriptor::kName);
- Node* context = Parameter(Descriptor::kContext);
-
- TailCallRuntime(Runtime::kGetProperty, context, receiver, name);
-}
-
TF_BUILTIN(StoreGlobalIC_Slow, CodeStubAssembler) {
Node* receiver = Parameter(Descriptor::kReceiver);
Node* name = Parameter(Descriptor::kName);
@@ -491,17 +474,6 @@ TF_BUILTIN(KeyedStoreIC_SloppyArguments_NoTransitionHandleCOW,
Generate_KeyedStoreIC_SloppyArguments();
}
-TF_BUILTIN(StoreInterceptorIC, CodeStubAssembler) {
- Node* receiver = Parameter(Descriptor::kReceiver);
- Node* name = Parameter(Descriptor::kName);
- Node* value = Parameter(Descriptor::kValue);
- Node* slot = Parameter(Descriptor::kSlot);
- Node* vector = Parameter(Descriptor::kVector);
- Node* context = Parameter(Descriptor::kContext);
- TailCallRuntime(Runtime::kStorePropertyWithInterceptor, context, value, slot,
- vector, receiver, name);
-}
-
TF_BUILTIN(LoadIndexedInterceptorIC, CodeStubAssembler) {
Node* receiver = Parameter(Descriptor::kReceiver);
Node* key = Parameter(Descriptor::kName);
diff --git a/deps/v8/src/builtins/builtins-internal-gen.cc b/deps/v8/src/builtins/builtins-internal-gen.cc
index 445c8c9517..0625b8affc 100644
--- a/deps/v8/src/builtins/builtins-internal-gen.cc
+++ b/deps/v8/src/builtins/builtins-internal-gen.cc
@@ -18,9 +18,6 @@
namespace v8 {
namespace internal {
-template <typename T>
-using TNode = compiler::TNode<T>;
-
// -----------------------------------------------------------------------------
// Stack checks.
@@ -32,12 +29,14 @@ void Builtins::Generate_StackCheck(MacroAssembler* masm) {
// TurboFan support builtins.
TF_BUILTIN(CopyFastSmiOrObjectElements, CodeStubAssembler) {
- Node* object = Parameter(Descriptor::kObject);
+ TNode<JSObject> js_object = CAST(Parameter(Descriptor::kObject));
// Load the {object}s elements.
- TNode<Object> source = LoadObjectField(object, JSObject::kElementsOffset);
- Node* target = CloneFixedArray(source, ExtractFixedArrayFlag::kFixedArrays);
- StoreObjectField(object, JSObject::kElementsOffset, target);
+ TNode<FixedArrayBase> source =
+ CAST(LoadObjectField(js_object, JSObject::kElementsOffset));
+ TNode<FixedArrayBase> target =
+ CloneFixedArray(source, ExtractFixedArrayFlag::kFixedArrays);
+ StoreObjectField(js_object, JSObject::kElementsOffset, target);
Return(target);
}
@@ -47,7 +46,7 @@ TF_BUILTIN(GrowFastDoubleElements, CodeStubAssembler) {
Node* context = Parameter(Descriptor::kContext);
Label runtime(this, Label::kDeferred);
- Node* elements = LoadElements(object);
+ TNode<FixedArrayBase> elements = LoadElements(object);
elements = TryGrowElementsCapacity(object, elements, PACKED_DOUBLE_ELEMENTS,
key, &runtime);
Return(elements);
@@ -62,7 +61,7 @@ TF_BUILTIN(GrowFastSmiOrObjectElements, CodeStubAssembler) {
Node* context = Parameter(Descriptor::kContext);
Label runtime(this, Label::kDeferred);
- Node* elements = LoadElements(object);
+ TNode<FixedArrayBase> elements = LoadElements(object);
elements =
TryGrowElementsCapacity(object, elements, PACKED_ELEMENTS, key, &runtime);
Return(elements);
@@ -274,25 +273,24 @@ class RecordWriteCodeStubAssembler : public CodeStubAssembler {
return TaggedEqual(remembered_set, SmiConstant(EMIT_REMEMBERED_SET));
}
- void CallCFunction1WithCallerSavedRegistersMode(MachineType return_type,
- MachineType arg0_type,
- Node* function, Node* arg0,
- Node* mode, Label* next) {
+ void CallCFunction2WithCallerSavedRegistersMode(
+ MachineType return_type, MachineType arg0_type, MachineType arg1_type,
+ Node* function, Node* arg0, Node* arg1, Node* mode, Label* next) {
Label dont_save_fp(this), save_fp(this);
Branch(ShouldSkipFPRegs(mode), &dont_save_fp, &save_fp);
BIND(&dont_save_fp);
{
- CallCFunctionWithCallerSavedRegisters(function, return_type,
- kDontSaveFPRegs,
- std::make_pair(arg0_type, arg0));
+ CallCFunctionWithCallerSavedRegisters(
+ function, return_type, kDontSaveFPRegs,
+ std::make_pair(arg0_type, arg0), std::make_pair(arg1_type, arg1));
Goto(next);
}
BIND(&save_fp);
{
- CallCFunctionWithCallerSavedRegisters(function, return_type,
- kSaveFPRegs,
- std::make_pair(arg0_type, arg0));
+ CallCFunctionWithCallerSavedRegisters(function, return_type, kSaveFPRegs,
+ std::make_pair(arg0_type, arg0),
+ std::make_pair(arg1_type, arg1));
Goto(next);
}
}
@@ -321,34 +319,82 @@ class RecordWriteCodeStubAssembler : public CodeStubAssembler {
}
}
- void InsertToStoreBufferAndGoto(Node* isolate, Node* slot, Node* mode,
- Label* next) {
- TNode<ExternalReference> store_buffer_top_addr =
- ExternalConstant(ExternalReference::store_buffer_top(this->isolate()));
- Node* store_buffer_top =
- Load(MachineType::Pointer(), store_buffer_top_addr);
- StoreNoWriteBarrier(MachineType::PointerRepresentation(), store_buffer_top,
- slot);
- TNode<WordT> new_store_buffer_top =
- IntPtrAdd(store_buffer_top, IntPtrConstant(kSystemPointerSize));
- StoreNoWriteBarrier(MachineType::PointerRepresentation(),
- store_buffer_top_addr, new_store_buffer_top);
-
- TNode<WordT> test =
- WordAnd(new_store_buffer_top,
- IntPtrConstant(Heap::store_buffer_mask_constant()));
-
- Label overflow(this);
- Branch(IntPtrEqual(test, IntPtrConstant(0)), &overflow, next);
-
- BIND(&overflow);
- {
- TNode<ExternalReference> function =
- ExternalConstant(ExternalReference::store_buffer_overflow_function());
- CallCFunction1WithCallerSavedRegistersMode(MachineType::Int32(),
- MachineType::Pointer(),
- function, isolate, mode, next);
- }
+ void InsertIntoRememberedSetAndGotoSlow(Node* isolate, TNode<IntPtrT> object,
+ TNode<IntPtrT> slot, Node* mode,
+ Label* next) {
+ TNode<IntPtrT> page = PageFromAddress(object);
+ TNode<ExternalReference> function =
+ ExternalConstant(ExternalReference::insert_remembered_set_function());
+ CallCFunction2WithCallerSavedRegistersMode(
+ MachineType::Int32(), MachineType::Pointer(), MachineType::Pointer(),
+ function, page, slot, mode, next);
+ }
+
+ void InsertIntoRememberedSetAndGoto(Node* isolate, TNode<IntPtrT> object,
+ TNode<IntPtrT> slot, Node* mode,
+ Label* next) {
+ Label slow_path(this);
+ TNode<IntPtrT> page = PageFromAddress(object);
+
+ // Load address of SlotSet
+ TNode<IntPtrT> slot_set_array = LoadSlotSetArray(page, &slow_path);
+ TNode<IntPtrT> slot_offset = IntPtrSub(slot, page);
+
+ // Load bucket
+ TNode<IntPtrT> bucket = LoadBucket(slot_set_array, slot_offset, &slow_path);
+
+ // Update cell
+ SetBitInCell(bucket, slot_offset);
+
+ Goto(next);
+
+ BIND(&slow_path);
+ InsertIntoRememberedSetAndGotoSlow(isolate, object, slot, mode, next);
+ }
+
+ TNode<IntPtrT> LoadSlotSetArray(TNode<IntPtrT> page, Label* slow_path) {
+ TNode<IntPtrT> slot_set_array = UncheckedCast<IntPtrT>(
+ Load(MachineType::Pointer(), page,
+ IntPtrConstant(MemoryChunk::kOldToNewSlotSetOffset)));
+ GotoIf(WordEqual(slot_set_array, IntPtrConstant(0)), slow_path);
+
+ return slot_set_array;
+ }
+
+ TNode<IntPtrT> LoadBucket(TNode<IntPtrT> slot_set_array,
+ TNode<WordT> slot_offset, Label* slow_path) {
+    // Assume here that the SlotSet consists only of buckets.
+ DCHECK_EQ(SlotSet::kSize, SlotSet::kBuckets * sizeof(SlotSet::Bucket));
+ TNode<WordT> bucket_index =
+ WordShr(slot_offset, SlotSet::kBitsPerBucketLog2 + kTaggedSizeLog2);
+ TNode<IntPtrT> bucket = UncheckedCast<IntPtrT>(
+ Load(MachineType::Pointer(), slot_set_array,
+ WordShl(bucket_index, kSystemPointerSizeLog2)));
+ GotoIf(WordEqual(bucket, IntPtrConstant(0)), slow_path);
+ return bucket;
+ }
+
+ void SetBitInCell(TNode<IntPtrT> bucket, TNode<WordT> slot_offset) {
+ // Load cell value
+ TNode<WordT> cell_offset = WordAnd(
+ WordShr(slot_offset, SlotSet::kBitsPerCellLog2 + kTaggedSizeLog2 -
+ SlotSet::kCellSizeBytesLog2),
+ IntPtrConstant((SlotSet::kCellsPerBucket - 1)
+ << SlotSet::kCellSizeBytesLog2));
+ TNode<IntPtrT> cell_address =
+ UncheckedCast<IntPtrT>(IntPtrAdd(bucket, cell_offset));
+ TNode<IntPtrT> old_cell_value =
+ ChangeInt32ToIntPtr(Load<Int32T>(cell_address));
+
+ // Calculate new cell value
+ TNode<WordT> bit_index = WordAnd(WordShr(slot_offset, kTaggedSizeLog2),
+ IntPtrConstant(SlotSet::kBitsPerCell - 1));
+ TNode<IntPtrT> new_cell_value = UncheckedCast<IntPtrT>(
+ WordOr(old_cell_value, WordShl(IntPtrConstant(1), bit_index)));
+
+ // Update cell value
+ StoreNoWriteBarrier(MachineRepresentation::kWord32, cell_address,
+ TruncateIntPtrToInt32(new_cell_value));
}
};
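
The replacement fast path writes directly into the page's old-to-new SlotSet:
the slot's byte offset within its page is decomposed with shifts and masks
into a bucket index, a cell within the bucket, and a bit within the cell, and
only a missing slot-set array or bucket falls back to the C function. A
standalone model of that address arithmetic, using illustrative constants
(8-byte tagged slots, 32-bit cells, 32 cells per bucket) rather than V8's
actual ones:

    #include <cassert>
    #include <cstdint>

    // Illustrative layout constants (assumptions, not V8's real values).
    constexpr uintptr_t kTaggedSizeLog2 = 3;      // 8-byte slots
    constexpr uintptr_t kBitsPerCellLog2 = 5;     // 32 bits per cell
    constexpr uintptr_t kCellsPerBucketLog2 = 5;  // 32 cells per bucket
    constexpr uintptr_t kBitsPerBucketLog2 =
        kBitsPerCellLog2 + kCellsPerBucketLog2;

    struct SlotPosition {
      uintptr_t bucket;  // which bucket in the page's slot-set array
      uintptr_t cell;    // which 32-bit cell inside that bucket
      uint32_t mask;     // which bit inside that cell
    };

    // Decompose a slot's byte offset within its page, exactly as the CSA
    // code does with shifts and masks: one bit per possible tagged slot.
    SlotPosition Decompose(uintptr_t slot_offset) {
      uintptr_t slot_index = slot_offset >> kTaggedSizeLog2;
      return {
          slot_index >> kBitsPerBucketLog2,
          (slot_index >> kBitsPerCellLog2) &
              ((1u << kCellsPerBucketLog2) - 1),
          uint32_t{1} << (slot_index & ((1u << kBitsPerCellLog2) - 1)),
      };
    }

    int main() {
      // Byte offset 0x2050: slot index 0x40A => bucket 1, cell 0, bit 10.
      SlotPosition p = Decompose(0x2050);
      assert(p.bucket == 1 && p.cell == 0 && p.mask == (1u << 10));
    }

The RecordWrite hunks below feed this path the object address bitcast to a
word, since the page (and hence the slot set) is found from the object.
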
@@ -399,7 +445,10 @@ TF_BUILTIN(RecordWrite, RecordWriteCodeStubAssembler) {
TNode<ExternalReference> isolate_constant =
ExternalConstant(ExternalReference::isolate_address(isolate()));
Node* fp_mode = Parameter(Descriptor::kFPMode);
- InsertToStoreBufferAndGoto(isolate_constant, slot, fp_mode, &exit);
+ TNode<IntPtrT> object =
+ BitcastTaggedToWord(Parameter(Descriptor::kObject));
+ InsertIntoRememberedSetAndGoto(isolate_constant, object, slot, fp_mode,
+ &exit);
}
BIND(&store_buffer_incremental_wb);
@@ -407,8 +456,10 @@ TF_BUILTIN(RecordWrite, RecordWriteCodeStubAssembler) {
TNode<ExternalReference> isolate_constant =
ExternalConstant(ExternalReference::isolate_address(isolate()));
Node* fp_mode = Parameter(Descriptor::kFPMode);
- InsertToStoreBufferAndGoto(isolate_constant, slot, fp_mode,
- &incremental_wb);
+ TNode<IntPtrT> object =
+ BitcastTaggedToWord(Parameter(Descriptor::kObject));
+ InsertIntoRememberedSetAndGoto(isolate_constant, object, slot, fp_mode,
+ &incremental_wb);
}
}
@@ -532,8 +583,8 @@ TF_BUILTIN(DeleteProperty, DeletePropertyBaseAssembler) {
TNode<Smi> language_mode = CAST(Parameter(Descriptor::kLanguageMode));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- VARIABLE(var_index, MachineType::PointerRepresentation());
- VARIABLE(var_unique, MachineRepresentation::kTagged, key);
+ TVARIABLE(IntPtrT, var_index);
+ TVARIABLE(Name, var_unique);
Label if_index(this), if_unique_name(this), if_notunique(this),
if_notfound(this), slow(this), if_proxy(this);
@@ -554,8 +605,7 @@ TF_BUILTIN(DeleteProperty, DeletePropertyBaseAssembler) {
BIND(&if_unique_name);
{
Comment("key is unique name");
- TNode<Name> unique = CAST(var_unique.value());
- CheckForAssociatedProtector(unique, &slow);
+ CheckForAssociatedProtector(var_unique.value(), &slow);
Label dictionary(this), dont_delete(this);
GotoIf(IsDictionaryMap(receiver_map), &dictionary);
@@ -570,8 +620,8 @@ TF_BUILTIN(DeleteProperty, DeletePropertyBaseAssembler) {
TNode<NameDictionary> properties =
CAST(LoadSlowProperties(CAST(receiver)));
- DeleteDictionaryProperty(receiver, properties, unique, context,
- &dont_delete, &if_notfound);
+ DeleteDictionaryProperty(receiver, properties, var_unique.value(),
+ context, &dont_delete, &if_notfound);
}
BIND(&dont_delete);
@@ -587,7 +637,7 @@ TF_BUILTIN(DeleteProperty, DeletePropertyBaseAssembler) {
{
// If the string was not found in the string table, then no object can
// have a property with that name.
- TryInternalizeString(key, &if_index, &var_index, &if_unique_name,
+ TryInternalizeString(CAST(key), &if_index, &var_index, &if_unique_name,
&var_unique, &if_notfound, &slow);
}
@@ -719,11 +769,11 @@ TF_BUILTIN(SetDataProperties, SetOrCopyDataPropertiesAssembler) {
}
TF_BUILTIN(ForInEnumerate, CodeStubAssembler) {
- Node* receiver = Parameter(Descriptor::kReceiver);
- Node* context = Parameter(Descriptor::kContext);
+ TNode<HeapObject> receiver = CAST(Parameter(Descriptor::kReceiver));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
Label if_empty(this), if_runtime(this, Label::kDeferred);
- Node* receiver_map = CheckEnumCache(receiver, &if_empty, &if_runtime);
+ TNode<Map> receiver_map = CheckEnumCache(receiver, &if_empty, &if_runtime);
Return(receiver_map);
BIND(&if_empty);
@@ -934,12 +984,6 @@ void Builtins::Generate_MemCopyUint8Uint8(MacroAssembler* masm) {
}
#endif // !defined(V8_TARGET_ARCH_ARM) && !defined(V8_TARGET_ARCH_MIPS)
-#ifndef V8_TARGET_ARCH_ARM
-void Builtins::Generate_MemCopyUint16Uint8(MacroAssembler* masm) {
- masm->Call(BUILTIN_CODE(masm->isolate(), Illegal), RelocInfo::CODE_TARGET);
-}
-#endif // V8_TARGET_ARCH_ARM
-
#ifndef V8_TARGET_ARCH_IA32
void Builtins::Generate_MemMove(MacroAssembler* masm) {
masm->Call(BUILTIN_CODE(masm->isolate(), Illegal), RelocInfo::CODE_TARGET);
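The RecordWrite hunks above replace the old store-buffer insertion with direct insertion into the page's remembered set: the slot's byte offset is shifted down to a slot index, and one bit per slot is set in a 32-bit SlotSet cell with a plain (unbarriered) store. A minimal standalone sketch of that mapping, with constants assumed for a 64-bit build rather than V8's actual declarations:

    #include <cstdint>

    constexpr int kTaggedSizeLog2 = 3;  // assumption: 8-byte tagged slots
    constexpr int kBitsPerCell = 32;    // one 32-bit cell tracks 32 slots

    // Mark the slot at byte offset `slot_offset` within its page, mirroring
    // the WordShr/WordAnd/WordOr sequence in the hunk above.
    void MarkSlot(uint32_t* cells, uintptr_t slot_offset) {
      uintptr_t slot = slot_offset >> kTaggedSizeLog2;      // byte -> slot index
      uint32_t bit = uint32_t{1} << (slot % kBitsPerCell);  // bit within cell
      cells[slot / kBitsPerCell] |= bit;                    // raw metadata store
    }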
diff --git a/deps/v8/src/builtins/builtins-intl-gen.cc b/deps/v8/src/builtins/builtins-intl-gen.cc
index 1a9a3b7fd9..2330553721 100644
--- a/deps/v8/src/builtins/builtins-intl-gen.cc
+++ b/deps/v8/src/builtins/builtins-intl-gen.cc
@@ -17,9 +17,6 @@
namespace v8 {
namespace internal {
-template <class T>
-using TNode = compiler::TNode<T>;
-
class IntlBuiltinsAssembler : public CodeStubAssembler {
public:
explicit IntlBuiltinsAssembler(compiler::CodeAssemblerState* state)
@@ -30,6 +27,16 @@ class IntlBuiltinsAssembler : public CodeStubAssembler {
const char* method_name);
TNode<JSArray> AllocateEmptyJSArray(TNode<Context> context);
+
+ TNode<IntPtrT> PointerToSeqStringData(TNode<String> seq_string) {
+ CSA_ASSERT(this,
+ IsSequentialStringInstanceType(LoadInstanceType(seq_string)));
+ STATIC_ASSERT(SeqOneByteString::kHeaderSize ==
+ SeqTwoByteString::kHeaderSize);
+ return IntPtrAdd(
+ BitcastTaggedToWord(seq_string),
+ IntPtrConstant(SeqOneByteString::kHeaderSize - kHeapObjectTag));
+ }
};
TF_BUILTIN(StringToLowerCaseIntl, IntlBuiltinsAssembler) {
@@ -61,35 +68,35 @@ TF_BUILTIN(StringToLowerCaseIntl, IntlBuiltinsAssembler) {
&call_c);
{
- Node* const dst_ptr = PointerToSeqStringData(dst);
- VARIABLE(var_cursor, MachineType::PointerRepresentation(),
- IntPtrConstant(0));
+ const TNode<IntPtrT> dst_ptr = PointerToSeqStringData(dst);
+ TVARIABLE(IntPtrT, var_cursor, IntPtrConstant(0));
- TNode<RawPtrT> const start_address = to_direct.PointerToData(&call_c);
+ TNode<IntPtrT> const start_address =
+ ReinterpretCast<IntPtrT>(to_direct.PointerToData(&call_c));
TNode<IntPtrT> const end_address =
Signed(IntPtrAdd(start_address, ChangeUint32ToWord(length)));
TNode<ExternalReference> const to_lower_table_addr =
ExternalConstant(ExternalReference::intl_to_latin1_lower_table());
- VARIABLE(var_did_change, MachineRepresentation::kWord32, Int32Constant(0));
+ TVARIABLE(Word32T, var_did_change, Int32Constant(0));
VariableList push_vars({&var_cursor, &var_did_change}, zone());
- BuildFastLoop(
+ BuildFastLoop<IntPtrT>(
push_vars, start_address, end_address,
- [=, &var_cursor, &var_did_change](Node* current) {
+ [&](TNode<IntPtrT> current) {
TNode<Uint8T> c = Load<Uint8T>(current);
TNode<Uint8T> lower =
Load<Uint8T>(to_lower_table_addr, ChangeInt32ToIntPtr(c));
StoreNoWriteBarrier(MachineRepresentation::kWord8, dst_ptr,
var_cursor.value(), lower);
- var_did_change.Bind(
- Word32Or(Word32NotEqual(c, lower), var_did_change.value()));
+ var_did_change =
+ Word32Or(Word32NotEqual(c, lower), var_did_change.value());
Increment(&var_cursor);
},
- kCharSize, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
+ kCharSize, IndexAdvanceMode::kPost);
// Return the original string if it remained unchanged in order to preserve
// e.g. internalization and private symbols (such as the preserved object
@@ -110,9 +117,9 @@ TF_BUILTIN(StringToLowerCaseIntl, IntlBuiltinsAssembler) {
MachineType type_tagged = MachineType::AnyTagged();
- Node* const result = CallCFunction(function_addr, type_tagged,
- std::make_pair(type_tagged, src),
- std::make_pair(type_tagged, dst));
+ const TNode<String> result = CAST(CallCFunction(
+ function_addr, type_tagged, std::make_pair(type_tagged, src),
+ std::make_pair(type_tagged, dst)));
Return(result);
}
@@ -142,7 +149,7 @@ void IntlBuiltinsAssembler::ListFormatCommon(TNode<Context> context,
TNode<Int32T> argc,
Runtime::FunctionId format_func_id,
const char* method_name) {
- CodeStubArguments args(this, ChangeInt32ToIntPtr(argc));
+ CodeStubArguments args(this, argc);
// Label has_list(this);
// 1. Let lf be this value.
@@ -151,32 +158,18 @@ void IntlBuiltinsAssembler::ListFormatCommon(TNode<Context> context,
// 3. If lf does not have an [[InitializedListFormat]] internal slot, throw a
// TypeError exception.
- ThrowIfNotInstanceType(context, receiver, JS_INTL_LIST_FORMAT_TYPE,
- method_name);
+ ThrowIfNotInstanceType(context, receiver, JS_LIST_FORMAT_TYPE, method_name);
TNode<JSListFormat> list_format = CAST(receiver);
- // 4. If list is not provided or is undefined, then
TNode<Object> list = args.GetOptionalArgumentValue(0);
- Label has_list(this);
- {
- GotoIfNot(IsUndefined(list), &has_list);
- if (format_func_id == Runtime::kFormatList) {
- // a. Return an empty String.
- args.PopAndReturn(EmptyStringConstant());
- } else {
- DCHECK_EQ(format_func_id, Runtime::kFormatListToParts);
- // a. Return an empty Array.
- args.PopAndReturn(AllocateEmptyJSArray(context));
- }
- }
- BIND(&has_list);
{
- // 5. Let x be ? IterableToList(list).
- TNode<Object> x =
- CallBuiltin(Builtins::kIterableToListWithSymbolLookup, context, list);
+ // 4. Let stringList be ? StringListFromIterable(list).
+ TNode<Object> string_list =
+ CallBuiltin(Builtins::kStringListFromIterable, context, list);
- // 6. Return ? FormatList(lf, x).
- args.PopAndReturn(CallRuntime(format_func_id, context, list_format, x));
+ // 6. Return ? FormatList(lf, stringList).
+ args.PopAndReturn(
+ CallRuntime(format_func_id, context, list_format, string_list));
}
}
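The StringToLowerCaseIntl hunks above are mostly a typing pass (VARIABLE to TVARIABLE, BuildFastLoop<IntPtrT>), but the fast path they touch is worth spelling out: each source byte is mapped through a 256-entry Latin-1 lowering table, and a did-change flag lets the builtin return the original string when nothing changed, preserving internalization and attached private symbols. A plain-C++ sketch of that loop, with the table contents (ICU's) elided as an assumption:

    #include <cstddef>
    #include <cstdint>

    // Returns true if any byte changed; when it returns false the caller can
    // hand back the original string object unchanged.
    bool ToLowerLatin1(const uint8_t* src, uint8_t* dst, size_t length,
                       const uint8_t to_lower_table[256]) {
      bool did_change = false;
      for (size_t i = 0; i < length; ++i) {
        uint8_t lower = to_lower_table[src[i]];
        did_change |= (lower != src[i]);
        dst[i] = lower;
      }
      return did_change;
    }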
diff --git a/deps/v8/src/builtins/builtins-intl.cc b/deps/v8/src/builtins/builtins-intl.cc
index ff8e96f4f5..81954a481f 100644
--- a/deps/v8/src/builtins/builtins-intl.cc
+++ b/deps/v8/src/builtins/builtins-intl.cc
@@ -83,13 +83,8 @@ BUILTIN(NumberFormatPrototypeFormatToParts) {
Handle<Object> x;
if (args.length() >= 2) {
- if (FLAG_harmony_intl_bigint) {
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, x, Object::ToNumeric(isolate, args.at(1)));
- } else {
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, x,
- Object::ToNumber(isolate, args.at(1)));
- }
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, x,
+ Object::ToNumeric(isolate, args.at(1)));
} else {
x = isolate->factory()->nan_value();
}
@@ -282,8 +277,8 @@ Object LegacyFormatConstructor(BuiltinArguments args, Isolate* isolate,
// 3. Perform ? Initialize<T>(Format, locales, options).
Handle<T> format;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, format,
- T::New(isolate, map, locales, options));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, format, T::New(isolate, map, locales, options, method));
// 4. Let this be the this value.
Handle<Object> receiver = args.receiver();
@@ -367,7 +362,8 @@ Object DisallowCallConstructor(BuiltinArguments args, Isolate* isolate,
* Common code shared by Collator and V8BreakIterator
*/
template <class T>
-Object CallOrConstructConstructor(BuiltinArguments args, Isolate* isolate) {
+Object CallOrConstructConstructor(BuiltinArguments args, Isolate* isolate,
+ const char* method) {
Handle<JSReceiver> new_target;
if (args.new_target()->IsUndefined(isolate)) {
@@ -386,7 +382,8 @@ Object CallOrConstructConstructor(BuiltinArguments args, Isolate* isolate) {
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, map, JSFunction::GetDerivedMap(isolate, target, new_target));
- RETURN_RESULT_OR_FAILURE(isolate, T::New(isolate, map, locales, options));
+ RETURN_RESULT_OR_FAILURE(isolate,
+ T::New(isolate, map, locales, options, method));
}
} // namespace
@@ -466,13 +463,8 @@ BUILTIN(NumberFormatInternalFormatNumber) {
// 4. Let x be ? ToNumeric(value).
Handle<Object> numeric_obj;
- if (FLAG_harmony_intl_bigint) {
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, numeric_obj,
- Object::ToNumeric(isolate, value));
- } else {
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, numeric_obj,
- Object::ToNumber(isolate, value));
- }
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, numeric_obj,
+ Object::ToNumeric(isolate, value));
icu::number::LocalizedNumberFormatter* icu_localized_number_formatter =
number_format->icu_number_formatter().raw();
@@ -884,7 +876,7 @@ BUILTIN(CollatorConstructor) {
isolate->CountUsage(v8::Isolate::UseCounterFeature::kCollator);
- return CallOrConstructConstructor<JSCollator>(args, isolate);
+ return CallOrConstructConstructor<JSCollator>(args, isolate, "Intl.Collator");
}
BUILTIN(CollatorPrototypeResolvedOptions) {
@@ -1069,7 +1061,8 @@ BUILTIN(SegmenterPrototypeSegment) {
BUILTIN(V8BreakIteratorConstructor) {
HandleScope scope(isolate);
- return CallOrConstructConstructor<JSV8BreakIterator>(args, isolate);
+ return CallOrConstructConstructor<JSV8BreakIterator>(args, isolate,
+ "Intl.v8BreakIterator");
}
BUILTIN(V8BreakIteratorPrototypeResolvedOptions) {
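Two threads run through this file: the FLAG_harmony_intl_bigint branches disappear, so formatting now always coerces with Object::ToNumeric (BigInt included), and every T::New(isolate, map, locales, options) call gains a method string such as "Intl.Collator". That string appears to exist purely for diagnostics; a hypothetical sketch of the pattern (types and names are illustrative, not V8's):

    #include <stdexcept>
    #include <string>

    struct Options { bool valid = false; };

    // The extra `method` argument changes nothing on the happy path; it only
    // lets failures name the entry point the user actually called.
    void InitializeFormat(const Options& options, const char* method) {
      if (!options.valid) {
        throw std::invalid_argument(std::string(method) + ": invalid options");
      }
    }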
diff --git a/deps/v8/src/builtins/builtins-iterator-gen.cc b/deps/v8/src/builtins/builtins-iterator-gen.cc
index 7bd5acfdcd..2f8761902b 100644
--- a/deps/v8/src/builtins/builtins-iterator-gen.cc
+++ b/deps/v8/src/builtins/builtins-iterator-gen.cc
@@ -241,6 +241,104 @@ TF_BUILTIN(IterableToList, IteratorBuiltinsAssembler) {
Return(IterableToList(context, iterable, iterator_fn));
}
+TF_BUILTIN(IterableToFixedArrayForWasm, IteratorBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> iterable = CAST(Parameter(Descriptor::kIterable));
+ TNode<Smi> expected_length = CAST(Parameter(Descriptor::kExpectedLength));
+
+ TNode<Object> iterator_fn = GetIteratorMethod(context, iterable);
+
+ IteratorRecord iterator_record = GetIterator(context, iterable, iterator_fn);
+
+ GrowableFixedArray values(state());
+
+ Variable* vars[] = {values.var_array(), values.var_length(),
+ values.var_capacity()};
+ Label loop_start(this, 3, vars), compare_length(this), done(this);
+ Goto(&loop_start);
+ BIND(&loop_start);
+ {
+ TNode<JSReceiver> next =
+ IteratorStep(context, iterator_record, &compare_length);
+ TNode<Object> next_value = IteratorValue(context, next);
+ values.Push(next_value);
+ Goto(&loop_start);
+ }
+
+ BIND(&compare_length);
+ GotoIf(WordEqual(SmiUntag(expected_length), values.var_length()->value()),
+ &done);
+ Return(CallRuntime(
+ Runtime::kThrowTypeError, context,
+ SmiConstant(MessageTemplate::kWasmTrapMultiReturnLengthMismatch)));
+
+ BIND(&done);
+ Return(values.var_array()->value());
+}
+
+TNode<JSArray> IteratorBuiltinsAssembler::StringListFromIterable(
+ TNode<Context> context, TNode<Object> iterable) {
+ Label done(this);
+ GrowableFixedArray list(state());
+ // 1. If iterable is undefined, then
+ // a. Return a new empty List.
+ GotoIf(IsUndefined(iterable), &done);
+
+ // 2. Let iteratorRecord be ? GetIterator(items).
+ IteratorRecord iterator_record = GetIterator(context, iterable);
+
+ // 3. Let list be a new empty List.
+
+ Variable* vars[] = {list.var_array(), list.var_length(), list.var_capacity()};
+ Label loop_start(this, 3, vars);
+ Goto(&loop_start);
+ // 4. Let next be true.
+ // 5. Repeat, while next is not false
+ Label if_isnotstringtype(this, Label::kDeferred),
+ if_exception(this, Label::kDeferred);
+ BIND(&loop_start);
+ {
+ // a. Set next to ? IteratorStep(iteratorRecord).
+ TNode<JSReceiver> next = IteratorStep(context, iterator_record, &done);
+ // b. If next is not false, then
+ // i. Let nextValue be ? IteratorValue(next).
+ TNode<Object> next_value = IteratorValue(context, next);
+ // ii. If Type(nextValue) is not String, then
+ GotoIf(TaggedIsSmi(next_value), &if_isnotstringtype);
+ TNode<Uint16T> next_value_type = LoadInstanceType(CAST(next_value));
+ GotoIfNot(IsStringInstanceType(next_value_type), &if_isnotstringtype);
+ // iii. Append nextValue to the end of the List list.
+ list.Push(next_value);
+ Goto(&loop_start);
+ // 5.b.ii
+ BIND(&if_isnotstringtype);
+ {
+ // 1. Let error be ThrowCompletion(a newly created TypeError object).
+ TVARIABLE(Object, var_exception);
+ TNode<Object> ret = CallRuntime(
+ Runtime::kThrowTypeError, context,
+ SmiConstant(MessageTemplate::kIterableYieldedNonString), next_value);
+ GotoIfException(ret, &if_exception, &var_exception);
+ Unreachable();
+
+ // 2. Return ? IteratorClose(iteratorRecord, error).
+ BIND(&if_exception);
+ IteratorCloseOnException(context, iterator_record, var_exception.value());
+ }
+ }
+
+ BIND(&done);
+ // 6. Return list.
+ return list.ToJSArray(context);
+}
+
+TF_BUILTIN(StringListFromIterable, IteratorBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> iterable = CAST(Parameter(Descriptor::kIterable));
+
+ Return(StringListFromIterable(context, iterable));
+}
+
// This builtin always returns a new JSArray and is thus safe to use even in the
// presence of code that may call back into user-JS. This builtin will take the
// fast path if the iterable is a fast array and the Array prototype and the
@@ -354,5 +452,19 @@ TF_BUILTIN(IterableToListWithSymbolLookup, IteratorBuiltinsAssembler) {
}
}
+TF_BUILTIN(GetIteratorWithFeedbackLazyDeoptContinuation,
+ IteratorBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ TNode<Smi> callSlot = CAST(Parameter(Descriptor::kCallSlot));
+ TNode<FeedbackVector> feedback = CAST(Parameter(Descriptor::kFeedback));
+ TNode<Object> iteratorMethod = CAST(Parameter(Descriptor::kResult));
+
+ TNode<Object> result =
+ CallBuiltin(Builtins::kCallIteratorWithFeedback, context, receiver,
+ iteratorMethod, callSlot, feedback);
+ Return(result);
+}
+
} // namespace internal
} // namespace v8
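StringListFromIterable above implements CreateStringListFromIterable from the Intl.ListFormat proposal: step the iterator, require every yielded value to be a String, and close the iterator before propagating a type error. The Wasm fixed-array builtin earlier in the file applies the same iterate-then-validate shape to a length check. A scalar model with stand-in types, not V8's:

    #include <stdexcept>
    #include <string>
    #include <vector>

    struct Value { bool is_string = false; std::string str; };

    // Iter is any type providing `bool Next(Value*)` and `void Close()`.
    template <typename Iter>
    std::vector<std::string> StringListFromIterable(Iter& it) {
      std::vector<std::string> list;            // 3. a new empty List
      Value v;
      while (it.Next(&v)) {                     // 5. while next is not false
        if (!v.is_string) {                     // 5.b.ii not a String:
          it.Close();                           //   close the iterator first,
          throw std::runtime_error("iterable yielded non-string value");
        }
        list.push_back(v.str);                  // 5.b.iii append nextValue
      }
      return list;                              // 6. Return list
    }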
diff --git a/deps/v8/src/builtins/builtins-iterator-gen.h b/deps/v8/src/builtins/builtins-iterator-gen.h
index 2a0a510f73..7d6e7d5b81 100644
--- a/deps/v8/src/builtins/builtins-iterator-gen.h
+++ b/deps/v8/src/builtins/builtins-iterator-gen.h
@@ -68,6 +68,11 @@ class IteratorBuiltinsAssembler : public CodeStubAssembler {
TNode<JSArray> IterableToList(TNode<Context> context, TNode<Object> iterable,
TNode<Object> iterator_fn);
+ // Currently at https://tc39.github.io/proposal-intl-list-format/
+ // #sec-createstringlistfromiterable
+ TNode<JSArray> StringListFromIterable(TNode<Context> context,
+ TNode<Object> iterable);
+
void FastIterableToList(TNode<Context> context, TNode<Object> iterable,
TVariable<Object>* var_result, Label* slow);
};
diff --git a/deps/v8/src/builtins/builtins-math-gen.cc b/deps/v8/src/builtins/builtins-math-gen.cc
index 42d0162f38..3bae7c06c3 100644
--- a/deps/v8/src/builtins/builtins-math-gen.cc
+++ b/deps/v8/src/builtins/builtins-math-gen.cc
@@ -143,20 +143,18 @@ void MathBuiltinsAssembler::MathRoundingOperation(
}
void MathBuiltinsAssembler::MathMaxMin(
- Node* context, Node* argc,
+ TNode<Context> context, TNode<Int32T> argc,
TNode<Float64T> (CodeStubAssembler::*float64op)(SloppyTNode<Float64T>,
SloppyTNode<Float64T>),
double default_val) {
- CodeStubArguments arguments(this, ChangeInt32ToIntPtr(argc));
- argc = arguments.GetLength(INTPTR_PARAMETERS);
+ CodeStubArguments arguments(this, argc);
- VARIABLE(result, MachineRepresentation::kFloat64);
- result.Bind(Float64Constant(default_val));
+ TVARIABLE(Float64T, result, Float64Constant(default_val));
CodeStubAssembler::VariableList vars({&result}, zone());
- arguments.ForEach(vars, [=, &result](Node* arg) {
- Node* float_value = TruncateTaggedToFloat64(context, arg);
- result.Bind((this->*float64op)(result.value(), float_value));
+ arguments.ForEach(vars, [&](TNode<Object> arg) {
+ TNode<Float64T> float_value = TruncateTaggedToFloat64(context, arg);
+ result = (this->*float64op)(result.value(), float_value);
});
arguments.PopAndReturn(ChangeFloat64ToTagged(result.value()));
@@ -181,8 +179,8 @@ TF_BUILTIN(MathImul, CodeStubAssembler) {
Node* context = Parameter(Descriptor::kContext);
Node* x = Parameter(Descriptor::kX);
Node* y = Parameter(Descriptor::kY);
- Node* x_value = TruncateTaggedToWord32(context, x);
- Node* y_value = TruncateTaggedToWord32(context, y);
+ TNode<Word32T> x_value = TruncateTaggedToWord32(context, x);
+ TNode<Word32T> y_value = TruncateTaggedToWord32(context, y);
TNode<Int32T> value = Signed(Int32Mul(x_value, y_value));
TNode<Number> result = ChangeInt32ToTagged(value);
Return(result);
@@ -191,8 +189,8 @@ TF_BUILTIN(MathImul, CodeStubAssembler) {
CodeStubAssembler::Node* MathBuiltinsAssembler::MathPow(Node* context,
Node* base,
Node* exponent) {
- Node* base_value = TruncateTaggedToFloat64(context, base);
- Node* exponent_value = TruncateTaggedToFloat64(context, exponent);
+ TNode<Float64T> base_value = TruncateTaggedToFloat64(context, base);
+ TNode<Float64T> exponent_value = TruncateTaggedToFloat64(context, exponent);
TNode<Float64T> value = Float64Pow(base_value, exponent_value);
return ChangeFloat64ToTagged(value);
}
@@ -260,19 +258,17 @@ TF_BUILTIN(MathTrunc, MathBuiltinsAssembler) {
// ES6 #sec-math.max
TF_BUILTIN(MathMax, MathBuiltinsAssembler) {
- // TODO(ishell): use constants from Descriptor once the JSFunction linkage
- // arguments are reordered.
- Node* context = Parameter(Descriptor::kContext);
- Node* argc = Parameter(Descriptor::kJSActualArgumentsCount);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Int32T> argc =
+ UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount));
MathMaxMin(context, argc, &CodeStubAssembler::Float64Max, -1.0 * V8_INFINITY);
}
// ES6 #sec-math.min
TF_BUILTIN(MathMin, MathBuiltinsAssembler) {
- // TODO(ishell): use constants from Descriptor once the JSFunction linkage
- // arguments are reordered.
- Node* context = Parameter(Descriptor::kContext);
- Node* argc = Parameter(Descriptor::kJSActualArgumentsCount);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Int32T> argc =
+ UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount));
MathMaxMin(context, argc, &CodeStubAssembler::Float64Min, V8_INFINITY);
}
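MathMaxMin above folds every argument into a Float64 accumulator seeded with the identity element: -Infinity for Math.max, +Infinity for Math.min. A scalar model of the Math.max fold, matching the spec's NaN and signed-zero behavior (ordinary C++, not V8's Float64Max):

    #include <cmath>
    #include <vector>

    // Math.max() of no arguments is -Infinity, so that is the fold identity;
    // NaN poisons the result and +0 beats -0, as the spec requires.
    double MathMax(const std::vector<double>& args) {
      double result = -INFINITY;
      for (double v : args) {
        if (std::isnan(v)) return NAN;
        if (v > result || (v == 0.0 && result == 0.0 && !std::signbit(v))) {
          result = v;
        }
      }
      return result;
    }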
diff --git a/deps/v8/src/builtins/builtins-math-gen.h b/deps/v8/src/builtins/builtins-math-gen.h
index 4bb76d9692..4de654fa20 100644
--- a/deps/v8/src/builtins/builtins-math-gen.h
+++ b/deps/v8/src/builtins/builtins-math-gen.h
@@ -21,7 +21,7 @@ class MathBuiltinsAssembler : public CodeStubAssembler {
void MathRoundingOperation(
Node* context, Node* x,
TNode<Float64T> (CodeStubAssembler::*float64op)(SloppyTNode<Float64T>));
- void MathMaxMin(Node* context, Node* argc,
+ void MathMaxMin(TNode<Context> context, TNode<Int32T> argc,
TNode<Float64T> (CodeStubAssembler::*float64op)(
SloppyTNode<Float64T>, SloppyTNode<Float64T>),
double default_val);
diff --git a/deps/v8/src/builtins/builtins-microtask-queue-gen.cc b/deps/v8/src/builtins/builtins-microtask-queue-gen.cc
index 427fd6edb6..62aee3b300 100644
--- a/deps/v8/src/builtins/builtins-microtask-queue-gen.cc
+++ b/deps/v8/src/builtins/builtins-microtask-queue-gen.cc
@@ -14,9 +14,6 @@
namespace v8 {
namespace internal {
-template <typename T>
-using TNode = compiler::TNode<T>;
-
class MicrotaskQueueBuiltinsAssembler : public CodeStubAssembler {
public:
explicit MicrotaskQueueBuiltinsAssembler(compiler::CodeAssemblerState* state)
@@ -60,23 +57,20 @@ TNode<RawPtrT> MicrotaskQueueBuiltinsAssembler::GetMicrotaskQueue(
TNode<RawPtrT> MicrotaskQueueBuiltinsAssembler::GetMicrotaskRingBuffer(
TNode<RawPtrT> microtask_queue) {
- return UncheckedCast<RawPtrT>(
- Load(MachineType::Pointer(), microtask_queue,
- IntPtrConstant(MicrotaskQueue::kRingBufferOffset)));
+ return Load<RawPtrT>(microtask_queue,
+ IntPtrConstant(MicrotaskQueue::kRingBufferOffset));
}
TNode<IntPtrT> MicrotaskQueueBuiltinsAssembler::GetMicrotaskQueueCapacity(
TNode<RawPtrT> microtask_queue) {
- return UncheckedCast<IntPtrT>(
- Load(MachineType::IntPtr(), microtask_queue,
- IntPtrConstant(MicrotaskQueue::kCapacityOffset)));
+ return Load<IntPtrT>(microtask_queue,
+ IntPtrConstant(MicrotaskQueue::kCapacityOffset));
}
TNode<IntPtrT> MicrotaskQueueBuiltinsAssembler::GetMicrotaskQueueSize(
TNode<RawPtrT> microtask_queue) {
- return UncheckedCast<IntPtrT>(
- Load(MachineType::IntPtr(), microtask_queue,
- IntPtrConstant(MicrotaskQueue::kSizeOffset)));
+ return Load<IntPtrT>(microtask_queue,
+ IntPtrConstant(MicrotaskQueue::kSizeOffset));
}
void MicrotaskQueueBuiltinsAssembler::SetMicrotaskQueueSize(
@@ -87,9 +81,8 @@ void MicrotaskQueueBuiltinsAssembler::SetMicrotaskQueueSize(
TNode<IntPtrT> MicrotaskQueueBuiltinsAssembler::GetMicrotaskQueueStart(
TNode<RawPtrT> microtask_queue) {
- return UncheckedCast<IntPtrT>(
- Load(MachineType::IntPtr(), microtask_queue,
- IntPtrConstant(MicrotaskQueue::kStartOffset)));
+ return Load<IntPtrT>(microtask_queue,
+ IntPtrConstant(MicrotaskQueue::kStartOffset));
}
void MicrotaskQueueBuiltinsAssembler::SetMicrotaskQueueStart(
@@ -125,7 +118,7 @@ void MicrotaskQueueBuiltinsAssembler::RunSingleMicrotask(
TNode<Map> microtask_map = LoadMap(microtask);
TNode<Uint16T> microtask_type = LoadMapInstanceType(microtask_map);
- VARIABLE(var_exception, MachineRepresentation::kTagged, TheHoleConstant());
+ TVARIABLE(HeapObject, var_exception, TheHoleConstant());
Label if_exception(this, Label::kDeferred);
Label is_callable(this), is_callback(this),
is_promise_fulfill_reaction_job(this),
@@ -295,9 +288,9 @@ void MicrotaskQueueBuiltinsAssembler::RunSingleMicrotask(
void MicrotaskQueueBuiltinsAssembler::IncrementFinishedMicrotaskCount(
TNode<RawPtrT> microtask_queue) {
- TNode<IntPtrT> count = UncheckedCast<IntPtrT>(
- Load(MachineType::IntPtr(), microtask_queue,
- IntPtrConstant(MicrotaskQueue::kFinishedMicrotaskCountOffset)));
+ TNode<IntPtrT> count = Load<IntPtrT>(
+ microtask_queue,
+ IntPtrConstant(MicrotaskQueue::kFinishedMicrotaskCountOffset));
TNode<IntPtrT> new_count = IntPtrAdd(count, IntPtrConstant(1));
StoreNoWriteBarrier(
MachineType::PointerRepresentation(), microtask_queue,
@@ -306,6 +299,8 @@ void MicrotaskQueueBuiltinsAssembler::IncrementFinishedMicrotaskCount(
TNode<Context> MicrotaskQueueBuiltinsAssembler::GetCurrentContext() {
auto ref = ExternalReference::Create(kContextAddress, isolate());
+ // TODO(delphick): Add a checked cast. For now this is not possible as context
+ // can actually be Smi(0).
return TNode<Context>::UncheckedCast(LoadFullTagged(ExternalConstant(ref)));
}
@@ -317,15 +312,13 @@ void MicrotaskQueueBuiltinsAssembler::SetCurrentContext(
TNode<IntPtrT> MicrotaskQueueBuiltinsAssembler::GetEnteredContextCount() {
auto ref = ExternalReference::handle_scope_implementer_address(isolate());
- Node* hsi = Load(MachineType::Pointer(), ExternalConstant(ref));
+ TNode<RawPtrT> hsi = Load<RawPtrT>(ExternalConstant(ref));
using ContextStack = DetachableVector<Context>;
TNode<IntPtrT> size_offset =
IntPtrConstant(HandleScopeImplementer::kEnteredContextsOffset +
ContextStack::kSizeOffset);
- TNode<IntPtrT> size =
- UncheckedCast<IntPtrT>(Load(MachineType::IntPtr(), hsi, size_offset));
- return size;
+ return Load<IntPtrT>(hsi, size_offset);
}
void MicrotaskQueueBuiltinsAssembler::EnterMicrotaskContext(
@@ -333,7 +326,7 @@ void MicrotaskQueueBuiltinsAssembler::EnterMicrotaskContext(
CSA_ASSERT(this, IsNativeContext(native_context));
auto ref = ExternalReference::handle_scope_implementer_address(isolate());
- Node* hsi = Load(MachineType::Pointer(), ExternalConstant(ref));
+ TNode<RawPtrT> hsi = Load<RawPtrT>(ExternalConstant(ref));
using ContextStack = DetachableVector<Context>;
TNode<IntPtrT> capacity_offset =
@@ -343,10 +336,8 @@ void MicrotaskQueueBuiltinsAssembler::EnterMicrotaskContext(
IntPtrConstant(HandleScopeImplementer::kEnteredContextsOffset +
ContextStack::kSizeOffset);
- TNode<IntPtrT> capacity =
- UncheckedCast<IntPtrT>(Load(MachineType::IntPtr(), hsi, capacity_offset));
- TNode<IntPtrT> size =
- UncheckedCast<IntPtrT>(Load(MachineType::IntPtr(), hsi, size_offset));
+ TNode<IntPtrT> capacity = Load<IntPtrT>(hsi, capacity_offset);
+ TNode<IntPtrT> size = Load<IntPtrT>(hsi, size_offset);
Label if_append(this), if_grow(this, Label::kDeferred), done(this);
Branch(WordEqual(size, capacity), &if_grow, &if_append);
@@ -355,7 +346,7 @@ void MicrotaskQueueBuiltinsAssembler::EnterMicrotaskContext(
TNode<IntPtrT> data_offset =
IntPtrConstant(HandleScopeImplementer::kEnteredContextsOffset +
ContextStack::kDataOffset);
- Node* data = Load(MachineType::Pointer(), hsi, data_offset);
+ TNode<RawPtrT> data = Load<RawPtrT>(hsi, data_offset);
StoreFullTaggedNoWriteBarrier(data, TimesSystemPointerSize(size),
native_context);
@@ -367,7 +358,7 @@ void MicrotaskQueueBuiltinsAssembler::EnterMicrotaskContext(
TNode<IntPtrT> flag_data_offset =
IntPtrConstant(HandleScopeImplementer::kIsMicrotaskContextOffset +
FlagStack::kDataOffset);
- Node* flag_data = Load(MachineType::Pointer(), hsi, flag_data_offset);
+ TNode<RawPtrT> flag_data = Load<RawPtrT>(hsi, flag_data_offset);
StoreNoWriteBarrier(MachineRepresentation::kWord8, flag_data, size,
BoolConstant(true));
StoreNoWriteBarrier(
@@ -396,7 +387,7 @@ void MicrotaskQueueBuiltinsAssembler::EnterMicrotaskContext(
void MicrotaskQueueBuiltinsAssembler::RewindEnteredContext(
TNode<IntPtrT> saved_entered_context_count) {
auto ref = ExternalReference::handle_scope_implementer_address(isolate());
- Node* hsi = Load(MachineType::Pointer(), ExternalConstant(ref));
+ TNode<RawPtrT> hsi = Load<RawPtrT>(ExternalConstant(ref));
using ContextStack = DetachableVector<Context>;
TNode<IntPtrT> size_offset =
@@ -404,8 +395,7 @@ void MicrotaskQueueBuiltinsAssembler::RewindEnteredContext(
ContextStack::kSizeOffset);
#ifdef ENABLE_VERIFY_CSA
- TNode<IntPtrT> size =
- UncheckedCast<IntPtrT>(Load(MachineType::IntPtr(), hsi, size_offset));
+ TNode<IntPtrT> size = Load<IntPtrT>(hsi, size_offset);
CSA_ASSERT(this, IntPtrLessThan(IntPtrConstant(0), size));
CSA_ASSERT(this, IntPtrLessThanOrEqual(saved_entered_context_count, size));
#endif
@@ -446,8 +436,7 @@ void MicrotaskQueueBuiltinsAssembler::RunPromiseHook(
}
TF_BUILTIN(EnqueueMicrotask, MicrotaskQueueBuiltinsAssembler) {
- TNode<Microtask> microtask =
- UncheckedCast<Microtask>(Parameter(Descriptor::kMicrotask));
+ TNode<Microtask> microtask = CAST(Parameter(Descriptor::kMicrotask));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TNode<NativeContext> native_context = LoadNativeContext(context);
TNode<RawPtrT> microtask_queue = GetMicrotaskQueue(native_context);
@@ -517,8 +506,7 @@ TF_BUILTIN(RunMicrotasks, MicrotaskQueueBuiltinsAssembler) {
TNode<IntPtrT> offset =
CalculateRingBufferOffset(capacity, start, IntPtrConstant(0));
- TNode<RawPtrT> microtask_pointer =
- UncheckedCast<RawPtrT>(Load(MachineType::Pointer(), ring_buffer, offset));
+ TNode<RawPtrT> microtask_pointer = Load<RawPtrT>(ring_buffer, offset);
TNode<Microtask> microtask = CAST(BitcastWordToTagged(microtask_pointer));
TNode<IntPtrT> new_size = IntPtrSub(size, IntPtrConstant(1));
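Most hunks in this file collapse UncheckedCast<T>(Load(MachineType::..., base, offset)) into the typed Load<T>(base, offset) helper, dropping a cast the type system can now supply. The same idea in freestanding C++, as a hypothetical helper rather than CSA's actual one:

    #include <cstddef>
    #include <cstring>

    // Read a field of type T at `offset` bytes from `base` and return it
    // already typed, so callers no longer write a cast at every use site.
    template <typename T>
    T LoadField(const void* base, std::ptrdiff_t offset) {
      T value;
      std::memcpy(&value, static_cast<const char*>(base) + offset, sizeof(T));
      return value;
    }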
diff --git a/deps/v8/src/builtins/builtins-number-gen.cc b/deps/v8/src/builtins/builtins-number-gen.cc
index 2aa996eba0..fc737b793b 100644
--- a/deps/v8/src/builtins/builtins-number-gen.cc
+++ b/deps/v8/src/builtins/builtins-number-gen.cc
@@ -22,57 +22,58 @@ class NumberBuiltinsAssembler : public CodeStubAssembler {
protected:
template <typename Descriptor>
void EmitBitwiseOp(Operation op) {
- Node* left = Parameter(Descriptor::kLeft);
- Node* right = Parameter(Descriptor::kRight);
- Node* context = Parameter(Descriptor::kContext);
-
- VARIABLE(var_left_word32, MachineRepresentation::kWord32);
- VARIABLE(var_right_word32, MachineRepresentation::kWord32);
- VARIABLE(var_left_bigint, MachineRepresentation::kTagged, left);
- VARIABLE(var_right_bigint, MachineRepresentation::kTagged);
+ TNode<Object> left = CAST(Parameter(Descriptor::kLeft));
+ TNode<Object> right = CAST(Parameter(Descriptor::kRight));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+
+ TVARIABLE(Word32T, var_left_word32);
+ TVARIABLE(Word32T, var_right_word32);
+ TVARIABLE(Object, var_left_maybe_bigint, left);
+ TVARIABLE(Object, var_right_maybe_bigint);
Label if_left_number(this), do_number_op(this);
Label if_left_bigint(this), do_bigint_op(this);
TaggedToWord32OrBigInt(context, left, &if_left_number, &var_left_word32,
- &if_left_bigint, &var_left_bigint);
+ &if_left_bigint, &var_left_maybe_bigint);
BIND(&if_left_number);
TaggedToWord32OrBigInt(context, right, &do_number_op, &var_right_word32,
- &do_bigint_op, &var_right_bigint);
+ &do_bigint_op, &var_right_maybe_bigint);
BIND(&do_number_op);
Return(BitwiseOp(var_left_word32.value(), var_right_word32.value(), op));
// BigInt cases.
BIND(&if_left_bigint);
- TaggedToNumeric(context, right, &do_bigint_op, &var_right_bigint);
+ TaggedToNumeric(context, right, &do_bigint_op, &var_right_maybe_bigint);
BIND(&do_bigint_op);
Return(CallRuntime(Runtime::kBigIntBinaryOp, context,
- var_left_bigint.value(), var_right_bigint.value(),
- SmiConstant(op)));
+ var_left_maybe_bigint.value(),
+ var_right_maybe_bigint.value(), SmiConstant(op)));
}
template <typename Descriptor>
void RelationalComparisonBuiltin(Operation op) {
- Node* lhs = Parameter(Descriptor::kLeft);
- Node* rhs = Parameter(Descriptor::kRight);
- Node* context = Parameter(Descriptor::kContext);
+ TNode<Object> lhs = CAST(Parameter(Descriptor::kLeft));
+ TNode<Object> rhs = CAST(Parameter(Descriptor::kRight));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
Return(RelationalComparison(op, lhs, rhs, context));
}
template <typename Descriptor>
- void UnaryOp(Variable* var_input, Label* do_smi, Label* do_double,
- Variable* var_input_double, Label* do_bigint);
+ void UnaryOp(TVariable<Object>* var_input, Label* do_smi, Label* do_double,
+ TVariable<Float64T>* var_input_double, Label* do_bigint);
template <typename Descriptor>
- void BinaryOp(Label* smis, Variable* var_left, Variable* var_right,
- Label* doubles, Variable* var_left_double,
- Variable* var_right_double, Label* bigints);
+ void BinaryOp(Label* smis, TVariable<Object>* var_left,
+ TVariable<Object>* var_right, Label* doubles,
+ TVariable<Float64T>* var_left_double,
+ TVariable<Float64T>* var_right_double, Label* bigints);
};
// ES6 #sec-number.isfinite
TF_BUILTIN(NumberIsFinite, CodeStubAssembler) {
- Node* number = Parameter(Descriptor::kNumber);
+ TNode<Object> number = CAST(Parameter(Descriptor::kNumber));
Label return_true(this), return_false(this);
@@ -80,10 +81,11 @@ TF_BUILTIN(NumberIsFinite, CodeStubAssembler) {
GotoIf(TaggedIsSmi(number), &return_true);
// Check if {number} is a HeapNumber.
- GotoIfNot(IsHeapNumber(number), &return_false);
+ TNode<HeapObject> number_heap_object = CAST(number);
+ GotoIfNot(IsHeapNumber(number_heap_object), &return_false);
// Check if {number} contains a finite, non-NaN value.
- TNode<Float64T> number_value = LoadHeapNumberValue(number);
+ TNode<Float64T> number_value = LoadHeapNumberValue(number_heap_object);
BranchIfFloat64IsNaN(Float64Sub(number_value, number_value), &return_false,
&return_true);
@@ -107,7 +109,7 @@ TF_BUILTIN(NumberIsInteger, CodeStubAssembler) {
// ES6 #sec-number.isnan
TF_BUILTIN(NumberIsNaN, CodeStubAssembler) {
- Node* number = Parameter(Descriptor::kNumber);
+ TNode<Object> number = CAST(Parameter(Descriptor::kNumber));
Label return_true(this), return_false(this);
@@ -115,10 +117,11 @@ TF_BUILTIN(NumberIsNaN, CodeStubAssembler) {
GotoIf(TaggedIsSmi(number), &return_false);
// Check if {number} is a HeapNumber.
- GotoIfNot(IsHeapNumber(number), &return_false);
+ TNode<HeapObject> number_heap_object = CAST(number);
+ GotoIfNot(IsHeapNumber(number_heap_object), &return_false);
// Check if {number} contains a NaN value.
- TNode<Float64T> number_value = LoadHeapNumberValue(number);
+ TNode<Float64T> number_value = LoadHeapNumberValue(number_heap_object);
BranchIfFloat64IsNaN(number_value, &return_true, &return_false);
BIND(&return_true);
@@ -136,17 +139,16 @@ TF_BUILTIN(NumberIsSafeInteger, CodeStubAssembler) {
// ES6 #sec-number.parsefloat
TF_BUILTIN(NumberParseFloat, CodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
// We might need to loop once for ToString conversion.
- VARIABLE(var_input, MachineRepresentation::kTagged,
- Parameter(Descriptor::kString));
+ TVARIABLE(Object, var_input, CAST(Parameter(Descriptor::kString)));
Label loop(this, &var_input);
Goto(&loop);
BIND(&loop);
{
// Load the current {input} value.
- Node* input = var_input.value();
+ TNode<Object> input = var_input.value();
// Check if the {input} is a HeapObject or a Smi.
Label if_inputissmi(this), if_inputisnotsmi(this);
@@ -161,8 +163,9 @@ TF_BUILTIN(NumberParseFloat, CodeStubAssembler) {
BIND(&if_inputisnotsmi);
{
// The {input} is a HeapObject, check if it's already a String.
+ TNode<HeapObject> input_heap_object = CAST(input);
Label if_inputisstring(this), if_inputisnotstring(this);
- TNode<Map> input_map = LoadMap(input);
+ TNode<Map> input_map = LoadMap(input_heap_object);
TNode<Uint16T> input_instance_type = LoadMapInstanceType(input_map);
Branch(IsStringInstanceType(input_instance_type), &if_inputisstring,
&if_inputisnotstring);
@@ -172,7 +175,7 @@ TF_BUILTIN(NumberParseFloat, CodeStubAssembler) {
// The {input} is already a String, check if {input} contains
// a cached array index.
Label if_inputcached(this), if_inputnotcached(this);
- TNode<Uint32T> input_hash = LoadNameHashField(input);
+ TNode<Uint32T> input_hash = LoadNameHashField(CAST(input));
Branch(IsClearWord32(input_hash,
Name::kDoesNotContainCachedArrayIndexMask),
&if_inputcached, &if_inputnotcached);
@@ -204,7 +207,7 @@ TF_BUILTIN(NumberParseFloat, CodeStubAssembler) {
{
// The {input} is already a Number, take care of -0.
Label if_inputiszero(this), if_inputisnotzero(this);
- TNode<Float64T> input_value = LoadHeapNumberValue(input);
+ TNode<Float64T> input_value = LoadHeapNumberValue(input_heap_object);
Branch(Float64Equal(input_value, Float64Constant(0.0)),
&if_inputiszero, &if_inputisnotzero);
@@ -219,7 +222,7 @@ TF_BUILTIN(NumberParseFloat, CodeStubAssembler) {
{
// Need to convert the {input} to String first.
// TODO(bmeurer): This could be more efficient if necessary.
- var_input.Bind(CallBuiltin(Builtins::kToString, context, input));
+ var_input = CallBuiltin(Builtins::kToString, context, input);
Goto(&loop);
}
}
@@ -309,9 +312,9 @@ TF_BUILTIN(ParseInt, CodeStubAssembler) {
// ES6 #sec-number.parseint
TF_BUILTIN(NumberParseInt, CodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* input = Parameter(Descriptor::kString);
- Node* radix = Parameter(Descriptor::kRadix);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> input = CAST(Parameter(Descriptor::kString));
+ TNode<Object> radix = CAST(Parameter(Descriptor::kRadix));
Return(CallBuiltin(Builtins::kParseInt, context, input, radix));
}
@@ -331,27 +334,29 @@ class AddStubAssembler : public CodeStubAssembler {
: CodeStubAssembler(state) {}
protected:
- void ConvertReceiverAndLoop(Variable* var_value, Label* loop, Node* context) {
+ TNode<Object> ConvertReceiver(TNode<JSReceiver> js_receiver,
+ TNode<Context> context) {
// Call ToPrimitive explicitly without hint (whereas ToNumber
// would pass a "number" hint).
Callable callable = CodeFactory::NonPrimitiveToPrimitive(isolate());
- var_value->Bind(CallStub(callable, context, var_value->value()));
- Goto(loop);
+ return CallStub(callable, context, js_receiver);
}
- void ConvertNonReceiverAndLoop(Variable* var_value, Label* loop,
- Node* context) {
- var_value->Bind(CallBuiltin(Builtins::kNonNumberToNumeric, context,
- var_value->value()));
+ void ConvertNonReceiverAndLoop(TVariable<Object>* var_value, Label* loop,
+ TNode<Context> context) {
+ *var_value =
+ CallBuiltin(Builtins::kNonNumberToNumeric, context, var_value->value());
Goto(loop);
}
- void ConvertAndLoop(Variable* var_value, Node* instance_type, Label* loop,
- Node* context) {
+ void ConvertAndLoop(TVariable<Object>* var_value,
+ TNode<Uint16T> instance_type, Label* loop,
+ TNode<Context> context) {
Label is_not_receiver(this, Label::kDeferred);
GotoIfNot(IsJSReceiverInstanceType(instance_type), &is_not_receiver);
- ConvertReceiverAndLoop(var_value, loop, context);
+ *var_value = ConvertReceiver(CAST(var_value->value()), context);
+ Goto(loop);
BIND(&is_not_receiver);
ConvertNonReceiverAndLoop(var_value, loop, context);
@@ -359,30 +364,26 @@ class AddStubAssembler : public CodeStubAssembler {
};
TF_BUILTIN(Add, AddStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- VARIABLE(var_left, MachineRepresentation::kTagged,
- Parameter(Descriptor::kLeft));
- VARIABLE(var_right, MachineRepresentation::kTagged,
- Parameter(Descriptor::kRight));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TVARIABLE(Object, var_left, CAST(Parameter(Descriptor::kLeft)));
+ TVARIABLE(Object, var_right, CAST(Parameter(Descriptor::kRight)));
// Shared entry for floating point addition.
Label do_double_add(this);
- VARIABLE(var_left_double, MachineRepresentation::kFloat64);
- VARIABLE(var_right_double, MachineRepresentation::kFloat64);
+ TVARIABLE(Float64T, var_left_double);
+ TVARIABLE(Float64T, var_right_double);
// We might need to loop several times due to ToPrimitive, ToString and/or
// ToNumeric conversions.
- VARIABLE(var_result, MachineRepresentation::kTagged);
- Variable* loop_vars[2] = {&var_left, &var_right};
- Label loop(this, 2, loop_vars),
+ Label loop(this, {&var_left, &var_right}),
string_add_convert_left(this, Label::kDeferred),
string_add_convert_right(this, Label::kDeferred),
do_bigint_add(this, Label::kDeferred);
Goto(&loop);
BIND(&loop);
{
- Node* left = var_left.value();
- Node* right = var_right.value();
+ TNode<Object> left = var_left.value();
+ TNode<Object> right = var_right.value();
Label if_left_smi(this), if_left_heapobject(this);
Branch(TaggedIsSmi(left), &if_left_smi, &if_left_heapobject);
@@ -395,27 +396,30 @@ TF_BUILTIN(Add, AddStubAssembler) {
BIND(&if_right_smi);
{
Label if_overflow(this);
- TNode<Smi> result = TrySmiAdd(CAST(left), CAST(right), &if_overflow);
+ TNode<Smi> left_smi = CAST(left);
+ TNode<Smi> right_smi = CAST(right);
+ TNode<Smi> result = TrySmiAdd(left_smi, right_smi, &if_overflow);
Return(result);
BIND(&if_overflow);
{
- var_left_double.Bind(SmiToFloat64(left));
- var_right_double.Bind(SmiToFloat64(right));
+ var_left_double = SmiToFloat64(left_smi);
+ var_right_double = SmiToFloat64(right_smi);
Goto(&do_double_add);
}
} // if_right_smi
BIND(&if_right_heapobject);
{
- TNode<Map> right_map = LoadMap(right);
+ TNode<HeapObject> right_heap_object = CAST(right);
+ TNode<Map> right_map = LoadMap(right_heap_object);
Label if_right_not_number(this, Label::kDeferred);
GotoIfNot(IsHeapNumberMap(right_map), &if_right_not_number);
// {right} is a HeapNumber.
- var_left_double.Bind(SmiToFloat64(left));
- var_right_double.Bind(LoadHeapNumberValue(right));
+ var_left_double = SmiToFloat64(CAST(left));
+ var_right_double = LoadHeapNumberValue(right_heap_object);
Goto(&do_double_add);
BIND(&if_right_not_number);
@@ -431,7 +435,8 @@ TF_BUILTIN(Add, AddStubAssembler) {
BIND(&if_left_heapobject);
{
- TNode<Map> left_map = LoadMap(left);
+ TNode<HeapObject> left_heap_object = CAST(left);
+ TNode<Map> left_map = LoadMap(left_heap_object);
Label if_right_smi(this), if_right_heapobject(this);
Branch(TaggedIsSmi(right), &if_right_smi, &if_right_heapobject);
@@ -441,8 +446,8 @@ TF_BUILTIN(Add, AddStubAssembler) {
GotoIfNot(IsHeapNumberMap(left_map), &if_left_not_number);
// {left} is a HeapNumber, {right} is a Smi.
- var_left_double.Bind(LoadHeapNumberValue(left));
- var_right_double.Bind(SmiToFloat64(right));
+ var_left_double = LoadHeapNumberValue(left_heap_object);
+ var_right_double = SmiToFloat64(CAST(right));
Goto(&do_double_add);
BIND(&if_left_not_number);
@@ -458,7 +463,8 @@ TF_BUILTIN(Add, AddStubAssembler) {
BIND(&if_right_heapobject);
{
- TNode<Map> right_map = LoadMap(right);
+ TNode<HeapObject> right_heap_object = CAST(right);
+ TNode<Map> right_map = LoadMap(right_heap_object);
Label if_left_number(this), if_left_not_number(this, Label::kDeferred);
Branch(IsHeapNumberMap(left_map), &if_left_number, &if_left_not_number);
@@ -469,8 +475,8 @@ TF_BUILTIN(Add, AddStubAssembler) {
GotoIfNot(IsHeapNumberMap(right_map), &if_right_not_number);
// Both {left} and {right} are HeapNumbers.
- var_left_double.Bind(LoadHeapNumberValue(left));
- var_right_double.Bind(LoadHeapNumberValue(right));
+ var_left_double = LoadHeapNumberValue(CAST(left));
+ var_right_double = LoadHeapNumberValue(right_heap_object);
Goto(&do_double_add);
BIND(&if_right_not_number);
@@ -499,7 +505,8 @@ TF_BUILTIN(Add, AddStubAssembler) {
GotoIfNot(IsJSReceiverInstanceType(left_instance_type),
&if_left_not_receiver);
// {left} is a JSReceiver, convert it first.
- ConvertReceiverAndLoop(&var_left, &loop, context);
+ var_left = ConvertReceiver(CAST(var_left.value()), context);
+ Goto(&loop);
BIND(&if_left_bigint);
{
@@ -515,7 +522,8 @@ TF_BUILTIN(Add, AddStubAssembler) {
&if_right_not_receiver);
// {left} is a Primitive, but {right} is a JSReceiver, so convert
// {right} with priority.
- ConvertReceiverAndLoop(&var_right, &loop, context);
+ var_right = ConvertReceiver(CAST(var_right.value()), context);
+ Goto(&loop);
BIND(&if_right_not_receiver);
// Neither {left} nor {right} are JSReceivers.
@@ -553,54 +561,46 @@ TF_BUILTIN(Add, AddStubAssembler) {
}
template <typename Descriptor>
-void NumberBuiltinsAssembler::UnaryOp(Variable* var_input, Label* do_smi,
- Label* do_double,
- Variable* var_input_double,
+void NumberBuiltinsAssembler::UnaryOp(TVariable<Object>* var_input,
+ Label* do_smi, Label* do_double,
+ TVariable<Float64T>* var_input_double,
Label* do_bigint) {
- DCHECK_EQ(var_input->rep(), MachineRepresentation::kTagged);
- DCHECK_IMPLIES(var_input_double != nullptr,
- var_input_double->rep() == MachineRepresentation::kFloat64);
-
- Node* context = Parameter(Descriptor::kContext);
- var_input->Bind(Parameter(Descriptor::kValue));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ *var_input = CAST(Parameter(Descriptor::kValue));
// We might need to loop for ToNumeric conversion.
Label loop(this, {var_input});
Goto(&loop);
BIND(&loop);
- Node* input = var_input->value();
+ TNode<Object> input = var_input->value();
Label not_number(this);
GotoIf(TaggedIsSmi(input), do_smi);
- GotoIfNot(IsHeapNumber(input), &not_number);
+ TNode<HeapObject> input_heap_object = CAST(input);
+ GotoIfNot(IsHeapNumber(input_heap_object), &not_number);
if (var_input_double != nullptr) {
- var_input_double->Bind(LoadHeapNumberValue(input));
+ *var_input_double = LoadHeapNumberValue(input_heap_object);
}
Goto(do_double);
BIND(&not_number);
- GotoIf(IsBigInt(input), do_bigint);
- var_input->Bind(CallBuiltin(Builtins::kNonNumberToNumeric, context, input));
+ GotoIf(IsBigInt(input_heap_object), do_bigint);
+ *var_input = CallBuiltin(Builtins::kNonNumberToNumeric, context, input);
Goto(&loop);
}
template <typename Descriptor>
-void NumberBuiltinsAssembler::BinaryOp(Label* smis, Variable* var_left,
- Variable* var_right, Label* doubles,
- Variable* var_left_double,
- Variable* var_right_double,
+void NumberBuiltinsAssembler::BinaryOp(Label* smis, TVariable<Object>* var_left,
+ TVariable<Object>* var_right,
+ Label* doubles,
+ TVariable<Float64T>* var_left_double,
+ TVariable<Float64T>* var_right_double,
Label* bigints) {
- DCHECK_EQ(var_left->rep(), MachineRepresentation::kTagged);
- DCHECK_EQ(var_right->rep(), MachineRepresentation::kTagged);
- DCHECK_IMPLIES(var_left_double != nullptr,
- var_left_double->rep() == MachineRepresentation::kFloat64);
- DCHECK_IMPLIES(var_right_double != nullptr,
- var_right_double->rep() == MachineRepresentation::kFloat64);
DCHECK_EQ(var_left_double == nullptr, var_right_double == nullptr);
- Node* context = Parameter(Descriptor::kContext);
- var_left->Bind(Parameter(Descriptor::kLeft));
- var_right->Bind(Parameter(Descriptor::kRight));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ *var_left = CAST(Parameter(Descriptor::kLeft));
+ *var_right = CAST(Parameter(Descriptor::kRight));
// We might need to loop for ToNumeric conversions.
Label loop(this, {var_left, var_right});
@@ -613,32 +613,36 @@ void NumberBuiltinsAssembler::BinaryOp(Label* smis, Variable* var_left,
GotoIf(TaggedIsSmi(var_right->value()), smis);
// At this point, var_left is a Smi but var_right is not.
- GotoIfNot(IsHeapNumber(var_right->value()), &right_not_number);
+ TNode<Smi> var_left_smi = CAST(var_left->value());
+ TNode<HeapObject> var_right_heap_object = CAST(var_right->value());
+ GotoIfNot(IsHeapNumber(var_right_heap_object), &right_not_number);
if (var_left_double != nullptr) {
- var_left_double->Bind(SmiToFloat64(var_left->value()));
- var_right_double->Bind(LoadHeapNumberValue(var_right->value()));
+ *var_left_double = SmiToFloat64(var_left_smi);
+ *var_right_double = LoadHeapNumberValue(var_right_heap_object);
}
Goto(doubles);
BIND(&left_not_smi);
{
- GotoIfNot(IsHeapNumber(var_left->value()), &left_not_number);
+ TNode<HeapObject> var_left_heap_object = CAST(var_left->value());
+ GotoIfNot(IsHeapNumber(var_left_heap_object), &left_not_number);
GotoIfNot(TaggedIsSmi(var_right->value()), &right_not_smi);
// At this point, var_left is a HeapNumber and var_right is a Smi.
if (var_left_double != nullptr) {
- var_left_double->Bind(LoadHeapNumberValue(var_left->value()));
- var_right_double->Bind(SmiToFloat64(var_right->value()));
+ *var_left_double = LoadHeapNumberValue(var_left_heap_object);
+ *var_right_double = SmiToFloat64(CAST(var_right->value()));
}
Goto(doubles);
}
BIND(&right_not_smi);
{
- GotoIfNot(IsHeapNumber(var_right->value()), &right_not_number);
+ TNode<HeapObject> var_right_heap_object = CAST(var_right->value());
+ GotoIfNot(IsHeapNumber(var_right_heap_object), &right_not_number);
if (var_left_double != nullptr) {
- var_left_double->Bind(LoadHeapNumberValue(var_left->value()));
- var_right_double->Bind(LoadHeapNumberValue(var_right->value()));
+ *var_left_double = LoadHeapNumberValue(CAST(var_left->value()));
+ *var_right_double = LoadHeapNumberValue(var_right_heap_object);
}
Goto(doubles);
}
@@ -646,37 +650,38 @@ void NumberBuiltinsAssembler::BinaryOp(Label* smis, Variable* var_left,
BIND(&left_not_number);
{
Label left_bigint(this);
- GotoIf(IsBigInt(var_left->value()), &left_bigint);
- var_left->Bind(
- CallBuiltin(Builtins::kNonNumberToNumeric, context, var_left->value()));
+ GotoIf(IsBigInt(CAST(var_left->value())), &left_bigint);
+ *var_left =
+ CallBuiltin(Builtins::kNonNumberToNumeric, context, var_left->value());
Goto(&loop);
BIND(&left_bigint);
{
// Jump to {bigints} if {var_right} is already a Numeric.
GotoIf(TaggedIsSmi(var_right->value()), bigints);
- GotoIf(IsBigInt(var_right->value()), bigints);
- GotoIf(IsHeapNumber(var_right->value()), bigints);
- var_right->Bind(CallBuiltin(Builtins::kNonNumberToNumeric, context,
- var_right->value()));
+ TNode<HeapObject> var_right_heap_object = CAST(var_right->value());
+ GotoIf(IsBigInt(var_right_heap_object), bigints);
+ GotoIf(IsHeapNumber(var_right_heap_object), bigints);
+ *var_right = CallBuiltin(Builtins::kNonNumberToNumeric, context,
+ var_right->value());
Goto(&loop);
}
}
BIND(&right_not_number);
{
- GotoIf(IsBigInt(var_right->value()), bigints);
- var_right->Bind(CallBuiltin(Builtins::kNonNumberToNumeric, context,
- var_right->value()));
+ GotoIf(IsBigInt(CAST(var_right->value())), bigints);
+ *var_right =
+ CallBuiltin(Builtins::kNonNumberToNumeric, context, var_right->value());
Goto(&loop);
}
}
TF_BUILTIN(Subtract, NumberBuiltinsAssembler) {
- VARIABLE(var_left, MachineRepresentation::kTagged);
- VARIABLE(var_right, MachineRepresentation::kTagged);
- VARIABLE(var_left_double, MachineRepresentation::kFloat64);
- VARIABLE(var_right_double, MachineRepresentation::kFloat64);
+ TVARIABLE(Object, var_left);
+ TVARIABLE(Object, var_right);
+ TVARIABLE(Float64T, var_left_double);
+ TVARIABLE(Float64T, var_right_double);
Label do_smi_sub(this), do_double_sub(this), do_bigint_sub(this);
BinaryOp<Descriptor>(&do_smi_sub, &var_left, &var_right, &do_double_sub,
@@ -685,14 +690,15 @@ TF_BUILTIN(Subtract, NumberBuiltinsAssembler) {
BIND(&do_smi_sub);
{
Label if_overflow(this);
- TNode<Smi> result = TrySmiSub(CAST(var_left.value()),
- CAST(var_right.value()), &if_overflow);
+ TNode<Smi> var_left_smi = CAST(var_left.value());
+ TNode<Smi> var_right_smi = CAST(var_right.value());
+ TNode<Smi> result = TrySmiSub(var_left_smi, var_right_smi, &if_overflow);
Return(result);
BIND(&if_overflow);
{
- var_left_double.Bind(SmiToFloat64(var_left.value()));
- var_right_double.Bind(SmiToFloat64(var_right.value()));
+ var_left_double = SmiToFloat64(var_left_smi);
+ var_right_double = SmiToFloat64(var_right_smi);
Goto(&do_double_sub);
}
}
@@ -706,15 +712,15 @@ TF_BUILTIN(Subtract, NumberBuiltinsAssembler) {
BIND(&do_bigint_sub);
{
- Node* context = Parameter(Descriptor::kContext);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
Return(CallRuntime(Runtime::kBigIntBinaryOp, context, var_left.value(),
var_right.value(), SmiConstant(Operation::kSubtract)));
}
}
TF_BUILTIN(BitwiseNot, NumberBuiltinsAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- VARIABLE(var_input, MachineRepresentation::kTagged);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TVARIABLE(Object, var_input);
Label do_number(this), do_bigint(this);
UnaryOp<Descriptor>(&var_input, &do_number, &do_number, nullptr, &do_bigint);
@@ -733,8 +739,8 @@ TF_BUILTIN(BitwiseNot, NumberBuiltinsAssembler) {
}
TF_BUILTIN(Decrement, NumberBuiltinsAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- VARIABLE(var_input, MachineRepresentation::kTagged);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TVARIABLE(Object, var_input);
Label do_number(this), do_bigint(this);
UnaryOp<Descriptor>(&var_input, &do_number, &do_number, nullptr, &do_bigint);
@@ -753,8 +759,8 @@ TF_BUILTIN(Decrement, NumberBuiltinsAssembler) {
}
TF_BUILTIN(Increment, NumberBuiltinsAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- VARIABLE(var_input, MachineRepresentation::kTagged);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TVARIABLE(Object, var_input);
Label do_number(this), do_bigint(this);
UnaryOp<Descriptor>(&var_input, &do_number, &do_number, nullptr, &do_bigint);
@@ -772,8 +778,8 @@ TF_BUILTIN(Increment, NumberBuiltinsAssembler) {
}
TF_BUILTIN(Negate, NumberBuiltinsAssembler) {
- VARIABLE(var_input, MachineRepresentation::kTagged);
- VARIABLE(var_input_double, MachineRepresentation::kFloat64);
+ TVARIABLE(Object, var_input);
+ TVARIABLE(Float64T, var_input_double);
Label do_smi(this), do_double(this), do_bigint(this);
UnaryOp<Descriptor>(&var_input, &do_smi, &do_double, &var_input_double,
@@ -791,17 +797,17 @@ TF_BUILTIN(Negate, NumberBuiltinsAssembler) {
BIND(&do_bigint);
{
- Node* context = Parameter(Descriptor::kContext);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
Return(CallRuntime(Runtime::kBigIntUnaryOp, context, var_input.value(),
SmiConstant(Operation::kNegate)));
}
}
TF_BUILTIN(Multiply, NumberBuiltinsAssembler) {
- VARIABLE(var_left, MachineRepresentation::kTagged);
- VARIABLE(var_right, MachineRepresentation::kTagged);
- VARIABLE(var_left_double, MachineRepresentation::kFloat64);
- VARIABLE(var_right_double, MachineRepresentation::kFloat64);
+ TVARIABLE(Object, var_left);
+ TVARIABLE(Object, var_right);
+ TVARIABLE(Float64T, var_left_double);
+ TVARIABLE(Float64T, var_right_double);
Label do_smi_mul(this), do_double_mul(this), do_bigint_mul(this);
BinaryOp<Descriptor>(&do_smi_mul, &var_left, &var_right, &do_double_mul,
@@ -818,17 +824,17 @@ TF_BUILTIN(Multiply, NumberBuiltinsAssembler) {
BIND(&do_bigint_mul);
{
- Node* context = Parameter(Descriptor::kContext);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
Return(CallRuntime(Runtime::kBigIntBinaryOp, context, var_left.value(),
var_right.value(), SmiConstant(Operation::kMultiply)));
}
}
TF_BUILTIN(Divide, NumberBuiltinsAssembler) {
- VARIABLE(var_left, MachineRepresentation::kTagged);
- VARIABLE(var_right, MachineRepresentation::kTagged);
- VARIABLE(var_left_double, MachineRepresentation::kFloat64);
- VARIABLE(var_right_double, MachineRepresentation::kFloat64);
+ TVARIABLE(Object, var_left);
+ TVARIABLE(Object, var_right);
+ TVARIABLE(Float64T, var_left_double);
+ TVARIABLE(Float64T, var_right_double);
Label do_smi_div(this), do_double_div(this), do_bigint_div(this);
BinaryOp<Descriptor>(&do_smi_div, &var_left, &var_right, &do_double_div,
@@ -889,8 +895,8 @@ TF_BUILTIN(Divide, NumberBuiltinsAssembler) {
// division.
BIND(&bailout);
{
- var_left_double.Bind(SmiToFloat64(dividend));
- var_right_double.Bind(SmiToFloat64(divisor));
+ var_left_double = SmiToFloat64(dividend);
+ var_right_double = SmiToFloat64(divisor);
Goto(&do_double_div);
}
}
@@ -904,17 +910,17 @@ TF_BUILTIN(Divide, NumberBuiltinsAssembler) {
BIND(&do_bigint_div);
{
- Node* context = Parameter(Descriptor::kContext);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
Return(CallRuntime(Runtime::kBigIntBinaryOp, context, var_left.value(),
var_right.value(), SmiConstant(Operation::kDivide)));
}
}
TF_BUILTIN(Modulus, NumberBuiltinsAssembler) {
- VARIABLE(var_left, MachineRepresentation::kTagged);
- VARIABLE(var_right, MachineRepresentation::kTagged);
- VARIABLE(var_left_double, MachineRepresentation::kFloat64);
- VARIABLE(var_right_double, MachineRepresentation::kFloat64);
+ TVARIABLE(Object, var_left);
+ TVARIABLE(Object, var_right);
+ TVARIABLE(Float64T, var_left_double);
+ TVARIABLE(Float64T, var_right_double);
Label do_smi_mod(this), do_double_mod(this), do_bigint_mod(this);
BinaryOp<Descriptor>(&do_smi_mod, &var_left, &var_right, &do_double_mod,
@@ -930,17 +936,17 @@ TF_BUILTIN(Modulus, NumberBuiltinsAssembler) {
BIND(&do_bigint_mod);
{
- Node* context = Parameter(Descriptor::kContext);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
Return(CallRuntime(Runtime::kBigIntBinaryOp, context, var_left.value(),
var_right.value(), SmiConstant(Operation::kModulus)));
}
}
TF_BUILTIN(Exponentiate, NumberBuiltinsAssembler) {
- VARIABLE(var_left, MachineRepresentation::kTagged);
- VARIABLE(var_right, MachineRepresentation::kTagged);
+ TVARIABLE(Object, var_left);
+ TVARIABLE(Object, var_right);
Label do_number_exp(this), do_bigint_exp(this);
- Node* context = Parameter(Descriptor::kContext);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
BinaryOp<Descriptor>(&do_number_exp, &var_left, &var_right, &do_number_exp,
nullptr, nullptr, &do_bigint_exp);
@@ -997,9 +1003,9 @@ TF_BUILTIN(GreaterThanOrEqual, NumberBuiltinsAssembler) {
}
TF_BUILTIN(Equal, CodeStubAssembler) {
- Node* lhs = Parameter(Descriptor::kLeft);
- Node* rhs = Parameter(Descriptor::kRight);
- Node* context = Parameter(Descriptor::kContext);
+ TNode<Object> lhs = CAST(Parameter(Descriptor::kLeft));
+ TNode<Object> rhs = CAST(Parameter(Descriptor::kRight));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
Return(Equal(lhs, rhs, context));
}
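The BinaryOp rewrite above keeps the existing control flow, a Smi fast path first, a double fallback on overflow or HeapNumber operands, and a ToNumeric loop for everything else, while each CAST now records the type a branch has already proven. A scalar model of the Subtract dispatch with stand-in types (real Smis are 31/32-bit, and the BigInt/ToNumeric loop is omitted):

    #include <cstdint>
    #include <variant>

    using Numeric = std::variant<int32_t, double>;  // Smi / HeapNumber stand-ins

    Numeric Subtract(Numeric left, Numeric right) {
      if (auto* l = std::get_if<int32_t>(&left)) {
        if (auto* r = std::get_if<int32_t>(&right)) {
          int64_t wide = int64_t{*l} - int64_t{*r};  // TrySmiSub, widened
          if (wide >= INT32_MIN && wide <= INT32_MAX) {
            return static_cast<int32_t>(wide);       // result still fits a Smi
          }                                          // overflow: fall through
        }
      }
      auto to_double = [](const Numeric& n) {
        return std::holds_alternative<int32_t>(n)
                   ? static_cast<double>(std::get<int32_t>(n))  // SmiToFloat64
                   : std::get<double>(n);            // LoadHeapNumberValue
      };
      return to_double(left) - to_double(right);     // the do_double_sub path
    }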
diff --git a/deps/v8/src/builtins/builtins-number.cc b/deps/v8/src/builtins/builtins-number.cc
index d2fb0ff74c..49e7ff27b8 100644
--- a/deps/v8/src/builtins/builtins-number.cc
+++ b/deps/v8/src/builtins/builtins-number.cc
@@ -111,6 +111,7 @@ BUILTIN(NumberPrototypeToFixed) {
// ES6 section 20.1.3.4 Number.prototype.toLocaleString ( [ r1 [ , r2 ] ] )
BUILTIN(NumberPrototypeToLocaleString) {
HandleScope scope(isolate);
+ const char* method = "Number.prototype.toLocaleString";
isolate->CountUsage(v8::Isolate::UseCounterFeature::kNumberToLocaleString);
@@ -123,17 +124,17 @@ BUILTIN(NumberPrototypeToLocaleString) {
// 1. Let x be ? thisNumberValue(this value)
if (!value->IsNumber()) {
THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kNotGeneric,
- isolate->factory()->NewStringFromAsciiChecked(
- "Number.prototype.toLocaleString"),
- isolate->factory()->Number_string()));
+ isolate,
+ NewTypeError(MessageTemplate::kNotGeneric,
+ isolate->factory()->NewStringFromAsciiChecked(method),
+ isolate->factory()->Number_string()));
}
#ifdef V8_INTL_SUPPORT
RETURN_RESULT_OR_FAILURE(
isolate,
Intl::NumberToLocaleString(isolate, value, args.atOrUndefined(isolate, 1),
- args.atOrUndefined(isolate, 2)));
+ args.atOrUndefined(isolate, 2), method));
#else
// Turn the {value} into a String.
return *isolate->factory()->NumberToString(value);
diff --git a/deps/v8/src/builtins/builtins-object-gen.cc b/deps/v8/src/builtins/builtins-object-gen.cc
index db9d4ed657..a35990e2f5 100644
--- a/deps/v8/src/builtins/builtins-object-gen.cc
+++ b/deps/v8/src/builtins/builtins-object-gen.cc
@@ -22,29 +22,35 @@ namespace internal {
// -----------------------------------------------------------------------------
// ES6 section 19.1 Object Objects
-using Node = compiler::Node;
-template <class T>
-using TNode = CodeStubAssembler::TNode<T>;
-
class ObjectBuiltinsAssembler : public CodeStubAssembler {
public:
explicit ObjectBuiltinsAssembler(compiler::CodeAssemblerState* state)
: CodeStubAssembler(state) {}
protected:
- void ReturnToStringFormat(Node* context, Node* string);
+ void ReturnToStringFormat(TNode<Context> context, TNode<String> string);
void AddToDictionaryIf(TNode<BoolT> condition,
TNode<NameDictionary> name_dictionary,
Handle<Name> name, TNode<Object> value,
Label* bailout);
- Node* FromPropertyDescriptor(Node* context, Node* desc);
- Node* FromPropertyDetails(Node* context, Node* raw_value, Node* details,
- Label* if_bailout);
- Node* ConstructAccessorDescriptor(Node* context, Node* getter, Node* setter,
- Node* enumerable, Node* configurable);
- Node* ConstructDataDescriptor(Node* context, Node* value, Node* writable,
- Node* enumerable, Node* configurable);
- Node* GetAccessorOrUndefined(Node* accessor, Label* if_bailout);
+ TNode<JSObject> FromPropertyDescriptor(TNode<Context> context,
+ TNode<FixedArray> desc);
+ TNode<JSObject> FromPropertyDetails(TNode<Context> context,
+ TNode<Object> raw_value,
+ TNode<Word32T> details,
+ Label* if_bailout);
+ TNode<JSObject> ConstructAccessorDescriptor(TNode<Context> context,
+ TNode<Object> getter,
+ TNode<Object> setter,
+ TNode<BoolT> enumerable,
+ TNode<BoolT> configurable);
+ TNode<JSObject> ConstructDataDescriptor(TNode<Context> context,
+ TNode<Object> value,
+ TNode<BoolT> writable,
+ TNode<BoolT> enumerable,
+ TNode<BoolT> configurable);
+ TNode<HeapObject> GetAccessorOrUndefined(TNode<HeapObject> accessor,
+ Label* if_bailout);
};
class ObjectEntriesValuesBuiltinsAssembler : public ObjectBuiltinsAssembler {
@@ -79,8 +85,8 @@ class ObjectEntriesValuesBuiltinsAssembler : public ObjectBuiltinsAssembler {
TNode<IntPtrT> size, TNode<Map> array_map, Label* if_empty);
};
-void ObjectBuiltinsAssembler::ReturnToStringFormat(Node* context,
- Node* string) {
+void ObjectBuiltinsAssembler::ReturnToStringFormat(TNode<Context> context,
+ TNode<String> string) {
TNode<String> lhs = StringConstant("[object ");
TNode<String> rhs = StringConstant("]");
@@ -90,11 +96,9 @@ void ObjectBuiltinsAssembler::ReturnToStringFormat(Node* context,
rhs));
}
-Node* ObjectBuiltinsAssembler::ConstructAccessorDescriptor(Node* context,
- Node* getter,
- Node* setter,
- Node* enumerable,
- Node* configurable) {
+TNode<JSObject> ObjectBuiltinsAssembler::ConstructAccessorDescriptor(
+ TNode<Context> context, TNode<Object> getter, TNode<Object> setter,
+ TNode<BoolT> enumerable, TNode<BoolT> configurable) {
TNode<NativeContext> native_context = LoadNativeContext(context);
TNode<Map> map = CAST(LoadContextElement(
native_context, Context::ACCESSOR_PROPERTY_DESCRIPTOR_MAP_INDEX));
@@ -114,11 +118,9 @@ Node* ObjectBuiltinsAssembler::ConstructAccessorDescriptor(Node* context,
return js_desc;
}
-Node* ObjectBuiltinsAssembler::ConstructDataDescriptor(Node* context,
- Node* value,
- Node* writable,
- Node* enumerable,
- Node* configurable) {
+TNode<JSObject> ObjectBuiltinsAssembler::ConstructDataDescriptor(
+ TNode<Context> context, TNode<Object> value, TNode<BoolT> writable,
+ TNode<BoolT> enumerable, TNode<BoolT> configurable) {
TNode<NativeContext> native_context = LoadNativeContext(context);
TNode<Map> map = CAST(LoadContextElement(
native_context, Context::DATA_PROPERTY_DESCRIPTOR_MAP_INDEX));
@@ -260,10 +262,10 @@ TNode<JSArray> ObjectEntriesValuesBuiltinsAssembler::FastGetOwnValuesOrEntries(
TVARIABLE(IntPtrT, var_result_index, IntPtrConstant(0));
TVARIABLE(IntPtrT, var_descriptor_number, IntPtrConstant(0));
- Variable* vars[] = {&var_descriptor_number, &var_result_index};
+ VariableList vars({&var_descriptor_number, &var_result_index}, zone());
// Let desc be ? O.[[GetOwnProperty]](key).
TNode<DescriptorArray> descriptors = LoadMapDescriptors(map);
- Label loop(this, 2, vars), after_loop(this), next_descriptor(this);
+ Label loop(this, vars), after_loop(this), next_descriptor(this);
Branch(IntPtrEqual(var_descriptor_number.value(), object_enum_length),
&after_loop, &loop);
@@ -309,11 +311,10 @@ TNode<JSArray> ObjectEntriesValuesBuiltinsAssembler::FastGetOwnValuesOrEntries(
if (collect_type == CollectType::kEntries) {
// Let entry be CreateArrayFromList(« key, value »).
- Node* array = nullptr;
- Node* elements = nullptr;
+ TNode<JSArray> array;
+ TNode<FixedArrayBase> elements;
std::tie(array, elements) = AllocateUninitializedJSArrayWithElements(
- PACKED_ELEMENTS, array_map, SmiConstant(2), nullptr,
- IntPtrConstant(2));
+ PACKED_ELEMENTS, array_map, SmiConstant(2), {}, IntPtrConstant(2));
StoreFixedArrayElement(CAST(elements), 0, next_key, SKIP_WRITE_BARRIER);
StoreFixedArrayElement(CAST(elements), 1, value, SKIP_WRITE_BARRIER);
value = TNode<JSArray>::UncheckedCast(array);
@@ -321,12 +322,12 @@ TNode<JSArray> ObjectEntriesValuesBuiltinsAssembler::FastGetOwnValuesOrEntries(
StoreFixedArrayElement(values_or_entries, var_result_index.value(),
value);
- Increment(&var_result_index, 1);
+ Increment(&var_result_index);
Goto(&next_descriptor);
BIND(&next_descriptor);
{
- Increment(&var_descriptor_number, 1);
+ Increment(&var_descriptor_number);
Branch(IntPtrEqual(var_result_index.value(), object_enum_length),
&after_loop, &loop);
}
@@ -366,9 +367,9 @@ TF_BUILTIN(ObjectPrototypeToLocaleString, CodeStubAssembler) {
}
TF_BUILTIN(ObjectPrototypeHasOwnProperty, ObjectBuiltinsAssembler) {
- Node* object = Parameter(Descriptor::kReceiver);
- Node* key = Parameter(Descriptor::kKey);
- Node* context = Parameter(Descriptor::kContext);
+ TNode<Object> object = CAST(Parameter(Descriptor::kReceiver));
+ TNode<Object> key = CAST(Parameter(Descriptor::kKey));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
Label call_runtime(this), return_true(this), return_false(this),
to_primitive(this);
@@ -379,12 +380,12 @@ TF_BUILTIN(ObjectPrototypeHasOwnProperty, ObjectBuiltinsAssembler) {
Branch(TaggedIsSmi(object), &to_primitive, &if_objectisnotsmi);
BIND(&if_objectisnotsmi);
- TNode<Map> map = LoadMap(object);
+ TNode<Map> map = LoadMap(CAST(object));
TNode<Uint16T> instance_type = LoadMapInstanceType(map);
{
- VARIABLE(var_index, MachineType::PointerRepresentation());
- VARIABLE(var_unique, MachineRepresentation::kTagged);
+ TVARIABLE(IntPtrT, var_index);
+ TVARIABLE(Name, var_unique);
Label if_index(this), if_unique_name(this), if_notunique_name(this);
TryToName(key, &if_index, &var_index, &if_unique_name, &var_unique,
@@ -407,7 +408,7 @@ TF_BUILTIN(ObjectPrototypeHasOwnProperty, ObjectBuiltinsAssembler) {
BIND(&if_notunique_name);
{
Label not_in_string_table(this);
- TryInternalizeString(key, &if_index, &var_index, &if_unique_name,
+ TryInternalizeString(CAST(key), &if_index, &var_index, &if_unique_name,
&var_unique, &not_in_string_table, &call_runtime);
BIND(&not_in_string_table);
@@ -422,7 +423,7 @@ TF_BUILTIN(ObjectPrototypeHasOwnProperty, ObjectBuiltinsAssembler) {
}
BIND(&to_primitive);
GotoIf(IsNumber(key), &return_false);
- Branch(IsName(key), &return_false, &call_runtime);
+ Branch(IsName(CAST(key)), &return_false, &call_runtime);
BIND(&return_true);
Return(TrueConstant());
@@ -454,7 +455,7 @@ TF_BUILTIN(ObjectAssign, ObjectBuiltinsAssembler) {
// second argument.
// 4. For each element nextSource of sources, in ascending index order,
args.ForEach(
- [=](Node* next_source) {
+ [=](TNode<Object> next_source) {
CallBuiltin(Builtins::kSetDataProperties, context, to, next_source);
},
IntPtrConstant(1));
@@ -467,17 +468,18 @@ TF_BUILTIN(ObjectAssign, ObjectBuiltinsAssembler) {
// ES #sec-object.keys
TF_BUILTIN(ObjectKeys, ObjectBuiltinsAssembler) {
- Node* object = Parameter(Descriptor::kObject);
- Node* context = Parameter(Descriptor::kContext);
+ TNode<Object> object = CAST(Parameter(Descriptor::kObject));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- VARIABLE(var_length, MachineRepresentation::kTagged);
- VARIABLE(var_elements, MachineRepresentation::kTagged);
+ TVARIABLE(Smi, var_length);
+ TVARIABLE(FixedArrayBase, var_elements);
Label if_empty(this, Label::kDeferred), if_empty_elements(this),
if_fast(this), if_slow(this, Label::kDeferred), if_join(this);
// Check if the {object} has a usable enum cache.
GotoIf(TaggedIsSmi(object), &if_slow);
- TNode<Map> object_map = LoadMap(object);
+
+ TNode<Map> object_map = LoadMap(CAST(object));
TNode<Uint32T> object_bit_field3 = LoadMapBitField3(object_map);
TNode<UintPtrT> object_enum_length =
DecodeWordFromWord32<Map::EnumLengthBits>(object_bit_field3);
@@ -487,7 +489,7 @@ TF_BUILTIN(ObjectKeys, ObjectBuiltinsAssembler) {
// Ensure that the {object} doesn't have any elements.
CSA_ASSERT(this, IsJSObjectMap(object_map));
- TNode<FixedArrayBase> object_elements = LoadElements(object);
+ TNode<FixedArrayBase> object_elements = LoadElements(CAST(object));
GotoIf(IsEmptyFixedArray(object_elements), &if_empty_elements);
Branch(IsEmptySlowElementDictionary(object_elements), &if_empty_elements,
&if_slow);
@@ -500,20 +502,20 @@ TF_BUILTIN(ObjectKeys, ObjectBuiltinsAssembler) {
{
// The {object} has a usable enum cache, use that.
TNode<DescriptorArray> object_descriptors = LoadMapDescriptors(object_map);
- TNode<EnumCache> object_enum_cache = CAST(
- LoadObjectField(object_descriptors, DescriptorArray::kEnumCacheOffset));
+ TNode<EnumCache> object_enum_cache = LoadObjectField<EnumCache>(
+ object_descriptors, DescriptorArray::kEnumCacheOffset);
TNode<Object> object_enum_keys =
LoadObjectField(object_enum_cache, EnumCache::kKeysOffset);
// Allocate a JSArray and copy the elements from the {object_enum_keys}.
- Node* array = nullptr;
- Node* elements = nullptr;
+ TNode<JSArray> array;
+ TNode<FixedArrayBase> elements;
TNode<NativeContext> native_context = LoadNativeContext(context);
TNode<Map> array_map =
LoadJSArrayElementsMap(PACKED_ELEMENTS, native_context);
TNode<Smi> array_length = SmiTag(Signed(object_enum_length));
std::tie(array, elements) = AllocateUninitializedJSArrayWithElements(
- PACKED_ELEMENTS, array_map, array_length, nullptr, object_enum_length,
+ PACKED_ELEMENTS, array_map, array_length, {}, object_enum_length,
INTPTR_PARAMETERS);
CopyFixedArrayElements(PACKED_ELEMENTS, object_enum_keys, elements,
object_enum_length, SKIP_WRITE_BARRIER);
@@ -523,8 +525,8 @@ TF_BUILTIN(ObjectKeys, ObjectBuiltinsAssembler) {
BIND(&if_empty);
{
// The {object} doesn't have any enumerable keys.
- var_length.Bind(SmiConstant(0));
- var_elements.Bind(EmptyFixedArrayConstant());
+ var_length = SmiConstant(0);
+ var_elements = EmptyFixedArrayConstant();
Goto(&if_join);
}
@@ -533,8 +535,8 @@ TF_BUILTIN(ObjectKeys, ObjectBuiltinsAssembler) {
// Let the runtime compute the elements.
TNode<FixedArray> elements =
CAST(CallRuntime(Runtime::kObjectKeys, context, object));
- var_length.Bind(LoadObjectField(elements, FixedArray::kLengthOffset));
- var_elements.Bind(elements);
+ var_length = LoadObjectField<Smi>(elements, FixedArray::kLengthOffset);
+ var_elements = elements;
Goto(&if_join);
}
@@ -544,19 +546,19 @@ TF_BUILTIN(ObjectKeys, ObjectBuiltinsAssembler) {
TNode<NativeContext> native_context = LoadNativeContext(context);
TNode<Map> array_map =
LoadJSArrayElementsMap(PACKED_ELEMENTS, native_context);
- TNode<JSArray> array = AllocateJSArray(
- array_map, CAST(var_elements.value()), CAST(var_length.value()));
+ TNode<JSArray> array =
+ AllocateJSArray(array_map, var_elements.value(), var_length.value());
Return(array);
}
}
// ES #sec-object.getOwnPropertyNames
TF_BUILTIN(ObjectGetOwnPropertyNames, ObjectBuiltinsAssembler) {
- Node* object = Parameter(Descriptor::kObject);
- Node* context = Parameter(Descriptor::kContext);
+ TNode<Object> object = CAST(Parameter(Descriptor::kObject));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- VARIABLE(var_length, MachineRepresentation::kTagged);
- VARIABLE(var_elements, MachineRepresentation::kTagged);
+ TVARIABLE(Smi, var_length);
+ TVARIABLE(FixedArrayBase, var_elements);
Label if_empty(this, Label::kDeferred), if_empty_elements(this),
if_fast(this), try_fast(this, Label::kDeferred),
if_slow(this, Label::kDeferred), if_join(this);
@@ -564,10 +566,11 @@ TF_BUILTIN(ObjectGetOwnPropertyNames, ObjectBuiltinsAssembler) {
// Take the slow path if the {object} IsCustomElementsReceiverInstanceType or
// has any elements.
GotoIf(TaggedIsSmi(object), &if_slow);
- TNode<Map> object_map = LoadMap(object);
+
+ TNode<Map> object_map = LoadMap(CAST(object));
TNode<Uint16T> instance_type = LoadMapInstanceType(object_map);
GotoIf(IsCustomElementsReceiverInstanceType(instance_type), &if_slow);
- TNode<FixedArrayBase> object_elements = LoadElements(object);
+ TNode<FixedArrayBase> object_elements = LoadElements(CAST(object));
GotoIf(IsEmptyFixedArray(object_elements), &if_empty_elements);
Branch(IsEmptySlowElementDictionary(object_elements), &if_empty_elements,
&if_slow);
@@ -600,14 +603,14 @@ TF_BUILTIN(ObjectGetOwnPropertyNames, ObjectBuiltinsAssembler) {
LoadObjectField(object_enum_cache, EnumCache::kKeysOffset);
// Allocate a JSArray and copy the elements from the {object_enum_keys}.
- Node* array = nullptr;
- Node* elements = nullptr;
TNode<NativeContext> native_context = LoadNativeContext(context);
TNode<Map> array_map =
LoadJSArrayElementsMap(PACKED_ELEMENTS, native_context);
TNode<Smi> array_length = SmiTag(Signed(object_enum_length));
+ TNode<JSArray> array;
+ TNode<FixedArrayBase> elements;
std::tie(array, elements) = AllocateUninitializedJSArrayWithElements(
- PACKED_ELEMENTS, array_map, array_length, nullptr, object_enum_length,
+ PACKED_ELEMENTS, array_map, array_length, {}, object_enum_length,
INTPTR_PARAMETERS);
CopyFixedArrayElements(PACKED_ELEMENTS, object_enum_keys, elements,
object_enum_length, SKIP_WRITE_BARRIER);
@@ -619,16 +622,16 @@ TF_BUILTIN(ObjectGetOwnPropertyNames, ObjectBuiltinsAssembler) {
// Let the runtime compute the elements and try initializing enum cache.
TNode<FixedArray> elements = CAST(CallRuntime(
Runtime::kObjectGetOwnPropertyNamesTryFast, context, object));
- var_length.Bind(LoadObjectField(elements, FixedArray::kLengthOffset));
- var_elements.Bind(elements);
+ var_length = LoadObjectField<Smi>(elements, FixedArray::kLengthOffset);
+ var_elements = elements;
Goto(&if_join);
}
BIND(&if_empty);
{
// The {object} doesn't have any enumerable keys.
- var_length.Bind(SmiConstant(0));
- var_elements.Bind(EmptyFixedArrayConstant());
+ var_length = SmiConstant(0);
+ var_elements = EmptyFixedArrayConstant();
Goto(&if_join);
}
@@ -637,8 +640,8 @@ TF_BUILTIN(ObjectGetOwnPropertyNames, ObjectBuiltinsAssembler) {
// Let the runtime compute the elements.
TNode<FixedArray> elements =
CAST(CallRuntime(Runtime::kObjectGetOwnPropertyNames, context, object));
- var_length.Bind(LoadObjectField(elements, FixedArray::kLengthOffset));
- var_elements.Bind(elements);
+ var_length = LoadObjectField<Smi>(elements, FixedArray::kLengthOffset);
+ var_elements = elements;
Goto(&if_join);
}
@@ -648,8 +651,8 @@ TF_BUILTIN(ObjectGetOwnPropertyNames, ObjectBuiltinsAssembler) {
TNode<NativeContext> native_context = LoadNativeContext(context);
TNode<Map> array_map =
LoadJSArrayElementsMap(PACKED_ELEMENTS, native_context);
- TNode<JSArray> array = AllocateJSArray(
- array_map, CAST(var_elements.value()), CAST(var_length.value()));
+ TNode<JSArray> array =
+ AllocateJSArray(array_map, var_elements.value(), var_length.value());
Return(array);
}
}
@@ -672,9 +675,9 @@ TF_BUILTIN(ObjectEntries, ObjectEntriesValuesBuiltinsAssembler) {
// ES #sec-object.prototype.isprototypeof
TF_BUILTIN(ObjectPrototypeIsPrototypeOf, ObjectBuiltinsAssembler) {
- Node* receiver = Parameter(Descriptor::kReceiver);
- Node* value = Parameter(Descriptor::kValue);
- Node* context = Parameter(Descriptor::kContext);
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ TNode<Object> value = CAST(Parameter(Descriptor::kValue));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
Label if_receiverisnullorundefined(this, Label::kDeferred),
if_valueisnotreceiver(this, Label::kDeferred);
@@ -685,31 +688,35 @@ TF_BUILTIN(ObjectPrototypeIsPrototypeOf, ObjectBuiltinsAssembler) {
// immediately aborts and returns false anyways.
GotoIf(TaggedIsSmi(value), &if_valueisnotreceiver);
- // Check if {receiver} is either null or undefined and in that case,
- // invoke the ToObject builtin, which raises the appropriate error.
- // Otherwise we don't need to invoke ToObject, since {receiver} is
- // either already a JSReceiver, in which case ToObject is a no-op,
- // or it's a Primitive and ToObject would allocate a fresh JSPrimitiveWrapper
- // wrapper, which wouldn't be identical to any existing JSReceiver
- // found in the prototype chain of {value}, hence it will return
- // false no matter if we search for the Primitive {receiver} or
- // a newly allocated JSPrimitiveWrapper wrapper for {receiver}.
- GotoIf(IsNull(receiver), &if_receiverisnullorundefined);
- GotoIf(IsUndefined(receiver), &if_receiverisnullorundefined);
-
- // Loop through the prototype chain looking for the {receiver}.
- Return(HasInPrototypeChain(context, value, receiver));
-
- BIND(&if_receiverisnullorundefined);
{
- // If {value} is a primitive HeapObject, we need to return
- // false instead of throwing an exception per order of the
- // steps in the specification, so check that first here.
- GotoIfNot(IsJSReceiver(value), &if_valueisnotreceiver);
-
- // Simulate the ToObject invocation on {receiver}.
- ToObject(context, receiver);
- Unreachable();
+ TNode<HeapObject> value_heap_object = CAST(value);
+
+ // Check if {receiver} is either null or undefined and in that case,
+ // invoke the ToObject builtin, which raises the appropriate error.
+ // Otherwise we don't need to invoke ToObject, since {receiver} is
+ // either already a JSReceiver, in which case ToObject is a no-op,
+ // or it's a Primitive and ToObject would allocate a fresh
+ // JSPrimitiveWrapper wrapper, which wouldn't be identical to any existing
+ // JSReceiver found in the prototype chain of {value}, hence it will return
+ // false no matter if we search for the Primitive {receiver} or
+ // a newly allocated JSPrimitiveWrapper wrapper for {receiver}.
+ GotoIf(IsNull(receiver), &if_receiverisnullorundefined);
+ GotoIf(IsUndefined(receiver), &if_receiverisnullorundefined);
+
+ // Loop through the prototype chain looking for the {receiver}.
+ Return(HasInPrototypeChain(context, value_heap_object, receiver));
+
+ BIND(&if_receiverisnullorundefined);
+ {
+ // If {value} is a primitive HeapObject, we need to return
+ // false instead of throwing an exception per order of the
+ // steps in the specification, so check that first here.
+ GotoIfNot(IsJSReceiver(value_heap_object), &if_valueisnotreceiver);
+
+ // Simulate the ToObject invocation on {receiver}.
+ ToObject(context, receiver);
+ Unreachable();
+ }
}
BIND(&if_valueisnotreceiver);
@@ -731,14 +738,18 @@ TF_BUILTIN(ObjectToString, ObjectBuiltinsAssembler) {
if_regexp(this), if_string(this), if_symbol(this, Label::kDeferred),
if_value(this), if_bigint(this, Label::kDeferred);
- Node* receiver = Parameter(Descriptor::kReceiver);
- Node* context = Parameter(Descriptor::kContext);
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+
+ TVARIABLE(String, var_default);
+ TVARIABLE(HeapObject, var_holder);
// This is arranged to check the likely cases first.
- VARIABLE(var_default, MachineRepresentation::kTagged);
- VARIABLE(var_holder, MachineRepresentation::kTagged, receiver);
GotoIf(TaggedIsSmi(receiver), &if_number);
- TNode<Map> receiver_map = LoadMap(receiver);
+
+ TNode<HeapObject> receiver_heap_object = CAST(receiver);
+ TNode<Map> receiver_map = LoadMap(receiver_heap_object);
+ var_holder = receiver_heap_object;
TNode<Uint16T> receiver_instance_type = LoadMapInstanceType(receiver_map);
GotoIf(IsPrimitiveInstanceType(receiver_instance_type), &if_primitive);
const struct {
@@ -747,8 +758,8 @@ TF_BUILTIN(ObjectToString, ObjectBuiltinsAssembler) {
} kJumpTable[] = {{JS_OBJECT_TYPE, &if_object},
{JS_ARRAY_TYPE, &if_array},
{JS_FUNCTION_TYPE, &if_function},
- {JS_REGEXP_TYPE, &if_regexp},
- {JS_ARGUMENTS_TYPE, &if_arguments},
+ {JS_REG_EXP_TYPE, &if_regexp},
+ {JS_ARGUMENTS_OBJECT_TYPE, &if_arguments},
{JS_DATE_TYPE, &if_date},
{JS_BOUND_FUNCTION_TYPE, &if_function},
{JS_API_OBJECT_TYPE, &if_apiobject},
@@ -769,30 +780,31 @@ TF_BUILTIN(ObjectToString, ObjectBuiltinsAssembler) {
BIND(&if_apiobject);
{
// Lookup the @@toStringTag property on the {receiver}.
- VARIABLE(var_tag, MachineRepresentation::kTagged,
- GetProperty(context, receiver,
- isolate()->factory()->to_string_tag_symbol()));
+ TVARIABLE(Object, var_tag,
+ GetProperty(context, receiver,
+ isolate()->factory()->to_string_tag_symbol()));
Label if_tagisnotstring(this), if_tagisstring(this);
GotoIf(TaggedIsSmi(var_tag.value()), &if_tagisnotstring);
- Branch(IsString(var_tag.value()), &if_tagisstring, &if_tagisnotstring);
+ Branch(IsString(CAST(var_tag.value())), &if_tagisstring,
+ &if_tagisnotstring);
BIND(&if_tagisnotstring);
{
- var_tag.Bind(CallRuntime(Runtime::kClassOf, context, receiver));
+ var_tag = CallRuntime(Runtime::kClassOf, context, receiver);
Goto(&if_tagisstring);
}
BIND(&if_tagisstring);
- ReturnToStringFormat(context, var_tag.value());
+ ReturnToStringFormat(context, CAST(var_tag.value()));
}
BIND(&if_arguments);
{
- var_default.Bind(ArgumentsToStringConstant());
+ var_default = ArgumentsToStringConstant();
Goto(&checkstringtag);
}
BIND(&if_array);
{
- var_default.Bind(ArrayToStringConstant());
+ var_default = ArrayToStringConstant();
Goto(&checkstringtag);
}
@@ -801,30 +813,30 @@ TF_BUILTIN(ObjectToString, ObjectBuiltinsAssembler) {
TNode<NativeContext> native_context = LoadNativeContext(context);
TNode<JSFunction> boolean_constructor = CAST(
LoadContextElement(native_context, Context::BOOLEAN_FUNCTION_INDEX));
- TNode<Map> boolean_initial_map = CAST(LoadObjectField(
- boolean_constructor, JSFunction::kPrototypeOrInitialMapOffset));
- TNode<Object> boolean_prototype =
- LoadObjectField(boolean_initial_map, Map::kPrototypeOffset);
- var_default.Bind(BooleanToStringConstant());
- var_holder.Bind(boolean_prototype);
+ TNode<Map> boolean_initial_map = LoadObjectField<Map>(
+ boolean_constructor, JSFunction::kPrototypeOrInitialMapOffset);
+ TNode<HeapObject> boolean_prototype =
+ LoadObjectField<HeapObject>(boolean_initial_map, Map::kPrototypeOffset);
+ var_default = BooleanToStringConstant();
+ var_holder = boolean_prototype;
Goto(&checkstringtag);
}
BIND(&if_date);
{
- var_default.Bind(DateToStringConstant());
+ var_default = DateToStringConstant();
Goto(&checkstringtag);
}
BIND(&if_error);
{
- var_default.Bind(ErrorToStringConstant());
+ var_default = ErrorToStringConstant();
Goto(&checkstringtag);
}
BIND(&if_function);
{
- var_default.Bind(FunctionToStringConstant());
+ var_default = FunctionToStringConstant();
Goto(&checkstringtag);
}
@@ -833,19 +845,19 @@ TF_BUILTIN(ObjectToString, ObjectBuiltinsAssembler) {
TNode<NativeContext> native_context = LoadNativeContext(context);
TNode<JSFunction> number_constructor = CAST(
LoadContextElement(native_context, Context::NUMBER_FUNCTION_INDEX));
- TNode<Map> number_initial_map = CAST(LoadObjectField(
- number_constructor, JSFunction::kPrototypeOrInitialMapOffset));
- TNode<Object> number_prototype =
- LoadObjectField(number_initial_map, Map::kPrototypeOffset);
- var_default.Bind(NumberToStringConstant());
- var_holder.Bind(number_prototype);
+ TNode<Map> number_initial_map = LoadObjectField<Map>(
+ number_constructor, JSFunction::kPrototypeOrInitialMapOffset);
+ TNode<HeapObject> number_prototype =
+ LoadObjectField<HeapObject>(number_initial_map, Map::kPrototypeOffset);
+ var_default = NumberToStringConstant();
+ var_holder = number_prototype;
Goto(&checkstringtag);
}
BIND(&if_object);
{
- CSA_ASSERT(this, IsJSReceiver(receiver));
- var_default.Bind(ObjectToStringConstant());
+ CSA_ASSERT(this, IsJSReceiver(CAST(receiver)));
+ var_default = ObjectToStringConstant();
Goto(&checkstringtag);
}
@@ -885,24 +897,25 @@ TF_BUILTIN(ObjectToString, ObjectBuiltinsAssembler) {
});
// Lookup the @@toStringTag property on the {receiver}.
- VARIABLE(var_tag, MachineRepresentation::kTagged,
- GetProperty(context, receiver,
- isolate()->factory()->to_string_tag_symbol()));
+ TVARIABLE(Object, var_tag,
+ GetProperty(context, receiver,
+ isolate()->factory()->to_string_tag_symbol()));
Label if_tagisnotstring(this), if_tagisstring(this);
GotoIf(TaggedIsSmi(var_tag.value()), &if_tagisnotstring);
- Branch(IsString(var_tag.value()), &if_tagisstring, &if_tagisnotstring);
+ Branch(IsString(CAST(var_tag.value())), &if_tagisstring,
+ &if_tagisnotstring);
BIND(&if_tagisnotstring);
{
- var_tag.Bind(builtin_tag);
+ var_tag = builtin_tag;
Goto(&if_tagisstring);
}
BIND(&if_tagisstring);
- ReturnToStringFormat(context, var_tag.value());
+ ReturnToStringFormat(context, CAST(var_tag.value()));
}
BIND(&if_regexp);
{
- var_default.Bind(RegexpToStringConstant());
+ var_default = RegexpToStringConstant();
Goto(&checkstringtag);
}
@@ -911,12 +924,12 @@ TF_BUILTIN(ObjectToString, ObjectBuiltinsAssembler) {
TNode<NativeContext> native_context = LoadNativeContext(context);
TNode<JSFunction> string_constructor = CAST(
LoadContextElement(native_context, Context::STRING_FUNCTION_INDEX));
- TNode<Map> string_initial_map = CAST(LoadObjectField(
- string_constructor, JSFunction::kPrototypeOrInitialMapOffset));
- TNode<Object> string_prototype =
- LoadObjectField(string_initial_map, Map::kPrototypeOffset);
- var_default.Bind(StringToStringConstant());
- var_holder.Bind(string_prototype);
+ TNode<Map> string_initial_map = LoadObjectField<Map>(
+ string_constructor, JSFunction::kPrototypeOrInitialMapOffset);
+ TNode<HeapObject> string_prototype =
+ LoadObjectField<HeapObject>(string_initial_map, Map::kPrototypeOffset);
+ var_default = StringToStringConstant();
+ var_holder = string_prototype;
Goto(&checkstringtag);
}
@@ -925,12 +938,12 @@ TF_BUILTIN(ObjectToString, ObjectBuiltinsAssembler) {
TNode<NativeContext> native_context = LoadNativeContext(context);
TNode<JSFunction> symbol_constructor = CAST(
LoadContextElement(native_context, Context::SYMBOL_FUNCTION_INDEX));
- TNode<Map> symbol_initial_map = CAST(LoadObjectField(
- symbol_constructor, JSFunction::kPrototypeOrInitialMapOffset));
- TNode<Object> symbol_prototype =
- LoadObjectField(symbol_initial_map, Map::kPrototypeOffset);
- var_default.Bind(ObjectToStringConstant());
- var_holder.Bind(symbol_prototype);
+ TNode<Map> symbol_initial_map = LoadObjectField<Map>(
+ symbol_constructor, JSFunction::kPrototypeOrInitialMapOffset);
+ TNode<HeapObject> symbol_prototype =
+ LoadObjectField<HeapObject>(symbol_initial_map, Map::kPrototypeOffset);
+ var_default = ObjectToStringConstant();
+ var_holder = symbol_prototype;
Goto(&checkstringtag);
}
@@ -939,12 +952,12 @@ TF_BUILTIN(ObjectToString, ObjectBuiltinsAssembler) {
TNode<NativeContext> native_context = LoadNativeContext(context);
TNode<JSFunction> bigint_constructor = CAST(
LoadContextElement(native_context, Context::BIGINT_FUNCTION_INDEX));
- TNode<Map> bigint_initial_map = CAST(LoadObjectField(
- bigint_constructor, JSFunction::kPrototypeOrInitialMapOffset));
- TNode<Object> bigint_prototype =
- LoadObjectField(bigint_initial_map, Map::kPrototypeOffset);
- var_default.Bind(ObjectToStringConstant());
- var_holder.Bind(bigint_prototype);
+ TNode<Map> bigint_initial_map = LoadObjectField<Map>(
+ bigint_constructor, JSFunction::kPrototypeOrInitialMapOffset);
+ TNode<HeapObject> bigint_prototype =
+ LoadObjectField<HeapObject>(bigint_initial_map, Map::kPrototypeOffset);
+ var_default = ObjectToStringConstant();
+ var_holder = bigint_prototype;
Goto(&checkstringtag);
}
@@ -956,12 +969,13 @@ TF_BUILTIN(ObjectToString, ObjectBuiltinsAssembler) {
if_value_is_bigint(this, Label::kDeferred),
if_value_is_string(this, Label::kDeferred);
- Node* receiver_value = LoadJSPrimitiveWrapperValue(receiver);
+ TNode<Object> receiver_value =
+ LoadJSPrimitiveWrapperValue(CAST(receiver_heap_object));
// We need to start with the object to see if the value was a subclass
// which might have interesting properties.
- var_holder.Bind(receiver);
+ var_holder = receiver_heap_object;
GotoIf(TaggedIsSmi(receiver_value), &if_value_is_number);
- TNode<Map> receiver_value_map = LoadMap(receiver_value);
+ TNode<Map> receiver_value_map = LoadMap(CAST(receiver_value));
GotoIf(IsHeapNumberMap(receiver_value_map), &if_value_is_number);
GotoIf(IsBooleanMap(receiver_value_map), &if_value_is_boolean);
GotoIf(IsSymbolMap(receiver_value_map), &if_value_is_symbol);
@@ -974,31 +988,31 @@ TF_BUILTIN(ObjectToString, ObjectBuiltinsAssembler) {
BIND(&if_value_is_number);
{
- var_default.Bind(NumberToStringConstant());
+ var_default = NumberToStringConstant();
Goto(&checkstringtag);
}
BIND(&if_value_is_boolean);
{
- var_default.Bind(BooleanToStringConstant());
+ var_default = BooleanToStringConstant();
Goto(&checkstringtag);
}
BIND(&if_value_is_string);
{
- var_default.Bind(StringToStringConstant());
+ var_default = StringToStringConstant();
Goto(&checkstringtag);
}
BIND(&if_value_is_bigint);
{
- var_default.Bind(ObjectToStringConstant());
+ var_default = ObjectToStringConstant();
Goto(&checkstringtag);
}
BIND(&if_value_is_symbol);
{
- var_default.Bind(ObjectToStringConstant());
+ var_default = ObjectToStringConstant();
Goto(&checkstringtag);
}
}
@@ -1013,13 +1027,13 @@ TF_BUILTIN(ObjectToString, ObjectBuiltinsAssembler) {
Goto(&loop);
BIND(&loop);
{
- Node* holder = var_holder.value();
+ TNode<HeapObject> holder = var_holder.value();
GotoIf(IsNull(holder), &return_default);
TNode<Map> holder_map = LoadMap(holder);
TNode<Uint32T> holder_bit_field3 = LoadMapBitField3(holder_map);
GotoIf(IsSetWord32<Map::MayHaveInterestingSymbolsBit>(holder_bit_field3),
&return_generic);
- var_holder.Bind(LoadMapPrototype(holder_map));
+ var_holder = LoadMapPrototype(holder_map);
Goto(&loop);
}
@@ -1029,7 +1043,7 @@ TF_BUILTIN(ObjectToString, ObjectBuiltinsAssembler) {
ToStringTagSymbolConstant());
GotoIf(TaggedIsSmi(tag), &return_default);
GotoIfNot(IsString(CAST(tag)), &return_default);
- ReturnToStringFormat(context, tag);
+ ReturnToStringFormat(context, CAST(tag));
}
BIND(&return_default);
@@ -1058,28 +1072,28 @@ TF_BUILTIN(CreateObjectWithoutProperties, ObjectBuiltinsAssembler) {
BranchIfJSReceiver(prototype, &prototype_jsreceiver, &call_runtime);
}
- VARIABLE(map, MachineRepresentation::kTagged);
- VARIABLE(properties, MachineRepresentation::kTagged);
+ TVARIABLE(Map, map);
+ TVARIABLE(HeapObject, properties);
Label instantiate_map(this);
BIND(&prototype_null);
{
Comment("Prototype is null");
- map.Bind(LoadContextElement(native_context,
- Context::SLOW_OBJECT_WITH_NULL_PROTOTYPE_MAP));
- properties.Bind(AllocateNameDictionary(NameDictionary::kInitialCapacity));
+ map = CAST(LoadContextElement(
+ native_context, Context::SLOW_OBJECT_WITH_NULL_PROTOTYPE_MAP));
+ properties = AllocateNameDictionary(NameDictionary::kInitialCapacity);
Goto(&instantiate_map);
}
BIND(&prototype_jsreceiver);
{
Comment("Prototype is JSReceiver");
- properties.Bind(EmptyFixedArrayConstant());
+ properties = EmptyFixedArrayConstant();
TNode<HeapObject> object_function = CAST(
LoadContextElement(native_context, Context::OBJECT_FUNCTION_INDEX));
- TNode<Object> object_function_map = LoadObjectField(
+ TNode<Map> object_function_map = LoadObjectField<Map>(
object_function, JSFunction::kPrototypeOrInitialMapOffset);
- map.Bind(object_function_map);
+ map = object_function_map;
GotoIf(TaggedEqual(prototype, LoadMapPrototype(map.value())),
&instantiate_map);
Comment("Try loading the prototype info");
@@ -1087,8 +1101,8 @@ TF_BUILTIN(CreateObjectWithoutProperties, ObjectBuiltinsAssembler) {
LoadMapPrototypeInfo(LoadMap(CAST(prototype)), &call_runtime);
TNode<MaybeObject> maybe_map = LoadMaybeWeakObjectField(
prototype_info, PrototypeInfo::kObjectCreateMapOffset);
- GotoIf(IsStrongReferenceTo(maybe_map, UndefinedConstant()), &call_runtime);
- map.Bind(GetHeapObjectAssumeWeak(maybe_map, &call_runtime));
+ GotoIf(TaggedEqual(maybe_map, UndefinedConstant()), &call_runtime);
+ map = CAST(GetHeapObjectAssumeWeak(maybe_map, &call_runtime));
Goto(&instantiate_map);
}
@@ -1153,28 +1167,28 @@ TF_BUILTIN(ObjectCreate, ObjectBuiltinsAssembler) {
// Create a new object with the given prototype.
BIND(&no_properties);
{
- VARIABLE(map, MachineRepresentation::kTagged);
- VARIABLE(properties, MachineRepresentation::kTagged);
+ TVARIABLE(Map, map);
+ TVARIABLE(HeapObject, properties);
Label non_null_proto(this), instantiate_map(this), good(this);
Branch(IsNull(prototype), &good, &non_null_proto);
BIND(&good);
{
- map.Bind(LoadContextElement(
+ map = CAST(LoadContextElement(
context, Context::SLOW_OBJECT_WITH_NULL_PROTOTYPE_MAP));
- properties.Bind(AllocateNameDictionary(NameDictionary::kInitialCapacity));
+ properties = AllocateNameDictionary(NameDictionary::kInitialCapacity);
Goto(&instantiate_map);
}
BIND(&non_null_proto);
{
- properties.Bind(EmptyFixedArrayConstant());
+ properties = EmptyFixedArrayConstant();
TNode<HeapObject> object_function =
CAST(LoadContextElement(context, Context::OBJECT_FUNCTION_INDEX));
- TNode<Object> object_function_map = LoadObjectField(
+ TNode<Map> object_function_map = LoadObjectField<Map>(
object_function, JSFunction::kPrototypeOrInitialMapOffset);
- map.Bind(object_function_map);
+ map = object_function_map;
GotoIf(TaggedEqual(prototype, LoadMapPrototype(map.value())),
&instantiate_map);
// Try loading the prototype info.
@@ -1183,9 +1197,8 @@ TF_BUILTIN(ObjectCreate, ObjectBuiltinsAssembler) {
Comment("Load ObjectCreateMap from PrototypeInfo");
TNode<MaybeObject> maybe_map = LoadMaybeWeakObjectField(
prototype_info, PrototypeInfo::kObjectCreateMapOffset);
- GotoIf(IsStrongReferenceTo(maybe_map, UndefinedConstant()),
- &call_runtime);
- map.Bind(GetHeapObjectAssumeWeak(maybe_map, &call_runtime));
+ GotoIf(TaggedEqual(maybe_map, UndefinedConstant()), &call_runtime);
+ map = CAST(GetHeapObjectAssumeWeak(maybe_map, &call_runtime));
Goto(&instantiate_map);
}
@@ -1207,8 +1220,8 @@ TF_BUILTIN(ObjectCreate, ObjectBuiltinsAssembler) {
// ES #sec-object.is
TF_BUILTIN(ObjectIs, ObjectBuiltinsAssembler) {
- Node* const left = Parameter(Descriptor::kLeft);
- Node* const right = Parameter(Descriptor::kRight);
+ TNode<Object> const left = CAST(Parameter(Descriptor::kLeft));
+ TNode<Object> const right = CAST(Parameter(Descriptor::kRight));
Label return_true(this), return_false(this);
BranchIfSameValue(left, right, &return_true, &return_false);
@@ -1221,9 +1234,9 @@ TF_BUILTIN(ObjectIs, ObjectBuiltinsAssembler) {
}
TF_BUILTIN(CreateIterResultObject, ObjectBuiltinsAssembler) {
- Node* const value = Parameter(Descriptor::kValue);
- Node* const done = Parameter(Descriptor::kDone);
- Node* const context = Parameter(Descriptor::kContext);
+ TNode<Object> const value = CAST(Parameter(Descriptor::kValue));
+ TNode<Oddball> const done = CAST(Parameter(Descriptor::kDone));
+ TNode<Context> const context = CAST(Parameter(Descriptor::kContext));
TNode<NativeContext> const native_context = LoadNativeContext(context);
TNode<Map> const map = CAST(
@@ -1238,53 +1251,53 @@ TF_BUILTIN(CreateIterResultObject, ObjectBuiltinsAssembler) {
}
TF_BUILTIN(HasProperty, ObjectBuiltinsAssembler) {
- Node* key = Parameter(Descriptor::kKey);
- Node* object = Parameter(Descriptor::kObject);
- Node* context = Parameter(Descriptor::kContext);
+ TNode<Object> key = CAST(Parameter(Descriptor::kKey));
+ TNode<Object> object = CAST(Parameter(Descriptor::kObject));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
Return(HasProperty(context, object, key, kHasProperty));
}
TF_BUILTIN(InstanceOf, ObjectBuiltinsAssembler) {
- Node* object = Parameter(Descriptor::kLeft);
- Node* callable = Parameter(Descriptor::kRight);
- Node* context = Parameter(Descriptor::kContext);
+ TNode<Object> object = CAST(Parameter(Descriptor::kLeft));
+ TNode<Object> callable = CAST(Parameter(Descriptor::kRight));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
Return(InstanceOf(object, callable, context));
}
// ES6 section 7.3.19 OrdinaryHasInstance ( C, O )
TF_BUILTIN(OrdinaryHasInstance, ObjectBuiltinsAssembler) {
- Node* constructor = Parameter(Descriptor::kLeft);
- Node* object = Parameter(Descriptor::kRight);
- Node* context = Parameter(Descriptor::kContext);
+ TNode<Object> constructor = CAST(Parameter(Descriptor::kLeft));
+ TNode<Object> object = CAST(Parameter(Descriptor::kRight));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
Return(OrdinaryHasInstance(context, constructor, object));
}
TF_BUILTIN(GetSuperConstructor, ObjectBuiltinsAssembler) {
- Node* object = Parameter(Descriptor::kObject);
- Node* context = Parameter(Descriptor::kContext);
+ TNode<JSFunction> object = CAST(Parameter(Descriptor::kObject));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
Return(GetSuperConstructor(context, object));
}
TF_BUILTIN(CreateGeneratorObject, ObjectBuiltinsAssembler) {
- Node* closure = Parameter(Descriptor::kClosure);
- Node* receiver = Parameter(Descriptor::kReceiver);
- Node* context = Parameter(Descriptor::kContext);
+ TNode<JSFunction> closure = CAST(Parameter(Descriptor::kClosure));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
// Get the initial map from the function, jumping to the runtime if we don't
// have one.
Label done(this), runtime(this);
GotoIfNot(IsFunctionWithPrototypeSlotMap(LoadMap(closure)), &runtime);
- TNode<HeapObject> maybe_map =
- CAST(LoadObjectField(closure, JSFunction::kPrototypeOrInitialMapOffset));
+ TNode<HeapObject> maybe_map = LoadObjectField<HeapObject>(
+ closure, JSFunction::kPrototypeOrInitialMapOffset);
GotoIf(DoesntHaveInstanceType(maybe_map, MAP_TYPE), &runtime);
TNode<Map> map = CAST(maybe_map);
- TNode<SharedFunctionInfo> shared =
- CAST(LoadObjectField(closure, JSFunction::kSharedFunctionInfoOffset));
+ TNode<SharedFunctionInfo> shared = LoadObjectField<SharedFunctionInfo>(
+ closure, JSFunction::kSharedFunctionInfoOffset);
TNode<BytecodeArray> bytecode_array =
LoadSharedFunctionInfoBytecodeArray(shared);
@@ -1293,7 +1306,7 @@ TF_BUILTIN(CreateGeneratorObject, ObjectBuiltinsAssembler) {
MachineType::Uint16()));
TNode<IntPtrT> frame_size = ChangeInt32ToIntPtr(LoadObjectField(
bytecode_array, BytecodeArray::kFrameSizeOffset, MachineType::Int32()));
- TNode<WordT> size =
+ TNode<IntPtrT> size =
IntPtrAdd(WordSar(frame_size, IntPtrConstant(kTaggedSizeLog2)),
formal_parameter_count);
TNode<FixedArrayBase> parameters_and_registers =
@@ -1337,16 +1350,17 @@ TF_BUILTIN(CreateGeneratorObject, ObjectBuiltinsAssembler) {
// ES6 section 19.1.2.7 Object.getOwnPropertyDescriptor ( O, P )
TF_BUILTIN(ObjectGetOwnPropertyDescriptor, ObjectBuiltinsAssembler) {
- Node* argc = Parameter(Descriptor::kJSActualArgumentsCount);
- Node* context = Parameter(Descriptor::kContext);
+ TNode<Int32T> argc =
+ UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
CSA_ASSERT(this, IsUndefined(Parameter(Descriptor::kJSNewTarget)));
- CodeStubArguments args(this, ChangeInt32ToIntPtr(argc));
+ CodeStubArguments args(this, argc);
TNode<Object> object_input = args.GetOptionalArgumentValue(0);
TNode<Object> key = args.GetOptionalArgumentValue(1);
// 1. Let obj be ? ToObject(O).
- TNode<JSReceiver> object = ToObject_Inline(CAST(context), object_input);
+ TNode<JSReceiver> object = ToObject_Inline(context, object_input);
// 2. Let key be ? ToPropertyKey(P).
key = CallBuiltin(Builtins::kToName, context, key);
@@ -1359,9 +1373,8 @@ TF_BUILTIN(ObjectGetOwnPropertyDescriptor, ObjectBuiltinsAssembler) {
TNode<Uint16T> instance_type = LoadMapInstanceType(map);
GotoIf(IsSpecialReceiverInstanceType(instance_type), &call_runtime);
{
- VARIABLE(var_index, MachineType::PointerRepresentation(),
- IntPtrConstant(0));
- VARIABLE(var_name, MachineRepresentation::kTagged);
+ TVARIABLE(IntPtrT, var_index, IntPtrConstant(0));
+ TVARIABLE(Name, var_name);
TryToName(key, &if_keyisindex, &var_index, &if_iskeyunique, &var_name,
&call_runtime, &if_notunique_name);
@@ -1369,8 +1382,9 @@ TF_BUILTIN(ObjectGetOwnPropertyDescriptor, ObjectBuiltinsAssembler) {
BIND(&if_notunique_name);
{
Label not_in_string_table(this);
- TryInternalizeString(key, &if_keyisindex, &var_index, &if_iskeyunique,
- &var_name, &not_in_string_table, &call_runtime);
+ TryInternalizeString(CAST(key), &if_keyisindex, &var_index,
+ &if_iskeyunique, &var_name, &not_in_string_table,
+ &call_runtime);
BIND(&not_in_string_table);
{
@@ -1384,9 +1398,9 @@ TF_BUILTIN(ObjectGetOwnPropertyDescriptor, ObjectBuiltinsAssembler) {
{
Label if_found_value(this), return_empty(this), if_not_found(this);
- VARIABLE(var_value, MachineRepresentation::kTagged);
- VARIABLE(var_details, MachineRepresentation::kWord32);
- VARIABLE(var_raw_value, MachineRepresentation::kTagged);
+ TVARIABLE(Object, var_value);
+ TVARIABLE(Word32T, var_details);
+ TVARIABLE(Object, var_raw_value);
TryGetOwnProperty(context, object, object, map, instance_type,
var_name.value(), &if_found_value, &var_value,
@@ -1394,13 +1408,13 @@ TF_BUILTIN(ObjectGetOwnPropertyDescriptor, ObjectBuiltinsAssembler) {
&if_not_found, kReturnAccessorPair);
BIND(&if_found_value);
- // 4. Return FromPropertyDescriptor(desc).
- Node* js_desc = FromPropertyDetails(context, var_value.value(),
- var_details.value(), &call_runtime);
+ // 4. Return FromPropertyDetails(desc).
+ TNode<JSObject> js_desc = FromPropertyDetails(
+ context, var_value.value(), var_details.value(), &call_runtime);
args.PopAndReturn(js_desc);
BIND(&return_empty);
- var_value.Bind(UndefinedConstant());
+ var_value = UndefinedConstant();
args.PopAndReturn(UndefinedConstant());
BIND(&if_not_found);
@@ -1421,7 +1435,7 @@ TF_BUILTIN(ObjectGetOwnPropertyDescriptor, ObjectBuiltinsAssembler) {
TNode<FixedArray> desc_array = CAST(desc);
// 4. Return FromPropertyDescriptor(desc).
- Node* js_desc = FromPropertyDescriptor(context, desc_array);
+ TNode<JSObject> js_desc = FromPropertyDescriptor(context, desc_array);
args.PopAndReturn(js_desc);
}
BIND(&return_undefined);
@@ -1440,14 +1454,14 @@ void ObjectBuiltinsAssembler::AddToDictionaryIf(
BIND(&done);
}
-Node* ObjectBuiltinsAssembler::FromPropertyDescriptor(Node* context,
- Node* desc) {
- VARIABLE(js_descriptor, MachineRepresentation::kTagged);
+TNode<JSObject> ObjectBuiltinsAssembler::FromPropertyDescriptor(
+ TNode<Context> context, TNode<FixedArray> desc) {
+ TVARIABLE(JSObject, js_descriptor);
TNode<Int32T> flags = LoadAndUntagToWord32ObjectField(
desc, PropertyDescriptorObject::kFlagsOffset);
- TNode<Word32T> has_flags =
+ TNode<Int32T> has_flags =
Word32And(flags, Int32Constant(PropertyDescriptorObject::kHasMask));
Label if_accessor_desc(this), if_data_desc(this), if_generic_desc(this),
@@ -1465,21 +1479,21 @@ Node* ObjectBuiltinsAssembler::FromPropertyDescriptor(Node* context,
BIND(&if_accessor_desc);
{
- js_descriptor.Bind(ConstructAccessorDescriptor(
+ js_descriptor = ConstructAccessorDescriptor(
context, LoadObjectField(desc, PropertyDescriptorObject::kGetOffset),
LoadObjectField(desc, PropertyDescriptorObject::kSetOffset),
IsSetWord32<PropertyDescriptorObject::IsEnumerableBit>(flags),
- IsSetWord32<PropertyDescriptorObject::IsConfigurableBit>(flags)));
+ IsSetWord32<PropertyDescriptorObject::IsConfigurableBit>(flags));
Goto(&return_desc);
}
BIND(&if_data_desc);
{
- js_descriptor.Bind(ConstructDataDescriptor(
+ js_descriptor = ConstructDataDescriptor(
context, LoadObjectField(desc, PropertyDescriptorObject::kValueOffset),
IsSetWord32<PropertyDescriptorObject::IsWritableBit>(flags),
IsSetWord32<PropertyDescriptorObject::IsEnumerableBit>(flags),
- IsSetWord32<PropertyDescriptorObject::IsConfigurableBit>(flags)));
+ IsSetWord32<PropertyDescriptorObject::IsConfigurableBit>(flags));
Goto(&return_desc);
}
@@ -1529,7 +1543,7 @@ Node* ObjectBuiltinsAssembler::FromPropertyDescriptor(Node* context,
IsSetWord32<PropertyDescriptorObject::IsConfigurableBit>(flags)),
&bailout);
- js_descriptor.Bind(js_desc);
+ js_descriptor = js_desc;
Goto(&return_desc);
BIND(&bailout);
@@ -1541,36 +1555,36 @@ Node* ObjectBuiltinsAssembler::FromPropertyDescriptor(Node* context,
return js_descriptor.value();
}
-Node* ObjectBuiltinsAssembler::FromPropertyDetails(Node* context,
- Node* raw_value,
- Node* details,
- Label* if_bailout) {
- VARIABLE(js_descriptor, MachineRepresentation::kTagged);
+TNode<JSObject> ObjectBuiltinsAssembler::FromPropertyDetails(
+ TNode<Context> context, TNode<Object> raw_value, TNode<Word32T> details,
+ Label* if_bailout) {
+ TVARIABLE(JSObject, js_descriptor);
Label if_accessor_desc(this), if_data_desc(this), return_desc(this);
BranchIfAccessorPair(raw_value, &if_accessor_desc, &if_data_desc);
BIND(&if_accessor_desc);
{
- TNode<Object> getter =
- LoadObjectField(raw_value, AccessorPair::kGetterOffset);
- TNode<Object> setter =
- LoadObjectField(raw_value, AccessorPair::kSetterOffset);
- js_descriptor.Bind(ConstructAccessorDescriptor(
+ TNode<AccessorPair> accessor_pair_value = CAST(raw_value);
+ TNode<HeapObject> getter = LoadObjectField<HeapObject>(
+ accessor_pair_value, AccessorPair::kGetterOffset);
+ TNode<HeapObject> setter = LoadObjectField<HeapObject>(
+ accessor_pair_value, AccessorPair::kSetterOffset);
+ js_descriptor = ConstructAccessorDescriptor(
context, GetAccessorOrUndefined(getter, if_bailout),
GetAccessorOrUndefined(setter, if_bailout),
IsNotSetWord32(details, PropertyDetails::kAttributesDontEnumMask),
- IsNotSetWord32(details, PropertyDetails::kAttributesDontDeleteMask)));
+ IsNotSetWord32(details, PropertyDetails::kAttributesDontDeleteMask));
Goto(&return_desc);
}
BIND(&if_data_desc);
{
- js_descriptor.Bind(ConstructDataDescriptor(
+ js_descriptor = ConstructDataDescriptor(
context, raw_value,
IsNotSetWord32(details, PropertyDetails::kAttributesReadOnlyMask),
IsNotSetWord32(details, PropertyDetails::kAttributesDontEnumMask),
- IsNotSetWord32(details, PropertyDetails::kAttributesDontDeleteMask)));
+ IsNotSetWord32(details, PropertyDetails::kAttributesDontDeleteMask));
Goto(&return_desc);
}
@@ -1578,20 +1592,20 @@ Node* ObjectBuiltinsAssembler::FromPropertyDetails(Node* context,
return js_descriptor.value();
}
-Node* ObjectBuiltinsAssembler::GetAccessorOrUndefined(Node* accessor,
- Label* if_bailout) {
+TNode<HeapObject> ObjectBuiltinsAssembler::GetAccessorOrUndefined(
+ TNode<HeapObject> accessor, Label* if_bailout) {
Label bind_undefined(this, Label::kDeferred), return_result(this);
- VARIABLE(result, MachineRepresentation::kTagged);
+ TVARIABLE(HeapObject, result);
GotoIf(IsNull(accessor), &bind_undefined);
- result.Bind(accessor);
+ result = accessor;
TNode<Map> map = LoadMap(accessor);
// TODO(ishell): probe template instantiations cache.
GotoIf(IsFunctionTemplateInfoMap(map), if_bailout);
Goto(&return_result);
BIND(&bind_undefined);
- result.Bind(UndefinedConstant());
+ result = UndefinedConstant();
Goto(&return_result);
BIND(&return_result);
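
The builtins-object-gen.cc hunks above also repeatedly fold CAST(LoadObjectField(...)) into the templated LoadObjectField<T>(...), which returns TNode<T> directly. A sketch of the two equivalent spellings, assuming a TNode<JSFunction> function in scope:

    // Pre-patch spelling: untyped load plus an explicit checked cast.
    TNode<Map> initial_map_before = CAST(LoadObjectField(
        function, JSFunction::kPrototypeOrInitialMapOffset));
    // Post-patch spelling: the template parameter carries the type.
    TNode<Map> initial_map_after = LoadObjectField<Map>(
        function, JSFunction::kPrototypeOrInitialMapOffset);
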
diff --git a/deps/v8/src/builtins/builtins-promise-gen.cc b/deps/v8/src/builtins/builtins-promise-gen.cc
index a1da55e0d9..b20b288c3d 100644
--- a/deps/v8/src/builtins/builtins-promise-gen.cc
+++ b/deps/v8/src/builtins/builtins-promise-gen.cc
@@ -21,11 +21,10 @@ namespace v8 {
namespace internal {
using Node = compiler::Node;
-template <class T>
-using TNode = CodeStubAssembler::TNode<T>;
using IteratorRecord = TorqueStructIteratorRecord;
-Node* PromiseBuiltinsAssembler::AllocateJSPromise(Node* context) {
+TNode<JSPromise> PromiseBuiltinsAssembler::AllocateJSPromise(
+ TNode<Context> context) {
TNode<NativeContext> const native_context = LoadNativeContext(context);
TNode<JSFunction> const promise_fun =
CAST(LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX));
@@ -39,7 +38,7 @@ Node* PromiseBuiltinsAssembler::AllocateJSPromise(Node* context) {
RootIndex::kEmptyFixedArray);
StoreObjectFieldRoot(promise, JSPromise::kElementsOffset,
RootIndex::kEmptyFixedArray);
- return promise;
+ return CAST(promise);
}
void PromiseBuiltinsAssembler::PromiseInit(Node* promise) {
@@ -54,13 +53,14 @@ void PromiseBuiltinsAssembler::PromiseInit(Node* promise) {
}
}
-Node* PromiseBuiltinsAssembler::AllocateAndInitJSPromise(Node* context) {
+TNode<JSPromise> PromiseBuiltinsAssembler::AllocateAndInitJSPromise(
+ TNode<Context> context) {
return AllocateAndInitJSPromise(context, UndefinedConstant());
}
-Node* PromiseBuiltinsAssembler::AllocateAndInitJSPromise(Node* context,
- Node* parent) {
- Node* const instance = AllocateJSPromise(context);
+TNode<JSPromise> PromiseBuiltinsAssembler::AllocateAndInitJSPromise(
+ TNode<Context> context, TNode<Object> parent) {
+ const TNode<JSPromise> instance = AllocateJSPromise(context);
PromiseInit(instance);
Label out(this);
@@ -72,11 +72,12 @@ Node* PromiseBuiltinsAssembler::AllocateAndInitJSPromise(Node* context,
return instance;
}
-Node* PromiseBuiltinsAssembler::AllocateAndSetJSPromise(
- Node* context, v8::Promise::PromiseState status, Node* result) {
+TNode<JSPromise> PromiseBuiltinsAssembler::AllocateAndSetJSPromise(
+ TNode<Context> context, v8::Promise::PromiseState status,
+ TNode<Object> result) {
DCHECK_NE(Promise::kPending, status);
- Node* const instance = AllocateJSPromise(context);
+ const TNode<JSPromise> instance = AllocateJSPromise(context);
StoreObjectFieldNoWriteBarrier(instance, JSPromise::kReactionsOrResultOffset,
result);
STATIC_ASSERT(JSPromise::kStatusShift == 0);
@@ -97,22 +98,23 @@ Node* PromiseBuiltinsAssembler::AllocateAndSetJSPromise(
return instance;
}
-std::pair<Node*, Node*>
+std::pair<TNode<JSFunction>, TNode<JSFunction>>
PromiseBuiltinsAssembler::CreatePromiseResolvingFunctions(
- Node* promise, Node* debug_event, Node* native_context) {
- Node* const promise_context = CreatePromiseResolvingFunctionsContext(
+ TNode<JSPromise> promise, TNode<Object> debug_event,
+ TNode<NativeContext> native_context) {
+ const TNode<Context> promise_context = CreatePromiseResolvingFunctionsContext(
promise, debug_event, native_context);
- TNode<Object> const map = LoadContextElement(
- native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX);
- TNode<Object> const resolve_info = LoadContextElement(
+ const TNode<Map> map = CAST(LoadContextElement(
+ native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX));
+ const TNode<SharedFunctionInfo> resolve_info = CAST(LoadContextElement(
native_context,
- Context::PROMISE_CAPABILITY_DEFAULT_RESOLVE_SHARED_FUN_INDEX);
- Node* const resolve =
+ Context::PROMISE_CAPABILITY_DEFAULT_RESOLVE_SHARED_FUN_INDEX));
+ const TNode<JSFunction> resolve =
AllocateFunctionWithMapAndContext(map, resolve_info, promise_context);
- TNode<Object> const reject_info = LoadContextElement(
+ const TNode<SharedFunctionInfo> reject_info = CAST(LoadContextElement(
native_context,
- Context::PROMISE_CAPABILITY_DEFAULT_REJECT_SHARED_FUN_INDEX);
- Node* const reject =
+ Context::PROMISE_CAPABILITY_DEFAULT_REJECT_SHARED_FUN_INDEX));
+ const TNode<JSFunction> reject =
AllocateFunctionWithMapAndContext(map, reject_info, promise_context);
return std::make_pair(resolve, reject);
}
@@ -196,7 +198,7 @@ TF_BUILTIN(NewPromiseCapability, PromiseBuiltinsAssembler) {
BIND(&if_fast_promise_capability);
{
- Node* promise =
+ TNode<JSPromise> promise =
AllocateAndInitJSPromise(native_context, UndefinedConstant());
Node* resolve = nullptr;
@@ -226,14 +228,15 @@ TF_BUILTIN(NewPromiseCapability, PromiseBuiltinsAssembler) {
StoreObjectFieldRoot(capability, PromiseCapability::kRejectOffset,
RootIndex::kUndefinedValue);
- Node* executor_context =
- CreatePromiseGetCapabilitiesExecutorContext(capability, native_context);
- TNode<Object> executor_info = LoadContextElement(
- native_context, Context::PROMISE_GET_CAPABILITIES_EXECUTOR_SHARED_FUN);
- TNode<Object> function_map = LoadContextElement(
- native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX);
- TNode<JSFunction> executor = CAST(AllocateFunctionWithMapAndContext(
- function_map, executor_info, executor_context));
+ TNode<Context> executor_context =
+ CAST(CreatePromiseGetCapabilitiesExecutorContext(capability,
+ native_context));
+ TNode<SharedFunctionInfo> executor_info = CAST(LoadContextElement(
+ native_context, Context::PROMISE_GET_CAPABILITIES_EXECUTOR_SHARED_FUN));
+ TNode<Map> function_map = CAST(LoadContextElement(
+ native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX));
+ TNode<JSFunction> executor = AllocateFunctionWithMapAndContext(
+ function_map, executor_info, executor_context);
TNode<JSReceiver> promise =
Construct(native_context, CAST(constructor), executor);
@@ -258,14 +261,14 @@ TF_BUILTIN(NewPromiseCapability, PromiseBuiltinsAssembler) {
ThrowTypeError(context, MessageTemplate::kPromiseNonCallable);
}
-Node* PromiseBuiltinsAssembler::CreatePromiseContext(Node* native_context,
- int slots) {
+TNode<Context> PromiseBuiltinsAssembler::CreatePromiseContext(
+ TNode<NativeContext> native_context, int slots) {
DCHECK_GE(slots, Context::MIN_CONTEXT_SLOTS);
TNode<HeapObject> const context =
AllocateInNewSpace(FixedArray::SizeFor(slots));
InitializeFunctionContext(native_context, context, slots);
- return context;
+ return CAST(context);
}
Node* PromiseBuiltinsAssembler::CreatePromiseAllResolveElementContext(
@@ -278,8 +281,8 @@ Node* PromiseBuiltinsAssembler::CreatePromiseAllResolveElementContext(
TNode<JSArray> values_array = AllocateJSArray(
PACKED_ELEMENTS, array_map, IntPtrConstant(0), SmiConstant(0));
- Node* const context = CreatePromiseContext(
- native_context, PromiseBuiltins::kPromiseAllResolveElementLength);
+ TNode<Context> const context = CreatePromiseContext(
+ CAST(native_context), PromiseBuiltins::kPromiseAllResolveElementLength);
StoreContextElementNoWriteBarrier(
context, PromiseBuiltins::kPromiseAllResolveElementRemainingSlot,
SmiConstant(1));
@@ -301,12 +304,12 @@ PromiseBuiltinsAssembler::CreatePromiseAllResolveElementFunction(
index, SmiConstant(PropertyArray::HashField::kMax)));
CSA_ASSERT(this, IsNativeContext(native_context));
- TNode<Object> const map = LoadContextElement(
- native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX);
- TNode<Object> const resolve_info =
- LoadContextElement(native_context, slot_index);
+ const TNode<Map> map = CAST(LoadContextElement(
+ native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX));
+ const TNode<SharedFunctionInfo> resolve_info =
+ CAST(LoadContextElement(native_context, slot_index));
TNode<JSFunction> resolve =
- Cast(AllocateFunctionWithMapAndContext(map, resolve_info, context));
+ AllocateFunctionWithMapAndContext(map, resolve_info, CAST(context));
STATIC_ASSERT(PropertyArray::kNoHashSentinel == 0);
StoreObjectFieldNoWriteBarrier(resolve, JSFunction::kPropertiesOrHashOffset,
@@ -315,9 +318,10 @@ PromiseBuiltinsAssembler::CreatePromiseAllResolveElementFunction(
return resolve;
}
-Node* PromiseBuiltinsAssembler::CreatePromiseResolvingFunctionsContext(
- Node* promise, Node* debug_event, Node* native_context) {
- Node* const context = CreatePromiseContext(
+TNode<Context> PromiseBuiltinsAssembler::CreatePromiseResolvingFunctionsContext(
+ TNode<JSPromise> promise, TNode<Object> debug_event,
+ TNode<NativeContext> native_context) {
+ const TNode<Context> context = CreatePromiseContext(
native_context, PromiseBuiltins::kPromiseContextLength);
StoreContextElementNoWriteBarrier(context, PromiseBuiltins::kPromiseSlot,
promise);
@@ -331,7 +335,8 @@ Node* PromiseBuiltinsAssembler::CreatePromiseResolvingFunctionsContext(
Node* PromiseBuiltinsAssembler::CreatePromiseGetCapabilitiesExecutorContext(
Node* promise_capability, Node* native_context) {
int kContextLength = PromiseBuiltins::kCapabilitiesContextLength;
- Node* context = CreatePromiseContext(native_context, kContextLength);
+ TNode<Context> context =
+ CreatePromiseContext(CAST(native_context), kContextLength);
StoreContextElementNoWriteBarrier(context, PromiseBuiltins::kCapabilitySlot,
promise_capability);
return context;
@@ -386,14 +391,12 @@ void PromiseBuiltinsAssembler::PromiseSetHandledHint(Node* promise) {
// ES #sec-performpromisethen
void PromiseBuiltinsAssembler::PerformPromiseThen(
- Node* context, Node* promise, Node* on_fulfilled, Node* on_rejected,
- Node* result_promise_or_capability) {
- CSA_ASSERT(this, TaggedIsNotSmi(promise));
- CSA_ASSERT(this, IsJSPromise(promise));
+ TNode<Context> context, TNode<JSPromise> promise,
+ TNode<HeapObject> on_fulfilled, TNode<HeapObject> on_rejected,
+ TNode<HeapObject> result_promise_or_capability) {
CSA_ASSERT(this,
Word32Or(IsCallable(on_fulfilled), IsUndefined(on_fulfilled)));
CSA_ASSERT(this, Word32Or(IsCallable(on_rejected), IsUndefined(on_rejected)));
- CSA_ASSERT(this, TaggedIsNotSmi(result_promise_or_capability));
CSA_ASSERT(
this,
Word32Or(Word32Or(IsJSPromise(result_promise_or_capability),
@@ -411,9 +414,9 @@ void PromiseBuiltinsAssembler::PerformPromiseThen(
// PromiseReaction holding both the onFulfilled and onRejected callbacks.
// Once the {promise} is resolved we decide on the concrete handler to
// push onto the microtask queue.
- TNode<Object> const promise_reactions =
+ const TNode<Object> promise_reactions =
LoadObjectField(promise, JSPromise::kReactionsOrResultOffset);
- Node* const reaction =
+ const TNode<PromiseReaction> reaction =
AllocatePromiseReaction(promise_reactions, result_promise_or_capability,
on_fulfilled, on_rejected);
StoreObjectField(promise, JSPromise::kReactionsOrResultOffset, reaction);
@@ -422,10 +425,9 @@ void PromiseBuiltinsAssembler::PerformPromiseThen(
BIND(&if_notpending);
{
- VARIABLE(var_map, MachineRepresentation::kTagged);
- VARIABLE(var_handler, MachineRepresentation::kTagged);
- VARIABLE(var_handler_context, MachineRepresentation::kTagged,
- UndefinedConstant());
+ TVARIABLE(Map, var_map);
+ TVARIABLE(HeapObject, var_handler);
+ TVARIABLE(Object, var_handler_context, UndefinedConstant());
Label if_fulfilled(this), if_rejected(this, Label::kDeferred),
enqueue(this);
Branch(IsPromiseStatus(status, v8::Promise::kFulfilled), &if_fulfilled,
@@ -433,15 +435,15 @@ void PromiseBuiltinsAssembler::PerformPromiseThen(
BIND(&if_fulfilled);
{
- var_map.Bind(PromiseFulfillReactionJobTaskMapConstant());
- var_handler.Bind(on_fulfilled);
+ var_map = PromiseFulfillReactionJobTaskMapConstant();
+ var_handler = on_fulfilled;
Label use_fallback(this, Label::kDeferred), done(this);
ExtractHandlerContext(on_fulfilled, &var_handler_context);
Branch(IsUndefined(var_handler_context.value()), &use_fallback, &done);
BIND(&use_fallback);
- var_handler_context.Bind(context);
+ var_handler_context = context;
ExtractHandlerContext(on_rejected, &var_handler_context);
Goto(&done);
@@ -452,15 +454,15 @@ void PromiseBuiltinsAssembler::PerformPromiseThen(
BIND(&if_rejected);
{
CSA_ASSERT(this, IsPromiseStatus(status, v8::Promise::kRejected));
- var_map.Bind(PromiseRejectReactionJobTaskMapConstant());
- var_handler.Bind(on_rejected);
+ var_map = PromiseRejectReactionJobTaskMapConstant();
+ var_handler = on_rejected;
Label use_fallback(this, Label::kDeferred), done(this);
ExtractHandlerContext(on_rejected, &var_handler_context);
Branch(IsUndefined(var_handler_context.value()), &use_fallback, &done);
BIND(&use_fallback);
- var_handler_context.Bind(context);
+ var_handler_context = context;
ExtractHandlerContext(on_fulfilled, &var_handler_context);
Goto(&done);
BIND(&done);
@@ -474,8 +476,8 @@ void PromiseBuiltinsAssembler::PerformPromiseThen(
{
TNode<Object> argument =
LoadObjectField(promise, JSPromise::kReactionsOrResultOffset);
- Node* microtask = AllocatePromiseReactionJobTask(
- var_map.value(), var_handler_context.value(), argument,
+ TNode<PromiseReactionJobTask> microtask = AllocatePromiseReactionJobTask(
+ var_map.value(), CAST(var_handler_context.value()), argument,
var_handler.value(), result_promise_or_capability);
CallBuiltin(Builtins::kEnqueueMicrotask, var_handler_context.value(),
microtask);
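
The hunk above types the settled-promise path: the chosen reaction map and handler are packed into a PromiseReactionJobTask and handed to EnqueueMicrotask. A quick JavaScript illustration of the observable ordering this guarantees (not part of the patch):

    // Reactions never run synchronously; they always go through the
    // microtask queue, even when the promise is already settled.
    const p = Promise.resolve(42);
    p.then(v => console.log('then', v));
    console.log('sync');
    // Output: "sync", then "then 42"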
@@ -489,13 +491,15 @@ void PromiseBuiltinsAssembler::PerformPromiseThen(
// ES #sec-performpromisethen
TF_BUILTIN(PerformPromiseThen, PromiseBuiltinsAssembler) {
- Node* const context = Parameter(Descriptor::kContext);
- Node* const promise = Parameter(Descriptor::kPromise);
- Node* const on_fulfilled = Parameter(Descriptor::kOnFulfilled);
- Node* const on_rejected = Parameter(Descriptor::kOnRejected);
- Node* const result_promise = Parameter(Descriptor::kResultPromise);
+ const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ const TNode<JSPromise> promise = CAST(Parameter(Descriptor::kPromise));
+ const TNode<HeapObject> on_fulfilled =
+ CAST(Parameter(Descriptor::kOnFulfilled));
+ const TNode<HeapObject> on_rejected =
+ CAST(Parameter(Descriptor::kOnRejected));
+ const TNode<HeapObject> result_promise =
+ CAST(Parameter(Descriptor::kResultPromise));
- CSA_ASSERT(this, TaggedIsNotSmi(result_promise));
CSA_ASSERT(
this, Word32Or(IsJSPromise(result_promise), IsUndefined(result_promise)));
@@ -504,9 +508,9 @@ TF_BUILTIN(PerformPromiseThen, PromiseBuiltinsAssembler) {
Return(result_promise);
}
-Node* PromiseBuiltinsAssembler::AllocatePromiseReaction(
- Node* next, Node* promise_or_capability, Node* fulfill_handler,
- Node* reject_handler) {
+TNode<PromiseReaction> PromiseBuiltinsAssembler::AllocatePromiseReaction(
+ TNode<Object> next, TNode<HeapObject> promise_or_capability,
+ TNode<HeapObject> fulfill_handler, TNode<HeapObject> reject_handler) {
TNode<HeapObject> const reaction = Allocate(PromiseReaction::kSize);
StoreMapNoWriteBarrier(reaction, RootIndex::kPromiseReactionMap);
StoreObjectFieldNoWriteBarrier(reaction, PromiseReaction::kNextOffset, next);
@@ -517,12 +521,13 @@ Node* PromiseBuiltinsAssembler::AllocatePromiseReaction(
reaction, PromiseReaction::kFulfillHandlerOffset, fulfill_handler);
StoreObjectFieldNoWriteBarrier(
reaction, PromiseReaction::kRejectHandlerOffset, reject_handler);
- return reaction;
+ return CAST(reaction);
}
-Node* PromiseBuiltinsAssembler::AllocatePromiseReactionJobTask(
- Node* map, Node* context, Node* argument, Node* handler,
- Node* promise_or_capability) {
+TNode<PromiseReactionJobTask>
+PromiseBuiltinsAssembler::AllocatePromiseReactionJobTask(
+ TNode<Map> map, TNode<Context> context, TNode<Object> argument,
+ TNode<HeapObject> handler, TNode<HeapObject> promise_or_capability) {
TNode<HeapObject> const microtask =
Allocate(PromiseReactionJobTask::kSizeOfAllPromiseReactionJobTasks);
StoreMapNoWriteBarrier(microtask, map);
@@ -535,12 +540,14 @@ Node* PromiseBuiltinsAssembler::AllocatePromiseReactionJobTask(
StoreObjectFieldNoWriteBarrier(
microtask, PromiseReactionJobTask::kPromiseOrCapabilityOffset,
promise_or_capability);
- return microtask;
+ return CAST(microtask);
}
-Node* PromiseBuiltinsAssembler::AllocatePromiseResolveThenableJobTask(
- Node* promise_to_resolve, Node* then, Node* thenable, Node* context) {
- TNode<HeapObject> const microtask =
+TNode<PromiseResolveThenableJobTask>
+PromiseBuiltinsAssembler::AllocatePromiseResolveThenableJobTask(
+ TNode<JSPromise> promise_to_resolve, TNode<JSReceiver> then,
+ TNode<JSReceiver> thenable, TNode<Context> context) {
+ const TNode<HeapObject> microtask =
Allocate(PromiseResolveThenableJobTask::kSize);
StoreMapNoWriteBarrier(microtask,
RootIndex::kPromiseResolveThenableJobTaskMap);
@@ -553,7 +560,7 @@ Node* PromiseBuiltinsAssembler::AllocatePromiseResolveThenableJobTask(
microtask, PromiseResolveThenableJobTask::kThenOffset, then);
StoreObjectFieldNoWriteBarrier(
microtask, PromiseResolveThenableJobTask::kThenableOffset, thenable);
- return microtask;
+ return CAST(microtask);
}
// ES #sec-triggerpromisereactions
@@ -1003,7 +1010,7 @@ TF_BUILTIN(PromiseConstructor, PromiseBuiltinsAssembler) {
BIND(&if_targetisnotmodified);
{
- Node* const instance = AllocateAndInitJSPromise(context);
+ TNode<JSPromise> const instance = AllocateAndInitJSPromise(context);
var_result.Bind(instance);
Goto(&debug_push);
}
@@ -1035,7 +1042,7 @@ TF_BUILTIN(PromiseConstructor, PromiseBuiltinsAssembler) {
Node *resolve, *reject;
std::tie(resolve, reject) = CreatePromiseResolvingFunctions(
- var_result.value(), TrueConstant(), native_context);
+ CAST(var_result.value()), TrueConstant(), native_context);
Node* const maybe_exception = CallJS(
CodeFactory::Call(isolate, ConvertReceiverMode::kNullOrUndefined),
@@ -1080,8 +1087,8 @@ TF_BUILTIN(PromiseConstructor, PromiseBuiltinsAssembler) {
// V8 Extras: v8.createPromise(parent)
TF_BUILTIN(PromiseInternalConstructor, PromiseBuiltinsAssembler) {
- Node* const parent = Parameter(Descriptor::kParent);
- Node* const context = Parameter(Descriptor::kContext);
+ const TNode<Object> parent = CAST(Parameter(Descriptor::kParent));
+ const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
Return(AllocateAndInitJSPromise(context, parent));
}
@@ -1127,14 +1134,15 @@ TF_BUILTIN(PromiseInternalResolve, PromiseBuiltinsAssembler) {
// Promise.prototype.then ( onFulfilled, onRejected )
TF_BUILTIN(PromisePrototypeThen, PromiseBuiltinsAssembler) {
// 1. Let promise be the this value.
- Node* const promise = Parameter(Descriptor::kReceiver);
- Node* const on_fulfilled = Parameter(Descriptor::kOnFulfilled);
- Node* const on_rejected = Parameter(Descriptor::kOnRejected);
- Node* const context = Parameter(Descriptor::kContext);
+ const TNode<Object> maybe_promise = CAST(Parameter(Descriptor::kReceiver));
+ const TNode<Object> on_fulfilled = CAST(Parameter(Descriptor::kOnFulfilled));
+ const TNode<Object> on_rejected = CAST(Parameter(Descriptor::kOnRejected));
+ const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
// 2. If IsPromise(promise) is false, throw a TypeError exception.
- ThrowIfNotInstanceType(context, promise, JS_PROMISE_TYPE,
+ ThrowIfNotInstanceType(context, maybe_promise, JS_PROMISE_TYPE,
"Promise.prototype.then");
+ TNode<JSPromise> js_promise = CAST(maybe_promise);
// 3. Let C be ? SpeciesConstructor(promise, %Promise%).
Label fast_promise_capability(this), slow_constructor(this, Label::kDeferred),
@@ -1142,26 +1150,27 @@ TF_BUILTIN(PromisePrototypeThen, PromiseBuiltinsAssembler) {
TNode<NativeContext> const native_context = LoadNativeContext(context);
TNode<JSFunction> promise_fun =
CAST(LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX));
- TNode<Map> const promise_map = LoadMap(promise);
+ TNode<Map> const promise_map = LoadMap(js_promise);
BranchIfPromiseSpeciesLookupChainIntact(
native_context, promise_map, &fast_promise_capability, &slow_constructor);
BIND(&slow_constructor);
TNode<JSReceiver> constructor =
- SpeciesConstructor(native_context, promise, promise_fun);
+ SpeciesConstructor(native_context, js_promise, promise_fun);
Branch(TaggedEqual(constructor, promise_fun), &fast_promise_capability,
&slow_promise_capability);
// 4. Let resultCapability be ? NewPromiseCapability(C).
Label perform_promise_then(this);
- VARIABLE(var_result_promise, MachineRepresentation::kTagged);
- VARIABLE(var_result_promise_or_capability, MachineRepresentation::kTagged);
+ TVARIABLE(Object, var_result_promise);
+ TVARIABLE(HeapObject, var_result_promise_or_capability);
BIND(&fast_promise_capability);
{
- Node* const result_promise = AllocateAndInitJSPromise(context, promise);
- var_result_promise_or_capability.Bind(result_promise);
- var_result_promise.Bind(result_promise);
+ const TNode<JSPromise> result_promise =
+ AllocateAndInitJSPromise(context, js_promise);
+ var_result_promise_or_capability = result_promise;
+ var_result_promise = result_promise;
Goto(&perform_promise_then);
}
@@ -1170,9 +1179,9 @@ TF_BUILTIN(PromisePrototypeThen, PromiseBuiltinsAssembler) {
TNode<Oddball> const debug_event = TrueConstant();
TNode<PromiseCapability> const capability = CAST(CallBuiltin(
Builtins::kNewPromiseCapability, context, constructor, debug_event));
- var_result_promise.Bind(
- LoadObjectField(capability, PromiseCapability::kPromiseOffset));
- var_result_promise_or_capability.Bind(capability);
+ var_result_promise =
+ LoadObjectField(capability, PromiseCapability::kPromiseOffset);
+ var_result_promise_or_capability = capability;
Goto(&perform_promise_then);
}
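
BranchIfPromiseSpeciesLookupChainIntact selects between the fast path (direct JSPromise allocation) and the generic NewPromiseCapability path driven by SpeciesConstructor. The species lookup is observable from JavaScript (illustration, not part of the patch):

    class MyPromise extends Promise {}
    const q = MyPromise.resolve(1).then(x => x + 1);
    // then() consults the species constructor, so the chained promise
    // is a MyPromise, produced via the NewPromiseCapability path:
    console.log(q instanceof MyPromise); // true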
@@ -1187,30 +1196,30 @@ TF_BUILTIN(PromisePrototypeThen, PromiseBuiltinsAssembler) {
// 3. If IsCallable(onFulfilled) is false, then
// a. Set onFulfilled to undefined.
- VARIABLE(var_on_fulfilled, MachineRepresentation::kTagged, on_fulfilled);
+ TVARIABLE(Object, var_on_fulfilled, on_fulfilled);
Label if_fulfilled_done(this), if_fulfilled_notcallable(this);
GotoIf(TaggedIsSmi(on_fulfilled), &if_fulfilled_notcallable);
- Branch(IsCallable(on_fulfilled), &if_fulfilled_done,
+ Branch(IsCallable(CAST(on_fulfilled)), &if_fulfilled_done,
&if_fulfilled_notcallable);
BIND(&if_fulfilled_notcallable);
- var_on_fulfilled.Bind(UndefinedConstant());
+ var_on_fulfilled = UndefinedConstant();
Goto(&if_fulfilled_done);
BIND(&if_fulfilled_done);
// 4. If IsCallable(onRejected) is false, then
// a. Set onRejected to undefined.
- VARIABLE(var_on_rejected, MachineRepresentation::kTagged, on_rejected);
+ TVARIABLE(Object, var_on_rejected, on_rejected);
Label if_rejected_done(this), if_rejected_notcallable(this);
GotoIf(TaggedIsSmi(on_rejected), &if_rejected_notcallable);
- Branch(IsCallable(on_rejected), &if_rejected_done,
+ Branch(IsCallable(CAST(on_rejected)), &if_rejected_done,
&if_rejected_notcallable);
BIND(&if_rejected_notcallable);
- var_on_rejected.Bind(UndefinedConstant());
+ var_on_rejected = UndefinedConstant();
Goto(&if_rejected_done);
BIND(&if_rejected_done);
- PerformPromiseThen(context, promise, var_on_fulfilled.value(),
- var_on_rejected.value(),
+ PerformPromiseThen(context, js_promise, CAST(var_on_fulfilled.value()),
+ CAST(var_on_rejected.value()),
var_result_promise_or_capability.value());
Return(var_result_promise.value());
}
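
Steps 3 and 4 above replace non-callable handlers with undefined before calling PerformPromiseThen, which makes settlements fall through such reactions. In JavaScript terms (illustration, not part of the patch):

    // Non-callable handlers are coerced to undefined, so the
    // settlement simply propagates to the next reaction:
    Promise.resolve('ok').then(null, null)
      .then(v => console.log(v));            // "ok"
    Promise.reject(new Error('boom')).then('not callable')
      .catch(e => console.log(e.message));   // "boom"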
@@ -1522,7 +1531,7 @@ TF_BUILTIN(PromiseResolve, PromiseBuiltinsAssembler) {
// create NewPromiseCapability.
BIND(&if_nativepromise);
{
- Node* const result = AllocateAndInitJSPromise(context);
+ TNode<JSPromise> const result = AllocateAndInitJSPromise(context);
CallBuiltin(Builtins::kResolvePromise, context, result, value);
Return(result);
}
@@ -1592,7 +1601,7 @@ TF_BUILTIN(PromiseReject, PromiseBuiltinsAssembler) {
BIND(&if_nativepromise);
{
- Node* const promise =
+ TNode<JSPromise> const promise =
AllocateAndSetJSPromise(context, v8::Promise::kRejected, reason);
CallRuntime(Runtime::kPromiseRejectEventFromStack, context, promise,
reason);
@@ -1621,21 +1630,21 @@ TF_BUILTIN(PromiseReject, PromiseBuiltinsAssembler) {
std::pair<Node*, Node*> PromiseBuiltinsAssembler::CreatePromiseFinallyFunctions(
Node* on_finally, Node* constructor, Node* native_context) {
- Node* const promise_context = CreatePromiseContext(
- native_context, PromiseBuiltins::kPromiseFinallyContextLength);
+ const TNode<Context> promise_context = CreatePromiseContext(
+ CAST(native_context), PromiseBuiltins::kPromiseFinallyContextLength);
StoreContextElementNoWriteBarrier(
promise_context, PromiseBuiltins::kOnFinallySlot, on_finally);
StoreContextElementNoWriteBarrier(
promise_context, PromiseBuiltins::kConstructorSlot, constructor);
- TNode<Object> const map = LoadContextElement(
- native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX);
- TNode<Object> const then_finally_info = LoadContextElement(
- native_context, Context::PROMISE_THEN_FINALLY_SHARED_FUN);
- Node* const then_finally = AllocateFunctionWithMapAndContext(
+ const TNode<Map> map = CAST(LoadContextElement(
+ native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX));
+ const TNode<SharedFunctionInfo> then_finally_info = CAST(LoadContextElement(
+ native_context, Context::PROMISE_THEN_FINALLY_SHARED_FUN));
+ TNode<JSFunction> const then_finally = AllocateFunctionWithMapAndContext(
map, then_finally_info, promise_context);
- TNode<Object> const catch_finally_info = LoadContextElement(
- native_context, Context::PROMISE_CATCH_FINALLY_SHARED_FUN);
- Node* const catch_finally = AllocateFunctionWithMapAndContext(
+ const TNode<SharedFunctionInfo> catch_finally_info = CAST(LoadContextElement(
+ native_context, Context::PROMISE_CATCH_FINALLY_SHARED_FUN));
+ TNode<JSFunction> const catch_finally = AllocateFunctionWithMapAndContext(
map, catch_finally_info, promise_context);
return std::make_pair(then_finally, catch_finally);
}
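
CreatePromiseFinallyFunctions builds the paired then/catch closures behind Promise.prototype.finally; together with the value-thunk and thrower helpers below, they make finally pass the original settlement through unchanged (illustration, not part of the patch):

    Promise.resolve('v')
      .finally(() => console.log('cleanup')) // runs for fulfill and reject alike
      .then(v => console.log(v));            // "v" — the value thunk restores it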
@@ -1650,15 +1659,16 @@ TF_BUILTIN(PromiseValueThunkFinally, PromiseBuiltinsAssembler) {
Node* PromiseBuiltinsAssembler::CreateValueThunkFunction(Node* value,
Node* native_context) {
- Node* const value_thunk_context = CreatePromiseContext(
- native_context, PromiseBuiltins::kPromiseValueThunkOrReasonContextLength);
+ const TNode<Context> value_thunk_context = CreatePromiseContext(
+ CAST(native_context),
+ PromiseBuiltins::kPromiseValueThunkOrReasonContextLength);
StoreContextElementNoWriteBarrier(value_thunk_context,
PromiseBuiltins::kValueSlot, value);
- TNode<Object> const map = LoadContextElement(
- native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX);
- TNode<Object> const value_thunk_info = LoadContextElement(
- native_context, Context::PROMISE_VALUE_THUNK_FINALLY_SHARED_FUN);
- Node* const value_thunk = AllocateFunctionWithMapAndContext(
+ const TNode<Map> map = CAST(LoadContextElement(
+ native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX));
+ const TNode<SharedFunctionInfo> value_thunk_info = CAST(LoadContextElement(
+ native_context, Context::PROMISE_VALUE_THUNK_FINALLY_SHARED_FUN));
+ TNode<JSFunction> const value_thunk = AllocateFunctionWithMapAndContext(
map, value_thunk_info, value_thunk_context);
return value_thunk;
}
@@ -1711,15 +1721,16 @@ TF_BUILTIN(PromiseThrowerFinally, PromiseBuiltinsAssembler) {
Node* PromiseBuiltinsAssembler::CreateThrowerFunction(Node* reason,
Node* native_context) {
- Node* const thrower_context = CreatePromiseContext(
- native_context, PromiseBuiltins::kPromiseValueThunkOrReasonContextLength);
+ const TNode<Context> thrower_context = CreatePromiseContext(
+ CAST(native_context),
+ PromiseBuiltins::kPromiseValueThunkOrReasonContextLength);
StoreContextElementNoWriteBarrier(thrower_context,
PromiseBuiltins::kValueSlot, reason);
- TNode<Object> const map = LoadContextElement(
- native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX);
- TNode<Object> const thrower_info = LoadContextElement(
- native_context, Context::PROMISE_THROWER_FINALLY_SHARED_FUN);
- Node* const thrower =
+ const TNode<Map> map = CAST(LoadContextElement(
+ native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX));
+ const TNode<SharedFunctionInfo> thrower_info = CAST(LoadContextElement(
+ native_context, Context::PROMISE_THROWER_FINALLY_SHARED_FUN));
+ TNode<JSFunction> const thrower =
AllocateFunctionWithMapAndContext(map, thrower_info, thrower_context);
return thrower;
}
@@ -1919,7 +1930,7 @@ TF_BUILTIN(ResolvePromise, PromiseBuiltinsAssembler) {
Label do_enqueue(this), if_fulfill(this), if_reject(this, Label::kDeferred),
if_runtime(this, Label::kDeferred);
TVARIABLE(Object, var_reason);
- TVARIABLE(Object, var_then);
+ TVARIABLE(JSReceiver, var_then);
// If promise hook is enabled or the debugger is active, let
// the runtime handle this operation, which greatly reduces
@@ -1955,7 +1966,8 @@ TF_BUILTIN(ResolvePromise, PromiseBuiltinsAssembler) {
BIND(&if_fast);
{
// The {resolution} is a native Promise in this case.
- var_then = LoadContextElement(native_context, Context::PROMISE_THEN_INDEX);
+ var_then =
+ CAST(LoadContextElement(native_context, Context::PROMISE_THEN_INDEX));
Goto(&do_enqueue);
}
@@ -1987,7 +1999,7 @@ TF_BUILTIN(ResolvePromise, PromiseBuiltinsAssembler) {
GotoIf(TaggedIsSmi(then), &if_fulfill);
TNode<Map> const then_map = LoadMap(CAST(then));
GotoIfNot(IsCallableMap(then_map), &if_fulfill);
- var_then = then;
+ var_then = CAST(then);
Goto(&do_enqueue);
}
@@ -1995,8 +2007,9 @@ TF_BUILTIN(ResolvePromise, PromiseBuiltinsAssembler) {
{
// 12. Perform EnqueueJob("PromiseJobs", PromiseResolveThenableJob,
// «promise, resolution, thenAction»).
- Node* const task = AllocatePromiseResolveThenableJobTask(
- promise, var_then.value(), resolution, native_context);
+ const TNode<PromiseResolveThenableJobTask> task =
+ AllocatePromiseResolveThenableJobTask(promise, var_then.value(),
+ CAST(resolution), native_context);
TailCallBuiltin(Builtins::kEnqueueMicrotask, native_context, task);
}
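
When the resolution carries a callable then, ResolvePromise defers to it by enqueuing a PromiseResolveThenableJobTask rather than calling it inline. That deferral is observable (illustration, not part of the patch):

    // The thenable's then() runs on its own microtask, never
    // synchronously inside resolve():
    const thenable = { then(resolve) { resolve('late'); } };
    Promise.resolve(thenable).then(v => console.log(v));
    console.log('sync'); // "sync" first, then "late"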
@@ -2150,8 +2163,9 @@ Node* PromiseBuiltinsAssembler::PerformPromiseAll(
// Register the PromiseReaction immediately on the {next_value}, not
// passing any chained promise since neither async_hooks nor DevTools
// are enabled, so there's no use of the resulting promise.
- PerformPromiseThen(native_context, next_value, resolve_element_fun,
- reject_element_fun, UndefinedConstant());
+ PerformPromiseThen(native_context, CAST(next_value),
+ CAST(resolve_element_fun), CAST(reject_element_fun),
+ UndefinedConstant());
Goto(&loop);
}
diff --git a/deps/v8/src/builtins/builtins-promise-gen.h b/deps/v8/src/builtins/builtins-promise-gen.h
index 633e3321aa..b2ae8fe876 100644
--- a/deps/v8/src/builtins/builtins-promise-gen.h
+++ b/deps/v8/src/builtins/builtins-promise-gen.h
@@ -22,29 +22,34 @@ class V8_EXPORT_PRIVATE PromiseBuiltinsAssembler : public CodeStubAssembler {
//
// This uses undefined as the parent promise for the promise init
// hook.
- Node* AllocateAndInitJSPromise(Node* context);
+ TNode<JSPromise> AllocateAndInitJSPromise(TNode<Context> context);
// This uses the given parent as the parent promise for the promise
// init hook.
- Node* AllocateAndInitJSPromise(Node* context, Node* parent);
+ TNode<JSPromise> AllocateAndInitJSPromise(TNode<Context> context,
+ TNode<Object> parent);
// This allocates and initializes a promise with the given state and
// fields.
- Node* AllocateAndSetJSPromise(Node* context, v8::Promise::PromiseState status,
- Node* result);
+ TNode<JSPromise> AllocateAndSetJSPromise(TNode<Context> context,
+ v8::Promise::PromiseState status,
+ TNode<Object> result);
- Node* AllocatePromiseReaction(Node* next, Node* promise_or_capability,
- Node* fulfill_handler, Node* reject_handler);
+ TNode<PromiseReaction> AllocatePromiseReaction(
+ TNode<Object> next, TNode<HeapObject> promise_or_capability,
+ TNode<HeapObject> fulfill_handler, TNode<HeapObject> reject_handler);
- Node* AllocatePromiseReactionJobTask(Node* map, Node* context, Node* argument,
- Node* handler,
- Node* promise_or_capability);
- Node* AllocatePromiseResolveThenableJobTask(Node* promise_to_resolve,
- Node* then, Node* thenable,
- Node* context);
+ TNode<PromiseReactionJobTask> AllocatePromiseReactionJobTask(
+ TNode<Map> map, TNode<Context> context, TNode<Object> argument,
+ TNode<HeapObject> handler, TNode<HeapObject> promise_or_capability);
- std::pair<Node*, Node*> CreatePromiseResolvingFunctions(Node* promise,
- Node* debug_event,
- Node* native_context);
+ TNode<PromiseResolveThenableJobTask> AllocatePromiseResolveThenableJobTask(
+ TNode<JSPromise> promise_to_resolve, TNode<JSReceiver> then,
+ TNode<JSReceiver> thenable, TNode<Context> context);
+
+ std::pair<TNode<JSFunction>, TNode<JSFunction>>
+ CreatePromiseResolvingFunctions(TNode<JSPromise> promise,
+ TNode<Object> debug_event,
+ TNode<NativeContext> native_context);
Node* PromiseHasHandler(Node* promise);
@@ -62,8 +67,9 @@ class V8_EXPORT_PRIVATE PromiseBuiltinsAssembler : public CodeStubAssembler {
Node* native_context,
int slot_index);
- Node* CreatePromiseResolvingFunctionsContext(Node* promise, Node* debug_event,
- Node* native_context);
+ TNode<Context> CreatePromiseResolvingFunctionsContext(
+ TNode<JSPromise> promise, TNode<Object> debug_event,
+ TNode<NativeContext> native_context);
Node* CreatePromiseGetCapabilitiesExecutorContext(Node* promise_capability,
Node* native_context);
@@ -74,11 +80,13 @@ class V8_EXPORT_PRIVATE PromiseBuiltinsAssembler : public CodeStubAssembler {
void PromiseSetHasHandler(Node* promise);
void PromiseSetHandledHint(Node* promise);
- void PerformPromiseThen(Node* context, Node* promise, Node* on_fulfilled,
- Node* on_rejected,
- Node* result_promise_or_capability);
+ void PerformPromiseThen(TNode<Context> context, TNode<JSPromise> promise,
+ TNode<HeapObject> on_fulfilled,
+ TNode<HeapObject> on_rejected,
+ TNode<HeapObject> result_promise_or_capability);
- Node* CreatePromiseContext(Node* native_context, int slots);
+ TNode<Context> CreatePromiseContext(TNode<NativeContext> native_context,
+ int slots);
Node* TriggerPromiseReactions(Node* context, Node* promise, Node* result,
PromiseReaction::Type type);
@@ -161,7 +169,7 @@ class V8_EXPORT_PRIVATE PromiseBuiltinsAssembler : public CodeStubAssembler {
v8::Promise::PromiseState expected);
void PromiseSetStatus(Node* promise, v8::Promise::PromiseState status);
- Node* AllocateJSPromise(Node* context);
+ TNode<JSPromise> AllocateJSPromise(TNode<Context> context);
void ExtractHandlerContext(Node* handler, Variable* var_context);
void Generate_PromiseAll(
diff --git a/deps/v8/src/builtins/builtins-proxy-gen.cc b/deps/v8/src/builtins/builtins-proxy-gen.cc
index bb1137735c..71d4e8226f 100644
--- a/deps/v8/src/builtins/builtins-proxy-gen.cc
+++ b/deps/v8/src/builtins/builtins-proxy-gen.cc
@@ -14,7 +14,7 @@
namespace v8 {
namespace internal {
-compiler::TNode<JSProxy> ProxiesCodeStubAssembler::AllocateProxy(
+TNode<JSProxy> ProxiesCodeStubAssembler::AllocateProxy(
TNode<Context> context, TNode<JSReceiver> target,
TNode<JSReceiver> handler) {
VARIABLE(map, MachineRepresentation::kTagged);
@@ -59,7 +59,8 @@ compiler::TNode<JSProxy> ProxiesCodeStubAssembler::AllocateProxy(
}
Node* ProxiesCodeStubAssembler::AllocateJSArrayForCodeStubArguments(
- Node* context, CodeStubArguments& args, Node* argc, ParameterMode mode) {
+ Node* context, const CodeStubArguments& args, Node* argc,
+ ParameterMode mode) {
Comment("AllocateJSArrayForCodeStubArguments");
Label if_empty_array(this), allocate_js_array(this);
@@ -80,7 +81,7 @@ Node* ProxiesCodeStubAssembler::AllocateJSArrayForCodeStubArguments(
GotoIf(SmiGreaterThan(length, SmiConstant(FixedArray::kMaxRegularLength)),
&if_large_object);
- args.ForEach(list, [=, &offset](Node* arg) {
+ args.ForEach(list, [&](TNode<Object> arg) {
StoreNoWriteBarrier(MachineRepresentation::kTagged, allocated_elements,
offset.value(), arg);
Increment(&offset, kTaggedSize);
@@ -89,7 +90,7 @@ Node* ProxiesCodeStubAssembler::AllocateJSArrayForCodeStubArguments(
BIND(&if_large_object);
{
- args.ForEach(list, [=, &offset](Node* arg) {
+ args.ForEach(list, [&](TNode<Object> arg) {
Store(allocated_elements, offset.value(), arg);
Increment(&offset, kTaggedSize);
});
@@ -124,20 +125,19 @@ Node* ProxiesCodeStubAssembler::CreateProxyRevokeFunctionContext(
return context;
}
-compiler::TNode<JSFunction>
-ProxiesCodeStubAssembler::AllocateProxyRevokeFunction(TNode<Context> context,
- TNode<JSProxy> proxy) {
+TNode<JSFunction> ProxiesCodeStubAssembler::AllocateProxyRevokeFunction(
+ TNode<Context> context, TNode<JSProxy> proxy) {
TNode<NativeContext> const native_context = LoadNativeContext(context);
- Node* const proxy_context =
- CreateProxyRevokeFunctionContext(proxy, native_context);
- TNode<Object> const revoke_map = LoadContextElement(
- native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX);
- TNode<Object> const revoke_info =
- LoadContextElement(native_context, Context::PROXY_REVOKE_SHARED_FUN);
+ const TNode<Context> proxy_context =
+ CAST(CreateProxyRevokeFunctionContext(proxy, native_context));
+ const TNode<Map> revoke_map = CAST(LoadContextElement(
+ native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX));
+ const TNode<SharedFunctionInfo> revoke_info = CAST(
+ LoadContextElement(native_context, Context::PROXY_REVOKE_SHARED_FUN));
- return CAST(AllocateFunctionWithMapAndContext(revoke_map, revoke_info,
- proxy_context));
+ return AllocateFunctionWithMapAndContext(revoke_map, revoke_info,
+ proxy_context);
}
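
AllocateProxyRevokeFunction wires a fresh closure over a context slot holding the proxy, which is what Proxy.revocable hands back as revoke (illustration, not part of the patch):

    const { proxy, revoke } = Proxy.revocable({}, {});
    revoke();
    try { proxy.x; } catch (e) {
      console.log(e instanceof TypeError); // true — revoked proxies throw
    }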
TF_BUILTIN(CallProxy, ProxiesCodeStubAssembler) {
diff --git a/deps/v8/src/builtins/builtins-proxy-gen.h b/deps/v8/src/builtins/builtins-proxy-gen.h
index cb51faf575..03b3749bf5 100644
--- a/deps/v8/src/builtins/builtins-proxy-gen.h
+++ b/deps/v8/src/builtins/builtins-proxy-gen.h
@@ -39,10 +39,9 @@ class ProxiesCodeStubAssembler : public CodeStubAssembler {
kProxyContextLength,
};
- Node* AllocateJSArrayForCodeStubArguments(
- Node* context,
- CodeStubArguments& args, // NOLINT(runtime/references)
- Node* argc, ParameterMode mode);
+ Node* AllocateJSArrayForCodeStubArguments(Node* context,
+ const CodeStubArguments& args,
+ Node* argc, ParameterMode mode);
private:
Node* CreateProxyRevokeFunctionContext(Node* proxy, Node* native_context);
diff --git a/deps/v8/src/builtins/builtins-reflect-gen.cc b/deps/v8/src/builtins/builtins-reflect-gen.cc
index 744a443ecc..6cffd6ed55 100644
--- a/deps/v8/src/builtins/builtins-reflect-gen.cc
+++ b/deps/v8/src/builtins/builtins-reflect-gen.cc
@@ -11,12 +11,12 @@ namespace internal {
// ES section #sec-reflect.has
TF_BUILTIN(ReflectHas, CodeStubAssembler) {
- Node* target = Parameter(Descriptor::kTarget);
- Node* key = Parameter(Descriptor::kKey);
+ TNode<Object> target = CAST(Parameter(Descriptor::kTarget));
+ TNode<Object> key = CAST(Parameter(Descriptor::kKey));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- ThrowIfNotJSReceiver(context, CAST(target),
- MessageTemplate::kCalledOnNonObject, "Reflect.has");
+ ThrowIfNotJSReceiver(context, target, MessageTemplate::kCalledOnNonObject,
+ "Reflect.has");
Return(CallBuiltin(Builtins::kHasProperty, context, target, key));
}
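
ReflectHas now type-checks the target before delegating to HasProperty, matching the spec's CalledOnNonObject error (illustration, not part of the patch):

    console.log(Reflect.has({ x: 1 }, 'x')); // true
    try { Reflect.has(1, 'x'); } catch (e) {
      console.log(e instanceof TypeError);   // true — non-object target throws
    }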
diff --git a/deps/v8/src/builtins/builtins-regexp-gen.cc b/deps/v8/src/builtins/builtins-regexp-gen.cc
index f879d70c67..4bc0b6ad74 100644
--- a/deps/v8/src/builtins/builtins-regexp-gen.cc
+++ b/deps/v8/src/builtins/builtins-regexp-gen.cc
@@ -23,8 +23,6 @@ namespace v8 {
namespace internal {
using compiler::Node;
-template <class T>
-using TNode = compiler::TNode<T>;
// Tail calls the regular expression interpreter.
// static
@@ -80,7 +78,8 @@ TNode<RawPtrT> RegExpBuiltinsAssembler::LoadCodeObjectEntry(TNode<Code> code) {
TNode<JSRegExpResult> RegExpBuiltinsAssembler::AllocateRegExpResult(
TNode<Context> context, TNode<Smi> length, TNode<Smi> index,
- TNode<String> input, TNode<FixedArray>* elements_out) {
+ TNode<String> input, TNode<RegExpMatchInfo> match_info,
+ TNode<FixedArray>* elements_out) {
CSA_ASSERT(this, SmiLessThanOrEqual(
length, SmiConstant(JSArray::kMaxFastArrayLength)));
CSA_ASSERT(this, SmiGreaterThan(length, SmiConstant(0)));
@@ -90,9 +89,8 @@ TNode<JSRegExpResult> RegExpBuiltinsAssembler::AllocateRegExpResult(
const ElementsKind elements_kind = PACKED_ELEMENTS;
TNode<Map> map = CAST(LoadContextElement(LoadNativeContext(context),
Context::REGEXP_RESULT_MAP_INDEX));
- Node* no_allocation_site = nullptr;
+ TNode<AllocationSite> no_allocation_site = {};
TNode<IntPtrT> length_intptr = SmiUntag(length);
- TNode<IntPtrT> capacity = length_intptr;
// Note: The returned `elements` may be in young large object space, but
// `array` is guaranteed to be in new space so we could skip write barriers
@@ -100,18 +98,29 @@ TNode<JSRegExpResult> RegExpBuiltinsAssembler::AllocateRegExpResult(
TNode<JSArray> array;
TNode<FixedArrayBase> elements;
std::tie(array, elements) = AllocateUninitializedJSArrayWithElements(
- elements_kind, map, length, no_allocation_site, capacity,
+ elements_kind, map, length, no_allocation_site, length_intptr,
INTPTR_PARAMETERS, kAllowLargeObjectAllocation, JSRegExpResult::kSize);
// Finish result initialization.
TNode<JSRegExpResult> result = CAST(array);
+ // Load undefined value once here to avoid multiple LoadRoots.
+ TNode<Oddball> undefined_value = UncheckedCast<Oddball>(
+ CodeAssembler::LoadRoot(RootIndex::kUndefinedValue));
+
StoreObjectFieldNoWriteBarrier(result, JSRegExpResult::kIndexOffset, index);
// TODO(jgruber,tebbi): Could skip barrier but the MemoryOptimizer complains.
StoreObjectField(result, JSRegExpResult::kInputOffset, input);
StoreObjectFieldNoWriteBarrier(result, JSRegExpResult::kGroupsOffset,
- UndefinedConstant());
+ undefined_value);
+ StoreObjectFieldNoWriteBarrier(result, JSRegExpResult::kNamesOffset,
+ undefined_value);
+
+ // Stash match_info in order to build JSRegExpResultIndices lazily when the
+ // 'indices' property is accessed.
+ StoreObjectField(result, JSRegExpResult::kCachedIndicesOrMatchInfoOffset,
+ match_info);
// Finish elements initialization.
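
Stashing the match info on the result object is groundwork for building the indices array lazily on first access. A sketch of the eventual user-visible feature, assuming an engine that ships the finalized match-indices proposal (the /d flag; the feature was still being wired up at this V8 version):

    // Hypothetical on this V8 version; runs on engines with /d support:
    const m = /(?<word>\w+)/d.exec('hi');
    console.log(m.indices[0]);          // [0, 2]
    console.log(m.indices.groups.word); // [0, 2] — built from the stashed match info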
@@ -213,7 +222,7 @@ TNode<JSRegExpResult> RegExpBuiltinsAssembler::ConstructNewResultFromMatchInfo(
TNode<FixedArray> result_elements;
TNode<JSRegExpResult> result = AllocateRegExpResult(
- context, num_results, start, string, &result_elements);
+ context, num_results, start, string, match_info, &result_elements);
UnsafeStoreFixedArrayElement(result_elements, 0, first);
@@ -228,8 +237,7 @@ TNode<JSRegExpResult> RegExpBuiltinsAssembler::ConstructNewResultFromMatchInfo(
IntPtrConstant(RegExpMatchInfo::kFirstCaptureIndex + 2));
TVARIABLE(IntPtrT, var_to_cursor, IntPtrConstant(1));
- Variable* vars[] = {&var_from_cursor, &var_to_cursor};
- Label loop(this, 2, vars);
+ Label loop(this, {&var_from_cursor, &var_to_cursor});
Goto(&loop);
BIND(&loop);
@@ -289,6 +297,9 @@ TNode<JSRegExpResult> RegExpBuiltinsAssembler::ConstructNewResultFromMatchInfo(
TNode<IntPtrT> names_length = LoadAndUntagFixedArrayBaseLength(names);
CSA_ASSERT(this, IntPtrGreaterThan(names_length, IntPtrZero()));
+ // Stash names in case we need them to build the indices array later.
+ StoreObjectField(result, JSRegExpResult::kNamesOffset, names);
+
// Allocate a new object to store the named capture properties.
// TODO(jgruber): Could be optimized by adding the object map to the heap
// root list.
@@ -305,9 +316,7 @@ TNode<JSRegExpResult> RegExpBuiltinsAssembler::ConstructNewResultFromMatchInfo(
TVARIABLE(IntPtrT, var_i, IntPtrZero());
- Variable* vars[] = {&var_i};
- const int vars_count = sizeof(vars) / sizeof(vars[0]);
- Label loop(this, vars_count, vars);
+ Label loop(this, &var_i);
Goto(&loop);
BIND(&loop);
@@ -355,9 +364,10 @@ TNode<JSRegExpResult> RegExpBuiltinsAssembler::ConstructNewResultFromMatchInfo(
}
void RegExpBuiltinsAssembler::GetStringPointers(
- Node* const string_data, Node* const offset, Node* const last_index,
- Node* const string_length, String::Encoding encoding,
- Variable* var_string_start, Variable* var_string_end) {
+ TNode<RawPtrT> string_data, TNode<IntPtrT> offset,
+ TNode<IntPtrT> last_index, TNode<IntPtrT> string_length,
+ String::Encoding encoding, TVariable<RawPtrT>* var_string_start,
+ TVariable<RawPtrT>* var_string_end) {
DCHECK_EQ(var_string_start->rep(), MachineType::PointerRepresentation());
DCHECK_EQ(var_string_end->rep(), MachineType::PointerRepresentation());
@@ -365,13 +375,14 @@ void RegExpBuiltinsAssembler::GetStringPointers(
? UINT8_ELEMENTS
: UINT16_ELEMENTS;
- TNode<IntPtrT> const from_offset = ElementOffsetFromIndex(
- IntPtrAdd(offset, last_index), kind, INTPTR_PARAMETERS);
- var_string_start->Bind(IntPtrAdd(string_data, from_offset));
+ TNode<IntPtrT> from_offset =
+ ElementOffsetFromIndex(IntPtrAdd(offset, last_index), kind);
+ *var_string_start =
+ ReinterpretCast<RawPtrT>(IntPtrAdd(string_data, from_offset));
- TNode<IntPtrT> const to_offset = ElementOffsetFromIndex(
- IntPtrAdd(offset, string_length), kind, INTPTR_PARAMETERS);
- var_string_end->Bind(IntPtrAdd(string_data, to_offset));
+ TNode<IntPtrT> to_offset =
+ ElementOffsetFromIndex(IntPtrAdd(offset, string_length), kind);
+ *var_string_end = ReinterpretCast<RawPtrT>(IntPtrAdd(string_data, to_offset));
}
TNode<HeapObject> RegExpBuiltinsAssembler::RegExpExecInternal(
@@ -507,27 +518,18 @@ TNode<HeapObject> RegExpBuiltinsAssembler::RegExpExecInternal(
GotoIf(TaggedIsSmi(var_code.value()), &runtime);
TNode<Code> code = CAST(var_code.value());
- // Tier-up in runtime if ticks are non-zero and tier-up hasn't happened yet
- // and ensure that a RegExp stack is allocated when using compiled Irregexp.
+ // Ensure that a RegExp stack is allocated when using compiled Irregexp.
+ // TODO(jgruber): Guarantee an allocated stack and remove this check.
{
- Label next(this), check_tier_up(this);
- GotoIfNot(TaggedIsSmi(var_bytecode.value()), &check_tier_up);
+ Label next(this);
+ GotoIfNot(TaggedIsSmi(var_bytecode.value()), &next);
CSA_ASSERT(this, SmiEqual(CAST(var_bytecode.value()),
SmiConstant(JSRegExp::kUninitializedValue)));
- // Ensure RegExp stack is allocated.
TNode<IntPtrT> stack_size = UncheckedCast<IntPtrT>(
Load(MachineType::IntPtr(), regexp_stack_memory_size_address));
- GotoIf(IntPtrEqual(stack_size, IntPtrZero()), &runtime);
- Goto(&next);
-
- // Check if tier-up is requested.
- BIND(&check_tier_up);
- TNode<Smi> ticks = CAST(
- UnsafeLoadFixedArrayElement(data, JSRegExp::kIrregexpTierUpTicksIndex));
- GotoIf(SmiToInt32(ticks), &runtime);
+ Branch(IntPtrEqual(stack_size, IntPtrZero()), &runtime, &next);
- Goto(&next);
BIND(&next);
}
@@ -656,18 +658,18 @@ TNode<HeapObject> RegExpBuiltinsAssembler::RegExpExecInternal(
// Fill match and capture offsets in match_info.
{
- TNode<IntPtrT> limit_offset = ElementOffsetFromIndex(
- register_count, INT32_ELEMENTS, SMI_PARAMETERS, 0);
+ TNode<IntPtrT> limit_offset =
+ ElementOffsetFromIndex(register_count, INT32_ELEMENTS, 0);
TNode<IntPtrT> to_offset = ElementOffsetFromIndex(
IntPtrConstant(RegExpMatchInfo::kFirstCaptureIndex), PACKED_ELEMENTS,
- INTPTR_PARAMETERS, RegExpMatchInfo::kHeaderSize - kHeapObjectTag);
+ RegExpMatchInfo::kHeaderSize - kHeapObjectTag);
TVARIABLE(IntPtrT, var_to_offset, to_offset);
VariableList vars({&var_to_offset}, zone());
- BuildFastLoop(
+ BuildFastLoop<IntPtrT>(
vars, IntPtrZero(), limit_offset,
- [=, &var_to_offset](Node* offset) {
+ [&](TNode<IntPtrT> offset) {
TNode<Int32T> value = UncheckedCast<Int32T>(Load(
MachineType::Int32(), static_offsets_vector_address, offset));
TNode<Smi> smi_value = SmiFromInt32(value);
@@ -675,7 +677,7 @@ TNode<HeapObject> RegExpBuiltinsAssembler::RegExpExecInternal(
var_to_offset.value(), smi_value);
Increment(&var_to_offset, kTaggedSize);
},
- kInt32Size, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
+ kInt32Size, IndexAdvanceMode::kPost);
}
var_result = match_info;
@@ -733,7 +735,7 @@ RegExpBuiltinsAssembler::RegExpPrototypeExecBodyWithoutResult(
TNode<Context> context, TNode<JSReceiver> maybe_regexp,
TNode<String> string, Label* if_didnotmatch, const bool is_fastpath) {
if (!is_fastpath) {
- ThrowIfNotInstanceType(context, maybe_regexp, JS_REGEXP_TYPE,
+ ThrowIfNotInstanceType(context, maybe_regexp, JS_REG_EXP_TYPE,
"RegExp.prototype.exec");
}
@@ -894,14 +896,13 @@ TNode<BoolT> RegExpBuiltinsAssembler::IsReceiverInitialRegExpPrototype(
return TaggedEqual(receiver, initial_prototype);
}
-Node* RegExpBuiltinsAssembler::IsFastRegExpNoPrototype(
- SloppyTNode<Context> context, SloppyTNode<Object> object,
- SloppyTNode<Map> map) {
+TNode<BoolT> RegExpBuiltinsAssembler::IsFastRegExpNoPrototype(
+ TNode<Context> context, TNode<Object> object, TNode<Map> map) {
Label out(this);
- VARIABLE(var_result, MachineRepresentation::kWord32);
+ TVARIABLE(BoolT, var_result);
#ifdef V8_ENABLE_FORCE_SLOW_PATH
- var_result.Bind(Int32Constant(0));
+ var_result = Int32FalseConstant();
GotoIfForceSlowPath(&out);
#endif
@@ -912,13 +913,13 @@ Node* RegExpBuiltinsAssembler::IsFastRegExpNoPrototype(
LoadObjectField(regexp_fun, JSFunction::kPrototypeOrInitialMapOffset);
TNode<BoolT> const has_initialmap = TaggedEqual(map, initial_map);
- var_result.Bind(has_initialmap);
+ var_result = has_initialmap;
GotoIfNot(has_initialmap, &out);
// The smi check is required to omit ToLength(lastIndex) calls with possible
// user-code execution on the fast path.
TNode<Object> last_index = FastLoadLastIndexBeforeSmiCheck(CAST(object));
- var_result.Bind(TaggedIsPositiveSmi(last_index));
+ var_result = TaggedIsPositiveSmi(last_index);
Goto(&out);
BIND(&out);
@@ -939,7 +940,7 @@ TNode<BoolT> RegExpBuiltinsAssembler::IsFastRegExpWithOriginalExec(
GotoIfForceSlowPath(&out);
#endif
- TNode<BoolT> is_regexp = HasInstanceType(object, JS_REGEXP_TYPE);
+ TNode<BoolT> is_regexp = HasInstanceType(object, JS_REG_EXP_TYPE);
var_result = is_regexp;
GotoIfNot(is_regexp, &out);
@@ -970,8 +971,8 @@ TNode<BoolT> RegExpBuiltinsAssembler::IsFastRegExpWithOriginalExec(
return var_result.value();
}
-Node* RegExpBuiltinsAssembler::IsFastRegExpNoPrototype(
- SloppyTNode<Context> context, SloppyTNode<Object> object) {
+TNode<BoolT> RegExpBuiltinsAssembler::IsFastRegExpNoPrototype(
+ TNode<Context> context, TNode<Object> object) {
CSA_ASSERT(this, TaggedIsNotSmi(object));
return IsFastRegExpNoPrototype(context, object, LoadMap(CAST(object)));
}
@@ -1046,10 +1047,9 @@ void RegExpBuiltinsAssembler::BranchIfFastRegExp_Permissive(
if_isunmodified, if_ismodified);
}
-void RegExpBuiltinsAssembler::BranchIfFastRegExpResult(Node* const context,
- Node* const object,
- Label* if_isunmodified,
- Label* if_ismodified) {
+void RegExpBuiltinsAssembler::BranchIfFastRegExpResult(
+ const TNode<Context> context, const TNode<Object> object,
+ Label* if_isunmodified, Label* if_ismodified) {
// Could be a Smi.
TNode<Map> const map = LoadReceiverMap(object);
@@ -1061,15 +1061,6 @@ void RegExpBuiltinsAssembler::BranchIfFastRegExpResult(Node* const context,
if_ismodified);
}
-// Slow path stub for RegExpPrototypeExec to decrease code size.
-TF_BUILTIN(RegExpPrototypeExecSlow, RegExpBuiltinsAssembler) {
- TNode<JSRegExp> regexp = CAST(Parameter(Descriptor::kReceiver));
- TNode<String> string = CAST(Parameter(Descriptor::kString));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
-
- Return(RegExpPrototypeExecBody(context, regexp, string, false));
-}
-
// Fast path stub for ATOM regexps. String matching is done by StringIndexOf,
// and {match_info} is updated on success.
// The slow path is implemented in RegExp::AtomExec.
@@ -1149,33 +1140,6 @@ TF_BUILTIN(RegExpExecInternal, RegExpBuiltinsAssembler) {
Return(RegExpExecInternal(context, regexp, string, last_index, match_info));
}
-// ES#sec-regexp.prototype.exec
-// RegExp.prototype.exec ( string )
-TF_BUILTIN(RegExpPrototypeExec, RegExpBuiltinsAssembler) {
- TNode<Object> maybe_receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Object> maybe_string = CAST(Parameter(Descriptor::kString));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
-
- // Ensure {maybe_receiver} is a JSRegExp.
- ThrowIfNotInstanceType(context, maybe_receiver, JS_REGEXP_TYPE,
- "RegExp.prototype.exec");
- TNode<JSRegExp> receiver = CAST(maybe_receiver);
-
- // Convert {maybe_string} to a String.
- TNode<String> string = ToString_Inline(context, maybe_string);
-
- Label if_isfastpath(this), if_isslowpath(this);
- Branch(IsFastRegExpNoPrototype(context, receiver), &if_isfastpath,
- &if_isslowpath);
-
- BIND(&if_isfastpath);
- Return(RegExpPrototypeExecBody(context, receiver, string, true));
-
- BIND(&if_isslowpath);
- Return(CallBuiltin(Builtins::kRegExpPrototypeExecSlow, context, receiver,
- string));
-}
-
TNode<String> RegExpBuiltinsAssembler::FlagsGetter(TNode<Context> context,
TNode<Object> regexp,
bool is_fastpath) {
@@ -1246,8 +1210,8 @@ TNode<String> RegExpBuiltinsAssembler::FlagsGetter(TNode<Context> context,
{
TNode<String> const result = AllocateSeqOneByteString(var_length.value());
- VARIABLE(var_offset, MachineType::PointerRepresentation(),
- IntPtrConstant(SeqOneByteString::kHeaderSize - kHeapObjectTag));
+ TVARIABLE(IntPtrT, var_offset,
+ IntPtrConstant(SeqOneByteString::kHeaderSize - kHeapObjectTag));
#define CASE_FOR_FLAG(FLAG, CHAR) \
do { \
@@ -1256,7 +1220,7 @@ TNode<String> RegExpBuiltinsAssembler::FlagsGetter(TNode<Context> context,
TNode<Int32T> const value = Int32Constant(CHAR); \
StoreNoWriteBarrier(MachineRepresentation::kWord8, result, \
var_offset.value(), value); \
- var_offset.Bind(IntPtrAdd(var_offset.value(), int_one)); \
+ var_offset = IntPtrAdd(var_offset.value(), int_one); \
Goto(&next); \
BIND(&next); \
} while (false)
@@ -1273,64 +1237,11 @@ TNode<String> RegExpBuiltinsAssembler::FlagsGetter(TNode<Context> context,
}
}
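
FlagsGetter assembles the flags string character by character into a sequential one-byte string, one CASE_FOR_FLAG expansion per flag (illustration, not part of the patch):

    console.log(/x/gimsuy.flags); // "gimsuy" — one character appended per set flag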
-// ES#sec-isregexp IsRegExp ( argument )
-TNode<BoolT> RegExpBuiltinsAssembler::IsRegExp(TNode<Context> context,
- TNode<Object> maybe_receiver) {
- Label out(this), if_isregexp(this);
-
- TVARIABLE(BoolT, var_result, Int32FalseConstant());
-
- GotoIf(TaggedIsSmi(maybe_receiver), &out);
- GotoIfNot(IsJSReceiver(CAST(maybe_receiver)), &out);
-
- TNode<JSReceiver> receiver = CAST(maybe_receiver);
-
- // Check @@match.
- {
- TNode<Object> value =
- GetProperty(context, receiver, isolate()->factory()->match_symbol());
-
- Label match_isundefined(this), match_isnotundefined(this);
- Branch(IsUndefined(value), &match_isundefined, &match_isnotundefined);
-
- BIND(&match_isundefined);
- Branch(IsJSRegExp(receiver), &if_isregexp, &out);
-
- BIND(&match_isnotundefined);
- Label match_istrueish(this), match_isfalseish(this);
- BranchIfToBooleanIsTrue(value, &match_istrueish, &match_isfalseish);
-
- // The common path. Symbol.match exists, equals the RegExpPrototypeMatch
- // function (and is thus trueish), and the receiver is a JSRegExp.
- BIND(&match_istrueish);
- GotoIf(IsJSRegExp(receiver), &if_isregexp);
- CallRuntime(Runtime::kIncrementUseCounter, context,
- SmiConstant(v8::Isolate::kRegExpMatchIsTrueishOnNonJSRegExp));
- Goto(&if_isregexp);
-
- BIND(&match_isfalseish);
- GotoIfNot(IsJSRegExp(receiver), &out);
- CallRuntime(Runtime::kIncrementUseCounter, context,
- SmiConstant(v8::Isolate::kRegExpMatchIsFalseishOnJSRegExp));
- Goto(&out);
- }
-
- BIND(&if_isregexp);
- var_result = Int32TrueConstant();
- Goto(&out);
-
- BIND(&out);
- return var_result.value();
-}
-
// ES#sec-regexpinitialize
// Runtime Semantics: RegExpInitialize ( obj, pattern, flags )
-Node* RegExpBuiltinsAssembler::RegExpInitialize(Node* const context,
- Node* const regexp,
- Node* const maybe_pattern,
- Node* const maybe_flags) {
- CSA_ASSERT(this, IsJSRegExp(regexp));
-
+TNode<Object> RegExpBuiltinsAssembler::RegExpInitialize(
+ const TNode<Context> context, const TNode<JSRegExp> regexp,
+ const TNode<Object> maybe_pattern, const TNode<Object> maybe_flags) {
// Normalize pattern.
TNode<Object> const pattern = Select<Object>(
IsUndefined(maybe_pattern), [=] { return EmptyStringConstant(); },
@@ -1437,7 +1348,7 @@ TF_BUILTIN(RegExpConstructor, RegExpBuiltinsAssembler) {
// Allocate.
- VARIABLE(var_regexp, MachineRepresentation::kTagged);
+ TVARIABLE(JSRegExp, var_regexp);
{
Label allocate_jsregexp(this), allocate_generic(this, Label::kDeferred),
next(this);
@@ -1448,25 +1359,23 @@ TF_BUILTIN(RegExpConstructor, RegExpBuiltinsAssembler) {
{
TNode<Map> const initial_map = CAST(LoadObjectField(
regexp_function, JSFunction::kPrototypeOrInitialMapOffset));
- TNode<JSObject> const regexp = AllocateJSObjectFromMap(initial_map);
- var_regexp.Bind(regexp);
+ var_regexp = CAST(AllocateJSObjectFromMap(initial_map));
Goto(&next);
}
BIND(&allocate_generic);
{
ConstructorBuiltinsAssembler constructor_assembler(this->state());
- TNode<JSObject> const regexp = constructor_assembler.EmitFastNewObject(
- context, regexp_function, CAST(var_new_target.value()));
- var_regexp.Bind(regexp);
+ var_regexp = CAST(constructor_assembler.EmitFastNewObject(
+ context, regexp_function, CAST(var_new_target.value())));
Goto(&next);
}
BIND(&next);
}
- Node* const result = RegExpInitialize(context, var_regexp.value(),
- var_pattern.value(), var_flags.value());
+ const TNode<Object> result = RegExpInitialize(
+ context, var_regexp.value(), var_pattern.value(), var_flags.value());
Return(result);
}
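
RegExpInitialize normalizes an undefined pattern and flags to empty strings before compiling, which is why a bare constructor call still yields a usable regexp (illustration, not part of the patch):

    const re = new RegExp(undefined, undefined);
    console.log(re.source); // "(?:)" — the canonical source for an empty pattern
    console.log(re.flags);  // ""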
@@ -1478,12 +1387,12 @@ TF_BUILTIN(RegExpPrototypeCompile, RegExpBuiltinsAssembler) {
TNode<Object> maybe_flags = CAST(Parameter(Descriptor::kFlags));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- ThrowIfNotInstanceType(context, maybe_receiver, JS_REGEXP_TYPE,
+ ThrowIfNotInstanceType(context, maybe_receiver, JS_REG_EXP_TYPE,
"RegExp.prototype.compile");
- Node* const receiver = maybe_receiver;
+ const TNode<JSRegExp> receiver = CAST(maybe_receiver);
- VARIABLE(var_flags, MachineRepresentation::kTagged, maybe_flags);
- VARIABLE(var_pattern, MachineRepresentation::kTagged, maybe_pattern);
+ TVARIABLE(Object, var_flags, maybe_flags);
+ TVARIABLE(Object, var_pattern, maybe_pattern);
// Handle a JSRegExp pattern.
{
@@ -1492,8 +1401,6 @@ TF_BUILTIN(RegExpPrototypeCompile, RegExpBuiltinsAssembler) {
GotoIf(TaggedIsSmi(maybe_pattern), &next);
GotoIfNot(IsJSRegExp(CAST(maybe_pattern)), &next);
- Node* const pattern = maybe_pattern;
-
// {maybe_flags} must be undefined in this case, otherwise throw.
{
Label next(this);
@@ -1504,19 +1411,20 @@ TF_BUILTIN(RegExpPrototypeCompile, RegExpBuiltinsAssembler) {
BIND(&next);
}
- TNode<String> const new_flags = FlagsGetter(context, CAST(pattern), true);
+ const TNode<JSRegExp> pattern = CAST(maybe_pattern);
+ TNode<String> const new_flags = FlagsGetter(context, pattern, true);
TNode<Object> const new_pattern =
LoadObjectField(pattern, JSRegExp::kSourceOffset);
- var_flags.Bind(new_flags);
- var_pattern.Bind(new_pattern);
+ var_flags = new_flags;
+ var_pattern = new_pattern;
Goto(&next);
BIND(&next);
}
- Node* const result = RegExpInitialize(context, receiver, var_pattern.value(),
- var_flags.value());
+ const TNode<Object> result = RegExpInitialize(
+ context, receiver, var_pattern.value(), var_flags.value());
Return(result);
}
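
When compile receives a JSRegExp as the pattern, the hunk above copies its source and flags but insists that the explicit flags argument stay undefined (illustration, not part of the patch):

    const re = /a/g;
    re.compile(/b/i);
    console.log(re.source, re.flags);      // "b i"
    try { re.compile(/c/, 'g'); } catch (e) {
      console.log(e instanceof TypeError); // true — flags must be undefined here
    }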
@@ -1586,54 +1494,6 @@ TNode<BoolT> RegExpBuiltinsAssembler::FlagGetter(TNode<Context> context,
: SlowFlagGetter(context, regexp, flag);
}
-// ES#sec-regexpexec Runtime Semantics: RegExpExec ( R, S )
-TNode<Object> RegExpBuiltinsAssembler::RegExpExec(TNode<Context> context,
- Node* regexp, Node* string) {
- TVARIABLE(Object, var_result);
- Label out(this);
-
- // Take the slow path of fetching the exec property, calling it, and
- // verifying its return value.
-
- // Get the exec property.
- TNode<Object> const exec =
- GetProperty(context, regexp, isolate()->factory()->exec_string());
-
- // Is {exec} callable?
- Label if_iscallable(this), if_isnotcallable(this);
-
- GotoIf(TaggedIsSmi(exec), &if_isnotcallable);
-
- TNode<Map> const exec_map = LoadMap(CAST(exec));
- Branch(IsCallableMap(exec_map), &if_iscallable, &if_isnotcallable);
-
- BIND(&if_iscallable);
- {
- Callable call_callable = CodeFactory::Call(isolate());
- var_result = CAST(CallJS(call_callable, context, exec, regexp, string));
-
- GotoIf(IsNull(var_result.value()), &out);
-
- ThrowIfNotJSReceiver(context, var_result.value(),
- MessageTemplate::kInvalidRegExpExecResult, "");
-
- Goto(&out);
- }
-
- BIND(&if_isnotcallable);
- {
- ThrowIfNotInstanceType(context, regexp, JS_REGEXP_TYPE,
- "RegExp.prototype.exec");
-
- var_result = CallBuiltin(Builtins::kRegExpPrototypeExecSlow, context,
- regexp, string);
- Goto(&out);
- }
-
- BIND(&out);
- return var_result.value();
-}
-
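
The removed RegExpExec helper implemented the spec's dispatch: prefer a callable exec property, fall back to the builtin, and reject results that are neither null nor an object. Its behavior stays observable through @@match and friends (illustration, not part of the patch):

    class R extends RegExp { exec() { return null; } }
    // @@match routes through RegExpExec, which calls the custom exec:
    console.log('abc'.match(new R('b'))); // null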
TNode<Number> RegExpBuiltinsAssembler::AdvanceStringIndex(
SloppyTNode<String> string, SloppyTNode<Number> index,
SloppyTNode<BoolT> is_unicode, bool is_fastpath) {
@@ -1717,7 +1577,7 @@ TNode<Object> RegExpBuiltinsAssembler::RegExpPrototypeMatchBody(
{
var_result = is_fastpath ? RegExpPrototypeExecBody(context, CAST(regexp),
string, true)
- : RegExpExec(context, regexp, string);
+ : RegExpExec(context, CAST(regexp), string);
Goto(&done);
}
@@ -1735,9 +1595,9 @@ TNode<Object> RegExpBuiltinsAssembler::RegExpPrototypeMatchBody(
// Loop preparations. Within the loop, collect results from RegExpExec
// and store match strings in the array.
- Variable* vars[] = {array.var_array(), array.var_length(),
- array.var_capacity()};
- Label loop(this, 3, vars), out(this);
+ Label loop(this,
+ {array.var_array(), array.var_length(), array.var_capacity()}),
+ out(this);
// Check if the regexp is an ATOM type. If so, keep the literal string to
// search for so that we can avoid calling substring in the loop below.
@@ -1758,7 +1618,7 @@ TNode<Object> RegExpBuiltinsAssembler::RegExpPrototypeMatchBody(
BIND(&loop);
{
- VARIABLE(var_match, MachineRepresentation::kTagged);
+ TVARIABLE(String, var_match);
Label if_didmatch(this), if_didnotmatch(this);
if (is_fastpath) {
@@ -1776,24 +1636,24 @@ TNode<Object> RegExpBuiltinsAssembler::RegExpPrototypeMatchBody(
match_indices, RegExpMatchInfo::kFirstCaptureIndex);
TNode<Object> const match_to = UnsafeLoadFixedArrayElement(
match_indices, RegExpMatchInfo::kFirstCaptureIndex + 1);
- var_match.Bind(CallBuiltin(Builtins::kSubString, context, string,
- match_from, match_to));
+ var_match = CAST(CallBuiltin(Builtins::kSubString, context, string,
+ match_from, match_to));
Goto(&if_didmatch);
}
BIND(&donotsubstring);
- var_match.Bind(var_search_string.value());
+ var_match = var_search_string.value();
Goto(&if_didmatch);
} else {
DCHECK(!is_fastpath);
- TNode<Object> const result = RegExpExec(context, regexp, string);
+ TNode<Object> const result = RegExpExec(context, CAST(regexp), string);
Label load_match(this);
Branch(IsNull(result), &if_didnotmatch, &load_match);
BIND(&load_match);
- var_match.Bind(
- ToString_Inline(context, GetProperty(context, result, SmiZero())));
+ var_match =
+ ToString_Inline(context, GetProperty(context, result, SmiZero()));
Goto(&if_didmatch);
}
@@ -1807,11 +1667,11 @@ TNode<Object> RegExpBuiltinsAssembler::RegExpPrototypeMatchBody(
BIND(&if_didmatch);
{
- Node* match = var_match.value();
+ TNode<String> match = var_match.value();
// Store the match, growing the fixed array if needed.
- array.Push(CAST(match));
+ array.Push(match);
// Advance last index if the match is the empty string.
@@ -1855,128 +1715,11 @@ TNode<Object> RegExpBuiltinsAssembler::RegExpPrototypeMatchBody(
return var_result.value();
}
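
RegExpPrototypeMatchBody's loop repeatedly runs exec and pushes each match string, with the ATOM shortcut above skipping the substring call when the literal search string is already known (illustration, not part of the patch):

    console.log('aba'.match(/a/g)); // ["a", "a"] — one array entry per iteration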
-void RegExpMatchAllAssembler::Generate(TNode<Context> context,
- TNode<Context> native_context,
- TNode<Object> receiver,
- TNode<Object> maybe_string) {
- // 1. Let R be the this value.
- // 2. If Type(R) is not Object, throw a TypeError exception.
- ThrowIfNotJSReceiver(context, receiver,
- MessageTemplate::kIncompatibleMethodReceiver,
- "RegExp.prototype.@@matchAll");
-
- // 3. Let S be ? ToString(O).
- TNode<String> string = ToString_Inline(context, maybe_string);
-
- TVARIABLE(Object, var_matcher);
- TVARIABLE(BoolT, var_global);
- TVARIABLE(BoolT, var_unicode);
- Label create_iterator(this), if_fast_regexp(this),
- if_slow_regexp(this, Label::kDeferred);
-
- // Strict, because following code uses the flags property.
- // TODO(jgruber): Handle slow flag accesses on the fast path and make this
- // permissive.
- BranchIfFastRegExp_Strict(context, CAST(receiver), &if_fast_regexp,
- &if_slow_regexp);
-
- BIND(&if_fast_regexp);
- {
- TNode<JSRegExp> fast_regexp = CAST(receiver);
- TNode<Object> source =
- LoadObjectField(fast_regexp, JSRegExp::kSourceOffset);
-
- // 4. Let C be ? SpeciesConstructor(R, %RegExp%).
- // 5. Let flags be ? ToString(? Get(R, "flags")).
- // 6. Let matcher be ? Construct(C, « R, flags »).
- TNode<String> flags = FlagsGetter(context, fast_regexp, true);
- var_matcher = RegExpCreate(context, native_context, source, flags);
- CSA_ASSERT(this,
- IsFastRegExpPermissive(context, CAST(var_matcher.value())));
-
- // 7. Let lastIndex be ? ToLength(? Get(R, "lastIndex")).
- // 8. Perform ? Set(matcher, "lastIndex", lastIndex, true).
- FastStoreLastIndex(CAST(var_matcher.value()),
- FastLoadLastIndex(fast_regexp));
-
- // 9. If flags contains "g", let global be true.
- // 10. Else, let global be false.
- var_global = FastFlagGetter(CAST(var_matcher.value()), JSRegExp::kGlobal);
-
- // 11. If flags contains "u", let fullUnicode be true.
- // 12. Else, let fullUnicode be false.
- var_unicode = FastFlagGetter(CAST(var_matcher.value()), JSRegExp::kUnicode);
- Goto(&create_iterator);
- }
-
- BIND(&if_slow_regexp);
- {
- // 4. Let C be ? SpeciesConstructor(R, %RegExp%).
- TNode<JSFunction> regexp_fun = CAST(
- LoadContextElement(native_context, Context::REGEXP_FUNCTION_INDEX));
- TNode<JSReceiver> species_constructor =
- SpeciesConstructor(native_context, receiver, regexp_fun);
-
- // 5. Let flags be ? ToString(? Get(R, "flags")).
- TNode<Object> flags =
- GetProperty(context, receiver, isolate()->factory()->flags_string());
- TNode<String> flags_string = ToString_Inline(context, flags);
-
- // 6. Let matcher be ? Construct(C, « R, flags »).
- var_matcher =
- Construct(context, species_constructor, receiver, flags_string);
-
- // 7. Let lastIndex be ? ToLength(? Get(R, "lastIndex")).
- TNode<Number> last_index =
- ToLength_Inline(context, SlowLoadLastIndex(context, receiver));
-
- // 8. Perform ? Set(matcher, "lastIndex", lastIndex, true).
- SlowStoreLastIndex(context, var_matcher.value(), last_index);
-
- // 9. If flags contains "g", let global be true.
- // 10. Else, let global be false.
- TNode<String> global_char_string = StringConstant("g");
- TNode<Smi> global_ix =
- CAST(CallBuiltin(Builtins::kStringIndexOf, context, flags_string,
- global_char_string, SmiZero()));
- var_global = SmiNotEqual(global_ix, SmiConstant(-1));
-
- // 11. If flags contains "u", let fullUnicode be true.
- // 12. Else, let fullUnicode be false.
- TNode<String> unicode_char_string = StringConstant("u");
- TNode<Smi> unicode_ix =
- CAST(CallBuiltin(Builtins::kStringIndexOf, context, flags_string,
- unicode_char_string, SmiZero()));
- var_unicode = SmiNotEqual(unicode_ix, SmiConstant(-1));
- Goto(&create_iterator);
- }
-
- BIND(&create_iterator);
- {
- {
- // UseCounter for matchAll with non-g RegExp.
- // https://crbug.com/v8/9551
- Label next(this);
- GotoIf(var_global.value(), &next);
- CallRuntime(Runtime::kIncrementUseCounter, context,
- SmiConstant(v8::Isolate::kRegExpMatchAllWithNonGlobalRegExp));
- Goto(&next);
- BIND(&next);
- }
-
- // 13. Return ! CreateRegExpStringIterator(matcher, S, global, fullUnicode).
- TNode<Object> iterator =
- CreateRegExpStringIterator(native_context, var_matcher.value(), string,
- var_global.value(), var_unicode.value());
- Return(iterator);
- }
-}
-
// ES#sec-createregexpstringiterator
// CreateRegExpStringIterator ( R, S, global, fullUnicode )
TNode<Object> RegExpMatchAllAssembler::CreateRegExpStringIterator(
- TNode<Context> native_context, TNode<Object> regexp, TNode<String> string,
- TNode<BoolT> global, TNode<BoolT> full_unicode) {
+ TNode<NativeContext> native_context, TNode<Object> regexp,
+ TNode<String> string, TNode<BoolT> global, TNode<BoolT> full_unicode) {
TNode<Map> map = CAST(LoadContextElement(
native_context,
Context::INITIAL_REGEXP_STRING_ITERATOR_PROTOTYPE_MAP_INDEX));
@@ -2016,164 +1759,11 @@ TNode<Object> RegExpMatchAllAssembler::CreateRegExpStringIterator(
return iterator;
}
-// https://tc39.github.io/proposal-string-matchall/
-// RegExp.prototype [ @@matchAll ] ( string )
-TF_BUILTIN(RegExpPrototypeMatchAll, RegExpMatchAllAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<NativeContext> native_context = LoadNativeContext(context);
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Object> maybe_string = CAST(Parameter(Descriptor::kString));
- Generate(context, native_context, receiver, maybe_string);
-}
-
-void RegExpBuiltinsAssembler::RegExpPrototypeSearchBodyFast(
- TNode<Context> context, TNode<JSRegExp> regexp, TNode<String> string) {
- CSA_ASSERT(this, IsFastRegExpPermissive(context, regexp));
-
- // Grab the initial value of last index.
- TNode<Smi> previous_last_index = FastLoadLastIndex(regexp);
-
- // Ensure last index is 0.
- FastStoreLastIndex(regexp, SmiZero());
-
- // Call exec.
- Label if_didnotmatch(this);
- TNode<RegExpMatchInfo> match_indices = RegExpPrototypeExecBodyWithoutResult(
- context, regexp, string, &if_didnotmatch, true);
-
- // Successful match.
- {
- // Reset last index.
- FastStoreLastIndex(regexp, previous_last_index);
-
- // Return the index of the match.
- TNode<Object> const index = LoadFixedArrayElement(
- match_indices, RegExpMatchInfo::kFirstCaptureIndex);
- Return(index);
- }
-
- BIND(&if_didnotmatch);
- {
- // Reset last index and return -1.
- FastStoreLastIndex(regexp, previous_last_index);
- Return(SmiConstant(-1));
- }
-}
-
-void RegExpBuiltinsAssembler::RegExpPrototypeSearchBodySlow(
- TNode<Context> context, Node* const regexp, Node* const string) {
- CSA_ASSERT(this, IsJSReceiver(regexp));
- CSA_ASSERT(this, IsString(string));
-
- Isolate* const isolate = this->isolate();
-
- TNode<Smi> const smi_zero = SmiZero();
-
- // Grab the initial value of last index.
- TNode<Object> const previous_last_index =
- SlowLoadLastIndex(context, CAST(regexp));
-
- // Ensure last index is 0.
- {
- Label next(this), slow(this, Label::kDeferred);
- BranchIfSameValue(previous_last_index, smi_zero, &next, &slow);
-
- BIND(&slow);
- SlowStoreLastIndex(context, regexp, smi_zero);
- Goto(&next);
- BIND(&next);
- }
-
- // Call exec.
- TNode<Object> const exec_result = RegExpExec(context, regexp, string);
-
- // Reset last index if necessary.
- {
- Label next(this), slow(this, Label::kDeferred);
- TNode<Object> const current_last_index =
- SlowLoadLastIndex(context, CAST(regexp));
-
- BranchIfSameValue(current_last_index, previous_last_index, &next, &slow);
-
- BIND(&slow);
- SlowStoreLastIndex(context, regexp, previous_last_index);
- Goto(&next);
- BIND(&next);
- }
-
- // Return -1 if no match was found.
- {
- Label next(this);
- GotoIfNot(IsNull(exec_result), &next);
- Return(SmiConstant(-1));
- BIND(&next);
- }
-
- // Return the index of the match.
- {
- Label fast_result(this), slow_result(this, Label::kDeferred);
- BranchIfFastRegExpResult(context, exec_result, &fast_result, &slow_result);
-
- BIND(&fast_result);
- {
- TNode<Object> const index =
- LoadObjectField(CAST(exec_result), JSRegExpResult::kIndexOffset);
- Return(index);
- }
-
- BIND(&slow_result);
- {
- Return(GetProperty(context, exec_result,
- isolate->factory()->index_string()));
- }
- }
-}
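
// A standalone sketch of the @@search protocol both removed bodies above
// implement: lastIndex is saved, zeroed before the exec call, restored
// afterwards, and the result is the match index or -1. The exec callback
// and type names here are illustrative stand-ins, not V8 API.
#include <functional>
#include <optional>

struct SearchableRegExp {
  long last_index = 0;
};

long RegExpSearchSketch(
    SearchableRegExp& re,
    const std::function<std::optional<long>(SearchableRegExp&)>& exec) {
  const long previous_last_index = re.last_index;
  if (re.last_index != 0) re.last_index = 0;  // "Ensure last index is 0."
  std::optional<long> match_index = exec(re);
  if (re.last_index != previous_last_index) {
    re.last_index = previous_last_index;  // "Reset last index if necessary."
  }
  return match_index.value_or(-1);  // -1 when exec found no match
}

int main() {
  SearchableRegExp re{42};
  long index = RegExpSearchSketch(
      re, [](SearchableRegExp&) { return std::optional<long>{7}; });
  return (index == 7 && re.last_index == 42) ? 0 : 1;
}
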
-
-// ES#sec-regexp.prototype-@@search
-// RegExp.prototype [ @@search ] ( string )
-TF_BUILTIN(RegExpPrototypeSearch, RegExpBuiltinsAssembler) {
- TNode<Object> maybe_receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Object> maybe_string = CAST(Parameter(Descriptor::kString));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
-
- // Ensure {maybe_receiver} is a JSReceiver.
- ThrowIfNotJSReceiver(context, maybe_receiver,
- MessageTemplate::kIncompatibleMethodReceiver,
- "RegExp.prototype.@@search");
- TNode<JSReceiver> receiver = CAST(maybe_receiver);
-
- // Convert {maybe_string} to a String.
- TNode<String> const string = ToString_Inline(context, maybe_string);
-
- Label fast_path(this), slow_path(this);
- BranchIfFastRegExp_Permissive(context, receiver, &fast_path, &slow_path);
-
- BIND(&fast_path);
- // TODO(pwong): Could be optimized to remove the overhead of calling the
- // builtin (at the cost of a larger builtin).
- Return(CallBuiltin(Builtins::kRegExpSearchFast, context, receiver, string));
-
- BIND(&slow_path);
- RegExpPrototypeSearchBodySlow(context, receiver, string);
-}
-
-// Helper that skips a few initial checks and assumes:
-// 1) receiver is a "fast" RegExp
-// 2) pattern is a string
-TF_BUILTIN(RegExpSearchFast, RegExpBuiltinsAssembler) {
- TNode<JSRegExp> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<String> string = CAST(Parameter(Descriptor::kPattern));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
-
- RegExpPrototypeSearchBodyFast(context, receiver, string);
-}
-
// Generates the fast path for @@split. {regexp} is an unmodified, non-sticky
// JSRegExp, {string} is a String, and {limit} is a Smi.
-void RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(TNode<Context> context,
- TNode<JSRegExp> regexp,
- TNode<String> string,
- TNode<Smi> const limit) {
+TNode<JSArray> RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(
+ TNode<Context> context, TNode<JSRegExp> regexp, TNode<String> string,
+ TNode<Smi> const limit) {
CSA_ASSERT(this, IsFastRegExpPermissive(context, regexp));
CSA_ASSERT(this, Word32BinaryNot(FastFlagGetter(regexp, JSRegExp::kSticky)));
@@ -2182,11 +1772,13 @@ void RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(TNode<Context> context,
const ElementsKind kind = PACKED_ELEMENTS;
const ParameterMode mode = CodeStubAssembler::INTPTR_PARAMETERS;
- Node* const allocation_site = nullptr;
+ TNode<AllocationSite> allocation_site = {};
TNode<NativeContext> const native_context = LoadNativeContext(context);
TNode<Map> array_map = LoadJSArrayElementsMap(kind, native_context);
Label return_empty_array(this, Label::kDeferred);
+ TVARIABLE(JSArray, var_result);
+ Label done(this);
// If limit is zero, return an empty array.
{
@@ -2220,13 +1812,13 @@ void RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(TNode<Context> context,
{
TNode<Smi> length = SmiConstant(1);
TNode<IntPtrT> capacity = IntPtrConstant(1);
- TNode<JSArray> result = AllocateJSArray(kind, array_map, capacity,
- length, allocation_site, mode);
+ var_result = AllocateJSArray(kind, array_map, capacity, length,
+ allocation_site, mode);
- TNode<FixedArray> fixed_array = CAST(LoadElements(result));
+ TNode<FixedArray> fixed_array = CAST(LoadElements(var_result.value()));
UnsafeStoreFixedArrayElement(fixed_array, 0, string);
- Return(result);
+ Goto(&done);
}
}
@@ -2240,11 +1832,9 @@ void RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(TNode<Context> context,
TVARIABLE(Smi, var_last_matched_until, SmiZero());
TVARIABLE(Smi, var_next_search_from, SmiZero());
- Variable* vars[] = {array.var_array(), array.var_length(),
- array.var_capacity(), &var_last_matched_until,
- &var_next_search_from};
- const int vars_count = sizeof(vars) / sizeof(vars[0]);
- Label loop(this, vars_count, vars), push_suffix_and_out(this), out(this);
+ Label loop(this, {array.var_array(), array.var_length(), array.var_capacity(),
+ &var_last_matched_until, &var_next_search_from}),
+ push_suffix_and_out(this), out(this);
Goto(&loop);
BIND(&loop);
@@ -2321,19 +1911,17 @@ void RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(TNode<Context> context,
match_indices, RegExpMatchInfo::kNumberOfCapturesIndex));
TNode<IntPtrT> const int_num_registers = SmiUntag(num_registers);
- VARIABLE(var_reg, MachineType::PointerRepresentation());
- var_reg.Bind(IntPtrConstant(2));
+ TVARIABLE(IntPtrT, var_reg, IntPtrConstant(2));
- Variable* vars[] = {array.var_array(), array.var_length(),
- array.var_capacity(), &var_reg};
- const int vars_count = sizeof(vars) / sizeof(vars[0]);
- Label nested_loop(this, vars_count, vars), nested_loop_out(this);
+ Label nested_loop(this, {array.var_array(), array.var_length(),
+ array.var_capacity(), &var_reg}),
+ nested_loop_out(this);
Branch(IntPtrLessThan(var_reg.value(), int_num_registers), &nested_loop,
&nested_loop_out);
BIND(&nested_loop);
{
- Node* const reg = var_reg.value();
+ const TNode<IntPtrT> reg = var_reg.value();
TNode<Object> const from = LoadFixedArrayElement(
match_indices, reg,
RegExpMatchInfo::kFirstCaptureIndex * kTaggedSize, mode);
@@ -2342,30 +1930,30 @@ void RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(TNode<Context> context,
(RegExpMatchInfo::kFirstCaptureIndex + 1) * kTaggedSize, mode));
Label select_capture(this), select_undefined(this), store_value(this);
- VARIABLE(var_value, MachineRepresentation::kTagged);
+ TVARIABLE(Object, var_value);
Branch(SmiEqual(to, SmiConstant(-1)), &select_undefined,
&select_capture);
BIND(&select_capture);
{
- var_value.Bind(
- CallBuiltin(Builtins::kSubString, context, string, from, to));
+ var_value =
+ CallBuiltin(Builtins::kSubString, context, string, from, to);
Goto(&store_value);
}
BIND(&select_undefined);
{
- var_value.Bind(UndefinedConstant());
+ var_value = UndefinedConstant();
Goto(&store_value);
}
BIND(&store_value);
{
- array.Push(CAST(var_value.value()));
+ array.Push(var_value.value());
GotoIf(WordEqual(array.length(), int_limit), &out);
- TNode<WordT> const new_reg = IntPtrAdd(reg, IntPtrConstant(2));
- var_reg.Bind(new_reg);
+ const TNode<IntPtrT> new_reg = IntPtrAdd(reg, IntPtrConstant(2));
+ var_reg = new_reg;
Branch(IntPtrLessThan(new_reg, int_num_registers), &nested_loop,
&nested_loop_out);
@@ -2382,316 +1970,29 @@ void RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(TNode<Context> context,
BIND(&push_suffix_and_out);
{
- TNode<Smi> const from = var_last_matched_until.value();
- Node* const to = string_length;
+ const TNode<Smi> from = var_last_matched_until.value();
+ const TNode<Smi> to = string_length;
array.Push(CallBuiltin(Builtins::kSubString, context, string, from, to));
Goto(&out);
}
BIND(&out);
{
- TNode<JSArray> const result = array.ToJSArray(context);
- Return(result);
+ var_result = array.ToJSArray(context);
+ Goto(&done);
}
BIND(&return_empty_array);
{
TNode<Smi> length = SmiZero();
TNode<IntPtrT> capacity = IntPtrZero();
- TNode<JSArray> result = AllocateJSArray(kind, array_map, capacity, length,
- allocation_site, mode);
- Return(result);
- }
-}
-
-// Helper that skips a few initial checks.
-TF_BUILTIN(RegExpSplit, RegExpBuiltinsAssembler) {
- TNode<JSRegExp> regexp = CAST(Parameter(Descriptor::kRegExp));
- TNode<String> string = CAST(Parameter(Descriptor::kString));
- TNode<Object> maybe_limit = CAST(Parameter(Descriptor::kLimit));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
-
- CSA_ASSERT_BRANCH(this, [&](Label* ok, Label* not_ok) {
- BranchIfFastRegExp_Strict(context, regexp, ok, not_ok);
- });
-
- // Verify {maybe_limit}.
-
- VARIABLE(var_limit, MachineRepresentation::kTagged, maybe_limit);
- Label if_limitissmimax(this), runtime(this, Label::kDeferred);
-
- {
- Label next(this);
-
- GotoIf(IsUndefined(maybe_limit), &if_limitissmimax);
- Branch(TaggedIsPositiveSmi(maybe_limit), &next, &runtime);
-
- // We need to be extra-strict and require the given limit to be either
- // undefined or a positive smi. We can't call ToUint32(maybe_limit) since
- // that might move us onto the slow path, resulting in ordering spec
- // violations (see https://crbug.com/801171).
-
- BIND(&if_limitissmimax);
- {
- // TODO(jgruber): In this case, we can probably avoid generation of limit
- // checks in Generate_RegExpPrototypeSplitBody.
- var_limit.Bind(SmiConstant(Smi::kMaxValue));
- Goto(&next);
- }
-
- BIND(&next);
- }
-
- // Due to specific shortcuts we take on the fast path (specifically, we don't
- // allocate a new regexp instance as specced), we need to ensure that the
- // given regexp is non-sticky to avoid invalid results. See crbug.com/v8/6706.
-
- GotoIf(FastFlagGetter(regexp, JSRegExp::kSticky), &runtime);
-
- // We're good to go on the fast path, which is inlined here.
-
- RegExpPrototypeSplitBody(context, regexp, string, CAST(var_limit.value()));
-
- BIND(&runtime);
- Return(CallRuntime(Runtime::kRegExpSplit, context, regexp, string,
- var_limit.value()));
-}
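
// A sketch of the strict limit validation in the removed helper above,
// with std::optional modeling `undefined` and kSmiMaxValueSketch as an
// illustrative stand-in for Smi::kMaxValue: anything other than undefined
// or a non-negative Smi forces the runtime path, because coercing via
// ToUint32 could run user code and reorder observable effects
// (crbug.com/801171).
#include <cstdint>
#include <optional>

constexpr int32_t kSmiMaxValueSketch = 0x3FFFFFFF;

// Returns the effective limit, or nullopt when we must fall back to runtime.
std::optional<int32_t> StrictSplitLimit(std::optional<int32_t> maybe_limit) {
  if (!maybe_limit.has_value()) return kSmiMaxValueSketch;  // undefined
  if (*maybe_limit >= 0) return *maybe_limit;  // positive Smi: fast path
  return std::nullopt;                         // everything else: runtime
}

int main() {
  return (StrictSplitLimit(std::nullopt) == kSmiMaxValueSketch &&
          StrictSplitLimit(5) == 5 && !StrictSplitLimit(-1).has_value())
             ? 0
             : 1;
}
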
-
-// ES#sec-regexp.prototype-@@split
-// RegExp.prototype [ @@split ] ( string, limit )
-TF_BUILTIN(RegExpPrototypeSplit, RegExpBuiltinsAssembler) {
- const int kStringArg = 0;
- const int kLimitArg = 1;
-
- TNode<IntPtrT> argc =
- ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
- CodeStubArguments args(this, argc);
-
- TNode<Object> maybe_receiver = args.GetReceiver();
- TNode<Object> maybe_string = args.GetOptionalArgumentValue(kStringArg);
- TNode<Object> maybe_limit = args.GetOptionalArgumentValue(kLimitArg);
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
-
- // Ensure {maybe_receiver} is a JSReceiver.
- ThrowIfNotJSReceiver(context, maybe_receiver,
- MessageTemplate::kIncompatibleMethodReceiver,
- "RegExp.prototype.@@split");
- TNode<JSReceiver> receiver = CAST(maybe_receiver);
-
- // Convert {maybe_string} to a String.
- TNode<String> string = ToString_Inline(context, maybe_string);
-
- // Strict: Reads the flags property.
- // TODO(jgruber): Handle slow flag accesses on the fast path and make this
- // permissive.
- Label stub(this), runtime(this, Label::kDeferred);
- BranchIfFastRegExp_Strict(context, receiver, &stub, &runtime);
-
- BIND(&stub);
- args.PopAndReturn(CallBuiltin(Builtins::kRegExpSplit, context, receiver,
- string, maybe_limit));
-
- BIND(&runtime);
- args.PopAndReturn(CallRuntime(Runtime::kRegExpSplit, context, receiver,
- string, maybe_limit));
-}
-
-class RegExpStringIteratorAssembler : public RegExpBuiltinsAssembler {
- public:
- explicit RegExpStringIteratorAssembler(compiler::CodeAssemblerState* state)
- : RegExpBuiltinsAssembler(state) {}
-
- protected:
- TNode<Smi> LoadFlags(TNode<HeapObject> iterator) {
- return LoadObjectField<Smi>(iterator, JSRegExpStringIterator::kFlagsOffset);
- }
-
- TNode<BoolT> HasDoneFlag(TNode<Smi> flags) {
- return UncheckedCast<BoolT>(
- IsSetSmi(flags, 1 << JSRegExpStringIterator::kDoneBit));
- }
-
- TNode<BoolT> HasGlobalFlag(TNode<Smi> flags) {
- return UncheckedCast<BoolT>(
- IsSetSmi(flags, 1 << JSRegExpStringIterator::kGlobalBit));
- }
-
- TNode<BoolT> HasUnicodeFlag(TNode<Smi> flags) {
- return UncheckedCast<BoolT>(
- IsSetSmi(flags, 1 << JSRegExpStringIterator::kUnicodeBit));
- }
-
- void SetDoneFlag(TNode<HeapObject> iterator, TNode<Smi> flags) {
- TNode<Smi> new_flags =
- SmiOr(flags, SmiConstant(1 << JSRegExpStringIterator::kDoneBit));
- StoreObjectFieldNoWriteBarrier(
- iterator, JSRegExpStringIterator::kFlagsOffset, new_flags);
- }
-};
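
// A sketch of the Smi-encoded iterator state the removed helpers above
// read and write: [[Done]], [[Global]] and [[Unicode]] live as single bits
// in one small integer. Bit positions here are illustrative, not the real
// JSRegExpStringIterator layout.
#include <cassert>
#include <cstdint>

enum IteratorBit : uint32_t { kDoneBit = 0, kGlobalBit = 1, kUnicodeBit = 2 };

inline bool HasFlag(uint32_t flags, IteratorBit bit) {
  return (flags & (1u << bit)) != 0;
}

inline uint32_t SetDone(uint32_t flags) { return flags | (1u << kDoneBit); }

int main() {
  uint32_t flags = (1u << kGlobalBit) | (1u << kUnicodeBit);
  assert(!HasFlag(flags, kDoneBit));
  flags = SetDone(flags);  // mirrors SetDoneFlag's SmiOr + store
  assert(HasFlag(flags, kDoneBit) && HasFlag(flags, kGlobalBit));
  return 0;
}
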
-
-// https://tc39.github.io/proposal-string-matchall/
-// %RegExpStringIteratorPrototype%.next ( )
-TF_BUILTIN(RegExpStringIteratorPrototypeNext, RegExpStringIteratorAssembler) {
- const char* method_name = "%RegExpStringIterator%.prototype.next";
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> maybe_receiver = CAST(Parameter(Descriptor::kReceiver));
-
- Label if_match(this), if_no_match(this, Label::kDeferred),
- return_empty_done_result(this, Label::kDeferred);
-
- // 1. Let O be the this value.
- // 2. If Type(O) is not Object, throw a TypeError exception.
- // 3. If O does not have all of the internal slots of a RegExp String Iterator
- // Object Instance (see 5.3), throw a TypeError exception.
- ThrowIfNotInstanceType(context, maybe_receiver,
- JS_REGEXP_STRING_ITERATOR_TYPE, method_name);
- TNode<HeapObject> receiver = CAST(maybe_receiver);
-
- // 4. If O.[[Done]] is true, then
- // a. Return ! CreateIterResultObject(undefined, true).
- TNode<Smi> flags = LoadFlags(receiver);
- GotoIf(HasDoneFlag(flags), &return_empty_done_result);
-
- // 5. Let R be O.[[IteratingRegExp]].
- TNode<JSReceiver> iterating_regexp = CAST(LoadObjectField(
- receiver, JSRegExpStringIterator::kIteratingRegExpOffset));
-
- // For extra safety, also check the type in release mode.
- CSA_CHECK(this, IsJSReceiver(iterating_regexp));
-
- // 6. Let S be O.[[IteratedString]].
- TNode<String> iterating_string = CAST(
- LoadObjectField(receiver, JSRegExpStringIterator::kIteratedStringOffset));
-
- // 7. Let global be O.[[Global]].
- // See if_match.
-
- // 8. Let fullUnicode be O.[[Unicode]].
- // See if_global.
-
- // 9. Let match be ? RegExpExec(R, S).
- TVARIABLE(Object, var_match);
- TVARIABLE(BoolT, var_is_fast_regexp);
- {
- Label if_fast(this), if_slow(this, Label::kDeferred);
- BranchIfFastRegExp_Permissive(context, iterating_regexp, &if_fast,
- &if_slow);
-
- BIND(&if_fast);
- {
- TNode<RegExpMatchInfo> match_indices =
- RegExpPrototypeExecBodyWithoutResult(
- context, iterating_regexp, iterating_string, &if_no_match, true);
- var_match = ConstructNewResultFromMatchInfo(
- context, iterating_regexp, match_indices, iterating_string);
- var_is_fast_regexp = Int32TrueConstant();
- Goto(&if_match);
- }
-
- BIND(&if_slow);
- {
- var_match = RegExpExec(context, iterating_regexp, iterating_string);
- var_is_fast_regexp = Int32FalseConstant();
- Branch(IsNull(var_match.value()), &if_no_match, &if_match);
- }
- }
-
- // 10. If match is null, then
- BIND(&if_no_match);
- {
- // a. Set O.[[Done]] to true.
- SetDoneFlag(receiver, flags);
-
- // b. Return ! CreateIterResultObject(undefined, true).
- Goto(&return_empty_done_result);
+ var_result = AllocateJSArray(kind, array_map, capacity, length,
+ allocation_site, mode);
+ Goto(&done);
}
- // 11. Else,
- BIND(&if_match);
- {
- Label if_global(this), if_not_global(this, Label::kDeferred),
- return_result(this);
-
- // a. If global is true,
- Branch(HasGlobalFlag(flags), &if_global, &if_not_global);
- BIND(&if_global);
- {
- Label if_fast(this), if_slow(this, Label::kDeferred);
- // ii. If matchStr is the empty string,
- Branch(var_is_fast_regexp.value(), &if_fast, &if_slow);
- BIND(&if_fast);
- {
- // i. Let matchStr be ? ToString(? Get(match, "0")).
- CSA_ASSERT_BRANCH(this, [&](Label* ok, Label* not_ok) {
- BranchIfFastRegExpResult(context, var_match.value(), ok, not_ok);
- });
- CSA_ASSERT(this,
- SmiNotEqual(LoadFastJSArrayLength(CAST(var_match.value())),
- SmiZero()));
- TNode<FixedArray> result_fixed_array =
- CAST(LoadElements(CAST(var_match.value())));
- TNode<String> match_str =
- CAST(LoadFixedArrayElement(result_fixed_array, 0));
-
- // When iterating_regexp is fast, we assume it stays fast even after
- // accessing the first match from the RegExp result.
- CSA_ASSERT(this, IsFastRegExpPermissive(context, iterating_regexp));
- GotoIfNot(IsEmptyString(match_str), &return_result);
-
- // 1. Let thisIndex be ? ToLength(? Get(R, "lastIndex")).
- TNode<Smi> this_index = FastLoadLastIndex(CAST(iterating_regexp));
-
- // 2. Let nextIndex be ! AdvanceStringIndex(S, thisIndex, fullUnicode).
- TNode<Smi> next_index = AdvanceStringIndexFast(
- iterating_string, this_index, HasUnicodeFlag(flags));
-
- // 3. Perform ? Set(R, "lastIndex", nextIndex, true).
- FastStoreLastIndex(CAST(iterating_regexp), next_index);
-
- // iii. Return ! CreateIterResultObject(match, false).
- Goto(&return_result);
- }
- BIND(&if_slow);
- {
- // i. Let matchStr be ? ToString(? Get(match, "0")).
- TNode<String> match_str = ToString_Inline(
- context, GetProperty(context, var_match.value(), SmiZero()));
-
- GotoIfNot(IsEmptyString(match_str), &return_result);
-
- // 1. Let thisIndex be ? ToLength(? Get(R, "lastIndex")).
- TNode<Object> last_index = SlowLoadLastIndex(context, iterating_regexp);
- TNode<Number> this_index = ToLength_Inline(context, last_index);
-
- // 2. Let nextIndex be ! AdvanceStringIndex(S, thisIndex, fullUnicode).
- TNode<Number> next_index = AdvanceStringIndex(
- iterating_string, this_index, HasUnicodeFlag(flags), false);
-
- // 3. Perform ? Set(R, "lastIndex", nextIndex, true).
- SlowStoreLastIndex(context, iterating_regexp, next_index);
-
- // iii. Return ! CreateIterResultObject(match, false).
- Goto(&return_result);
- }
- }
- // b. Else,
- BIND(&if_not_global);
- {
- // i. Set O.[[Done]] to true.
- SetDoneFlag(receiver, flags);
-
- // ii. Return ! CreateIterResultObject(match, false).
- Goto(&return_result);
- }
- BIND(&return_result);
- {
- Return(AllocateJSIteratorResult(context, var_match.value(),
- FalseConstant()));
- }
- }
- BIND(&return_empty_done_result);
- Return(
- AllocateJSIteratorResult(context, UndefinedConstant(), TrueConstant()));
+ BIND(&done);
+ return var_result.value();
}
} // namespace internal
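
// A simplified standalone sketch of the fast-path split loop refactored
// above: search from next_search_from, push the substring since the last
// match end, repeat, then push the trailing suffix. find_match is an
// illustrative stand-in for the regexp engine; capture-group pushes and
// the unicode half of AdvanceStringIndex are omitted.
#include <string>
#include <vector>

struct MatchRange {
  size_t begin, end;  // [begin, end); begin == npos means "no match"
};

std::vector<std::string> SplitBodySketch(
    const std::string& s, size_t limit,
    MatchRange (*find_match)(const std::string&, size_t)) {
  std::vector<std::string> out;
  if (limit == 0) return out;  // limit 0: return an empty array
  size_t last_matched_until = 0;
  size_t next_search_from = 0;
  while (next_search_from < s.size()) {
    MatchRange m = find_match(s, next_search_from);
    if (m.begin == std::string::npos) break;
    // An empty match at the previous match end splits nothing; advance.
    if (m.end == next_search_from && m.end == last_matched_until) {
      ++next_search_from;
      continue;
    }
    out.push_back(s.substr(last_matched_until, m.begin - last_matched_until));
    if (out.size() == limit) return out;  // hit the caller's limit
    last_matched_until = m.end;
    next_search_from = m.end;
  }
  out.push_back(s.substr(last_matched_until));  // push_suffix_and_out
  return out;
}

static MatchRange FindComma(const std::string& s, size_t from) {
  size_t pos = s.find(',', from);
  return {pos, pos == std::string::npos ? pos : pos + 1};
}

int main() {
  return SplitBodySketch("a,b,c", 99, FindComma).size() == 3 ? 0 : 1;
}
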
diff --git a/deps/v8/src/builtins/builtins-regexp-gen.h b/deps/v8/src/builtins/builtins-regexp-gen.h
index de841f57b2..c6de458ef2 100644
--- a/deps/v8/src/builtins/builtins-regexp-gen.h
+++ b/deps/v8/src/builtins/builtins-regexp-gen.h
@@ -25,8 +25,6 @@ class RegExpBuiltinsAssembler : public CodeStubAssembler {
TNode<Object> RegExpCreate(TNode<Context> context, TNode<Map> initial_map,
TNode<Object> regexp_string, TNode<String> flags);
- TNode<BoolT> IsRegExp(TNode<Context> context, TNode<Object> maybe_receiver);
-
TNode<Smi> SmiZero();
TNode<IntPtrT> IntPtrZero();
@@ -37,7 +35,8 @@ class RegExpBuiltinsAssembler : public CodeStubAssembler {
// and input string.
TNode<JSRegExpResult> AllocateRegExpResult(
TNode<Context> context, TNode<Smi> length, TNode<Smi> index,
- TNode<String> input, TNode<FixedArray>* elements_out = nullptr);
+ TNode<String> input, TNode<RegExpMatchInfo> match_info,
+ TNode<FixedArray>* elements_out = nullptr);
TNode<Object> FastLoadLastIndexBeforeSmiCheck(TNode<JSRegExp> regexp);
TNode<Smi> FastLoadLastIndex(TNode<JSRegExp> regexp) {
@@ -56,10 +55,12 @@ class RegExpBuiltinsAssembler : public CodeStubAssembler {
// Loads {var_string_start} and {var_string_end} with the corresponding
// offsets into the given {string_data}.
- void GetStringPointers(Node* const string_data, Node* const offset,
- Node* const last_index, Node* const string_length,
- String::Encoding encoding, Variable* var_string_start,
- Variable* var_string_end);
+ void GetStringPointers(TNode<RawPtrT> string_data, TNode<IntPtrT> offset,
+ TNode<IntPtrT> last_index,
+ TNode<IntPtrT> string_length,
+ String::Encoding encoding,
+ TVariable<RawPtrT>* var_string_start,
+ TVariable<RawPtrT>* var_string_end);
// Low level logic around the actual call into pattern matching code.
TNode<HeapObject> RegExpExecInternal(TNode<Context> context,
@@ -136,17 +137,17 @@ class RegExpBuiltinsAssembler : public CodeStubAssembler {
// Performs fast path checks on the given object itself, but omits prototype
// checks.
- Node* IsFastRegExpNoPrototype(SloppyTNode<Context> context,
- SloppyTNode<Object> object);
- Node* IsFastRegExpNoPrototype(SloppyTNode<Context> context,
- SloppyTNode<Object> object,
- SloppyTNode<Map> map);
+ TNode<BoolT> IsFastRegExpNoPrototype(TNode<Context> context,
+ TNode<Object> object);
+ TNode<BoolT> IsFastRegExpNoPrototype(TNode<Context> context,
+ TNode<Object> object, TNode<Map> map);
// For debugging only. Uses a slow GetProperty call to fetch object.exec.
TNode<BoolT> IsFastRegExpWithOriginalExec(TNode<Context> context,
TNode<JSRegExp> object);
- void BranchIfFastRegExpResult(Node* const context, Node* const object,
+ void BranchIfFastRegExpResult(const TNode<Context> context,
+ const TNode<Object> object,
Label* if_isunmodified, Label* if_ismodified);
TNode<String> FlagsGetter(TNode<Context> context, TNode<Object> regexp,
@@ -164,10 +165,10 @@ class RegExpBuiltinsAssembler : public CodeStubAssembler {
TNode<BoolT> FlagGetter(TNode<Context> context, TNode<Object> regexp,
JSRegExp::Flag flag, bool is_fastpath);
- Node* RegExpInitialize(Node* const context, Node* const regexp,
- Node* const maybe_pattern, Node* const maybe_flags);
-
- TNode<Object> RegExpExec(TNode<Context> context, Node* regexp, Node* string);
+ TNode<Object> RegExpInitialize(const TNode<Context> context,
+ const TNode<JSRegExp> regexp,
+ const TNode<Object> maybe_pattern,
+ const TNode<Object> maybe_flags);
TNode<Number> AdvanceStringIndex(SloppyTNode<String> string,
SloppyTNode<Number> index,
@@ -179,20 +180,20 @@ class RegExpBuiltinsAssembler : public CodeStubAssembler {
return CAST(AdvanceStringIndex(string, index, is_unicode, true));
}
+ TNode<Smi> AdvanceStringIndexSlow(TNode<String> string, TNode<Number> index,
+ TNode<BoolT> is_unicode) {
+ return CAST(AdvanceStringIndex(string, index, is_unicode, false));
+ }
+
TNode<Object> RegExpPrototypeMatchBody(TNode<Context> context,
TNode<Object> regexp,
TNode<String> const string,
const bool is_fastpath);
- void RegExpPrototypeSearchBodyFast(TNode<Context> context,
- TNode<JSRegExp> regexp,
- TNode<String> string);
- void RegExpPrototypeSearchBodySlow(TNode<Context> context, Node* const regexp,
- Node* const string);
-
- void RegExpPrototypeSplitBody(TNode<Context> context, TNode<JSRegExp> regexp,
- TNode<String> const string,
- TNode<Smi> const limit);
+ TNode<JSArray> RegExpPrototypeSplitBody(TNode<Context> context,
+ TNode<JSRegExp> regexp,
+ TNode<String> const string,
+ TNode<Smi> const limit);
};
class RegExpMatchAllAssembler : public RegExpBuiltinsAssembler {
@@ -200,13 +201,11 @@ class RegExpMatchAllAssembler : public RegExpBuiltinsAssembler {
explicit RegExpMatchAllAssembler(compiler::CodeAssemblerState* state)
: RegExpBuiltinsAssembler(state) {}
- TNode<Object> CreateRegExpStringIterator(TNode<Context> native_context,
+ TNode<Object> CreateRegExpStringIterator(TNode<NativeContext> native_context,
TNode<Object> regexp,
TNode<String> string,
TNode<BoolT> global,
TNode<BoolT> full_unicode);
- void Generate(TNode<Context> context, TNode<Context> native_context,
- TNode<Object> receiver, TNode<Object> maybe_string);
};
} // namespace internal
diff --git a/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc b/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc
index 8ae89187ec..85cb4f10f7 100644
--- a/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc
+++ b/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc
@@ -11,8 +11,6 @@ namespace v8 {
namespace internal {
using compiler::Node;
-template <typename T>
-using TNode = compiler::TNode<T>;
class SharedArrayBufferBuiltinsAssembler : public CodeStubAssembler {
public:
@@ -255,7 +253,7 @@ TF_BUILTIN(AtomicsStore, SharedArrayBufferBuiltinsAssembler) {
GotoIf(Int32GreaterThan(elements_kind, Int32Constant(INT32_ELEMENTS)), &u64);
TNode<Number> value_integer = ToInteger_Inline(CAST(context), CAST(value));
- Node* value_word32 = TruncateTaggedToWord32(context, value_integer);
+ TNode<Word32T> value_word32 = TruncateTaggedToWord32(context, value_integer);
#if DEBUG
DebugSanityCheckAtomicIndex(array, index_word32, context);
@@ -338,7 +336,7 @@ TF_BUILTIN(AtomicsExchange, SharedArrayBufferBuiltinsAssembler) {
#if DEBUG
DebugSanityCheckAtomicIndex(array, index_word32, context);
#endif
- Node* value_word32 = TruncateTaggedToWord32(context, value_integer);
+ TNode<Word32T> value_word32 = TruncateTaggedToWord32(context, value_integer);
int32_t case_values[] = {
INT8_ELEMENTS, UINT8_ELEMENTS, INT16_ELEMENTS,
@@ -444,8 +442,10 @@ TF_BUILTIN(AtomicsCompareExchange, SharedArrayBufferBuiltinsAssembler) {
#if DEBUG
DebugSanityCheckAtomicIndex(array, index_word32, context);
#endif
- Node* old_value_word32 = TruncateTaggedToWord32(context, old_value_integer);
- Node* new_value_word32 = TruncateTaggedToWord32(context, new_value_integer);
+ TNode<Word32T> old_value_word32 =
+ TruncateTaggedToWord32(context, old_value_integer);
+ TNode<Word32T> new_value_word32 =
+ TruncateTaggedToWord32(context, new_value_integer);
int32_t case_values[] = {
INT8_ELEMENTS, UINT8_ELEMENTS, INT16_ELEMENTS,
@@ -571,7 +571,7 @@ void SharedArrayBufferBuiltinsAssembler::AtomicBinopBuiltinCommon(
#if DEBUG
DebugSanityCheckAtomicIndex(array, index_word32, context);
#endif
- Node* value_word32 = TruncateTaggedToWord32(context, value_integer);
+ TNode<Word32T> value_word32 = TruncateTaggedToWord32(context, value_integer);
int32_t case_values[] = {
INT8_ELEMENTS, UINT8_ELEMENTS, INT16_ELEMENTS,
diff --git a/deps/v8/src/builtins/builtins-string-gen.cc b/deps/v8/src/builtins/builtins-string-gen.cc
index fc2745ed0a..425ffc46d2 100644
--- a/deps/v8/src/builtins/builtins-string-gen.cc
+++ b/deps/v8/src/builtins/builtins-string-gen.cc
@@ -8,8 +8,10 @@
#include "src/builtins/builtins-utils-gen.h"
#include "src/builtins/builtins.h"
#include "src/codegen/code-factory.h"
+#include "src/execution/protectors.h"
#include "src/heap/factory-inl.h"
#include "src/heap/heap-inl.h"
+#include "src/logging/counters.h"
#include "src/objects/objects.h"
#include "src/objects/property-cell.h"
@@ -17,8 +19,6 @@ namespace v8 {
namespace internal {
using Node = compiler::Node;
-template <class T>
-using TNode = compiler::TNode<T>;
Node* StringBuiltinsAssembler::DirectStringData(Node* string,
Node* string_instance_type) {
@@ -120,14 +120,14 @@ Node* StringBuiltinsAssembler::CallSearchStringRaw(Node* const subject_ptr,
return result;
}
-TNode<IntPtrT> StringBuiltinsAssembler::PointerToStringDataAtIndex(
- Node* const string_data, Node* const index, String::Encoding encoding) {
+TNode<RawPtrT> StringBuiltinsAssembler::PointerToStringDataAtIndex(
+ TNode<RawPtrT> string_data, TNode<IntPtrT> index,
+ String::Encoding encoding) {
const ElementsKind kind = (encoding == String::ONE_BYTE_ENCODING)
? UINT8_ELEMENTS
: UINT16_ELEMENTS;
- TNode<IntPtrT> const offset_in_bytes =
- ElementOffsetFromIndex(index, kind, INTPTR_PARAMETERS);
- return Signed(IntPtrAdd(string_data, offset_in_bytes));
+ TNode<IntPtrT> offset_in_bytes = ElementOffsetFromIndex(index, kind);
+ return RawPtrAdd(string_data, offset_in_bytes);
}
void StringBuiltinsAssembler::GenerateStringEqual(TNode<String> left,
@@ -289,6 +289,262 @@ void StringBuiltinsAssembler::StringEqual_Loop(
}
}
+TNode<String> StringBuiltinsAssembler::StringFromSingleUTF16EncodedCodePoint(
+ TNode<Int32T> codepoint) {
+ VARIABLE(var_result, MachineRepresentation::kTagged, EmptyStringConstant());
+
+ Label if_isword16(this), if_isword32(this), return_result(this);
+
+ Branch(Uint32LessThan(codepoint, Int32Constant(0x10000)), &if_isword16,
+ &if_isword32);
+
+ BIND(&if_isword16);
+ {
+ var_result.Bind(StringFromSingleCharCode(codepoint));
+ Goto(&return_result);
+ }
+
+ BIND(&if_isword32);
+ {
+ TNode<String> value = AllocateSeqTwoByteString(2);
+ StoreNoWriteBarrier(
+ MachineRepresentation::kWord32, value,
+ IntPtrConstant(SeqTwoByteString::kHeaderSize - kHeapObjectTag),
+ codepoint);
+ var_result.Bind(value);
+ Goto(&return_result);
+ }
+
+ BIND(&return_result);
+ return CAST(var_result.value());
+}
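
// What "single UTF16-encoded code point" means in the new helper above:
// code points below 0x10000 are one code unit, everything else becomes a
// lead/trail surrogate pair (which the CSA version stores as one 32-bit
// word into a two-character SeqTwoByteString). A standalone sketch with
// std::u16string in place of the string allocation:
#include <cassert>
#include <cstdint>
#include <string>

std::u16string FromCodePointSketch(uint32_t cp) {
  if (cp < 0x10000) return std::u16string(1, static_cast<char16_t>(cp));
  cp -= 0x10000;
  const char16_t lead = static_cast<char16_t>(0xD800 + (cp >> 10));
  const char16_t trail = static_cast<char16_t>(0xDC00 + (cp & 0x3FF));
  return {lead, trail};
}

int main() {
  assert(FromCodePointSketch(0x41) == u"A");
  assert(FromCodePointSketch(0x1F600).size() == 2);  // surrogate pair
  return 0;
}
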
+
+TNode<String> StringBuiltinsAssembler::AllocateConsString(TNode<Uint32T> length,
+ TNode<String> left,
+ TNode<String> right) {
+ // Added string can be a cons string.
+ Comment("Allocating ConsString");
+ TNode<Int32T> left_instance_type = LoadInstanceType(left);
+ TNode<Int32T> right_instance_type = LoadInstanceType(right);
+
+ // Determine the resulting ConsString map to use depending on whether
+ // any of {left} or {right} has two byte encoding.
+ STATIC_ASSERT(kOneByteStringTag != 0);
+ STATIC_ASSERT(kTwoByteStringTag == 0);
+ TNode<Int32T> combined_instance_type =
+ Word32And(left_instance_type, right_instance_type);
+ TNode<Map> result_map = CAST(Select<Object>(
+ IsSetWord32(combined_instance_type, kStringEncodingMask),
+ [=] { return ConsOneByteStringMapConstant(); },
+ [=] { return ConsStringMapConstant(); }));
+ TNode<HeapObject> result = AllocateInNewSpace(ConsString::kSize);
+ StoreMapNoWriteBarrier(result, result_map);
+ StoreObjectFieldNoWriteBarrier(result, ConsString::kLengthOffset, length,
+ MachineRepresentation::kWord32);
+ StoreObjectFieldNoWriteBarrier(result, ConsString::kHashFieldOffset,
+ Int32Constant(String::kEmptyHashField),
+ MachineRepresentation::kWord32);
+ StoreObjectFieldNoWriteBarrier(result, ConsString::kFirstOffset, left);
+ StoreObjectFieldNoWriteBarrier(result, ConsString::kSecondOffset, right);
+ return CAST(result);
+}
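
// The map selection in AllocateConsString above relies on the one-byte bit
// being set only for one-byte strings: ANDing the two instance types keeps
// the bit exactly when both inputs are one-byte, so a single mask test
// picks the ConsString map. A sketch with illustrative stand-ins for the
// instance-type constants:
#include <cassert>
#include <cstdint>

constexpr uint32_t kEncodingMaskSketch = 1u << 3;
constexpr uint32_t kOneByteTagSketch = 1u << 3;  // two-byte tag is 0

bool ConsResultIsOneByte(uint32_t left_type, uint32_t right_type) {
  return ((left_type & right_type) & kEncodingMaskSketch) == kOneByteTagSketch;
}

int main() {
  const uint32_t one_byte = kOneByteTagSketch;
  const uint32_t two_byte = 0;
  assert(ConsResultIsOneByte(one_byte, one_byte));
  assert(!ConsResultIsOneByte(one_byte, two_byte));
  assert(!ConsResultIsOneByte(two_byte, two_byte));
  return 0;
}
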
+
+TNode<String> StringBuiltinsAssembler::StringAdd(Node* context,
+ TNode<String> left,
+ TNode<String> right) {
+ TVARIABLE(String, result);
+ Label check_right(this), runtime(this, Label::kDeferred), cons(this),
+ done(this, &result), done_native(this, &result);
+ Counters* counters = isolate()->counters();
+
+ TNode<Uint32T> left_length = LoadStringLengthAsWord32(left);
+ GotoIfNot(Word32Equal(left_length, Uint32Constant(0)), &check_right);
+ result = right;
+ Goto(&done_native);
+
+ BIND(&check_right);
+ TNode<Uint32T> right_length = LoadStringLengthAsWord32(right);
+ GotoIfNot(Word32Equal(right_length, Uint32Constant(0)), &cons);
+ result = left;
+ Goto(&done_native);
+
+ BIND(&cons);
+ {
+ TNode<Uint32T> new_length = Uint32Add(left_length, right_length);
+
+ // If new length is greater than String::kMaxLength, goto runtime to
+ // throw. Note: we also need to invalidate the string length protector, so
+ // can't just throw here directly.
+ GotoIf(Uint32GreaterThan(new_length, Uint32Constant(String::kMaxLength)),
+ &runtime);
+
+ TVARIABLE(String, var_left, left);
+ TVARIABLE(String, var_right, right);
+ Variable* input_vars[2] = {&var_left, &var_right};
+ Label non_cons(this, 2, input_vars);
+ Label slow(this, Label::kDeferred);
+ GotoIf(Uint32LessThan(new_length, Uint32Constant(ConsString::kMinLength)),
+ &non_cons);
+
+ result =
+ AllocateConsString(new_length, var_left.value(), var_right.value());
+ Goto(&done_native);
+
+ BIND(&non_cons);
+
+ Comment("Full string concatenate");
+ TNode<Int32T> left_instance_type = LoadInstanceType(var_left.value());
+ TNode<Int32T> right_instance_type = LoadInstanceType(var_right.value());
+ // Compute intersection and difference of instance types.
+
+ TNode<Int32T> ored_instance_types =
+ Word32Or(left_instance_type, right_instance_type);
+ TNode<Word32T> xored_instance_types =
+ Word32Xor(left_instance_type, right_instance_type);
+
+ // Check if both strings have the same encoding and both are sequential.
+ GotoIf(IsSetWord32(xored_instance_types, kStringEncodingMask), &runtime);
+ GotoIf(IsSetWord32(ored_instance_types, kStringRepresentationMask), &slow);
+
+ TNode<IntPtrT> word_left_length = Signed(ChangeUint32ToWord(left_length));
+ TNode<IntPtrT> word_right_length = Signed(ChangeUint32ToWord(right_length));
+
+ Label two_byte(this);
+ GotoIf(Word32Equal(Word32And(ored_instance_types,
+ Int32Constant(kStringEncodingMask)),
+ Int32Constant(kTwoByteStringTag)),
+ &two_byte);
+ // One-byte sequential string case
+ result = AllocateSeqOneByteString(new_length);
+ CopyStringCharacters(var_left.value(), result.value(), IntPtrConstant(0),
+ IntPtrConstant(0), word_left_length,
+ String::ONE_BYTE_ENCODING, String::ONE_BYTE_ENCODING);
+ CopyStringCharacters(var_right.value(), result.value(), IntPtrConstant(0),
+ word_left_length, word_right_length,
+ String::ONE_BYTE_ENCODING, String::ONE_BYTE_ENCODING);
+ Goto(&done_native);
+
+ BIND(&two_byte);
+ {
+ // Two-byte sequential string case
+ result = AllocateSeqTwoByteString(new_length);
+ CopyStringCharacters(var_left.value(), result.value(), IntPtrConstant(0),
+ IntPtrConstant(0), word_left_length,
+ String::TWO_BYTE_ENCODING,
+ String::TWO_BYTE_ENCODING);
+ CopyStringCharacters(var_right.value(), result.value(), IntPtrConstant(0),
+ word_left_length, word_right_length,
+ String::TWO_BYTE_ENCODING,
+ String::TWO_BYTE_ENCODING);
+ Goto(&done_native);
+ }
+
+ BIND(&slow);
+ {
+ // Try to unwrap indirect strings, restart the above attempt on success.
+ MaybeDerefIndirectStrings(&var_left, left_instance_type, &var_right,
+ right_instance_type, &non_cons);
+ Goto(&runtime);
+ }
+ }
+ BIND(&runtime);
+ {
+ result = CAST(CallRuntime(Runtime::kStringAdd, context, left, right));
+ Goto(&done);
+ }
+
+ BIND(&done_native);
+ {
+ IncrementCounter(counters->string_add_native(), 1);
+ Goto(&done);
+ }
+
+ BIND(&done);
+ return result.value();
+}
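
// The dispatch order in StringAdd above, reduced to a standalone decision
// function: empty operands short-circuit, oversized results go to runtime
// (which throws), results at or above the cons threshold get a ConsString,
// and only short same-encoding sequential pairs are flat-copied. The two
// constants are illustrative stand-ins for ConsString::kMinLength and
// String::kMaxLength.
#include <cstddef>

constexpr size_t kConsMinLengthSketch = 13;
constexpr size_t kMaxLengthSketch = 0x3FFFFFFE;

enum class AddStrategy { kReturnLeft, kReturnRight, kCons, kFlatCopy, kRuntime };

AddStrategy ChooseAddStrategy(size_t left_len, size_t right_len,
                              bool same_encoding, bool both_sequential) {
  if (left_len == 0) return AddStrategy::kReturnRight;
  if (right_len == 0) return AddStrategy::kReturnLeft;
  const size_t new_len = left_len + right_len;
  if (new_len > kMaxLengthSketch) return AddStrategy::kRuntime;  // will throw
  if (new_len >= kConsMinLengthSketch) return AddStrategy::kCons;
  if (!same_encoding) return AddStrategy::kRuntime;
  // The real code first tries to unwrap indirect strings and retry here.
  if (!both_sequential) return AddStrategy::kRuntime;
  return AddStrategy::kFlatCopy;
}

int main() {
  return (ChooseAddStrategy(0, 4, true, true) == AddStrategy::kReturnRight &&
          ChooseAddStrategy(8, 8, true, true) == AddStrategy::kCons &&
          ChooseAddStrategy(3, 3, true, true) == AddStrategy::kFlatCopy)
             ? 0
             : 1;
}
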
+
+void StringBuiltinsAssembler::BranchIfCanDerefIndirectString(
+ TNode<String> string, TNode<Int32T> instance_type, Label* can_deref,
+ Label* cannot_deref) {
+ TNode<Int32T> representation =
+ Word32And(instance_type, Int32Constant(kStringRepresentationMask));
+ GotoIf(Word32Equal(representation, Int32Constant(kThinStringTag)), can_deref);
+ GotoIf(Word32NotEqual(representation, Int32Constant(kConsStringTag)),
+ cannot_deref);
+ // Cons string.
+ TNode<String> rhs =
+ LoadObjectField<String>(string, ConsString::kSecondOffset);
+ GotoIf(IsEmptyString(rhs), can_deref);
+ Goto(cannot_deref);
+}
+
+void StringBuiltinsAssembler::DerefIndirectString(TVariable<String>* var_string,
+ TNode<Int32T> instance_type) {
+#ifdef DEBUG
+ Label can_deref(this), cannot_deref(this);
+ BranchIfCanDerefIndirectString(var_string->value(), instance_type, &can_deref,
+ &cannot_deref);
+ BIND(&cannot_deref);
+ DebugBreak(); // Should be able to dereference string.
+ Goto(&can_deref);
+ BIND(&can_deref);
+#endif // DEBUG
+
+ STATIC_ASSERT(static_cast<int>(ThinString::kActualOffset) ==
+ static_cast<int>(ConsString::kFirstOffset));
+ *var_string =
+ LoadObjectField<String>(var_string->value(), ThinString::kActualOffset);
+}
+
+void StringBuiltinsAssembler::MaybeDerefIndirectString(
+ TVariable<String>* var_string, TNode<Int32T> instance_type,
+ Label* did_deref, Label* cannot_deref) {
+ Label deref(this);
+ BranchIfCanDerefIndirectString(var_string->value(), instance_type, &deref,
+ cannot_deref);
+
+ BIND(&deref);
+ {
+ DerefIndirectString(var_string, instance_type);
+ Goto(did_deref);
+ }
+}
+
+void StringBuiltinsAssembler::MaybeDerefIndirectStrings(
+ TVariable<String>* var_left, TNode<Int32T> left_instance_type,
+ TVariable<String>* var_right, TNode<Int32T> right_instance_type,
+ Label* did_something) {
+ Label did_nothing_left(this), did_something_left(this),
+ didnt_do_anything(this);
+ MaybeDerefIndirectString(var_left, left_instance_type, &did_something_left,
+ &did_nothing_left);
+
+ BIND(&did_something_left);
+ {
+ MaybeDerefIndirectString(var_right, right_instance_type, did_something,
+ did_something);
+ }
+
+ BIND(&did_nothing_left);
+ {
+ MaybeDerefIndirectString(var_right, right_instance_type, did_something,
+ &didnt_do_anything);
+ }
+
+ BIND(&didnt_do_anything);
+ // Fall through if neither string was an indirect string.
+}
+
+TNode<String> StringBuiltinsAssembler::DerefIndirectString(
+ TNode<String> string, TNode<Int32T> instance_type, Label* cannot_deref) {
+ Label deref(this);
+ BranchIfCanDerefIndirectString(string, instance_type, &deref, cannot_deref);
+ BIND(&deref);
+ STATIC_ASSERT(static_cast<int>(ThinString::kActualOffset) ==
+ static_cast<int>(ConsString::kFirstOffset));
+ return LoadObjectField<String>(string, ThinString::kActualOffset);
+}
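
// The deref helpers above unwrap two indirect representations: a
// ThinString always forwards to its actual string, and a ConsString
// forwards to its first part exactly when its second part is empty (a
// "flat cons"). A sketch over an illustrative model, not V8's object
// layout:
#include <string>

struct ModelString {
  enum Kind { kSequential, kThin, kCons } kind = kSequential;
  std::string payload;                  // sequential contents
  const ModelString* first = nullptr;   // thin target / cons first part
  const ModelString* second = nullptr;  // cons second part
};

const ModelString* MaybeDerefIndirect(const ModelString* s) {
  if (s->kind == ModelString::kThin) return s->first;
  if (s->kind == ModelString::kCons && s->second->payload.empty()) {
    return s->first;  // flat cons: the second part is the empty string
  }
  return s;  // cannot deref
}

int main() {
  ModelString target{ModelString::kSequential, "hello"};
  ModelString empty{ModelString::kSequential, ""};
  ModelString thin{ModelString::kThin, "", &target, nullptr};
  ModelString flat_cons{ModelString::kCons, "", &target, &empty};
  return (MaybeDerefIndirect(&thin) == &target &&
          MaybeDerefIndirect(&flat_cons) == &target &&
          MaybeDerefIndirect(&target) == &target)
             ? 0
             : 1;
}
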
+
TF_BUILTIN(StringAdd_CheckNone, StringBuiltinsAssembler) {
TNode<String> left = CAST(Parameter(Descriptor::kLeft));
TNode<String> right = CAST(Parameter(Descriptor::kRight));
@@ -504,19 +760,6 @@ TF_BUILTIN(StringGreaterThanOrEqual, StringBuiltinsAssembler) {
Operation::kGreaterThanOrEqual);
}
-TF_BUILTIN(StringCharAt, StringBuiltinsAssembler) {
- TNode<String> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<IntPtrT> position =
- UncheckedCast<IntPtrT>(Parameter(Descriptor::kPosition));
-
- // Load the character code at the {position} from the {receiver}.
- TNode<Int32T> code = StringCharCodeAt(receiver, position);
-
- // And return the single character string with only that {code}
- TNode<String> result = StringFromSingleCharCode(code);
- Return(result);
-}
-
TF_BUILTIN(StringCodePointAt, StringBuiltinsAssembler) {
Node* receiver = Parameter(Descriptor::kReceiver);
Node* position = Parameter(Descriptor::kPosition);
@@ -551,14 +794,14 @@ TF_BUILTIN(StringFromCodePointAt, StringBuiltinsAssembler) {
// ES6 section 21.1 String Objects
// ES6 #sec-string.fromcharcode
-TF_BUILTIN(StringFromCharCode, CodeStubAssembler) {
+TF_BUILTIN(StringFromCharCode, StringBuiltinsAssembler) {
// TODO(ishell): use constants from Descriptor once the JSFunction linkage
// arguments are reordered.
TNode<Int32T> argc =
UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount));
Node* context = Parameter(Descriptor::kContext);
- CodeStubArguments arguments(this, ChangeInt32ToIntPtr(argc));
+ CodeStubArguments arguments(this, argc);
// Check if we have exactly one argument (plus the implicit receiver), i.e.
// if the parent frame is not an arguments adaptor frame.
Label if_oneargument(this), if_notoneargument(this);
@@ -571,7 +814,7 @@ TF_BUILTIN(StringFromCharCode, CodeStubAssembler) {
// for one-byte code units, or fall back to creating a single character
// string on the fly otherwise.
TNode<Object> code = arguments.AtIndex(0);
- Node* code32 = TruncateTaggedToWord32(context, code);
+ TNode<Word32T> code32 = TruncateTaggedToWord32(context, code);
TNode<Int32T> code16 =
Signed(Word32And(code32, Int32Constant(String::kMaxUtf16CodeUnit)));
TNode<String> result = StringFromSingleCharCode(code16);
@@ -585,16 +828,14 @@ TF_BUILTIN(StringFromCharCode, CodeStubAssembler) {
// Assume that the resulting string contains only one-byte characters.
TNode<String> one_byte_result = AllocateSeqOneByteString(Unsigned(argc));
- TVARIABLE(IntPtrT, var_max_index);
- var_max_index = IntPtrConstant(0);
+ TVARIABLE(IntPtrT, var_max_index, IntPtrConstant(0));
// Iterate over the incoming arguments, converting them to 8-bit character
// codes. Stop if any of the conversions generates a code that doesn't fit
// in 8 bits.
CodeStubAssembler::VariableList vars({&var_max_index}, zone());
- arguments.ForEach(vars, [this, context, &two_byte, &var_max_index, &code16,
- one_byte_result](Node* arg) {
- Node* code32 = TruncateTaggedToWord32(context, arg);
+ arguments.ForEach(vars, [&](TNode<Object> arg) {
+ TNode<Word32T> code32 = TruncateTaggedToWord32(context, arg);
code16 = Word32And(code32, Int32Constant(String::kMaxUtf16CodeUnit));
GotoIf(
@@ -604,7 +845,6 @@ TF_BUILTIN(StringFromCharCode, CodeStubAssembler) {
// The {code16} fits into the SeqOneByteString {one_byte_result}.
TNode<IntPtrT> offset = ElementOffsetFromIndex(
var_max_index.value(), UINT8_ELEMENTS,
- CodeStubAssembler::INTPTR_PARAMETERS,
SeqOneByteString::kHeaderSize - kHeapObjectTag);
StoreNoWriteBarrier(MachineRepresentation::kWord8, one_byte_result,
offset, code16);
@@ -629,7 +869,6 @@ TF_BUILTIN(StringFromCharCode, CodeStubAssembler) {
// Write the character that caused the 8-bit to 16-bit fault.
TNode<IntPtrT> max_index_offset =
ElementOffsetFromIndex(var_max_index.value(), UINT16_ELEMENTS,
- CodeStubAssembler::INTPTR_PARAMETERS,
SeqTwoByteString::kHeaderSize - kHeapObjectTag);
StoreNoWriteBarrier(MachineRepresentation::kWord16, two_byte_result,
max_index_offset, code16);
@@ -640,14 +879,13 @@ TF_BUILTIN(StringFromCharCode, CodeStubAssembler) {
// using a 16-bit representation.
arguments.ForEach(
vars,
- [this, context, two_byte_result, &var_max_index](Node* arg) {
- Node* code32 = TruncateTaggedToWord32(context, arg);
+ [&](TNode<Object> arg) {
+ TNode<Word32T> code32 = TruncateTaggedToWord32(context, arg);
TNode<Word32T> code16 =
Word32And(code32, Int32Constant(String::kMaxUtf16CodeUnit));
TNode<IntPtrT> offset = ElementOffsetFromIndex(
var_max_index.value(), UINT16_ELEMENTS,
- CodeStubAssembler::INTPTR_PARAMETERS,
SeqTwoByteString::kHeaderSize - kHeapObjectTag);
StoreNoWriteBarrier(MachineRepresentation::kWord16, two_byte_result,
offset, code16);
@@ -723,9 +961,9 @@ void StringBuiltinsAssembler::StringIndexOf(
BIND(&one_one);
{
- TNode<IntPtrT> const adjusted_subject_ptr = PointerToStringDataAtIndex(
+ TNode<RawPtrT> const adjusted_subject_ptr = PointerToStringDataAtIndex(
subject_ptr, subject_offset, String::ONE_BYTE_ENCODING);
- TNode<IntPtrT> const adjusted_search_ptr = PointerToStringDataAtIndex(
+ TNode<RawPtrT> const adjusted_search_ptr = PointerToStringDataAtIndex(
search_ptr, search_offset, String::ONE_BYTE_ENCODING);
Label direct_memchr_call(this), generic_fast_path(this);
@@ -736,8 +974,8 @@ void StringBuiltinsAssembler::StringIndexOf(
// search strings.
BIND(&direct_memchr_call);
{
- TNode<IntPtrT> const string_addr =
- IntPtrAdd(adjusted_subject_ptr, start_position);
+ TNode<RawPtrT> const string_addr =
+ RawPtrAdd(adjusted_subject_ptr, start_position);
TNode<IntPtrT> const search_length =
IntPtrSub(subject_length, start_position);
TNode<IntPtrT> const search_byte =
@@ -745,14 +983,14 @@ void StringBuiltinsAssembler::StringIndexOf(
TNode<ExternalReference> const memchr =
ExternalConstant(ExternalReference::libc_memchr_function());
- TNode<IntPtrT> const result_address = UncheckedCast<IntPtrT>(
+ TNode<RawPtrT> const result_address = UncheckedCast<RawPtrT>(
CallCFunction(memchr, MachineType::Pointer(),
std::make_pair(MachineType::Pointer(), string_addr),
std::make_pair(MachineType::IntPtr(), search_byte),
std::make_pair(MachineType::UintPtr(), search_length)));
GotoIf(WordEqual(result_address, int_zero), &return_minus_1);
TNode<IntPtrT> const result_index =
- IntPtrAdd(IntPtrSub(result_address, string_addr), start_position);
+ IntPtrAdd(RawPtrSub(result_address, string_addr), start_position);
f_return(SmiTag(result_index));
}
@@ -767,9 +1005,9 @@ void StringBuiltinsAssembler::StringIndexOf(
BIND(&one_two);
{
- TNode<IntPtrT> const adjusted_subject_ptr = PointerToStringDataAtIndex(
+ TNode<RawPtrT> const adjusted_subject_ptr = PointerToStringDataAtIndex(
subject_ptr, subject_offset, String::ONE_BYTE_ENCODING);
- TNode<IntPtrT> const adjusted_search_ptr = PointerToStringDataAtIndex(
+ TNode<RawPtrT> const adjusted_search_ptr = PointerToStringDataAtIndex(
search_ptr, search_offset, String::TWO_BYTE_ENCODING);
Node* const result = CallSearchStringRaw<onebyte_t, twobyte_t>(
@@ -780,9 +1018,9 @@ void StringBuiltinsAssembler::StringIndexOf(
BIND(&two_one);
{
- TNode<IntPtrT> const adjusted_subject_ptr = PointerToStringDataAtIndex(
+ TNode<RawPtrT> const adjusted_subject_ptr = PointerToStringDataAtIndex(
subject_ptr, subject_offset, String::TWO_BYTE_ENCODING);
- TNode<IntPtrT> const adjusted_search_ptr = PointerToStringDataAtIndex(
+ TNode<RawPtrT> const adjusted_search_ptr = PointerToStringDataAtIndex(
search_ptr, search_offset, String::ONE_BYTE_ENCODING);
Node* const result = CallSearchStringRaw<twobyte_t, onebyte_t>(
@@ -793,9 +1031,9 @@ void StringBuiltinsAssembler::StringIndexOf(
BIND(&two_two);
{
- TNode<IntPtrT> const adjusted_subject_ptr = PointerToStringDataAtIndex(
+ TNode<RawPtrT> const adjusted_subject_ptr = PointerToStringDataAtIndex(
subject_ptr, subject_offset, String::TWO_BYTE_ENCODING);
- TNode<IntPtrT> const adjusted_search_ptr = PointerToStringDataAtIndex(
+ TNode<RawPtrT> const adjusted_search_ptr = PointerToStringDataAtIndex(
search_ptr, search_offset, String::TWO_BYTE_ENCODING);
Node* const result = CallSearchStringRaw<twobyte_t, twobyte_t>(
@@ -1300,8 +1538,8 @@ TF_BUILTIN(StringPrototypeMatchAll, StringBuiltinsAssembler) {
// maybe_regexp is a fast regexp and receiver is a string.
TNode<String> s = CAST(receiver);
- RegExpMatchAllAssembler regexp_asm(state());
- regexp_asm.Generate(context, native_context, maybe_regexp, s);
+ Return(
+ RegExpPrototypeMatchAllImpl(context, native_context, maybe_regexp, s));
};
auto if_generic_call = [=](Node* fn) {
Callable call_callable = CodeFactory::Call(isolate());
@@ -1368,9 +1606,9 @@ TNode<JSArray> StringBuiltinsAssembler::StringToArray(
TNode<IntPtrT> string_data_offset = to_direct.offset();
TNode<FixedArray> cache = SingleCharacterStringCacheConstant();
- BuildFastLoop(
+ BuildFastLoop<IntPtrT>(
IntPtrConstant(0), length,
- [&](Node* index) {
+ [&](TNode<IntPtrT> index) {
// TODO(jkummerow): Implement a CSA version of DisallowHeapAllocation
// and use that to guard ToDirectStringAssembler.PointerToData().
CSA_ASSERT(this, WordEqual(to_direct.PointerToData(&call_runtime),
@@ -1387,7 +1625,7 @@ TNode<JSArray> StringBuiltinsAssembler::StringToArray(
StoreFixedArrayElement(elements, index, entry);
},
- 1, ParameterMode::INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
+ 1, IndexAdvanceMode::kPost);
TNode<Map> array_map = LoadJSArrayElementsMap(PACKED_ELEMENTS, context);
result_array = AllocateJSArray(array_map, elements, length_smi);
@@ -1614,7 +1852,7 @@ TF_BUILTIN(StringPrototypeSubstr, StringBuiltinsAssembler) {
}
}
-TF_BUILTIN(StringSubstring, CodeStubAssembler) {
+TF_BUILTIN(StringSubstring, StringBuiltinsAssembler) {
TNode<String> string = CAST(Parameter(Descriptor::kString));
TNode<IntPtrT> from = UncheckedCast<IntPtrT>(Parameter(Descriptor::kFrom));
TNode<IntPtrT> to = UncheckedCast<IntPtrT>(Parameter(Descriptor::kTo));
@@ -1870,9 +2108,248 @@ void StringBuiltinsAssembler::BranchIfStringPrimitiveWithNoCustomIteration(
DCHECK(isolate()->heap()->string_iterator_protector().IsPropertyCell());
Branch(
TaggedEqual(LoadObjectField(protector_cell, PropertyCell::kValueOffset),
- SmiConstant(Isolate::kProtectorValid)),
+ SmiConstant(Protectors::kProtectorValid)),
if_true, if_false);
}
+void StringBuiltinsAssembler::CopyStringCharacters(
+ Node* from_string, Node* to_string, TNode<IntPtrT> from_index,
+ TNode<IntPtrT> to_index, TNode<IntPtrT> character_count,
+ String::Encoding from_encoding, String::Encoding to_encoding) {
+ // Cannot assert IsString(from_string) and IsString(to_string) here because
+ // SubString can pass in faked sequential strings when handling external
+ // subject strings.
+ bool from_one_byte = from_encoding == String::ONE_BYTE_ENCODING;
+ bool to_one_byte = to_encoding == String::ONE_BYTE_ENCODING;
+ DCHECK_IMPLIES(to_one_byte, from_one_byte);
+ Comment("CopyStringCharacters ",
+ from_one_byte ? "ONE_BYTE_ENCODING" : "TWO_BYTE_ENCODING", " -> ",
+ to_one_byte ? "ONE_BYTE_ENCODING" : "TWO_BYTE_ENCODING");
+
+ ElementsKind from_kind = from_one_byte ? UINT8_ELEMENTS : UINT16_ELEMENTS;
+ ElementsKind to_kind = to_one_byte ? UINT8_ELEMENTS : UINT16_ELEMENTS;
+ STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
+ int header_size = SeqOneByteString::kHeaderSize - kHeapObjectTag;
+ TNode<IntPtrT> from_offset =
+ ElementOffsetFromIndex(from_index, from_kind, header_size);
+ TNode<IntPtrT> to_offset =
+ ElementOffsetFromIndex(to_index, to_kind, header_size);
+ TNode<IntPtrT> byte_count =
+ ElementOffsetFromIndex(character_count, from_kind);
+ TNode<IntPtrT> limit_offset = IntPtrAdd(from_offset, byte_count);
+
+ // Prepare the fast loop
+ MachineType type =
+ from_one_byte ? MachineType::Uint8() : MachineType::Uint16();
+ MachineRepresentation rep = to_one_byte ? MachineRepresentation::kWord8
+ : MachineRepresentation::kWord16;
+ int from_increment = 1 << ElementsKindToShiftSize(from_kind);
+ int to_increment = 1 << ElementsKindToShiftSize(to_kind);
+
+ TVARIABLE(IntPtrT, current_to_offset, to_offset);
+ VariableList vars({&current_to_offset}, zone());
+ int to_index_constant = 0, from_index_constant = 0;
+ bool index_same = (from_encoding == to_encoding) &&
+ (from_index == to_index ||
+ (ToInt32Constant(from_index, &from_index_constant) &&
+ ToInt32Constant(to_index, &to_index_constant) &&
+ from_index_constant == to_index_constant));
+ BuildFastLoop<IntPtrT>(
+ vars, from_offset, limit_offset,
+ [&](TNode<IntPtrT> offset) {
+ Node* value = Load(type, from_string, offset);
+ StoreNoWriteBarrier(rep, to_string,
+ index_same ? offset : current_to_offset.value(),
+ value);
+ if (!index_same) {
+ Increment(&current_to_offset, to_increment);
+ }
+ },
+ from_increment, IndexAdvanceMode::kPost);
+}
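
// The offset arithmetic in CopyStringCharacters above scales indices by
// the per-encoding element size (1 or 2 bytes) and walks raw byte offsets.
// Widening one-byte source data into a two-byte destination is the only
// mixed case, per the DCHECK_IMPLIES(to_one_byte, from_one_byte). A
// standalone sketch of that widening copy:
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

void CopyCharsSketch(const uint8_t* from, size_t from_index, char16_t* to,
                     size_t to_index, size_t character_count) {
  for (size_t i = 0; i < character_count; ++i) {
    // Zero-extends each one-byte unit into a two-byte unit.
    to[to_index + i] = from[from_index + i];
  }
}

int main() {
  const uint8_t src[] = {'a', 'b', 'c', 'd'};
  std::vector<char16_t> dst(4, u'\0');
  CopyCharsSketch(src, 1, dst.data(), 0, 3);
  assert(dst[0] == u'b' && dst[2] == u'd');
  return 0;
}
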
+
+// A wrapper around CopyStringCharacters which determines the correct string
+// encoding, allocates a corresponding sequential string, and then copies the
+// given character range using CopyStringCharacters.
+// |from_string| must be a sequential string.
+// 0 <= |from_index| <= |from_index| + |character_count| <= from_string.length.
+TNode<String> StringBuiltinsAssembler::AllocAndCopyStringCharacters(
+ Node* from, Node* from_instance_type, TNode<IntPtrT> from_index,
+ TNode<IntPtrT> character_count) {
+ Label end(this), one_byte_sequential(this), two_byte_sequential(this);
+ TVARIABLE(String, var_result);
+
+ Branch(IsOneByteStringInstanceType(from_instance_type), &one_byte_sequential,
+ &two_byte_sequential);
+
+ // The subject string is a sequential one-byte string.
+ BIND(&one_byte_sequential);
+ {
+ TNode<String> result = AllocateSeqOneByteString(
+ Unsigned(TruncateIntPtrToInt32(character_count)));
+ CopyStringCharacters(from, result, from_index, IntPtrConstant(0),
+ character_count, String::ONE_BYTE_ENCODING,
+ String::ONE_BYTE_ENCODING);
+ var_result = result;
+ Goto(&end);
+ }
+
+ // The subject string is a sequential two-byte string.
+ BIND(&two_byte_sequential);
+ {
+ TNode<String> result = AllocateSeqTwoByteString(
+ Unsigned(TruncateIntPtrToInt32(character_count)));
+ CopyStringCharacters(from, result, from_index, IntPtrConstant(0),
+ character_count, String::TWO_BYTE_ENCODING,
+ String::TWO_BYTE_ENCODING);
+ var_result = result;
+ Goto(&end);
+ }
+
+ BIND(&end);
+ return var_result.value();
+}
+
+TNode<String> StringBuiltinsAssembler::SubString(TNode<String> string,
+ TNode<IntPtrT> from,
+ TNode<IntPtrT> to) {
+ TVARIABLE(String, var_result);
+ ToDirectStringAssembler to_direct(state(), string);
+ Label end(this), runtime(this);
+
+ TNode<IntPtrT> const substr_length = IntPtrSub(to, from);
+ TNode<IntPtrT> const string_length = LoadStringLengthAsWord(string);
+
+ // Begin dispatching based on substring length.
+
+ Label original_string_or_invalid_length(this);
+ GotoIf(UintPtrGreaterThanOrEqual(substr_length, string_length),
+ &original_string_or_invalid_length);
+
+ // A real substring (substr_length < string_length).
+ Label empty(this);
+ GotoIf(IntPtrEqual(substr_length, IntPtrConstant(0)), &empty);
+
+ Label single_char(this);
+ GotoIf(IntPtrEqual(substr_length, IntPtrConstant(1)), &single_char);
+
+ // Deal with different string types: update the index if necessary
+ // and extract the underlying string.
+
+ TNode<String> direct_string = to_direct.TryToDirect(&runtime);
+ TNode<IntPtrT> offset = IntPtrAdd(from, to_direct.offset());
+ TNode<Int32T> const instance_type = to_direct.instance_type();
+
+ // The subject string can only be external or sequential string of either
+ // encoding at this point.
+ Label external_string(this);
+ {
+ if (FLAG_string_slices) {
+ Label next(this);
+
+ // Short slice. Copy instead of slicing.
+ GotoIf(IntPtrLessThan(substr_length,
+ IntPtrConstant(SlicedString::kMinLength)),
+ &next);
+
+ // Allocate new sliced string.
+
+ Counters* counters = isolate()->counters();
+ IncrementCounter(counters->sub_string_native(), 1);
+
+ Label one_byte_slice(this), two_byte_slice(this);
+ Branch(IsOneByteStringInstanceType(to_direct.instance_type()),
+ &one_byte_slice, &two_byte_slice);
+
+ BIND(&one_byte_slice);
+ {
+ var_result = AllocateSlicedOneByteString(
+ Unsigned(TruncateIntPtrToInt32(substr_length)), direct_string,
+ SmiTag(offset));
+ Goto(&end);
+ }
+
+ BIND(&two_byte_slice);
+ {
+ var_result = AllocateSlicedTwoByteString(
+ Unsigned(TruncateIntPtrToInt32(substr_length)), direct_string,
+ SmiTag(offset));
+ Goto(&end);
+ }
+
+ BIND(&next);
+ }
+
+ // The subject string can only be external or sequential string of either
+ // encoding at this point.
+ GotoIf(to_direct.is_external(), &external_string);
+
+ var_result = AllocAndCopyStringCharacters(direct_string, instance_type,
+ offset, substr_length);
+
+ Counters* counters = isolate()->counters();
+ IncrementCounter(counters->sub_string_native(), 1);
+
+ Goto(&end);
+ }
+
+ // Handle external string.
+ BIND(&external_string);
+ {
+ TNode<RawPtrT> const fake_sequential_string =
+ to_direct.PointerToString(&runtime);
+
+ var_result = AllocAndCopyStringCharacters(
+ fake_sequential_string, instance_type, offset, substr_length);
+
+ Counters* counters = isolate()->counters();
+ IncrementCounter(counters->sub_string_native(), 1);
+
+ Goto(&end);
+ }
+
+ BIND(&empty);
+ {
+ var_result = EmptyStringConstant();
+ Goto(&end);
+ }
+
+ // Substrings of length 1 are generated through CharCodeAt and FromCharCode.
+ BIND(&single_char);
+ {
+ TNode<Int32T> char_code = StringCharCodeAt(string, from);
+ var_result = StringFromSingleCharCode(char_code);
+ Goto(&end);
+ }
+
+ BIND(&original_string_or_invalid_length);
+ {
+ CSA_ASSERT(this, IntPtrEqual(substr_length, string_length));
+
+ // Equal length - check if {from, to} == {0, str.length}.
+ GotoIf(UintPtrGreaterThan(from, IntPtrConstant(0)), &runtime);
+
+ // Return the original string (substr_length == string_length).
+
+ Counters* counters = isolate()->counters();
+ IncrementCounter(counters->sub_string_native(), 1);
+
+ var_result = string;
+ Goto(&end);
+ }
+
+ // Fall back to a runtime call.
+ BIND(&runtime);
+ {
+ var_result =
+ CAST(CallRuntime(Runtime::kStringSubstring, NoContextConstant(), string,
+ SmiTag(from), SmiTag(to)));
+ Goto(&end);
+ }
+
+ BIND(&end);
+ return var_result.value();
+}
+
} // namespace internal
} // namespace v8
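
A note on the fast path above: SubString dispatches purely on the requested
length before it ever inspects string representation. The tiers translate to
ordinary code roughly as follows; this is a simplified host-side sketch using
std::string in place of CSA nodes, kMinSliceLength is an assumed stand-in for
SlicedString::kMinLength, and the external-string and runtime-fallback paths
are elided:

    #include <cassert>
    #include <string>

    std::string SubStringSketch(const std::string& s, size_t from, size_t to) {
      assert(from <= to && to <= s.size());
      const size_t substr_length = to - from;
      constexpr size_t kMinSliceLength = 13;  // assumed stand-in
      if (substr_length == s.size()) return s;  // reuse the original string
      if (substr_length == 0) return std::string();  // empty-string constant
      if (substr_length == 1) return std::string(1, s[from]);  // 1-char cache
      if (substr_length >= kMinSliceLength) {
        // V8 allocates a SlicedString view here; std::string has no cheap
        // view type, so this sketch copies instead.
        return s.substr(from, substr_length);
      }
      // Short result: copying beats the bookkeeping of a slice.
      return s.substr(from, substr_length);
    }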
diff --git a/deps/v8/src/builtins/builtins-string-gen.h b/deps/v8/src/builtins/builtins-string-gen.h
index 64d5a77615..0dfcf88a8c 100644
--- a/deps/v8/src/builtins/builtins-string-gen.h
+++ b/deps/v8/src/builtins/builtins-string-gen.h
@@ -33,6 +33,25 @@ class StringBuiltinsAssembler : public CodeStubAssembler {
SloppyTNode<IntPtrT> index,
UnicodeEncoding encoding);
+ TNode<String> StringFromSingleUTF16EncodedCodePoint(TNode<Int32T> codepoint);
+
+  // Returns a new string object holding the substring in the half-open range
+  // [from, to) of |string|.
+ TNode<String> SubString(TNode<String> string, TNode<IntPtrT> from,
+ TNode<IntPtrT> to);
+
+  // Copies |character_count| elements from |from_string| to |to_string|,
+  // starting at the |from_index|'th character. |from_string| and |to_string|
+  // can each be either one-byte or two-byte, but if |from_string| is
+  // two-byte then |to_string| must be two-byte as well.
+  // |from_index|, |to_index| and |character_count| must be intptr_ts such
+  // that 0 <= |from_index| <= |from_index| + |character_count| <=
+  // from_string.length and 0 <= |to_index| <= |to_index| + |character_count|
+  // <= to_string.length.
+ V8_EXPORT_PRIVATE void CopyStringCharacters(
+ Node* from_string, Node* to_string, TNode<IntPtrT> from_index,
+ TNode<IntPtrT> to_index, TNode<IntPtrT> character_count,
+ String::Encoding from_encoding, String::Encoding to_encoding);
+
protected:
void StringEqual_Loop(Node* lhs, Node* lhs_instance_type,
MachineType lhs_type, Node* rhs,
@@ -51,8 +70,8 @@ class StringBuiltinsAssembler : public CodeStubAssembler {
Node* const search_ptr, Node* const search_length,
Node* const start_position);
- TNode<IntPtrT> PointerToStringDataAtIndex(Node* const string_data,
- Node* const index,
+ TNode<RawPtrT> PointerToStringDataAtIndex(TNode<RawPtrT> string_data,
+ TNode<IntPtrT> index,
String::Encoding encoding);
// substr and slice have a common way of handling the {start} argument.
@@ -82,6 +101,38 @@ class StringBuiltinsAssembler : public CodeStubAssembler {
return SmiLessThan(value, SmiConstant(0));
}
+  // Allocate an appropriate one- or two-byte ConsString with the first and
+  // second parts specified by |left| and |right|.
+  TNode<String> AllocateConsString(TNode<Uint32T> length, TNode<String> left,
+                                   TNode<String> right);
+
+  TNode<String> StringAdd(Node* context, TNode<String> left,
+                          TNode<String> right);
+
+  // Check if |string| is an indirect (thin or flat cons) string type that can
+  // be dereferenced by DerefIndirectString.
+  void BranchIfCanDerefIndirectString(TNode<String> string,
+                                      TNode<Int32T> instance_type,
+                                      Label* can_deref, Label* cannot_deref);
+  // Unpack an indirect (thin or flat cons) string type.
+ void DerefIndirectString(TVariable<String>* var_string,
+ TNode<Int32T> instance_type);
+ // Check if |var_string| has an indirect (thin or flat cons) string type, and
+ // unpack it if so.
+ void MaybeDerefIndirectString(TVariable<String>* var_string,
+ TNode<Int32T> instance_type, Label* did_deref,
+ Label* cannot_deref);
+ // Check if |var_left| or |var_right| has an indirect (thin or flat cons)
+ // string type, and unpack it/them if so. Fall through if nothing was done.
+ void MaybeDerefIndirectStrings(TVariable<String>* var_left,
+ TNode<Int32T> left_instance_type,
+ TVariable<String>* var_right,
+ TNode<Int32T> right_instance_type,
+ Label* did_something);
+ TNode<String> DerefIndirectString(TNode<String> string,
+ TNode<Int32T> instance_type,
+ Label* cannot_deref);
+
// Implements boilerplate logic for {match, split, replace, search} of the
// form:
//
@@ -103,6 +154,12 @@ class StringBuiltinsAssembler : public CodeStubAssembler {
Handle<Symbol> symbol,
DescriptorIndexNameValue additional_property_to_check,
const NodeFunction0& regexp_call, const NodeFunction1& generic_call);
+
+ private:
+ TNode<String> AllocAndCopyStringCharacters(Node* from,
+ Node* from_instance_type,
+ TNode<IntPtrT> from_index,
+ TNode<IntPtrT> character_count);
};
class StringIncludesIndexOfAssembler : public StringBuiltinsAssembler {
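
The bounds contract documented on CopyStringCharacters above can be restated
as a freestanding helper. This is an illustrative sketch only: the interval
invariants become asserts, the one-byte/two-byte rule becomes a static_assert,
and the CSA version's encoding and write-barrier details are elided:

    #include <cassert>
    #include <cstddef>
    #include <vector>

    template <typename From, typename To>
    void CopyCharsSketch(const std::vector<From>& from_string,
                         std::vector<To>& to_string, size_t from_index,
                         size_t to_index, size_t character_count) {
      static_assert(sizeof(To) >= sizeof(From),
                    "a two-byte source needs a two-byte destination");
      assert(from_index + character_count <= from_string.size());
      assert(to_index + character_count <= to_string.size());
      for (size_t i = 0; i < character_count; ++i) {
        to_string[to_index + i] =
            static_cast<To>(from_string[from_index + i]);
      }
    }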
diff --git a/deps/v8/src/builtins/builtins-string.cc b/deps/v8/src/builtins/builtins-string.cc
index 04a96c7e46..ba2346d661 100644
--- a/deps/v8/src/builtins/builtins-string.cc
+++ b/deps/v8/src/builtins/builtins-string.cc
@@ -136,20 +136,21 @@ BUILTIN(StringPrototypeLocaleCompare) {
HandleScope handle_scope(isolate);
isolate->CountUsage(v8::Isolate::UseCounterFeature::kStringLocaleCompare);
+ const char* method = "String.prototype.localeCompare";
#ifdef V8_INTL_SUPPORT
- TO_THIS_STRING(str1, "String.prototype.localeCompare");
+ TO_THIS_STRING(str1, method);
Handle<String> str2;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, str2, Object::ToString(isolate, args.atOrUndefined(isolate, 1)));
RETURN_RESULT_OR_FAILURE(
- isolate, Intl::StringLocaleCompare(isolate, str1, str2,
- args.atOrUndefined(isolate, 2),
- args.atOrUndefined(isolate, 3)));
+ isolate, Intl::StringLocaleCompare(
+ isolate, str1, str2, args.atOrUndefined(isolate, 2),
+ args.atOrUndefined(isolate, 3), method));
#else
DCHECK_EQ(2, args.length());
- TO_THIS_STRING(str1, "String.prototype.localeCompare");
+ TO_THIS_STRING(str1, method);
Handle<String> str2;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, str2,
Object::ToString(isolate, args.at(1)));
diff --git a/deps/v8/src/builtins/builtins-typed-array-gen.cc b/deps/v8/src/builtins/builtins-typed-array-gen.cc
index 448ff66603..c69034e813 100644
--- a/deps/v8/src/builtins/builtins-typed-array-gen.cc
+++ b/deps/v8/src/builtins/builtins-typed-array-gen.cc
@@ -8,6 +8,7 @@
#include "src/builtins/builtins-utils-gen.h"
#include "src/builtins/builtins.h"
#include "src/builtins/growable-fixed-array-gen.h"
+#include "src/execution/protectors.h"
#include "src/handles/handles-inl.h"
#include "src/heap/factory-inl.h"
@@ -15,8 +16,6 @@ namespace v8 {
namespace internal {
using compiler::Node;
-template <class T>
-using TNode = compiler::TNode<T>;
// -----------------------------------------------------------------------------
// ES6 section 22.2 TypedArray Objects
@@ -117,8 +116,8 @@ TF_BUILTIN(TypedArrayConstructor, TypedArrayBuiltinsAssembler) {
// ES6 #sec-get-%typedarray%.prototype.bytelength
TF_BUILTIN(TypedArrayPrototypeByteLength, TypedArrayBuiltinsAssembler) {
const char* const kMethodName = "get TypedArray.prototype.byteLength";
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
// Check if the {receiver} is actually a JSTypedArray.
ThrowIfNotInstanceType(context, receiver, JS_TYPED_ARRAY_TYPE, kMethodName);
@@ -135,8 +134,8 @@ TF_BUILTIN(TypedArrayPrototypeByteLength, TypedArrayBuiltinsAssembler) {
// ES6 #sec-get-%typedarray%.prototype.byteoffset
TF_BUILTIN(TypedArrayPrototypeByteOffset, TypedArrayBuiltinsAssembler) {
const char* const kMethodName = "get TypedArray.prototype.byteOffset";
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
// Check if the {receiver} is actually a JSTypedArray.
ThrowIfNotInstanceType(context, receiver, JS_TYPED_ARRAY_TYPE, kMethodName);
@@ -153,8 +152,8 @@ TF_BUILTIN(TypedArrayPrototypeByteOffset, TypedArrayBuiltinsAssembler) {
// ES6 #sec-get-%typedarray%.prototype.length
TF_BUILTIN(TypedArrayPrototypeLength, TypedArrayBuiltinsAssembler) {
const char* const kMethodName = "get TypedArray.prototype.length";
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
// Check if the {receiver} is actually a JSTypedArray.
ThrowIfNotInstanceType(context, receiver, JS_TYPED_ARRAY_TYPE, kMethodName);
@@ -318,8 +317,8 @@ void TypedArrayBuiltinsAssembler::SetTypedArraySource(
// Grab pointers and byte lengths we need later on.
- TNode<RawPtrT> target_data_ptr = LoadJSTypedArrayBackingStore(target);
- TNode<RawPtrT> source_data_ptr = LoadJSTypedArrayBackingStore(source);
+ TNode<RawPtrT> target_data_ptr = LoadJSTypedArrayDataPtr(target);
+ TNode<RawPtrT> source_data_ptr = LoadJSTypedArrayDataPtr(source);
TNode<Int32T> source_el_kind = LoadElementsKind(source);
TNode<Int32T> target_el_kind = LoadElementsKind(target);
@@ -538,13 +537,83 @@ TNode<BoolT> TypedArrayBuiltinsAssembler::IsSharedArrayBuffer(
return IsSetWord32<JSArrayBuffer::IsSharedBit>(bitfield);
}
+void TypedArrayBuiltinsAssembler::SetJSTypedArrayOnHeapDataPtr(
+ TNode<JSTypedArray> holder, TNode<ByteArray> base, TNode<UintPtrT> offset) {
+ offset = UintPtrAdd(UintPtrConstant(ByteArray::kHeaderSize - kHeapObjectTag),
+ offset);
+ if (COMPRESS_POINTERS_BOOL) {
+ TNode<IntPtrT> full_base = Signed(BitcastTaggedToWord(base));
+ TNode<Int32T> compressed_base = TruncateIntPtrToInt32(full_base);
+ // TODO(v8:9706): Add a way to directly use kRootRegister value.
+ TNode<IntPtrT> isolate_root =
+ IntPtrSub(full_base, ChangeInt32ToIntPtr(compressed_base));
+ // Add JSTypedArray::ExternalPointerCompensationForOnHeapArray() to offset.
+ DCHECK_EQ(
+ isolate()->isolate_root(),
+ JSTypedArray::ExternalPointerCompensationForOnHeapArray(isolate()));
+ // See JSTypedArray::SetOnHeapDataPtr() for details.
+ offset = Unsigned(IntPtrAdd(offset, isolate_root));
+ }
+
+ StoreObjectField(holder, JSTypedArray::kBasePointerOffset, base);
+ StoreObjectFieldNoWriteBarrier<UintPtrT>(
+ holder, JSTypedArray::kExternalPointerOffset, offset);
+}
+
+void TypedArrayBuiltinsAssembler::SetJSTypedArrayOffHeapDataPtr(
+ TNode<JSTypedArray> holder, TNode<RawPtrT> base, TNode<UintPtrT> offset) {
+ StoreObjectFieldNoWriteBarrier(holder, JSTypedArray::kBasePointerOffset,
+ SmiConstant(0));
+
+ base = RawPtrAdd(base, Signed(offset));
+ StoreObjectFieldNoWriteBarrier<RawPtrT>(
+ holder, JSTypedArray::kExternalPointerOffset, base);
+}
+
+void TypedArrayBuiltinsAssembler::StoreJSTypedArrayElementFromTagged(
+ TNode<Context> context, TNode<JSTypedArray> typed_array,
+ TNode<Smi> index_node, TNode<Object> value, ElementsKind elements_kind) {
+ TNode<RawPtrT> data_ptr = LoadJSTypedArrayDataPtr(typed_array);
+ switch (elements_kind) {
+ case UINT8_ELEMENTS:
+ case UINT8_CLAMPED_ELEMENTS:
+ case INT8_ELEMENTS:
+ case UINT16_ELEMENTS:
+ case INT16_ELEMENTS:
+ StoreElement(data_ptr, elements_kind, index_node, SmiToInt32(CAST(value)),
+ SMI_PARAMETERS);
+ break;
+ case UINT32_ELEMENTS:
+ case INT32_ELEMENTS:
+ StoreElement(data_ptr, elements_kind, index_node,
+ TruncateTaggedToWord32(context, value), SMI_PARAMETERS);
+ break;
+ case FLOAT32_ELEMENTS:
+ StoreElement(data_ptr, elements_kind, index_node,
+ TruncateFloat64ToFloat32(LoadHeapNumberValue(CAST(value))),
+ SMI_PARAMETERS);
+ break;
+ case FLOAT64_ELEMENTS:
+ StoreElement(data_ptr, elements_kind, index_node,
+ LoadHeapNumberValue(CAST(value)), SMI_PARAMETERS);
+ break;
+ case BIGUINT64_ELEMENTS:
+ case BIGINT64_ELEMENTS:
+ StoreElement(data_ptr, elements_kind, index_node,
+ UncheckedCast<BigInt>(value), SMI_PARAMETERS);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
// ES #sec-get-%typedarray%.prototype.set
TF_BUILTIN(TypedArrayPrototypeSet, TypedArrayBuiltinsAssembler) {
const char* method_name = "%TypedArray%.prototype.set";
+ TNode<Int32T> argc =
+ UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- CodeStubArguments args(
- this,
- ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount)));
+ CodeStubArguments args(this, argc);
Label if_source_is_typed_array(this), if_source_is_fast_jsarray(this),
if_offset_is_out_of_bounds(this, Label::kDeferred),
@@ -618,7 +687,7 @@ TF_BUILTIN(TypedArrayPrototypeSet, TypedArrayBuiltinsAssembler) {
// ES #sec-get-%typedarray%.prototype-@@tostringtag
TF_BUILTIN(TypedArrayPrototypeToStringTag, TypedArrayBuiltinsAssembler) {
- Node* receiver = Parameter(Descriptor::kReceiver);
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Label if_receiverisheapobject(this), return_undefined(this);
Branch(TaggedIsSmi(receiver), &return_undefined, &if_receiverisheapobject);
@@ -645,12 +714,12 @@ TF_BUILTIN(TypedArrayPrototypeToStringTag, TypedArrayBuiltinsAssembler) {
#undef TYPED_ARRAY_CASE
};
- // We offset the dispatch by FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND, so
- // that this can be turned into a non-sparse table switch for ideal
- // performance.
+ // We offset the dispatch by FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND, so that
+ // this can be turned into a non-sparse table switch for ideal performance.
BIND(&if_receiverisheapobject);
+ TNode<HeapObject> receiver_heap_object = CAST(receiver);
TNode<Int32T> elements_kind =
- Int32Sub(LoadElementsKind(receiver),
+ Int32Sub(LoadElementsKind(receiver_heap_object),
Int32Constant(FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND));
Switch(elements_kind, &return_undefined, elements_kinds, elements_kind_labels,
kTypedElementsKindCount);
@@ -710,8 +779,7 @@ TF_BUILTIN(TypedArrayOf, TypedArrayBuiltinsAssembler) {
TNode<IntPtrT> length = ChangeInt32ToIntPtr(
UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)));
// 2. Let items be the List of arguments passed to this function.
- CodeStubArguments args(this, length, nullptr, INTPTR_PARAMETERS,
- CodeStubArguments::ReceiverMode::kHasReceiver);
+ CodeStubArguments args(this, length);
Label if_not_constructor(this, Label::kDeferred),
if_detached(this, Label::kDeferred);
@@ -737,10 +805,10 @@ TF_BUILTIN(TypedArrayOf, TypedArrayBuiltinsAssembler) {
DispatchTypedArrayByElementsKind(
elements_kind,
[&](ElementsKind kind, int size, int typed_array_fun_index) {
- BuildFastLoop(
+ BuildFastLoop<IntPtrT>(
IntPtrConstant(0), length,
- [&](Node* index) {
- TNode<Object> item = args.AtIndex(index, INTPTR_PARAMETERS);
+ [&](TNode<IntPtrT> index) {
+ TNode<Object> item = args.AtIndex(index);
Node* value =
PrepareValueForWriteToTypedArray(item, kind, context);
@@ -752,12 +820,11 @@ TF_BUILTIN(TypedArrayOf, TypedArrayBuiltinsAssembler) {
// GC may move backing store in ToNumber, thus load backing
// store every time in this loop.
- TNode<RawPtrT> backing_store =
- LoadJSTypedArrayBackingStore(new_typed_array);
- StoreElement(backing_store, kind, index, value,
- INTPTR_PARAMETERS);
+ TNode<RawPtrT> data_ptr =
+ LoadJSTypedArrayDataPtr(new_typed_array);
+ StoreElement(data_ptr, kind, index, value, INTPTR_PARAMETERS);
},
- 1, ParameterMode::INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
+ 1, IndexAdvanceMode::kPost);
});
// 8. Return newObj.
@@ -773,6 +840,8 @@ TF_BUILTIN(TypedArrayOf, TypedArrayBuiltinsAssembler) {
// ES6 #sec-%typedarray%.from
TF_BUILTIN(TypedArrayFrom, TypedArrayBuiltinsAssembler) {
+ TNode<Int32T> argc =
+ UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
Label check_iterator(this), from_array_like(this), fast_path(this),
@@ -782,9 +851,7 @@ TF_BUILTIN(TypedArrayFrom, TypedArrayBuiltinsAssembler) {
if_iterator_fn_not_callable(this, Label::kDeferred),
if_detached(this, Label::kDeferred);
- CodeStubArguments args(
- this,
- ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount)));
+ CodeStubArguments args(this, argc);
TNode<Object> source = args.GetOptionalArgumentValue(0);
// 5. If thisArg is present, let T be thisArg; else let T be undefined.
@@ -866,7 +933,7 @@ TF_BUILTIN(TypedArrayFrom, TypedArrayBuiltinsAssembler) {
TNode<PropertyCell> protector_cell = ArrayIteratorProtectorConstant();
GotoIfNot(
TaggedEqual(LoadObjectField(protector_cell, PropertyCell::kValueOffset),
- SmiConstant(Isolate::kProtectorValid)),
+ SmiConstant(Protectors::kProtectorValid)),
&check_iterator);
// Source is a TypedArray with unmodified iterator behavior. Use the
@@ -950,15 +1017,15 @@ TF_BUILTIN(TypedArrayFrom, TypedArrayBuiltinsAssembler) {
TNode<Int32T> elements_kind = LoadElementsKind(target_obj.value());
// 7e/13 : Copy the elements
- BuildFastLoop(
+ BuildFastLoop<Smi>(
SmiConstant(0), final_length.value(),
- [&](Node* index) {
+ [&](TNode<Smi> index) {
TNode<Object> const k_value =
GetProperty(context, final_source.value(), index);
TNode<Object> const mapped_value =
- CAST(CallJS(CodeFactory::Call(isolate()), context, map_fn, this_arg,
- k_value, index));
+ CallJS(CodeFactory::Call(isolate()), context, map_fn, this_arg,
+ k_value, index);
DispatchTypedArrayByElementsKind(
elements_kind,
@@ -974,13 +1041,12 @@ TF_BUILTIN(TypedArrayFrom, TypedArrayBuiltinsAssembler) {
// GC may move backing store in map_fn, thus load backing
// store in each iteration of this loop.
- TNode<RawPtrT> backing_store =
- LoadJSTypedArrayBackingStore(target_obj.value());
- StoreElement(backing_store, kind, index, final_value,
- SMI_PARAMETERS);
+ TNode<RawPtrT> data_ptr =
+ LoadJSTypedArrayDataPtr(target_obj.value());
+ StoreElement(data_ptr, kind, index, final_value, SMI_PARAMETERS);
});
},
- 1, ParameterMode::SMI_PARAMETERS, IndexAdvanceMode::kPost);
+ 1, IndexAdvanceMode::kPost);
args.PopAndReturn(target_obj.value());
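
The arithmetic in SetJSTypedArrayOnHeapDataPtr above stores
isolate_root + offset in the external-pointer field, so that re-adding the
32-bit compressed base pointer reconstructs the full data address. A
simplified sketch of that round trip, ignoring sign-extension details:

    #include <cstdint>

    // Returns the address the typed array's data pointer resolves to.
    uint64_t OnHeapDataPtrSketch(uint64_t full_base, uint64_t offset) {
      const uint32_t compressed_base = static_cast<uint32_t>(full_base);
      const uint64_t isolate_root = full_base - compressed_base;
      // What SetJSTypedArrayOnHeapDataPtr stores in the external-pointer
      // field under pointer compression:
      const uint64_t external_pointer = offset + isolate_root;
      // What a later data-pointer load computes by adding the compressed
      // base back:
      return external_pointer + compressed_base;  // == full_base + offset
    }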
diff --git a/deps/v8/src/builtins/builtins-typed-array-gen.h b/deps/v8/src/builtins/builtins-typed-array-gen.h
index d637bc9c6b..10a2cb608c 100644
--- a/deps/v8/src/builtins/builtins-typed-array-gen.h
+++ b/deps/v8/src/builtins/builtins-typed-array-gen.h
@@ -111,6 +111,18 @@ class TypedArrayBuiltinsAssembler : public CodeStubAssembler {
TNode<Word32T> elements_kind, const TypedArraySwitchCase& case_function);
TNode<BoolT> IsSharedArrayBuffer(TNode<JSArrayBuffer> buffer);
+
+ void SetJSTypedArrayOnHeapDataPtr(TNode<JSTypedArray> holder,
+ TNode<ByteArray> base,
+ TNode<UintPtrT> offset);
+ void SetJSTypedArrayOffHeapDataPtr(TNode<JSTypedArray> holder,
+ TNode<RawPtrT> base,
+ TNode<UintPtrT> offset);
+ void StoreJSTypedArrayElementFromTagged(TNode<Context> context,
+ TNode<JSTypedArray> typed_array,
+ TNode<Smi> index_node,
+ TNode<Object> value,
+ ElementsKind elements_kind);
};
} // namespace internal
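
StoreJSTypedArrayElementFromTagged, declared above, boils down to a switch
from elements kind to store width once the tagged value has been converted.
An illustrative plain-C++ analogue; clamped and BigInt kinds as well as the
per-kind tagged-value conversions are elided, and the value is assumed to be
in range for the kind:

    #include <cstddef>
    #include <cstdint>

    enum class Kind { kInt8, kUint8, kInt16, kUint16, kInt32, kUint32,
                      kFloat32, kFloat64 };

    void StoreElementSketch(void* data_ptr, Kind kind, size_t index,
                            double value) {
      switch (kind) {
        case Kind::kInt8:
        case Kind::kUint8:
          static_cast<uint8_t*>(data_ptr)[index] =
              static_cast<uint8_t>(value);
          break;
        case Kind::kInt16:
        case Kind::kUint16:
          static_cast<uint16_t*>(data_ptr)[index] =
              static_cast<uint16_t>(value);
          break;
        case Kind::kInt32:
        case Kind::kUint32:
          static_cast<uint32_t*>(data_ptr)[index] =
              static_cast<uint32_t>(value);
          break;
        case Kind::kFloat32:
          static_cast<float*>(data_ptr)[index] = static_cast<float>(value);
          break;
        case Kind::kFloat64:
          static_cast<double*>(data_ptr)[index] = value;
          break;
      }
    }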
diff --git a/deps/v8/src/builtins/builtins-utils-inl.h b/deps/v8/src/builtins/builtins-utils-inl.h
index c9d15f09dd..c8c9a2522c 100644
--- a/deps/v8/src/builtins/builtins-utils-inl.h
+++ b/deps/v8/src/builtins/builtins-utils-inl.h
@@ -12,20 +12,21 @@
namespace v8 {
namespace internal {
-Handle<Object> BuiltinArguments::atOrUndefined(Isolate* isolate, int index) {
+Handle<Object> BuiltinArguments::atOrUndefined(Isolate* isolate,
+ int index) const {
if (index >= length()) {
return isolate->factory()->undefined_value();
}
return at<Object>(index);
}
-Handle<Object> BuiltinArguments::receiver() { return at<Object>(0); }
+Handle<Object> BuiltinArguments::receiver() const { return at<Object>(0); }
-Handle<JSFunction> BuiltinArguments::target() {
+Handle<JSFunction> BuiltinArguments::target() const {
return Arguments::at<JSFunction>(Arguments::length() - 1 - kTargetOffset);
}
-Handle<HeapObject> BuiltinArguments::new_target() {
+Handle<HeapObject> BuiltinArguments::new_target() const {
return Arguments::at<HeapObject>(Arguments::length() - 1 - kNewTargetOffset);
}
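
target() and new_target() above index from the end of the argument vector,
because the extra builtin arguments are appended after the JS arguments. A
sketch of that addressing; the offset values here are assumed for
illustration (the real constants live in BuiltinArguments):

    #include <cassert>
    #include <vector>

    constexpr int kNewTargetOffset = 0;  // assumed
    constexpr int kTargetOffset = 1;     // assumed

    template <typename T>
    const T& AtFromEnd(const std::vector<T>& args, int offset) {
      assert(offset >= 0 && offset < static_cast<int>(args.size()));
      return args[args.size() - 1 - offset];
    }

With this shape, target() is AtFromEnd(args, kTargetOffset) and new_target()
is AtFromEnd(args, kNewTargetOffset).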
diff --git a/deps/v8/src/builtins/builtins-utils.h b/deps/v8/src/builtins/builtins-utils.h
index 822f9df6ec..601dfd5813 100644
--- a/deps/v8/src/builtins/builtins-utils.h
+++ b/deps/v8/src/builtins/builtins-utils.h
@@ -23,13 +23,13 @@ class BuiltinArguments : public Arguments {
DCHECK_LE(1, this->length());
}
- Object operator[](int index) {
+ Object operator[](int index) const {
DCHECK_LT(index, length());
return Arguments::operator[](index);
}
template <class S = Object>
- Handle<S> at(int index) {
+ Handle<S> at(int index) const {
DCHECK_LT(index, length());
return Arguments::at<S>(index);
}
@@ -42,10 +42,10 @@ class BuiltinArguments : public Arguments {
static constexpr int kNumExtraArgs = 4;
static constexpr int kNumExtraArgsWithReceiver = 5;
- inline Handle<Object> atOrUndefined(Isolate* isolate, int index);
- inline Handle<Object> receiver();
- inline Handle<JSFunction> target();
- inline Handle<HeapObject> new_target();
+ inline Handle<Object> atOrUndefined(Isolate* isolate, int index) const;
+ inline Handle<Object> receiver() const;
+ inline Handle<JSFunction> target() const;
+ inline Handle<HeapObject> new_target() const;
// Gets the total number of arguments including the receiver (but
// excluding extra arguments).
@@ -77,7 +77,7 @@ class BuiltinArguments : public Arguments {
RuntimeCallCounterId::kBuiltin_##name); \
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.runtime"), \
"V8.Builtin_" #name); \
- return Builtin_Impl_##name(args, isolate).ptr(); \
+ return CONVERT_OBJECT(Builtin_Impl_##name(args, isolate)); \
} \
\
V8_WARN_UNUSED_RESULT Address Builtin_##name( \
@@ -87,7 +87,7 @@ class BuiltinArguments : public Arguments {
return Builtin_Impl_Stats_##name(args_length, args_object, isolate); \
} \
BuiltinArguments args(args_length, args_object); \
- return Builtin_Impl_##name(args, isolate).ptr(); \
+ return CONVERT_OBJECT(Builtin_Impl_##name(args, isolate)); \
} \
\
V8_WARN_UNUSED_RESULT static Object Builtin_Impl_##name( \
diff --git a/deps/v8/src/builtins/builtins.cc b/deps/v8/src/builtins/builtins.cc
index e5829dd1b3..e0750a732c 100644
--- a/deps/v8/src/builtins/builtins.cc
+++ b/deps/v8/src/builtins/builtins.cc
@@ -88,14 +88,16 @@ const BuiltinMetadata builtin_metadata[] = {BUILTIN_LIST(
} // namespace
BailoutId Builtins::GetContinuationBailoutId(Name name) {
- DCHECK(Builtins::KindOf(name) == TFJ || Builtins::KindOf(name) == TFC);
+ DCHECK(Builtins::KindOf(name) == TFJ || Builtins::KindOf(name) == TFC ||
+ Builtins::KindOf(name) == TFS);
return BailoutId(BailoutId::kFirstBuiltinContinuationId + name);
}
Builtins::Name Builtins::GetBuiltinFromBailoutId(BailoutId id) {
int builtin_index = id.ToInt() - BailoutId::kFirstBuiltinContinuationId;
DCHECK(Builtins::KindOf(builtin_index) == TFJ ||
- Builtins::KindOf(builtin_index) == TFC);
+ Builtins::KindOf(builtin_index) == TFC ||
+ Builtins::KindOf(builtin_index) == TFS);
return static_cast<Name>(builtin_index);
}
@@ -204,7 +206,7 @@ void Builtins::PrintBuiltinCode() {
CStrVector(FLAG_print_builtin_code_filter))) {
CodeTracer::Scope trace_scope(isolate_->GetCodeTracer());
OFStream os(trace_scope.file());
- code->Disassemble(builtin_name, os);
+ code->Disassemble(builtin_name, os, isolate_);
os << "\n";
}
}
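
GetContinuationBailoutId and GetBuiltinFromBailoutId above are inverses by
construction: the builtin index is simply biased by
kFirstBuiltinContinuationId. A sketch with plain ints and an illustrative
constant value:

    constexpr int kFirstBuiltinContinuationId = 1 << 16;  // illustrative

    int ContinuationBailoutId(int builtin_index) {
      return kFirstBuiltinContinuationId + builtin_index;
    }

    int BuiltinFromBailoutId(int bailout_id) {
      return bailout_id - kFirstBuiltinContinuationId;
    }

    // Round trip: BuiltinFromBailoutId(ContinuationBailoutId(n)) == n.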
diff --git a/deps/v8/src/builtins/frames.tq b/deps/v8/src/builtins/frames.tq
index 7467381690..79f2a0ae01 100644
--- a/deps/v8/src/builtins/frames.tq
+++ b/deps/v8/src/builtins/frames.tq
@@ -24,8 +24,8 @@ Cast<FrameType>(o: Object): FrameType
labels CastError {
if (TaggedIsNotSmi(o)) goto CastError;
assert(
- (Convert<uintptr>(BitcastTaggedToWord(o)) >>> kSmiTagSize) <
- kFrameTypeCount);
+ Convert<int32>(BitcastTaggedToWordForTagAndSmiBits(o)) <
+ Convert<int32>(kFrameTypeCount << kSmiTagSize));
return %RawDownCast<FrameType>(o);
}
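
The rewritten assert above compares raw Smi bits against an equally shifted
bound instead of untagging first. A sketch of why the two checks agree for
non-negative frame types, assuming a zero Smi tag and illustrative constants:

    #include <cstdint>

    constexpr int kSmiTagSize = 1;          // assumed tag width
    constexpr int32_t kFrameTypeCount = 6;  // illustrative only

    bool FrameTypeInRange(int32_t raw_smi_bits) {
      // A Smi stores value << kSmiTagSize with a zero tag bit, so comparing
      // the raw bits against (kFrameTypeCount << kSmiTagSize) bounds the
      // value without shifting it back out.
      return raw_smi_bits < (kFrameTypeCount << kSmiTagSize);
    }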
diff --git a/deps/v8/src/builtins/growable-fixed-array-gen.h b/deps/v8/src/builtins/growable-fixed-array-gen.h
index 42f2afb281..8f72429a97 100644
--- a/deps/v8/src/builtins/growable-fixed-array-gen.h
+++ b/deps/v8/src/builtins/growable-fixed-array-gen.h
@@ -10,8 +10,6 @@
namespace v8 {
namespace internal {
-template <class T>
-using TNode = compiler::TNode<T>;
// Utility class implementing a growable fixed array through CSA.
class GrowableFixedArray : public CodeStubAssembler {
diff --git a/deps/v8/src/builtins/ia32/builtins-ia32.cc b/deps/v8/src/builtins/ia32/builtins-ia32.cc
index feabac3b66..0885b6e633 100644
--- a/deps/v8/src/builtins/ia32/builtins-ia32.cc
+++ b/deps/v8/src/builtins/ia32/builtins-ia32.cc
@@ -5,7 +5,7 @@
#if V8_TARGET_ARCH_IA32
#include "src/api/api-arguments.h"
-#include "src/base/adapters.h"
+#include "src/base/iterator.h"
#include "src/codegen/code-factory.h"
#include "src/debug/debug.h"
#include "src/deoptimizer/deoptimizer.h"
@@ -785,103 +785,75 @@ static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
__ bind(&no_match);
}
-static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
- Register scratch) {
+static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
+ Register optimized_code_entry) {
// ----------- S t a t e -------------
// -- edx : new target (preserved for callee if needed, and caller)
// -- edi : target function (preserved for callee if needed, and caller)
- // -- ecx : feedback vector (also used as scratch, value is not preserved)
// -----------------------------------
- DCHECK(!AreAliased(edx, edi, scratch));
-
- Label optimized_code_slot_is_weak_ref, fallthrough;
+ DCHECK(!AreAliased(edx, edi, optimized_code_entry));
Register closure = edi;
- // Scratch contains feedback_vector.
- Register feedback_vector = scratch;
- // Load the optimized code from the feedback vector and re-use the register.
- Register optimized_code_entry = scratch;
- __ mov(optimized_code_entry,
- FieldOperand(feedback_vector,
- FeedbackVector::kOptimizedCodeWeakOrSmiOffset));
-
- // Check if the code entry is a Smi. If yes, we interpret it as an
- // optimisation marker. Otherwise, interpret it as a weak reference to a code
- // object.
- __ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_weak_ref);
+ __ push(edx);
+
+  // Check if the optimized code is marked for deopt. If it is, bail out to
+  // the deoptimized-code handling below.
+ Label found_deoptimized_code;
+ __ mov(eax,
+ FieldOperand(optimized_code_entry, Code::kCodeDataContainerOffset));
+ __ test(FieldOperand(eax, CodeDataContainer::kKindSpecificFlagsOffset),
+ Immediate(1 << Code::kMarkedForDeoptimizationBit));
+ __ j(not_zero, &found_deoptimized_code);
+
+ // Optimized code is good, get it into the closure and link the closure
+ // into the optimized functions list, then tail call the optimized code.
+ ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure, edx,
+ eax);
+ static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch");
+ __ LoadCodeObjectEntry(ecx, optimized_code_entry);
+ __ pop(edx);
+ __ jmp(ecx);
- {
- // Optimized code slot is an optimization marker.
-
- // Fall through if no optimization trigger.
- __ cmp(optimized_code_entry,
- Immediate(Smi::FromEnum(OptimizationMarker::kNone)));
- __ j(equal, &fallthrough);
-
- // TODO(v8:8394): The logging of first execution will break if
- // feedback vectors are not allocated. We need to find a different way of
- // logging these events if required.
- TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
- OptimizationMarker::kLogFirstExecution,
- Runtime::kFunctionFirstExecution);
- TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
- OptimizationMarker::kCompileOptimized,
- Runtime::kCompileOptimized_NotConcurrent);
- TailCallRuntimeIfMarkerEquals(
- masm, optimized_code_entry,
- OptimizationMarker::kCompileOptimizedConcurrent,
- Runtime::kCompileOptimized_Concurrent);
+ // Optimized code slot contains deoptimized code, evict it and re-enter
+ // the closure's code.
+ __ bind(&found_deoptimized_code);
+ __ pop(edx);
+ GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
+}
- {
- // Otherwise, the marker is InOptimizationQueue, so fall through hoping
- // that an interrupt will eventually update the slot with optimized code.
- if (FLAG_debug_code) {
- __ cmp(
- optimized_code_entry,
- Immediate(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue)));
- __ Assert(equal, AbortReason::kExpectedOptimizationSentinel);
- }
- __ jmp(&fallthrough);
- }
- }
+static void MaybeOptimizeCode(MacroAssembler* masm,
+ Register optimization_marker) {
+ // ----------- S t a t e -------------
+ // -- edx : new target (preserved for callee if needed, and caller)
+ // -- edi : target function (preserved for callee if needed, and caller)
+ // -- optimization_marker : a Smi containing a non-zero optimization marker.
+ // -----------------------------------
+ DCHECK(!AreAliased(edx, edi, optimization_marker));
+
+ // TODO(v8:8394): The logging of first execution will break if
+ // feedback vectors are not allocated. We need to find a different way of
+ // logging these events if required.
+ TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
+ OptimizationMarker::kLogFirstExecution,
+ Runtime::kFunctionFirstExecution);
+ TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
+ OptimizationMarker::kCompileOptimized,
+ Runtime::kCompileOptimized_NotConcurrent);
+ TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
+ OptimizationMarker::kCompileOptimizedConcurrent,
+ Runtime::kCompileOptimized_Concurrent);
{
- // Optimized code slot is a weak reference.
- __ bind(&optimized_code_slot_is_weak_ref);
-
- __ LoadWeakValue(optimized_code_entry, &fallthrough);
-
- __ push(edx);
-
- // Check if the optimized code is marked for deopt. If it is, bailout to a
- // given label.
- Label found_deoptimized_code;
- __ mov(eax,
- FieldOperand(optimized_code_entry, Code::kCodeDataContainerOffset));
- __ test(FieldOperand(eax, CodeDataContainer::kKindSpecificFlagsOffset),
- Immediate(1 << Code::kMarkedForDeoptimizationBit));
- __ j(not_zero, &found_deoptimized_code);
-
- // Optimized code is good, get it into the closure and link the closure into
- // the optimized functions list, then tail call the optimized code.
- ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure,
- edx, eax);
- static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch");
- __ LoadCodeObjectEntry(ecx, optimized_code_entry);
- __ pop(edx);
- __ jmp(ecx);
-
- // Optimized code slot contains deoptimized code, evict it and re-enter the
- // closure's code.
- __ bind(&found_deoptimized_code);
- __ pop(edx);
- GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
+ // Otherwise, the marker is InOptimizationQueue, so fall through hoping
+ // that an interrupt will eventually update the slot with optimized code.
+ if (FLAG_debug_code) {
+ __ cmp(
+ optimization_marker,
+ Immediate(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue)));
+ __ Assert(equal, AbortReason::kExpectedOptimizationSentinel);
+ }
}
-
- // Fall-through if the optimized code cell is clear and there is no
- // optimization marker.
- __ bind(&fallthrough);
}
// Advance the current bytecode offset. This simulates what all bytecode
@@ -912,20 +884,21 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
static_cast<int>(interpreter::Bytecode::kDebugBreakExtraWide));
__ cmp(bytecode, Immediate(0x3));
__ j(above, &process_bytecode, Label::kNear);
+  // The code to load the next bytecode is common to both the wide and extra
+  // wide cases, so we can hoist it up here. The inc has to happen before the
+  // test, since inc modifies the ZF flag that the branch below consumes.
+ __ inc(bytecode_offset);
__ test(bytecode, Immediate(0x1));
+ __ movzx_b(bytecode, Operand(bytecode_array, bytecode_offset, times_1, 0));
__ j(not_equal, &extra_wide, Label::kNear);
// Load the next bytecode and update table to the wide scaled table.
- __ inc(bytecode_offset);
- __ movzx_b(bytecode, Operand(bytecode_array, bytecode_offset, times_1, 0));
__ add(bytecode_size_table,
Immediate(kIntSize * interpreter::Bytecodes::kBytecodeCount));
__ jmp(&process_bytecode, Label::kNear);
__ bind(&extra_wide);
- // Load the next bytecode and update table to the extra wide scaled table.
- __ inc(bytecode_offset);
- __ movzx_b(bytecode, Operand(bytecode_array, bytecode_offset, times_1, 0));
+ // Update table to the extra wide scaled table.
__ add(bytecode_size_table,
Immediate(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount));
@@ -982,9 +955,21 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ CmpInstanceType(eax, FEEDBACK_VECTOR_TYPE);
__ j(not_equal, &push_stack_frame);
- // Read off the optimized code slot in the closure's feedback vector, and if
- // there is optimized code or an optimization marker, call that instead.
- MaybeTailCallOptimizedCodeSlot(masm, ecx);
+ // Read off the optimized code slot in the feedback vector.
+ // Load the optimized code from the feedback vector and re-use the register.
+ Register optimized_code_entry = ecx;
+ __ mov(optimized_code_entry,
+ FieldOperand(feedback_vector,
+ FeedbackVector::kOptimizedCodeWeakOrSmiOffset));
+
+ // Check if the optimized code slot is not empty.
+ Label optimized_code_slot_not_empty;
+ __ cmp(optimized_code_entry,
+ Immediate(Smi::FromEnum(OptimizationMarker::kNone)));
+ __ j(not_equal, &optimized_code_slot_not_empty);
+
+ Label not_optimized;
+ __ bind(&not_optimized);
// Load the feedback vector and increment the invocation count.
__ mov(feedback_vector,
@@ -1035,6 +1020,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ push(Immediate(Smi::FromInt(BytecodeArray::kHeaderSize - kHeapObjectTag)));
// Allocate the local and temporary register file on the stack.
+ Label stack_overflow;
{
// Load frame size from the BytecodeArray object.
Register frame_size = ecx;
@@ -1042,22 +1028,19 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
BytecodeArray::kFrameSizeOffset));
// Do a stack check to ensure we don't go over the limit.
- Label ok;
__ mov(eax, esp);
__ sub(eax, frame_size);
__ CompareRealStackLimit(eax);
- __ j(above_equal, &ok);
- __ CallRuntime(Runtime::kThrowStackOverflow);
- __ bind(&ok);
+ __ j(below, &stack_overflow);
// If ok, push undefined as the initial value for all register file entries.
Label loop_header;
Label loop_check;
- __ Move(eax, masm->isolate()->factory()->undefined_value());
+ __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
__ jmp(&loop_check);
__ bind(&loop_header);
// TODO(rmcilroy): Consider doing more than one push per loop iteration.
- __ push(eax);
+ __ push(kInterpreterAccumulatorRegister);
// Continue loop if not done.
__ bind(&loop_check);
__ sub(frame_size, Immediate(kSystemPointerSize));
@@ -1067,12 +1050,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// If the bytecode array has a valid incoming new target or generator object
// register, initialize it with incoming value which was passed in edx.
Label no_incoming_new_target_or_generator_register;
- __ mov(eax, FieldOperand(
+ __ mov(ecx, FieldOperand(
kInterpreterBytecodeArrayRegister,
BytecodeArray::kIncomingNewTargetOrGeneratorRegisterOffset));
- __ test(eax, eax);
+ __ test(ecx, ecx);
__ j(zero, &no_incoming_new_target_or_generator_register);
- __ mov(Operand(ebp, eax, times_system_pointer_size, 0), edx);
+ __ mov(Operand(ebp, ecx, times_system_pointer_size, 0), edx);
__ bind(&no_incoming_new_target_or_generator_register);
// Load accumulator and bytecode offset into registers.
@@ -1117,8 +1100,26 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
LeaveInterpreterFrame(masm, edx, ecx);
__ ret(0);
+ __ bind(&optimized_code_slot_not_empty);
+ Label maybe_has_optimized_code;
+  // Check if the optimized code marker is actually a weak reference to
+  // optimized code, as opposed to an optimization marker.
+ __ JumpIfNotSmi(optimized_code_entry, &maybe_has_optimized_code);
+ MaybeOptimizeCode(masm, optimized_code_entry);
+ // Fall through if there's no runnable optimized code.
+ __ jmp(&not_optimized);
+
+ __ bind(&maybe_has_optimized_code);
+  // Load the code entry from the weak reference. If it was cleared, resume
+  // execution of unoptimized code.
+ __ LoadWeakValue(optimized_code_entry, &not_optimized);
+ TailCallOptimizedCodeSlot(masm, optimized_code_entry);
+
__ bind(&compile_lazy);
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
+
+ __ bind(&stack_overflow);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
__ int3(); // Should not return.
}
@@ -2601,14 +2602,8 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
}
void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
- // Lookup the function in the JavaScript frame.
- __ mov(eax, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ mov(eax, Operand(eax, JavaScriptFrameConstants::kFunctionOffset));
-
{
FrameScope scope(masm, StackFrame::INTERNAL);
- // Pass function as argument.
- __ push(eax);
__ CallRuntime(Runtime::kCompileForOnStackReplacement);
}
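
The ia32 restructuring above replaces one monolithic helper with a cheap
inline Smi compare plus two out-of-line paths. A sketch of the resulting
control flow; Slot and the return convention are illustrative, and the
marked-for-deoptimization check is elided:

    enum class Marker { kNone, kLogFirstExecution, kCompileOptimized,
                        kCompileOptimizedConcurrent, kInOptimizationQueue };

    struct Slot {        // stand-in for the feedback-vector slot
      bool is_smi;       // true: optimization marker; false: weak code ref
      Marker marker;     // valid when is_smi
      const void* code;  // valid otherwise; nullptr models a cleared ref
    };

    // Returns code to tail-call, or nullptr to resume the unoptimized entry.
    const void* DispatchOptimizedCodeSlot(const Slot& slot) {
      if (slot.is_smi) {
        if (slot.marker == Marker::kNone) return nullptr;  // fast path
        // MaybeOptimizeCode: a non-kNone marker tail-calls the runtime;
        // this sketch falls through as if the runtime declined.
        return nullptr;
      }
      if (slot.code == nullptr) return nullptr;  // cleared weak reference
      return slot.code;  // TailCallOptimizedCodeSlot (deopt check elided)
    }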
diff --git a/deps/v8/src/builtins/internal-coverage.tq b/deps/v8/src/builtins/internal-coverage.tq
index 41ec0c36e4..ebedbdce75 100644
--- a/deps/v8/src/builtins/internal-coverage.tq
+++ b/deps/v8/src/builtins/internal-coverage.tq
@@ -28,8 +28,6 @@ namespace internal_coverage {
return UnsafeCast<CoverageInfo>(debugInfo.coverage_info);
}
- @export // Silence unused warning on release builds. SlotCount is only used
- // in an assert. TODO(szuend): Remove once macros and asserts work.
macro SlotCount(coverageInfo: CoverageInfo): Smi {
assert(kFirstSlotIndex == 0); // Otherwise we'd have to consider it below.
assert(kFirstSlotIndex == (coverageInfo.length & kSlotIndexCountMask));
diff --git a/deps/v8/src/builtins/iterator.tq b/deps/v8/src/builtins/iterator.tq
index 06e8ea539c..e662e4e75e 100644
--- a/deps/v8/src/builtins/iterator.tq
+++ b/deps/v8/src/builtins/iterator.tq
@@ -37,22 +37,45 @@ namespace iterator {
extern macro IteratorBuiltinsAssembler::IterableToList(
implicit context: Context)(JSAny, JSAny): JSArray;
+ extern macro IteratorBuiltinsAssembler::StringListFromIterable(
+ implicit context: Context)(JSAny): JSArray;
+
extern builtin IterableToListMayPreserveHoles(implicit context:
Context)(JSAny, JSAny);
extern builtin IterableToListWithSymbolLookup(implicit context:
Context)(JSAny);
transitioning builtin GetIteratorWithFeedback(
- context: Context, receiver: JSAny, feedbackSlot: Smi,
+ context: Context, receiver: JSAny, loadSlot: Smi, callSlot: Smi,
feedback: Undefined | FeedbackVector): JSAny {
+ let iteratorMethod: JSAny;
typeswitch (feedback) {
case (Undefined): {
- return GetProperty(receiver, IteratorSymbolConstant());
+ iteratorMethod = GetProperty(receiver, IteratorSymbolConstant());
}
case (feedback: FeedbackVector): {
- return LoadIC(
- context, receiver, IteratorSymbolConstant(), feedbackSlot,
- feedback);
+ iteratorMethod = LoadIC(
+ context, receiver, IteratorSymbolConstant(), loadSlot, feedback);
+ }
+ }
+ return CallIteratorWithFeedback(
+ context, receiver, iteratorMethod, callSlot, feedback);
+ }
+
+ transitioning builtin CallIteratorWithFeedback(
+ context: Context, receiver: JSAny, iteratorMethod: JSAny, callSlot: Smi,
+ feedback: Undefined | FeedbackVector): JSAny {
+ const callSlotUnTagged: uintptr = Unsigned(SmiUntag(callSlot));
+ CollectCallFeedback(iteratorMethod, context, feedback, callSlotUnTagged);
+ const iteratorCallable: Callable = Cast<Callable>(iteratorMethod)
+ otherwise ThrowCalledNonCallable(iteratorMethod);
+ const iterator: JSAny = Call(context, iteratorCallable, receiver);
+ typeswitch (iterator) {
+ case (JSReceiver): {
+ return iterator;
+ }
+ case (JSPrimitive): {
+ ThrowSymbolIteratorInvalid();
}
}
}
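
CallIteratorWithFeedback above enforces two checks that both surface as
TypeErrors in JS: the @@iterator property must be callable, and the value it
returns must be an object. A sketch with plain C++ stand-ins; feedback
collection is elided and the error strings are illustrative:

    #include <stdexcept>

    struct JSValue {
      bool is_callable;
      bool is_receiver;  // true for objects, false for primitives
    };

    template <typename CallFn>
    JSValue CallIteratorSketch(const JSValue& iterator_method, CallFn call) {
      if (!iterator_method.is_callable)
        throw std::runtime_error("ThrowCalledNonCallable");
      JSValue iterator = call();  // Call(context, iteratorMethod, receiver)
      if (!iterator.is_receiver)
        throw std::runtime_error("ThrowSymbolIteratorInvalid");
      return iterator;
    }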
diff --git a/deps/v8/src/builtins/mips/builtins-mips.cc b/deps/v8/src/builtins/mips/builtins-mips.cc
index d3237a1c38..ecfb224fb2 100644
--- a/deps/v8/src/builtins/mips/builtins-mips.cc
+++ b/deps/v8/src/builtins/mips/builtins-mips.cc
@@ -1085,18 +1085,16 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Push(kInterpreterBytecodeArrayRegister, t0);
// Allocate the local and temporary register file on the stack.
+ Label stack_overflow;
{
// Load frame size from the BytecodeArray object.
__ lw(t0, FieldMemOperand(kInterpreterBytecodeArrayRegister,
BytecodeArray::kFrameSizeOffset));
// Do a stack check to ensure we don't go over the limit.
- Label ok;
__ Subu(t1, sp, Operand(t0));
LoadRealStackLimit(masm, a2);
- __ Branch(&ok, hs, t1, Operand(a2));
- __ CallRuntime(Runtime::kThrowStackOverflow);
- __ bind(&ok);
+ __ Branch(&stack_overflow, lo, t1, Operand(a2));
// If ok, push undefined as the initial value for all register file entries.
Label loop_header;
@@ -1169,6 +1167,11 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
// Unreachable code.
__ break_(0xCC);
+
+ __ bind(&stack_overflow);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
+ // Unreachable code.
+ __ break_(0xCC);
}
static void Generate_InterpreterPushArgs(MacroAssembler* masm,
@@ -1525,14 +1528,8 @@ void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
}
void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
- // Lookup the function in the JavaScript frame.
- __ lw(a0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ lw(a0, MemOperand(a0, JavaScriptFrameConstants::kFunctionOffset));
-
{
FrameScope scope(masm, StackFrame::INTERNAL);
- // Pass function as argument.
- __ push(a0);
__ CallRuntime(Runtime::kCompileForOnStackReplacement);
}
@@ -2131,7 +2128,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// -- a1 : the target to call (can be any Object).
// -----------------------------------
- Label non_callable, non_function, non_smi;
+ Label non_callable, non_smi;
__ JumpIfSmi(a1, &non_callable);
__ bind(&non_smi);
__ GetObjectType(a1, t1, t2);
@@ -2146,12 +2143,11 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
__ Branch(&non_callable, eq, t1, Operand(zero_reg));
// Check if target is a proxy and call CallProxy external builtin
- __ Branch(&non_function, ne, t2, Operand(JS_PROXY_TYPE));
- __ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), RelocInfo::CODE_TARGET);
+ __ Jump(BUILTIN_CODE(masm->isolate(), CallProxy),
+ RelocInfo::CODE_TARGET, eq, t2, Operand(JS_PROXY_TYPE));
// 2. Call to something else, which might have a [[Call]] internal method (if
// not we raise an exception).
- __ bind(&non_function);
// Overwrite the original receiver with the (original) target.
__ Lsa(kScratchReg, sp, a0, kPointerSizeLog2);
__ sw(a1, MemOperand(kScratchReg));
diff --git a/deps/v8/src/builtins/mips64/builtins-mips64.cc b/deps/v8/src/builtins/mips64/builtins-mips64.cc
index 7cb66470a3..47dbc34002 100644
--- a/deps/v8/src/builtins/mips64/builtins-mips64.cc
+++ b/deps/v8/src/builtins/mips64/builtins-mips64.cc
@@ -1103,18 +1103,16 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Push(kInterpreterBytecodeArrayRegister, a4);
// Allocate the local and temporary register file on the stack.
+ Label stack_overflow;
{
// Load frame size (word) from the BytecodeArray object.
__ Lw(a4, FieldMemOperand(kInterpreterBytecodeArrayRegister,
BytecodeArray::kFrameSizeOffset));
// Do a stack check to ensure we don't go over the limit.
- Label ok;
__ Dsubu(a5, sp, Operand(a4));
LoadRealStackLimit(masm, a2);
- __ Branch(&ok, hs, a5, Operand(a2));
- __ CallRuntime(Runtime::kThrowStackOverflow);
- __ bind(&ok);
+ __ Branch(&stack_overflow, lo, a5, Operand(a2));
// If ok, push undefined as the initial value for all register file entries.
Label loop_header;
@@ -1188,6 +1186,11 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
// Unreachable code.
__ break_(0xCC);
+
+ __ bind(&stack_overflow);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
+ // Unreachable code.
+ __ break_(0xCC);
}
static void Generate_InterpreterPushArgs(MacroAssembler* masm,
@@ -1542,14 +1545,8 @@ void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
}
void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
- // Lookup the function in the JavaScript frame.
- __ Ld(a0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ Ld(a0, MemOperand(a0, JavaScriptFrameConstants::kFunctionOffset));
-
{
FrameScope scope(masm, StackFrame::INTERNAL);
- // Pass function as argument.
- __ push(a0);
__ CallRuntime(Runtime::kCompileForOnStackReplacement);
}
@@ -2170,7 +2167,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// -- a1 : the target to call (can be any Object).
// -----------------------------------
- Label non_callable, non_function, non_smi;
+ Label non_callable, non_smi;
__ JumpIfSmi(a1, &non_callable);
__ bind(&non_smi);
__ GetObjectType(a1, t1, t2);
@@ -2184,12 +2181,11 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
__ And(t1, t1, Operand(Map::IsCallableBit::kMask));
__ Branch(&non_callable, eq, t1, Operand(zero_reg));
- __ Branch(&non_function, ne, t2, Operand(JS_PROXY_TYPE));
- __ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), RelocInfo::CODE_TARGET);
+ __ Jump(BUILTIN_CODE(masm->isolate(), CallProxy),
+ RelocInfo::CODE_TARGET, eq, t2, Operand(JS_PROXY_TYPE));
// 2. Call to something else, which might have a [[Call]] internal method (if
// not we raise an exception).
- __ bind(&non_function);
// Overwrite the original receiver with the (original) target.
__ Dlsa(kScratchReg, sp, a0, kPointerSizeLog2);
__ Sd(a1, MemOperand(kScratchReg));
diff --git a/deps/v8/src/builtins/ppc/builtins-ppc.cc b/deps/v8/src/builtins/ppc/builtins-ppc.cc
index 485b793395..ab0c7900d5 100644
--- a/deps/v8/src/builtins/ppc/builtins-ppc.cc
+++ b/deps/v8/src/builtins/ppc/builtins-ppc.cc
@@ -863,9 +863,11 @@ void Builtins::Generate_RunMicrotasksTrampoline(MacroAssembler* masm) {
__ Jump(BUILTIN_CODE(masm->isolate(), RunMicrotasks), RelocInfo::CODE_TARGET);
}
-static void ReplaceClosureCodeWithOptimizedCode(
- MacroAssembler* masm, Register optimized_code, Register closure,
- Register scratch1, Register scratch2, Register scratch3) {
+static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm,
+ Register optimized_code,
+ Register closure,
+ Register scratch1,
+ Register scratch2) {
// Store code entry in the closure.
__ StoreP(optimized_code, FieldMemOperand(closure, JSFunction::kCodeOffset),
r0);
@@ -902,100 +904,73 @@ static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
__ bind(&no_match);
}
-static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
- Register feedback_vector,
- Register scratch1, Register scratch2,
- Register scratch3) {
+static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
+ Register optimized_code_entry,
+ Register scratch) {
// ----------- S t a t e -------------
// -- r6 : new target (preserved for callee if needed, and caller)
// -- r4 : target function (preserved for callee if needed, and caller)
- // -- feedback vector (preserved for caller if needed)
// -----------------------------------
- DCHECK(!AreAliased(feedback_vector, r4, r6, scratch1, scratch2, scratch3));
-
- Label optimized_code_slot_is_weak_ref, fallthrough;
+ DCHECK(!AreAliased(r4, r6, optimized_code_entry, scratch));
Register closure = r4;
- Register optimized_code_entry = scratch1;
-
- __ LoadP(
- optimized_code_entry,
- FieldMemOperand(feedback_vector,
- FeedbackVector::kOptimizedCodeWeakOrSmiOffset));
-
- // Check if the code entry is a Smi. If yes, we interpret it as an
- // optimisation marker. Otherwise, interpret it as a weak reference to a code
- // object.
- __ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_weak_ref);
- {
- // Optimized code slot is a Smi optimization marker.
-
- // Fall through if no optimization trigger.
- __ CmpSmiLiteral(optimized_code_entry,
- Smi::FromEnum(OptimizationMarker::kNone), r0);
- __ beq(&fallthrough);
-
- TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
- OptimizationMarker::kLogFirstExecution,
- Runtime::kFunctionFirstExecution);
- TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
- OptimizationMarker::kCompileOptimized,
- Runtime::kCompileOptimized_NotConcurrent);
- TailCallRuntimeIfMarkerEquals(
- masm, optimized_code_entry,
- OptimizationMarker::kCompileOptimizedConcurrent,
- Runtime::kCompileOptimized_Concurrent);
-
- {
- // Otherwise, the marker is InOptimizationQueue, so fall through hoping
- // that an interrupt will eventually update the slot with optimized code.
- if (FLAG_debug_code) {
- __ CmpSmiLiteral(
- optimized_code_entry,
- Smi::FromEnum(OptimizationMarker::kInOptimizationQueue), r0);
- __ Assert(eq, AbortReason::kExpectedOptimizationSentinel);
- }
- __ b(&fallthrough);
- }
- }
+ // Check if the optimized code is marked for deopt. If it is, call the
+ // runtime to clear it.
+ Label found_deoptimized_code;
+ __ LoadP(scratch, FieldMemOperand(optimized_code_entry,
+ Code::kCodeDataContainerOffset));
+ __ LoadWordArith(
+ scratch,
+ FieldMemOperand(scratch, CodeDataContainer::kKindSpecificFlagsOffset));
+ __ TestBit(scratch, Code::kMarkedForDeoptimizationBit, r0);
+ __ bne(&found_deoptimized_code, cr0);
+
+ // Optimized code is good, get it into the closure and link the closure
+ // into the optimized functions list, then tail call the optimized code.
+ ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure,
+ scratch, r8);
+ static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch");
+ __ LoadCodeObjectEntry(r5, optimized_code_entry);
+ __ Jump(r5);
- {
- // Optimized code slot is a weak reference.
- __ bind(&optimized_code_slot_is_weak_ref);
-
- __ LoadWeakValue(optimized_code_entry, optimized_code_entry, &fallthrough);
-
- // Check if the optimized code is marked for deopt. If it is, call the
- // runtime to clear it.
- Label found_deoptimized_code;
- __ LoadP(scratch2, FieldMemOperand(optimized_code_entry,
- Code::kCodeDataContainerOffset));
- __ LoadWordArith(
- scratch2,
- FieldMemOperand(scratch2, CodeDataContainer::kKindSpecificFlagsOffset));
- __ TestBit(scratch2, Code::kMarkedForDeoptimizationBit, r0);
- __ bne(&found_deoptimized_code, cr0);
-
- // Optimized code is good, get it into the closure and link the closure into
- // the optimized functions list, then tail call the optimized code.
- // The feedback vector is no longer used, so re-use it as a scratch
- // register.
- ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure,
- scratch2, scratch3, feedback_vector);
- static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch");
- __ LoadCodeObjectEntry(r5, optimized_code_entry);
- __ Jump(r5);
+ // Optimized code slot contains deoptimized code, evict it and re-enter
+ // the closure's code.
+ __ bind(&found_deoptimized_code);
+ GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
+}
- // Optimized code slot contains deoptimized code, evict it and re-enter the
- // closure's code.
- __ bind(&found_deoptimized_code);
- GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
+static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
+ Register optimization_marker) {
+ // ----------- S t a t e -------------
+ // -- r6 : new target (preserved for callee if needed, and caller)
+ // -- r4 : target function (preserved for callee if needed, and caller)
+ // -- feedback vector (preserved for caller if needed)
+ // -- optimization_marker : a Smi containing a non-zero optimization marker.
+ // -----------------------------------
+ DCHECK(!AreAliased(feedback_vector, r4, r6, optimization_marker));
+
+ // TODO(v8:8394): The logging of first execution will break if
+ // feedback vectors are not allocated. We need to find a different way of
+ // logging these events if required.
+ TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
+ OptimizationMarker::kLogFirstExecution,
+ Runtime::kFunctionFirstExecution);
+ TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
+ OptimizationMarker::kCompileOptimized,
+ Runtime::kCompileOptimized_NotConcurrent);
+ TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
+ OptimizationMarker::kCompileOptimizedConcurrent,
+ Runtime::kCompileOptimized_Concurrent);
+
+ // Otherwise, the marker is InOptimizationQueue, so fall through hoping
+ // that an interrupt will eventually update the slot with optimized code.
+ if (FLAG_debug_code) {
+ __ CmpSmiLiteral(optimization_marker,
+ Smi::FromEnum(OptimizationMarker::kInOptimizationQueue),
+ r0);
+ __ Assert(eq, AbortReason::kExpectedOptimizationSentinel);
}
-
- // Fall-through if the optimized code cell is clear and there is no
- // optimization marker.
- __ bind(&fallthrough);
}
// Advance the current bytecode offset. This simulates what all bytecode
@@ -1104,9 +1079,20 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ cmpi(r7, Operand(FEEDBACK_VECTOR_TYPE));
__ bne(&push_stack_frame);
- // Read off the optimized code slot in the feedback vector, and if there
- // is optimized code or an optimization marker, call that instead.
- MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r7, r9, r8);
+ Register optimized_code_entry = r7;
+
+ // Read off the optimized code slot in the feedback vector.
+ __ LoadP(optimized_code_entry,
+ FieldMemOperand(feedback_vector,
+ FeedbackVector::kOptimizedCodeWeakOrSmiOffset));
+ // Check if the optimized code slot is not empty.
+ Label optimized_code_slot_not_empty;
+ __ CmpSmiLiteral(optimized_code_entry,
+ Smi::FromEnum(OptimizationMarker::kNone), r0);
+ __ bne(&optimized_code_slot_not_empty);
+
+ Label not_optimized;
+ __ bind(&not_optimized);
// Increment invocation count for the function.
__ LoadWord(
@@ -1149,29 +1135,27 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Push(kInterpreterBytecodeArrayRegister, r3);
// Allocate the local and temporary register file on the stack.
+ Label stack_overflow;
{
// Load frame size (word) from the BytecodeArray object.
__ lwz(r5, FieldMemOperand(kInterpreterBytecodeArrayRegister,
BytecodeArray::kFrameSizeOffset));
// Do a stack check to ensure we don't go over the limit.
- Label ok;
__ sub(r8, sp, r5);
LoadRealStackLimit(masm, r0);
__ cmpl(r8, r0);
- __ bge(&ok);
- __ CallRuntime(Runtime::kThrowStackOverflow);
- __ bind(&ok);
+ __ blt(&stack_overflow);
// If ok, push undefined as the initial value for all register file entries.
// TODO(rmcilroy): Consider doing more than one push per loop iteration.
Label loop, no_args;
- __ LoadRoot(r8, RootIndex::kUndefinedValue);
+ __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
__ ShiftRightImm(r5, r5, Operand(kPointerSizeLog2), SetRC);
__ beq(&no_args, cr0);
__ mtctr(r5);
__ bind(&loop);
- __ push(r8);
+ __ push(kInterpreterAccumulatorRegister);
__ bdnz(&loop);
__ bind(&no_args);
}
@@ -1189,8 +1173,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ StorePX(r6, MemOperand(fp, r8));
__ bind(&no_incoming_new_target_or_generator_register);
- // Load accumulator with undefined.
- __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
+ // The accumulator is already loaded with undefined.
+
// Load the dispatch table into a register and dispatch to the bytecode
// handler at the current bytecode offset.
Label do_dispatch;
@@ -1231,8 +1215,26 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
LeaveInterpreterFrame(masm, r5);
__ blr();
+ __ bind(&optimized_code_slot_not_empty);
+ Label maybe_has_optimized_code;
+  // Check if the optimized code marker is actually a weak reference to the
+  // optimized code.
+ __ JumpIfNotSmi(optimized_code_entry, &maybe_has_optimized_code);
+ MaybeOptimizeCode(masm, feedback_vector, optimized_code_entry);
+ // Fall through if there's no runnable optimized code.
+ __ jmp(&not_optimized);
+
+ __ bind(&maybe_has_optimized_code);
+  // Load the code entry from the weak reference. If it was cleared, resume
+  // execution of unoptimized code.
+ __ LoadWeakValue(optimized_code_entry, optimized_code_entry, &not_optimized);
+ TailCallOptimizedCodeSlot(masm, optimized_code_entry, r9);
+
__ bind(&compile_lazy);
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
+
+ __ bind(&stack_overflow);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
__ bkpt(0); // Should not return.
}
@@ -1596,14 +1598,8 @@ void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
}
void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
- // Lookup the function in the JavaScript frame.
- __ LoadP(r3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ LoadP(r3, MemOperand(r3, JavaScriptFrameConstants::kFunctionOffset));
-
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- // Pass function as argument.
- __ push(r3);
__ CallRuntime(Runtime::kCompileForOnStackReplacement);
}
@@ -2260,7 +2256,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// -- r4 : the target to call (can be any Object).
// -----------------------------------
- Label non_callable, non_function, non_smi;
+ Label non_callable, non_smi;
__ JumpIfSmi(r4, &non_callable);
__ bind(&non_smi);
__ CompareObjectType(r4, r7, r8, JS_FUNCTION_TYPE);
@@ -2277,12 +2273,10 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// Check if target is a proxy and call CallProxy external builtin
__ cmpi(r8, Operand(JS_PROXY_TYPE));
- __ bne(&non_function);
- __ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), RelocInfo::CODE_TARGET);
+ __ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), RelocInfo::CODE_TARGET, eq);
// 2. Call to something else, which might have a [[Call]] internal method (if
// not we raise an exception).
- __ bind(&non_function);
// Overwrite the original receiver with the (original) target.
__ ShiftLeftImm(r8, r3, Operand(kPointerSizeLog2));
__ StorePX(r4, MemOperand(sp, r8));
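The restructuring above splits the old MaybeTailCallOptimizedCodeSlot into two
helpers: MaybeOptimizeCode handles a Smi optimization marker, while
TailCallOptimizedCodeSlot handles a weak reference to already-compiled code,
and the common case (marker kNone) no longer branches at all. A minimal
TypeScript sketch of the resulting control flow, with label names taken from
the diff and everything else hypothetical pseudocode rather than V8 API:

type Smi = { marker: number };
type WeakCode = { deref(): (() => void) | undefined };
type OptimizedCodeSlot = Smi | WeakCode;

const kNoneMarker = 0; // OptimizationMarker::kNone

function isSmi(slot: OptimizedCodeSlot): slot is Smi {
  return "marker" in slot;
}

function interpreterEntry(slot: OptimizedCodeSlot, runBytecode: () => void) {
  if (!(isSmi(slot) && slot.marker === kNoneMarker)) {
    // optimized_code_slot_not_empty:
    if (isSmi(slot)) {
      maybeOptimizeCode(slot.marker); // may tail-call into the runtime
      // otherwise fall through to not_optimized
    } else {
      const code = slot.deref(); // a cleared weak ref resumes unoptimized code
      if (code !== undefined) return code(); // TailCallOptimizedCodeSlot
    }
  }
  // not_optimized: bump the invocation count and dispatch bytecode; the stack
  // check now branches out of line to the shared stack_overflow label.
  runBytecode();
}

function maybeOptimizeCode(marker: number): void {
  // kLogFirstExecution / kCompileOptimized / kCompileOptimizedConcurrent
}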
diff --git a/deps/v8/src/builtins/regexp-exec.tq b/deps/v8/src/builtins/regexp-exec.tq
new file mode 100644
index 0000000000..b2ca9de10b
--- /dev/null
+++ b/deps/v8/src/builtins/regexp-exec.tq
@@ -0,0 +1,45 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include 'src/builtins/builtins-regexp-gen.h'
+
+namespace regexp {
+
+ extern transitioning macro RegExpBuiltinsAssembler::RegExpPrototypeExecBody(
+ implicit context: Context)(JSReceiver, String, constexpr bool): JSAny;
+
+ transitioning macro RegExpPrototypeExecBodyFast(implicit context: Context)(
+ receiver: JSReceiver, string: String): JSAny {
+ return RegExpPrototypeExecBody(receiver, string, true);
+ }
+
+ transitioning macro RegExpPrototypeExecBodySlow(implicit context: Context)(
+ receiver: JSReceiver, string: String): JSAny {
+ return RegExpPrototypeExecBody(receiver, string, false);
+ }
+
+ // Slow path stub for RegExpPrototypeExec to decrease code size.
+ transitioning builtin
+ RegExpPrototypeExecSlow(implicit context: Context)(
+ regexp: JSRegExp, string: String): JSAny {
+ return RegExpPrototypeExecBodySlow(regexp, string);
+ }
+
+ extern macro RegExpBuiltinsAssembler::IsFastRegExpNoPrototype(
+ implicit context: Context)(Object): bool;
+
+ // ES#sec-regexp.prototype.exec
+ // RegExp.prototype.exec ( string )
+ transitioning javascript builtin RegExpPrototypeExec(
+ js-implicit context: Context, receiver: JSAny)(string: JSAny): JSAny {
+ // Ensure {receiver} is a JSRegExp.
+ const receiver = Cast<JSRegExp>(receiver) otherwise ThrowTypeError(
+ kIncompatibleMethodReceiver, 'RegExp.prototype.exec', receiver);
+ const string = ToString_Inline(context, string);
+
+ return IsFastRegExpNoPrototype(receiver) ?
+ RegExpPrototypeExecBodyFast(receiver, string) :
+ RegExpPrototypeExecSlow(receiver, string);
+ }
+}
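What the Torque port of exec above preserves is the strict receiver check and
the up-front ToString on the argument; only the receiver check is observable
from script. An illustrative snippet (plain TypeScript, not V8 internals):

const re = /a(b)?/;
console.log(re.exec("ab")?.[0]); // "ab" – fast path on an unmodified regexp
console.log(re.exec(123 as unknown as string)); // null – argument is ToString'd to "123"

try {
  (RegExp.prototype.exec as Function).call({}, "ab");
} catch (e) {
  // Cast<JSRegExp> fails: kIncompatibleMethodReceiver
  console.log(e instanceof TypeError); // true
}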
diff --git a/deps/v8/src/builtins/regexp-match-all.tq b/deps/v8/src/builtins/regexp-match-all.tq
new file mode 100644
index 0000000000..1be6e69afc
--- /dev/null
+++ b/deps/v8/src/builtins/regexp-match-all.tq
@@ -0,0 +1,258 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include 'src/builtins/builtins-regexp-gen.h'
+
+namespace regexp {
+
+ extern transitioning macro RegExpBuiltinsAssembler::RegExpCreate(
+ implicit context: Context)(Context, Object, String): Object;
+
+ extern transitioning macro
+ RegExpMatchAllAssembler::CreateRegExpStringIterator(
+ NativeContext, Object, String, bool, bool): JSAny;
+
+ @export
+ transitioning macro RegExpPrototypeMatchAllImpl(implicit context: Context)(
+ nativeContext: NativeContext, receiver: JSAny, string: JSAny): JSAny {
+ // 1. Let R be the this value.
+ // 2. If Type(R) is not Object, throw a TypeError exception.
+ ThrowIfNotJSReceiver(
+ receiver, kIncompatibleMethodReceiver, 'RegExp.prototype.@@matchAll');
+ const receiver = UnsafeCast<JSReceiver>(receiver);
+
+ // 3. Let S be ? ToString(string).
+ const string: String = ToString_Inline(context, string);
+
+ let matcher: Object;
+ let global: bool;
+ let unicode: bool;
+
+ // 'FastJSRegExp' uses the strict fast path check because the following
+ // code uses the flags property.
+ // TODO(jgruber): Handle slow flag accesses on the fast path and make this
+ // permissive.
+ typeswitch (receiver) {
+ case (fastRegExp: FastJSRegExp): {
+ const source = fastRegExp.source;
+
+ // 4. Let C be ? SpeciesConstructor(R, %RegExp%).
+ // 5. Let flags be ? ToString(? Get(R, "flags")).
+ // 6. Let matcher be ? Construct(C, « R, flags »).
+ const flags: String = FastFlagsGetter(fastRegExp);
+ matcher = RegExpCreate(nativeContext, source, flags);
+ const matcherRegExp = UnsafeCast<JSRegExp>(matcher);
+ assert(IsFastRegExpPermissive(matcherRegExp));
+
+ // 7. Let lastIndex be ? ToLength(? Get(R, "lastIndex")).
+ // 8. Perform ? Set(matcher, "lastIndex", lastIndex, true).
+ const fastRegExp = UnsafeCast<FastJSRegExp>(receiver);
+ FastStoreLastIndex(matcherRegExp, fastRegExp.lastIndex);
+
+ // 9. If flags contains "g", let global be true.
+ // 10. Else, let global be false.
+ global = FastFlagGetter(matcherRegExp, kGlobal);
+
+ // 11. If flags contains "u", let fullUnicode be true.
+ // 12. Else, let fullUnicode be false.
+ unicode = FastFlagGetter(matcherRegExp, kUnicode);
+ }
+ case (Object): {
+ // 4. Let C be ? SpeciesConstructor(R, %RegExp%).
+ const regexpFun =
+ UnsafeCast<JSFunction>(nativeContext[REGEXP_FUNCTION_INDEX]);
+ const speciesConstructor =
+ UnsafeCast<Constructor>(SpeciesConstructor(receiver, regexpFun));
+
+ // 5. Let flags be ? ToString(? Get(R, "flags")).
+ const flags = GetProperty(receiver, 'flags');
+ const flagsString = ToString_Inline(context, flags);
+
+ // 6. Let matcher be ? Construct(C, « R, flags »).
+ matcher = Construct(speciesConstructor, receiver, flagsString);
+
+ // 7. Let lastIndex be ? ToLength(? Get(R, "lastIndex")).
+ const lastIndex: Number =
+ ToLength_Inline(context, SlowLoadLastIndex(receiver));
+
+ // 8. Perform ? Set(matcher, "lastIndex", lastIndex, true).
+ SlowStoreLastIndex(UnsafeCast<JSReceiver>(matcher), lastIndex);
+
+ // 9. If flags contains "g", let global be true.
+ // 10. Else, let global be false.
+ const globalCharString: String = StringConstant('g');
+ const globalIndex: Smi =
+ StringIndexOf(flagsString, globalCharString, 0);
+ global = globalIndex != -1;
+
+ // 11. If flags contains "u", let fullUnicode be true.
+ // 12. Else, let fullUnicode be false.
+ const unicodeCharString = StringConstant('u');
+ const unicodeIndex: Smi =
+ StringIndexOf(flagsString, unicodeCharString, 0);
+ unicode = unicodeIndex != -1;
+ }
+ }
+
+ // 13. Return ! CreateRegExpStringIterator(matcher, S, global, fullUnicode).
+ return CreateRegExpStringIterator(
+ nativeContext, matcher, string, global, unicode);
+ }
+
+ // https://tc39.github.io/proposal-string-matchall/
+ // RegExp.prototype [ @@matchAll ] ( string )
+ transitioning javascript builtin RegExpPrototypeMatchAll(
+ js-implicit context: Context, receiver: JSAny)(string: JSAny): JSAny {
+ const nativeContext: NativeContext = LoadNativeContext(context);
+ return RegExpPrototypeMatchAllImpl(nativeContext, receiver, string);
+ }
+
+ const kJSRegExpStringIteratorDone:
+ constexpr int31 generates '1 << JSRegExpStringIterator::kDoneBit';
+ const kJSRegExpStringIteratorGlobal: constexpr int31
+ generates '1 << JSRegExpStringIterator::kGlobalBit';
+ const kJSRegExpStringIteratorUnicode: constexpr int31
+ generates '1 << JSRegExpStringIterator::kUnicodeBit';
+
+ extern macro IsSetSmi(Smi, constexpr int31): bool;
+
+ macro HasDoneFlag(flags: Smi): bool {
+ return IsSetSmi(flags, kJSRegExpStringIteratorDone);
+ }
+
+ macro HasGlobalFlag(flags: Smi): bool {
+ return IsSetSmi(flags, kJSRegExpStringIteratorGlobal);
+ }
+
+ macro HasUnicodeFlag(flags: Smi): bool {
+ return IsSetSmi(flags, kJSRegExpStringIteratorUnicode);
+ }
+
+ macro SetDoneFlag(iterator: JSRegExpStringIterator, flags: Smi) {
+ const newFlags: Smi = flags | kJSRegExpStringIteratorDone;
+ iterator.flags = newFlags;
+ }
+
+ extern macro RegExpBuiltinsAssembler::ConstructNewResultFromMatchInfo(
+ implicit context: Context)(JSReceiver, RegExpMatchInfo, String):
+ JSRegExpResult;
+
+ // https://tc39.github.io/proposal-string-matchall/
+ // %RegExpStringIteratorPrototype%.next ( )
+ transitioning javascript builtin RegExpStringIteratorPrototypeNext(
+ js-implicit context: Context, receiver: JSAny)(): JSAny {
+ // 1. Let O be the this value.
+ // 2. If Type(O) is not Object, throw a TypeError exception.
+ // 3. If O does not have all of the internal slots of a RegExp String
+ // Iterator Object Instance (see 5.3), throw a TypeError exception.
+ const methodName: constexpr string =
+ '%RegExpStringIterator%.prototype.next';
+ const receiver = Cast<JSRegExpStringIterator>(receiver) otherwise
+ ThrowTypeError(kIncompatibleMethodReceiver, methodName, receiver);
+
+ try {
+ // 4. If O.[[Done]] is true, then
+ // a. Return ! CreateIterResultObject(undefined, true).
+ const flags: Smi = receiver.flags;
+ if (HasDoneFlag(flags)) goto ReturnEmptyDoneResult;
+
+ // 5. Let R be O.[[iteratingRegExp]].
+ const iteratingRegExp: JSReceiver = receiver.iterating_reg_exp;
+
+ // 6. Let S be O.[[IteratedString]].
+ const iteratingString: String = receiver.iterated_string;
+
+ // 7. Let global be O.[[Global]].
+ // 8. Let fullUnicode be O.[[Unicode]].
+ // 9. Let match be ? RegExpExec(R, S).
+ let match: Object;
+ let isFastRegExp: bool = false;
+ try {
+ if (IsFastRegExpPermissive(iteratingRegExp)) {
+ const matchIndices: RegExpMatchInfo =
+ RegExpPrototypeExecBodyWithoutResultFast(
+ UnsafeCast<JSRegExp>(iteratingRegExp), iteratingString)
+ otherwise IfNoMatch;
+ match = ConstructNewResultFromMatchInfo(
+ iteratingRegExp, matchIndices, iteratingString);
+ isFastRegExp = true;
+ } else {
+ match = RegExpExec(iteratingRegExp, iteratingString);
+ if (match == Null) {
+ goto IfNoMatch;
+ }
+ }
+ // 11. Else,
+ // b. Else, handle non-global case first.
+ if (!HasGlobalFlag(flags)) {
+ // i. Set O.[[Done]] to true.
+ SetDoneFlag(receiver, flags);
+
+ // ii. Return ! CreateIterResultObject(match, false).
+ return AllocateJSIteratorResult(UnsafeCast<JSAny>(match), False);
+ }
+ // a. If global is true,
+ assert(HasGlobalFlag(flags));
+ if (isFastRegExp) {
+ // i. Let matchStr be ? ToString(? Get(match, "0")).
+ const match = UnsafeCast<FastJSRegExpResult>(match);
+ const resultFixedArray = UnsafeCast<FixedArray>(match.elements);
+ const matchStr = UnsafeCast<String>(resultFixedArray.objects[0]);
+
+ // When iterating_regexp is fast, we assume it stays fast even after
+ // accessing the first match from the RegExp result.
+ assert(IsFastRegExpPermissive(iteratingRegExp));
+ const iteratingRegExp = UnsafeCast<JSRegExp>(iteratingRegExp);
+ if (matchStr == kEmptyString) {
+ // 1. Let thisIndex be ? ToLength(? Get(R, "lastIndex")).
+ const thisIndex: Smi = FastLoadLastIndex(iteratingRegExp);
+
+ // 2. Let nextIndex be ! AdvanceStringIndex(S, thisIndex,
+ // fullUnicode).
+ const nextIndex: Smi = AdvanceStringIndexFast(
+ iteratingString, thisIndex, HasUnicodeFlag(flags));
+
+ // 3. Perform ? Set(R, "lastIndex", nextIndex, true).
+ FastStoreLastIndex(iteratingRegExp, nextIndex);
+ }
+
+ // iii. Return ! CreateIterResultObject(match, false).
+ return AllocateJSIteratorResult(match, False);
+ }
+ assert(!isFastRegExp);
+ // i. Let matchStr be ? ToString(? Get(match, "0")).
+ const match = UnsafeCast<JSAny>(match);
+ const matchStr =
+ ToString_Inline(context, GetProperty(match, SmiConstant(0)));
+
+ if (matchStr == kEmptyString) {
+ // 1. Let thisIndex be ? ToLength(? Get(R, "lastIndex")).
+ const lastIndex: JSAny = SlowLoadLastIndex(iteratingRegExp);
+ const thisIndex: Number = ToLength_Inline(context, lastIndex);
+
+ // 2. Let nextIndex be ! AdvanceStringIndex(S, thisIndex,
+ // fullUnicode).
+ const nextIndex: Number = AdvanceStringIndexSlow(
+ iteratingString, thisIndex, HasUnicodeFlag(flags));
+
+ // 3. Perform ? Set(R, "lastIndex", nextIndex, true).
+ SlowStoreLastIndex(iteratingRegExp, nextIndex);
+ }
+ // iii. Return ! CreateIterResultObject(match, false).
+ return AllocateJSIteratorResult(match, False);
+ }
+ // 10. If match is null, then
+ label IfNoMatch {
+ // a. Set O.[[Done]] to true.
+ SetDoneFlag(receiver, flags);
+
+ // b. Return ! CreateIterResultObject(undefined, true).
+ goto ReturnEmptyDoneResult;
+ }
+ }
+ label ReturnEmptyDoneResult {
+ return AllocateJSIteratorResult(Undefined, True);
+ }
+ }
+}
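The empty-match handling in the iterator's next() above is what guarantees
termination: when matchStr is empty, lastIndex is bumped via
AdvanceStringIndex before the next iteration. Illustrative, in plain
TypeScript:

const indices = [..."abc".matchAll(/(?:)/g)].map(m => m.index);
console.log(indices); // [0, 1, 2, 3] – lastIndex advances past each empty match

// With the u flag (fullUnicode), the advance is by code point, not code unit.
const uIndices = [..."😀!".matchAll(/(?:)/gu)].map(m => m.index);
console.log(uIndices); // [0, 2, 3]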
diff --git a/deps/v8/src/builtins/regexp-replace.tq b/deps/v8/src/builtins/regexp-replace.tq
index f13724b476..1333ce97fb 100644
--- a/deps/v8/src/builtins/regexp-replace.tq
+++ b/deps/v8/src/builtins/regexp-replace.tq
@@ -7,8 +7,6 @@
namespace regexp {
extern builtin
- StringIndexOf(implicit context: Context)(String, String, Smi): Smi;
- extern builtin
SubString(implicit context: Context)(String, Smi, Smi): String;
extern runtime RegExpExecMultiple(implicit context: Context)(
@@ -21,9 +19,6 @@ namespace regexp {
StringReplaceNonGlobalRegExpWithFunction(implicit context: Context)(
String, JSRegExp, Callable): String;
- extern macro
- RegExpBuiltinsAssembler::AdvanceStringIndexFast(String, Smi, bool): Smi;
-
transitioning macro RegExpReplaceCallableNoExplicitCaptures(implicit context:
Context)(
matchesElements: FixedArray, matchesLength: intptr, string: String,
diff --git a/deps/v8/src/builtins/regexp-search.tq b/deps/v8/src/builtins/regexp-search.tq
new file mode 100644
index 0000000000..3c4e57d734
--- /dev/null
+++ b/deps/v8/src/builtins/regexp-search.tq
@@ -0,0 +1,105 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include 'src/builtins/builtins-regexp-gen.h'
+
+namespace regexp {
+
+ transitioning macro
+ RegExpPrototypeSearchBodyFast(implicit context: Context)(
+ regexp: JSRegExp, string: String): JSAny {
+ assert(IsFastRegExpPermissive(regexp));
+
+ // Grab the initial value of last index.
+ const previousLastIndex: Smi = FastLoadLastIndex(regexp);
+
+ // Ensure last index is 0.
+ FastStoreLastIndex(regexp, 0);
+
+ // Call exec.
+ try {
+ const matchIndices: RegExpMatchInfo =
+ RegExpPrototypeExecBodyWithoutResultFast(regexp, string)
+ otherwise DidNotMatch;
+
+ // Successful match.
+ // Reset last index.
+ FastStoreLastIndex(regexp, previousLastIndex);
+
+ // Return the index of the match.
+ return UnsafeCast<Smi>(
+ matchIndices.objects[kRegExpMatchInfoFirstCaptureIndex]);
+ }
+ label DidNotMatch {
+ // Reset last index and return -1.
+ FastStoreLastIndex(regexp, previousLastIndex);
+ return SmiConstant(-1);
+ }
+ }
+
+ extern macro RegExpBuiltinsAssembler::BranchIfFastRegExpResult(
+ implicit context: Context)(Object): never labels IsUnmodified,
+ IsModified;
+
+ macro
+ IsFastRegExpResult(implicit context: Context)(execResult: HeapObject): bool {
+ BranchIfFastRegExpResult(execResult) otherwise return true, return false;
+ }
+
+ transitioning macro RegExpPrototypeSearchBodySlow(implicit context: Context)(
+ regexp: JSReceiver, string: String): JSAny {
+ // Grab the initial value of last index.
+ const previousLastIndex = SlowLoadLastIndex(regexp);
+ const smiZero: Smi = 0;
+
+ // Ensure last index is 0.
+ if (!SameValue(previousLastIndex, smiZero)) {
+ SlowStoreLastIndex(regexp, smiZero);
+ }
+
+ // Call exec.
+ const execResult = RegExpExec(regexp, string);
+
+ // Reset last index if necessary.
+ const currentLastIndex = SlowLoadLastIndex(regexp);
+ if (!SameValue(currentLastIndex, previousLastIndex)) {
+ SlowStoreLastIndex(regexp, previousLastIndex);
+ }
+
+ // Return -1 if no match was found.
+ if (execResult == Null) {
+ return SmiConstant(-1);
+ }
+
+ // Return the index of the match.
+ const fastExecResult = Cast<FastJSRegExpResult>(execResult)
+ otherwise return GetProperty(execResult, 'index');
+ return fastExecResult.index;
+ }
+
+ // Helper that skips a few initial checks and assumes that:
+ // 1) receiver is a "fast permissive" RegExp
+ // 2) pattern is a string
+ transitioning builtin RegExpSearchFast(implicit context: Context)(
+ receiver: JSRegExp, string: String): JSAny {
+ return RegExpPrototypeSearchBodyFast(receiver, string);
+ }
+
+ // ES#sec-regexp.prototype-@@search
+ // RegExp.prototype [ @@search ] ( string )
+ transitioning javascript builtin RegExpPrototypeSearch(
+ js-implicit context: Context, receiver: JSAny)(string: JSAny): JSAny {
+ ThrowIfNotJSReceiver(
+ receiver, kIncompatibleMethodReceiver, 'RegExp.prototype.@@search');
+ const receiver = UnsafeCast<JSReceiver>(receiver);
+ const string: String = ToString_Inline(context, string);
+
+ if (IsFastRegExpPermissive(receiver)) {
+ // TODO(pwong): Could be optimized to remove the overhead of calling the
+ // builtin (at the cost of a larger builtin).
+ return RegExpSearchFast(UnsafeCast<JSRegExp>(receiver), string);
+ }
+ return RegExpPrototypeSearchBodySlow(receiver, string);
+ }
+}
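Both paths above go out of their way to save previousLastIndex and put it
back, so @@search never leaves a visible trace on lastIndex. Illustrative:

const re = /b/g;
re.lastIndex = 5;
console.log("abc".search(re)); // 1
console.log(re.lastIndex);     // 5 – restored after the internal exec
console.log("xyz".search(re)); // -1 via the DidNotMatch / Null result path
console.log(re.lastIndex);     // still 5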
diff --git a/deps/v8/src/builtins/regexp-source.tq b/deps/v8/src/builtins/regexp-source.tq
index c1ce1c5e9a..266c9e7472 100644
--- a/deps/v8/src/builtins/regexp-source.tq
+++ b/deps/v8/src/builtins/regexp-source.tq
@@ -6,9 +6,6 @@
namespace regexp {
- const kRegExpPrototypeSourceGetter: constexpr int31
- generates 'v8::Isolate::kRegExpPrototypeSourceGetter';
-
// ES6 21.2.5.10.
// ES #sec-get-regexp.prototype.source
transitioning javascript builtin RegExpPrototypeSourceGetter(
diff --git a/deps/v8/src/builtins/regexp-split.tq b/deps/v8/src/builtins/regexp-split.tq
new file mode 100644
index 0000000000..8a9a30a7e9
--- /dev/null
+++ b/deps/v8/src/builtins/regexp-split.tq
@@ -0,0 +1,72 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include 'src/builtins/builtins-regexp-gen.h'
+
+namespace runtime {
+ extern transitioning runtime
+ RegExpSplit(implicit context: Context)(JSReceiver, String, Object): JSAny;
+} // namespace runtime
+
+namespace regexp {
+
+ const kMaxValueSmi: constexpr int31
+ generates 'Smi::kMaxValue';
+
+ extern transitioning macro RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(
+ implicit context: Context)(JSRegExp, String, Smi): JSArray;
+
+ // Helper that skips a few initial checks.
+ transitioning builtin
+ RegExpSplit(implicit context: Context)(
+ regexp: FastJSRegExp, string: String, limit: JSAny): JSAny {
+ let sanitizedLimit: Smi;
+
+ // We need to be extra-strict and require the given limit to be either
+ // undefined or a positive smi. We can't call ToUint32(limit) since
+ // that might move us onto the slow path, resulting in ordering spec
+ // violations (see https://crbug.com/801171).
+
+ if (limit == Undefined) {
+ // TODO(jgruber): In this case, we can probably avoid generation of limit
+ // checks in Generate_RegExpPrototypeSplitBody.
+ sanitizedLimit = SmiConstant(kMaxValueSmi);
+ } else if (!TaggedIsPositiveSmi(limit)) {
+ return runtime::RegExpSplit(regexp, string, limit);
+ } else {
+ sanitizedLimit = UnsafeCast<Smi>(limit);
+ }
+
+ // Due to specific shortcuts we take on the fast path (specifically, we
+ // don't allocate a new regexp instance as specced), we need to ensure that
+ // the given regexp is non-sticky to avoid invalid results. See
+ // crbug.com/v8/6706.
+
+ if (FastFlagGetter(regexp, kSticky)) {
+ return runtime::RegExpSplit(regexp, string, sanitizedLimit);
+ }
+
+ // We're good to go on the fast path, which is inlined here.
+ return RegExpPrototypeSplitBody(regexp, string, sanitizedLimit);
+ }
+
+ // ES#sec-regexp.prototype-@@split
+ // RegExp.prototype [ @@split ] ( string, limit )
+ transitioning javascript builtin RegExpPrototypeSplit(
+ js-implicit context: Context, receiver: JSAny)(...arguments): JSAny {
+ ThrowIfNotJSReceiver(
+ receiver, kIncompatibleMethodReceiver, 'RegExp.prototype.@@split');
+ const receiver = UnsafeCast<JSReceiver>(receiver);
+ const string: String = ToString_Inline(context, arguments[0]);
+ const limit = arguments[1];
+
+ // Strict: Reads the flags property.
+ // TODO(jgruber): Handle slow flag accesses on the fast path and make this
+ // permissive.
+ const fastRegExp = Cast<FastJSRegExp>(receiver)
+ otherwise return runtime::RegExpSplit(receiver, string, limit);
+ return RegExpSplit(fastRegExp, string, limit);
+ }
+
+}
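The two bail-outs in RegExpSplit above (non-Smi limit, sticky flag) fall back
to runtime::RegExpSplit rather than risk a spec-visible shortcut. What that
looks like from script, illustratively:

console.log("a1b2c".split(/\d/));     // ["a", "b", "c"] – fast path
console.log("a1b2c".split(/\d/y));    // ["a", "b", "c"] – sticky forces the runtime path
console.log("a1b2c".split(/\d/, 1));  // ["a"] – limit is a positive smi
console.log("a1b2c".split(/\d/, -1)); // ["a", "b", "c"] – -1 fails TaggedIsPositiveSmi;
                                      // the runtime applies ToUint32(-1) instead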
diff --git a/deps/v8/src/builtins/regexp-test.tq b/deps/v8/src/builtins/regexp-test.tq
index 938dfa51f3..f2ebb7c259 100644
--- a/deps/v8/src/builtins/regexp-test.tq
+++ b/deps/v8/src/builtins/regexp-test.tq
@@ -20,7 +20,7 @@ namespace regexp {
otherwise return False;
return True;
}
- const matchIndices = RegExpExec(context, receiver, str);
+ const matchIndices = RegExpExec(receiver, str);
return SelectBooleanConstant(matchIndices != Null);
}
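Dropping the explicit context argument switches this call over to the Torque
RegExpExec macro defined in regexp.tq, so the slow path of test() keeps going
through the user-overridable exec protocol. Illustrative:

const re: any = /x/;
re.exec = () => null;      // modifying the instance disables the fast path
console.log(re.test("x")); // false – matchIndices == Null above
re.exec = () => ["x"];
console.log(re.test("y")); // true – any non-null object result counts as a match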
diff --git a/deps/v8/src/builtins/regexp.tq b/deps/v8/src/builtins/regexp.tq
index 7352d2738f..e48e7c584d 100644
--- a/deps/v8/src/builtins/regexp.tq
+++ b/deps/v8/src/builtins/regexp.tq
@@ -22,8 +22,34 @@ namespace regexp {
BranchIfFastRegExp_Permissive(o) otherwise return true, return false;
}
- extern macro RegExpBuiltinsAssembler::RegExpExec(Context, Object, Object):
- Object;
+ const kInvalidRegExpExecResult: constexpr MessageTemplate
+ generates 'MessageTemplate::kInvalidRegExpExecResult';
+
+ // ES#sec-regexpexec Runtime Semantics: RegExpExec ( R, S )
+ @export
+ transitioning macro RegExpExec(implicit context: Context)(
+ receiver: JSReceiver, string: String): JSAny {
+ // Take the slow path of fetching the exec property, calling it, and
+ // verifying its return value.
+
+ const exec = GetProperty(receiver, 'exec');
+
+ // Is {exec} callable?
+ typeswitch (exec) {
+ case (execCallable: Callable): {
+ const result = Call(context, execCallable, receiver, string);
+ if (result != Null) {
+ ThrowIfNotJSReceiver(result, kInvalidRegExpExecResult, '');
+ }
+ return result;
+ }
+ case (Object): {
+ const regexp = Cast<JSRegExp>(receiver) otherwise ThrowTypeError(
+ kIncompatibleMethodReceiver, 'RegExp.prototype.exec', receiver);
+ return RegExpPrototypeExecSlow(regexp, string);
+ }
+ }
+ }
extern macro
RegExpBuiltinsAssembler::RegExpPrototypeExecBodyWithoutResultFast(
@@ -161,4 +187,59 @@ namespace regexp {
otherwise return SlowFlagsGetter(receiver);
return FastFlagsGetter(fastRegexp);
}
+
+ extern transitioning macro RegExpBuiltinsAssembler::SlowLoadLastIndex(
+ implicit context: Context)(JSAny): JSAny;
+ extern transitioning macro RegExpBuiltinsAssembler::SlowStoreLastIndex(
+ implicit context: Context)(JSAny, JSAny): void;
+
+ extern macro RegExpBuiltinsAssembler::FastLoadLastIndex(JSRegExp): Smi;
+ extern macro RegExpBuiltinsAssembler::FastStoreLastIndex(JSRegExp, Smi): void;
+
+ extern builtin
+ StringIndexOf(implicit context: Context)(String, String, Smi): Smi;
+
+ extern macro
+ RegExpBuiltinsAssembler::AdvanceStringIndexFast(String, Smi, bool): Smi;
+ extern macro
+ RegExpBuiltinsAssembler::AdvanceStringIndexSlow(String, Number, bool): Smi;
+
+ type UseCounterFeature extends int31
+ constexpr 'v8::Isolate::UseCounterFeature';
+ const kRegExpMatchIsTrueishOnNonJSRegExp: constexpr UseCounterFeature
+ generates 'v8::Isolate::kRegExpMatchIsTrueishOnNonJSRegExp';
+ const kRegExpMatchIsFalseishOnJSRegExp: constexpr UseCounterFeature
+ generates 'v8::Isolate::kRegExpMatchIsFalseishOnJSRegExp';
+ const kRegExpPrototypeSourceGetter: constexpr UseCounterFeature
+ generates 'v8::Isolate::kRegExpPrototypeSourceGetter';
+
+ // ES#sec-isregexp IsRegExp ( argument )
+ @export
+ transitioning macro IsRegExp(implicit context: Context)(obj: JSAny): bool {
+ const receiver = Cast<JSReceiver>(obj) otherwise return false;
+
+ // Check @match.
+ const value = GetProperty(receiver, MatchSymbolConstant());
+ if (value == Undefined) {
+ return Is<JSRegExp>(receiver);
+ }
+
+ assert(value != Undefined);
+ // The common path. Symbol.match exists, equals the RegExpPrototypeMatch
+ // function (and is thus trueish), and the receiver is a JSRegExp.
+ if (ToBoolean(value)) {
+ if (!Is<JSRegExp>(receiver)) {
+ IncrementUseCounter(
+ context, SmiConstant(kRegExpMatchIsTrueishOnNonJSRegExp));
+ }
+ return true;
+ }
+
+ assert(!ToBoolean(value));
+ if (Is<JSRegExp>(receiver)) {
+ IncrementUseCounter(
+ context, SmiConstant(kRegExpMatchIsFalseishOnJSRegExp));
+ }
+ return false;
+ }
}
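The IsRegExp port above checks Symbol.match before the JSRegExp brand and
counts the mismatched combinations with use counters. The observable effect,
e.g. through String.prototype.startsWith (which rejects regexp arguments), is
roughly this (illustrative TypeScript):

const fake = { [Symbol.match]: true };
try {
  "abc".startsWith(fake as any); // IsRegExp(fake) is true => TypeError
} catch (e) {
  console.log(e instanceof TypeError); // true (the "trueish on non-JSRegExp" case)
}

const optOut: any = /a/;
optOut[Symbol.match] = false;           // falseish Symbol.match on a real JSRegExp
console.log("/a/b".startsWith(optOut)); // true – treated as the string "/a/"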
diff --git a/deps/v8/src/builtins/s390/builtins-s390.cc b/deps/v8/src/builtins/s390/builtins-s390.cc
index 7dca12d17e..7fc6b91ba3 100644
--- a/deps/v8/src/builtins/s390/builtins-s390.cc
+++ b/deps/v8/src/builtins/s390/builtins-s390.cc
@@ -103,7 +103,7 @@ void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
// here which will cause scratch to become negative.
__ SubP(scratch, sp, scratch);
// Check if the arguments will overflow the stack.
- __ ShiftLeftP(r0, num_args, Operand(kPointerSizeLog2));
+ __ ShiftLeftP(r0, num_args, Operand(kSystemPointerSizeLog2));
__ CmpP(scratch, r0);
__ ble(stack_overflow); // Signed comparison.
}
@@ -147,11 +147,11 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// sp[2]: number of arguments (smi-tagged)
Label loop, no_args;
__ beq(&no_args);
- __ ShiftLeftP(scratch, r2, Operand(kPointerSizeLog2));
+ __ ShiftLeftP(scratch, r2, Operand(kSystemPointerSizeLog2));
__ SubP(sp, sp, scratch);
__ LoadRR(r1, r2);
__ bind(&loop);
- __ lay(scratch, MemOperand(scratch, -kPointerSize));
+ __ lay(scratch, MemOperand(scratch, -kSystemPointerSize));
__ LoadP(r0, MemOperand(scratch, r6));
__ StoreP(r0, MemOperand(scratch, sp));
__ BranchOnCount(r1, &loop);
@@ -177,7 +177,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
__ SmiToPtrArrayOffset(scratch, scratch);
__ AddP(sp, sp, scratch);
- __ AddP(sp, sp, Operand(kPointerSize));
+ __ AddP(sp, sp, Operand(kSystemPointerSize));
__ Ret();
__ bind(&stack_overflow);
@@ -213,11 +213,11 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ Push(r5);
// ----------- S t a t e -------------
- // -- sp[0*kPointerSize]: new target
- // -- sp[1*kPointerSize]: padding
- // -- r3 and sp[2*kPointerSize]: constructor function
- // -- sp[3*kPointerSize]: number of arguments (tagged)
- // -- sp[4*kPointerSize]: context
+ // -- sp[0*kSystemPointerSize]: new target
+ // -- sp[1*kSystemPointerSize]: padding
+ // -- r3 and sp[2*kSystemPointerSize]: constructor function
+ // -- sp[3*kSystemPointerSize]: number of arguments (tagged)
+ // -- sp[4*kSystemPointerSize]: context
// -----------------------------------
__ LoadP(r6, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
@@ -239,11 +239,11 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r2: receiver
- // -- Slot 4 / sp[0*kPointerSize]: new target
- // -- Slot 3 / sp[1*kPointerSize]: padding
- // -- Slot 2 / sp[2*kPointerSize]: constructor function
- // -- Slot 1 / sp[3*kPointerSize]: number of arguments (tagged)
- // -- Slot 0 / sp[4*kPointerSize]: context
+ // -- Slot 4 / sp[0*kSystemPointerSize]: new target
+ // -- Slot 3 / sp[1*kSystemPointerSize]: padding
+ // -- Slot 2 / sp[2*kSystemPointerSize]: constructor function
+ // -- Slot 1 / sp[3*kSystemPointerSize]: number of arguments (tagged)
+ // -- Slot 0 / sp[4*kSystemPointerSize]: context
// -----------------------------------
// Deoptimizer enters here.
masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
@@ -259,12 +259,12 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r5: new target
- // -- sp[0*kPointerSize]: implicit receiver
- // -- sp[1*kPointerSize]: implicit receiver
- // -- sp[2*kPointerSize]: padding
- // -- sp[3*kPointerSize]: constructor function
- // -- sp[4*kPointerSize]: number of arguments (tagged)
- // -- sp[5*kPointerSize]: context
+ // -- sp[0*kSystemPointerSize]: implicit receiver
+ // -- sp[1*kSystemPointerSize]: implicit receiver
+ // -- sp[2*kSystemPointerSize]: padding
+ // -- sp[3*kSystemPointerSize]: constructor function
+ // -- sp[4*kSystemPointerSize]: number of arguments (tagged)
+ // -- sp[5*kSystemPointerSize]: context
// -----------------------------------
// Restore constructor function and argument count.
@@ -295,21 +295,21 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// -- r5: new target
// -- r6: pointer to last argument
// -- cr0: condition indicating whether r2 is zero
- // -- sp[0*kPointerSize]: implicit receiver
- // -- sp[1*kPointerSize]: implicit receiver
- // -- sp[2*kPointerSize]: padding
- // -- r3 and sp[3*kPointerSize]: constructor function
- // -- sp[4*kPointerSize]: number of arguments (tagged)
- // -- sp[5*kPointerSize]: context
+ // -- sp[0*kSystemPointerSize]: implicit receiver
+ // -- sp[1*kSystemPointerSize]: implicit receiver
+ // -- sp[2*kSystemPointerSize]: padding
+ // -- r3 and sp[3*kSystemPointerSize]: constructor function
+ // -- sp[4*kSystemPointerSize]: number of arguments (tagged)
+ // -- sp[5*kSystemPointerSize]: context
// -----------------------------------
__ ltgr(r2, r2);
__ beq(&no_args);
- __ ShiftLeftP(r8, r2, Operand(kPointerSizeLog2));
+ __ ShiftLeftP(r8, r2, Operand(kSystemPointerSizeLog2));
__ SubP(sp, sp, r8);
__ LoadRR(r1, r2);
__ bind(&loop);
- __ lay(r8, MemOperand(r8, -kPointerSize));
+ __ lay(r8, MemOperand(r8, -kSystemPointerSize));
__ LoadP(r0, MemOperand(r8, r6));
__ StoreP(r0, MemOperand(r8, sp));
__ BranchOnCount(r1, &loop);
@@ -321,11 +321,11 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0: constructor result
- // -- sp[0*kPointerSize]: implicit receiver
- // -- sp[1*kPointerSize]: padding
- // -- sp[2*kPointerSize]: constructor function
- // -- sp[3*kPointerSize]: number of arguments
- // -- sp[4*kPointerSize]: context
+ // -- sp[0*kSystemPointerSize]: implicit receiver
+ // -- sp[1*kSystemPointerSize]: padding
+ // -- sp[2*kSystemPointerSize]: constructor function
+ // -- sp[3*kSystemPointerSize]: number of arguments
+ // -- sp[4*kSystemPointerSize]: context
// -----------------------------------
// Store offset of return address for deoptimizer.
@@ -376,7 +376,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ SmiToPtrArrayOffset(r3, r3);
__ AddP(sp, sp, r3);
- __ AddP(sp, sp, Operand(kPointerSize));
+ __ AddP(sp, sp, Operand(kSystemPointerSize));
__ Ret();
}
@@ -465,16 +465,16 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
r3, JSGeneratorObject::kParametersAndRegistersOffset));
{
Label loop, done_loop;
- __ ShiftLeftP(r5, r5, Operand(kPointerSizeLog2));
+ __ ShiftLeftP(r5, r5, Operand(kSystemPointerSizeLog2));
__ SubP(sp, r5);
// ip = stack offset
// r5 = parameter array offset
__ LoadImmP(ip, Operand::Zero());
- __ SubP(r5, Operand(kPointerSize));
+ __ SubP(r5, Operand(kSystemPointerSize));
__ blt(&done_loop);
- __ lgfi(r1, Operand(-kPointerSize));
+ __ lgfi(r1, Operand(-kSystemPointerSize));
__ bind(&loop);
@@ -483,7 +483,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ StoreP(r0, MemOperand(sp, ip));
// update offsets
- __ lay(ip, MemOperand(ip, kPointerSize));
+ __ lay(ip, MemOperand(ip, kSystemPointerSize));
__ BranchRelativeOnIdxHighP(r5, r1, &loop);
@@ -550,9 +550,9 @@ void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
namespace {
constexpr int kPushedStackSpace =
- (kNumCalleeSaved + 2) * kPointerSize +
- kNumCalleeSavedDoubles * kDoubleSize + 5 * kPointerSize +
- EntryFrameConstants::kCallerFPOffset - kPointerSize;
+ (kNumCalleeSaved + 2) * kSystemPointerSize +
+ kNumCalleeSavedDoubles * kDoubleSize + 5 * kSystemPointerSize +
+ EntryFrameConstants::kCallerFPOffset - kSystemPointerSize;
// Called with the native C calling convention. The corresponding function
// signature is either:
@@ -607,9 +607,9 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
// Requires us to save the callee-preserved registers r6-r13
// General convention is to also save r14 (return addr) and
// sp/r15 as well in a single STM/STMG
- __ lay(sp, MemOperand(sp, -10 * kPointerSize));
+ __ lay(sp, MemOperand(sp, -10 * kSystemPointerSize));
__ StoreMultipleP(r6, sp, MemOperand(sp, 0));
- pushed_stack_space += (kNumCalleeSaved + 2) * kPointerSize;
+ pushed_stack_space += (kNumCalleeSaved + 2) * kSystemPointerSize;
// Initialize the root register.
// C calling convention. The first argument is passed in r2.
@@ -625,8 +625,8 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
// SMI Marker
// kCEntryFPAddress
// Frame type
- __ lay(sp, MemOperand(sp, -5 * kPointerSize));
- pushed_stack_space += 5 * kPointerSize;
+ __ lay(sp, MemOperand(sp, -5 * kSystemPointerSize));
+ pushed_stack_space += 5 * kSystemPointerSize;
// Push a bad frame pointer to fail if it is used.
__ LoadImmP(r9, Operand(-1));
@@ -637,16 +637,17 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
__ Move(r6, ExternalReference::Create(
IsolateAddressId::kCEntryFPAddress, masm->isolate()));
__ LoadP(r6, MemOperand(r6));
- __ StoreMultipleP(r6, r9, MemOperand(sp, kPointerSize));
+ __ StoreMultipleP(r6, r9, MemOperand(sp, kSystemPointerSize));
Register scrach = r8;
// Set up frame pointer for the frame to be pushed.
- // Need to add kPointerSize, because sp has one extra
+ // Need to add kSystemPointerSize, because sp has one extra
// frame already for the frame type being pushed later.
- __ lay(fp, MemOperand(
- sp, -EntryFrameConstants::kCallerFPOffset + kPointerSize));
- pushed_stack_space += EntryFrameConstants::kCallerFPOffset - kPointerSize;
+ __ lay(fp, MemOperand(sp, -EntryFrameConstants::kCallerFPOffset +
+ kSystemPointerSize));
+ pushed_stack_space +=
+ EntryFrameConstants::kCallerFPOffset - kSystemPointerSize;
// restore r6
__ LoadRR(r6, r1);
@@ -736,7 +737,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
// Reload callee-saved preserved regs, return address reg (r14) and sp
__ LoadMultipleP(r6, sp, MemOperand(sp, 0));
- __ la(sp, MemOperand(sp, 10 * kPointerSize));
+ __ la(sp, MemOperand(sp, 10 * kSystemPointerSize));
// saving floating point registers
#if V8_TARGET_ARCH_S390X
@@ -790,7 +791,7 @@ static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc,
// here which will cause scratch1 to become negative.
__ SubP(scratch1, sp, scratch1);
// Check if the arguments will overflow the stack.
- __ ShiftLeftP(scratch2, argc, Operand(kPointerSizeLog2));
+ __ ShiftLeftP(scratch2, argc, Operand(kSystemPointerSizeLog2));
__ CmpP(scratch1, scratch2);
__ bgt(&okay); // Signed comparison.
@@ -807,7 +808,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// r4: function
// r5: receiver
// r6: argc
- // [fp + kPushedStackSpace + 20 * kPointerSize]: argv
+ // [fp + kPushedStackSpace + 20 * kSystemPointerSize]: argv
// r0,r2,r7-r9, cp may be clobbered
// Enter an internal frame.
@@ -831,7 +832,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// r3: new.target
// r4: function
// r6: argc
- // [fp + kPushedStackSpace + 20 * kPointerSize]: argv
+ // [fp + kPushedStackSpace + 20 * kSystemPointerSize]: argv
// r0,r2,r5,r7-r9, cp may be clobbered
// Setup new.target, argc and function.
@@ -862,15 +863,15 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// r9: scratch reg to hold index into argv
Label argLoop, argExit;
intptr_t zero = 0;
- __ ShiftLeftP(r7, r2, Operand(kPointerSizeLog2));
+ __ ShiftLeftP(r7, r2, Operand(kSystemPointerSizeLog2));
__ SubRR(sp, r7); // Buy the stack frame to fit args
__ LoadImmP(r9, Operand(zero)); // Initialize argv index
__ bind(&argLoop);
__ CmpPH(r7, Operand(zero));
__ beq(&argExit, Label::kNear);
- __ lay(r7, MemOperand(r7, -kPointerSize));
+ __ lay(r7, MemOperand(r7, -kSystemPointerSize));
__ LoadP(r8, MemOperand(r9, r6)); // read next parameter
- __ la(r9, MemOperand(r9, kPointerSize)); // r9++;
+ __ la(r9, MemOperand(r9, kSystemPointerSize)); // r9++;
__ LoadP(r0, MemOperand(r8)); // dereference handle
__ StoreP(r0, MemOperand(r7, sp)); // push parameter
__ b(&argLoop);
@@ -920,9 +921,11 @@ void Builtins::Generate_RunMicrotasksTrampoline(MacroAssembler* masm) {
__ Jump(BUILTIN_CODE(masm->isolate(), RunMicrotasks), RelocInfo::CODE_TARGET);
}
-static void ReplaceClosureCodeWithOptimizedCode(
- MacroAssembler* masm, Register optimized_code, Register closure,
- Register scratch1, Register scratch2, Register scratch3) {
+static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm,
+ Register optimized_code,
+ Register closure,
+ Register scratch1,
+ Register scratch2) {
// Store code entry in the closure.
__ StoreP(optimized_code, FieldMemOperand(closure, JSFunction::kCodeOffset),
r0);
@@ -960,100 +963,72 @@ static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
__ bind(&no_match);
}
-static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
- Register feedback_vector,
- Register scratch1, Register scratch2,
- Register scratch3) {
+static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
+ Register optimized_code_entry,
+ Register scratch) {
// ----------- S t a t e -------------
// -- r5 : new target (preserved for callee if needed, and caller)
// -- r3 : target function (preserved for callee if needed, and caller)
- // -- feedback vector (preserved for caller if needed)
// -----------------------------------
- DCHECK(!AreAliased(feedback_vector, r3, r5, scratch1, scratch2, scratch3));
-
- Label optimized_code_slot_is_weak_ref, fallthrough;
+ DCHECK(!AreAliased(r3, r5, optimized_code_entry, scratch));
Register closure = r3;
- Register optimized_code_entry = scratch1;
-
- __ LoadP(
- optimized_code_entry,
- FieldMemOperand(feedback_vector,
- FeedbackVector::kOptimizedCodeWeakOrSmiOffset));
-
- // Check if the code entry is a Smi. If yes, we interpret it as an
- // optimisation marker. Otherwise, interpret it as a weak reference to a code
- // object.
- __ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_weak_ref);
- {
- // Optimized code slot is a Smi optimization marker.
-
- // Fall through if no optimization trigger.
- __ CmpSmiLiteral(optimized_code_entry,
- Smi::FromEnum(OptimizationMarker::kNone), r0);
- __ beq(&fallthrough);
-
- TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
- OptimizationMarker::kLogFirstExecution,
- Runtime::kFunctionFirstExecution);
- TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
- OptimizationMarker::kCompileOptimized,
- Runtime::kCompileOptimized_NotConcurrent);
- TailCallRuntimeIfMarkerEquals(
- masm, optimized_code_entry,
- OptimizationMarker::kCompileOptimizedConcurrent,
- Runtime::kCompileOptimized_Concurrent);
-
- {
- // Otherwise, the marker is InOptimizationQueue, so fall through hoping
- // that an interrupt will eventually update the slot with optimized code.
- if (FLAG_debug_code) {
- __ CmpSmiLiteral(
- optimized_code_entry,
- Smi::FromEnum(OptimizationMarker::kInOptimizationQueue), r0);
- __ Assert(eq, AbortReason::kExpectedOptimizationSentinel);
- }
- __ b(&fallthrough, Label::kNear);
- }
- }
+ // Check if the optimized code is marked for deopt. If it is, call the
+ // runtime to clear it.
+ Label found_deoptimized_code;
+ __ LoadP(scratch, FieldMemOperand(optimized_code_entry,
+ Code::kCodeDataContainerOffset));
+ __ LoadW(scratch, FieldMemOperand(
+ scratch, CodeDataContainer::kKindSpecificFlagsOffset));
+ __ TestBit(scratch, Code::kMarkedForDeoptimizationBit, r0);
+ __ bne(&found_deoptimized_code);
+
+ // Optimized code is good, get it into the closure and link the closure
+ // into the optimized functions list, then tail call the optimized code.
+ ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure,
+ scratch, r7);
+ static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch");
+ __ LoadCodeObjectEntry(r4, optimized_code_entry);
+ __ Jump(r4);
- {
- // Optimized code slot is a weak reference.
- __ bind(&optimized_code_slot_is_weak_ref);
-
- __ LoadWeakValue(optimized_code_entry, optimized_code_entry, &fallthrough);
-
- // Check if the optimized code is marked for deopt. If it is, call the
- // runtime to clear it.
- Label found_deoptimized_code;
- __ LoadP(scratch2, FieldMemOperand(optimized_code_entry,
- Code::kCodeDataContainerOffset));
- __ LoadW(
- scratch2,
- FieldMemOperand(scratch2, CodeDataContainer::kKindSpecificFlagsOffset));
- __ TestBit(scratch2, Code::kMarkedForDeoptimizationBit, r0);
- __ bne(&found_deoptimized_code);
-
- // Optimized code is good, get it into the closure and link the closure into
- // the optimized functions list, then tail call the optimized code.
- // The feedback vector is no longer used, so re-use it as a scratch
- // register.
- ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure,
- scratch2, scratch3, feedback_vector);
- static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch");
- __ LoadCodeObjectEntry(r4, optimized_code_entry);
- __ Jump(r4);
+ // Optimized code slot contains deoptimized code, evict it and re-enter
+ // the closure's code.
+ __ bind(&found_deoptimized_code);
+ GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
+}
- // Optimized code slot contains deoptimized code, evict it and re-enter the
- // closure's code.
- __ bind(&found_deoptimized_code);
- GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
+static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
+ Register optimization_marker) {
+ // ----------- S t a t e -------------
+ // -- r5 : new target (preserved for callee if needed, and caller)
+ // -- r3 : target function (preserved for callee if needed, and caller)
+ // -- feedback vector (preserved for caller if needed)
+ // -- optimization_marker : a Smi containing a non-zero optimization marker.
+ // -----------------------------------
+ DCHECK(!AreAliased(feedback_vector, r3, r5, optimization_marker));
+
+ // TODO(v8:8394): The logging of first execution will break if
+ // feedback vectors are not allocated. We need to find a different way of
+ // logging these events if required.
+ TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
+ OptimizationMarker::kLogFirstExecution,
+ Runtime::kFunctionFirstExecution);
+ TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
+ OptimizationMarker::kCompileOptimized,
+ Runtime::kCompileOptimized_NotConcurrent);
+ TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
+ OptimizationMarker::kCompileOptimizedConcurrent,
+ Runtime::kCompileOptimized_Concurrent);
+
+ // Otherwise, the marker is InOptimizationQueue, so fall through hoping
+ // that an interrupt will eventually update the slot with optimized code.
+ if (FLAG_debug_code) {
+ __ CmpSmiLiteral(optimization_marker,
+ Smi::FromEnum(OptimizationMarker::kInOptimizationQueue),
+ r0);
+ __ Assert(eq, AbortReason::kExpectedOptimizationSentinel);
}
-
- // Fall-through if the optimized code cell is clear and there is no
- // optimization marker.
- __ bind(&fallthrough);
}
// Advance the current bytecode offset. This simulates what all bytecode
@@ -1163,9 +1138,21 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ CmpP(r6, Operand(FEEDBACK_VECTOR_TYPE));
__ bne(&push_stack_frame);
- // Read off the optimized code slot in the feedback vector, and if there
- // is optimized code or an optimization marker, call that instead.
- MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r6, r8, r7);
+ Register optimized_code_entry = r6;
+
+ // Read off the optimized code slot in the feedback vector.
+ __ LoadP(optimized_code_entry,
+ FieldMemOperand(feedback_vector,
+ FeedbackVector::kOptimizedCodeWeakOrSmiOffset));
+
+ // Check if the optimized code slot is not empty.
+ Label optimized_code_slot_not_empty;
+ __ CmpSmiLiteral(optimized_code_entry,
+ Smi::FromEnum(OptimizationMarker::kNone), r0);
+ __ bne(&optimized_code_slot_not_empty);
+
+ Label not_optimized;
+ __ bind(&not_optimized);
// Increment invocation count for the function.
__ LoadW(r1, FieldMemOperand(feedback_vector,
@@ -1202,29 +1189,27 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Push(kInterpreterBytecodeArrayRegister, r4);
// Allocate the local and temporary register file on the stack.
+ Label stack_overflow;
{
// Load frame size (word) from the BytecodeArray object.
__ LoadlW(r4, FieldMemOperand(kInterpreterBytecodeArrayRegister,
BytecodeArray::kFrameSizeOffset));
// Do a stack check to ensure we don't go over the limit.
- Label ok;
__ SubP(r8, sp, r4);
__ CmpLogicalP(r8, RealStackLimitAsMemOperand(masm));
- __ bge(&ok);
- __ CallRuntime(Runtime::kThrowStackOverflow);
- __ bind(&ok);
+ __ blt(&stack_overflow);
// If ok, push undefined as the initial value for all register file entries.
// TODO(rmcilroy): Consider doing more than one push per loop iteration.
Label loop, no_args;
- __ LoadRoot(r8, RootIndex::kUndefinedValue);
- __ ShiftRightP(r4, r4, Operand(kPointerSizeLog2));
+ __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
+ __ ShiftRightP(r4, r4, Operand(kSystemPointerSizeLog2));
__ LoadAndTestP(r4, r4);
__ beq(&no_args);
__ LoadRR(r1, r4);
__ bind(&loop);
- __ push(r8);
+ __ push(kInterpreterAccumulatorRegister);
__ SubP(r1, Operand(1));
__ bne(&loop);
__ bind(&no_args);
@@ -1238,12 +1223,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
BytecodeArray::kIncomingNewTargetOrGeneratorRegisterOffset));
__ CmpP(r8, Operand::Zero());
__ beq(&no_incoming_new_target_or_generator_register);
- __ ShiftLeftP(r8, r8, Operand(kPointerSizeLog2));
+ __ ShiftLeftP(r8, r8, Operand(kSystemPointerSizeLog2));
__ StoreP(r5, MemOperand(fp, r8));
__ bind(&no_incoming_new_target_or_generator_register);
- // Load accumulator with undefined.
- __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
+ // The accumulator is already loaded with undefined.
+
// Load the dispatch table into a register and dispatch to the bytecode
// handler at the current bytecode offset.
Label do_dispatch;
@@ -1254,7 +1239,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ LoadlB(r5, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
- __ ShiftLeftP(r5, r5, Operand(kPointerSizeLog2));
+ __ ShiftLeftP(r5, r5, Operand(kSystemPointerSizeLog2));
__ LoadP(kJavaScriptCallCodeStartRegister,
MemOperand(kInterpreterDispatchTableRegister, r5));
__ Call(kJavaScriptCallCodeStartRegister);
@@ -1285,8 +1270,26 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
LeaveInterpreterFrame(masm, r4);
__ Ret();
+ __ bind(&optimized_code_slot_not_empty);
+ Label maybe_has_optimized_code;
+ // Check if optimized code marker is actually a weak reference to the
+ // optimized code.
+ __ JumpIfNotSmi(optimized_code_entry, &maybe_has_optimized_code);
+ MaybeOptimizeCode(masm, feedback_vector, optimized_code_entry);
+ // Fall through if there's no runnable optimized code.
+ __ jmp(&not_optimized);
+
+ __ bind(&maybe_has_optimized_code);
+ // Load the code entry from the weak reference. If it was cleared, resume
+ // execution of unoptimized code.
+ __ LoadWeakValue(optimized_code_entry, optimized_code_entry, &not_optimized);
+ TailCallOptimizedCodeSlot(masm, optimized_code_entry, r8);
+
__ bind(&compile_lazy);
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
+
+ __ bind(&stack_overflow);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
__ bkpt(0); // Should not return.
}
@@ -1296,11 +1299,11 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
Label loop, skip;
__ CmpP(count, Operand::Zero());
__ beq(&skip);
- __ AddP(index, index, Operand(kPointerSize)); // Bias up for LoadPU
+ __ AddP(index, index, Operand(kSystemPointerSize)); // Bias up for LoadPU
__ LoadRR(r0, count);
__ bind(&loop);
- __ LoadP(scratch, MemOperand(index, -kPointerSize));
- __ lay(index, MemOperand(index, -kPointerSize));
+ __ LoadP(scratch, MemOperand(index, -kSystemPointerSize));
+ __ lay(index, MemOperand(index, -kSystemPointerSize));
__ push(scratch);
__ SubP(r0, Operand(1));
__ bne(&loop);
@@ -1474,7 +1477,7 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
Register scratch = temps.Acquire();
__ LoadlB(scratch, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
- __ ShiftLeftP(scratch, scratch, Operand(kPointerSizeLog2));
+ __ ShiftLeftP(scratch, scratch, Operand(kSystemPointerSizeLog2));
__ LoadP(kJavaScriptCallCodeStartRegister,
MemOperand(kInterpreterDispatchTableRegister, scratch));
__ Jump(kJavaScriptCallCodeStartRegister);
@@ -1540,7 +1543,7 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
}
for (int i = j - 1; i >= 0; --i) {
__ LoadP(r6, MemOperand(fp, StandardFrameConstants::kCallerSPOffset +
- i * kPointerSize));
+ i * kSystemPointerSize));
__ push(r6);
}
for (int i = 0; i < 3 - j; ++i) {
@@ -1589,9 +1592,10 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
// Overwrite the hole inserted by the deoptimizer with the return value from
// the LAZY deopt point.
__ StoreP(
- r2, MemOperand(
- sp, config->num_allocatable_general_registers() * kPointerSize +
- BuiltinContinuationFrameConstants::kFixedFrameSize));
+ r2,
+ MemOperand(sp, config->num_allocatable_general_registers() *
+ kSystemPointerSize +
+ BuiltinContinuationFrameConstants::kFixedFrameSize));
}
for (int i = allocatable_register_count - 1; i >= 0; --i) {
int code = config->GetAllocatableGeneralCode(i);
@@ -1647,14 +1651,8 @@ void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
}
void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
- // Lookup the function in the JavaScript frame.
- __ LoadP(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ LoadP(r2, MemOperand(r2, JavaScriptFrameConstants::kFunctionOffset));
-
{
FrameScope scope(masm, StackFrame::INTERNAL);
- // Pass function as argument.
- __ push(r2);
__ CallRuntime(Runtime::kCompileForOnStackReplacement);
}
@@ -1707,16 +1705,16 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
Register arg_size = r7;
Register new_sp = r5;
Register scratch = r6;
- __ ShiftLeftP(arg_size, r2, Operand(kPointerSizeLog2));
+ __ ShiftLeftP(arg_size, r2, Operand(kSystemPointerSizeLog2));
__ AddP(new_sp, sp, arg_size);
__ LoadRoot(scratch, RootIndex::kUndefinedValue);
__ LoadRR(r4, scratch);
__ LoadP(r3, MemOperand(new_sp, 0)); // receiver
- __ CmpP(arg_size, Operand(kPointerSize));
+ __ CmpP(arg_size, Operand(kSystemPointerSize));
__ blt(&skip);
- __ LoadP(scratch, MemOperand(new_sp, 1 * -kPointerSize)); // thisArg
+ __ LoadP(scratch, MemOperand(new_sp, 1 * -kSystemPointerSize)); // thisArg
__ beq(&skip);
- __ LoadP(r4, MemOperand(new_sp, 2 * -kPointerSize)); // argArray
+ __ LoadP(r4, MemOperand(new_sp, 2 * -kSystemPointerSize)); // argArray
__ bind(&skip);
__ LoadRR(sp, new_sp);
__ StoreP(scratch, MemOperand(sp, 0));
@@ -1765,7 +1763,7 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
// r2: actual number of arguments
// 2. Get the callable to call (passed as receiver) from the stack.
- __ ShiftLeftP(r4, r2, Operand(kPointerSizeLog2));
+ __ ShiftLeftP(r4, r2, Operand(kSystemPointerSizeLog2));
__ LoadP(r3, MemOperand(sp, r4));
// 3. Shift arguments and return address one slot down on the stack
@@ -1780,9 +1778,9 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
__ AddP(r4, sp, r4);
__ bind(&loop);
- __ LoadP(scratch, MemOperand(r4, -kPointerSize));
+ __ LoadP(scratch, MemOperand(r4, -kSystemPointerSize));
__ StoreP(scratch, MemOperand(r4));
- __ SubP(r4, Operand(kPointerSize));
+ __ SubP(r4, Operand(kSystemPointerSize));
__ CmpP(r4, sp);
__ bne(&loop);
// Adjust the actual number of arguments and remove the top element
@@ -1812,19 +1810,20 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
Register arg_size = r7;
Register new_sp = r5;
Register scratch = r6;
- __ ShiftLeftP(arg_size, r2, Operand(kPointerSizeLog2));
+ __ ShiftLeftP(arg_size, r2, Operand(kSystemPointerSizeLog2));
__ AddP(new_sp, sp, arg_size);
__ LoadRoot(r3, RootIndex::kUndefinedValue);
__ LoadRR(scratch, r3);
__ LoadRR(r4, r3);
- __ CmpP(arg_size, Operand(kPointerSize));
+ __ CmpP(arg_size, Operand(kSystemPointerSize));
__ blt(&skip);
- __ LoadP(r3, MemOperand(new_sp, 1 * -kPointerSize)); // target
+ __ LoadP(r3, MemOperand(new_sp, 1 * -kSystemPointerSize)); // target
__ beq(&skip);
- __ LoadP(scratch, MemOperand(new_sp, 2 * -kPointerSize)); // thisArgument
- __ CmpP(arg_size, Operand(2 * kPointerSize));
+ __ LoadP(scratch,
+ MemOperand(new_sp, 2 * -kSystemPointerSize)); // thisArgument
+ __ CmpP(arg_size, Operand(2 * kSystemPointerSize));
__ beq(&skip);
- __ LoadP(r4, MemOperand(new_sp, 3 * -kPointerSize)); // argumentsList
+ __ LoadP(r4, MemOperand(new_sp, 3 * -kSystemPointerSize)); // argumentsList
__ bind(&skip);
__ LoadRR(sp, new_sp);
__ StoreP(scratch, MemOperand(sp, 0));
@@ -1862,21 +1861,21 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
Label skip;
Register arg_size = r7;
Register new_sp = r6;
- __ ShiftLeftP(arg_size, r2, Operand(kPointerSizeLog2));
+ __ ShiftLeftP(arg_size, r2, Operand(kSystemPointerSizeLog2));
__ AddP(new_sp, sp, arg_size);
__ LoadRoot(r3, RootIndex::kUndefinedValue);
__ LoadRR(r4, r3);
__ LoadRR(r5, r3);
__ StoreP(r3, MemOperand(new_sp, 0)); // receiver (undefined)
- __ CmpP(arg_size, Operand(kPointerSize));
+ __ CmpP(arg_size, Operand(kSystemPointerSize));
__ blt(&skip);
- __ LoadP(r3, MemOperand(new_sp, 1 * -kPointerSize)); // target
+ __ LoadP(r3, MemOperand(new_sp, 1 * -kSystemPointerSize)); // target
__ LoadRR(r5, r3); // new.target defaults to target
__ beq(&skip);
- __ LoadP(r4, MemOperand(new_sp, 2 * -kPointerSize)); // argumentsList
- __ CmpP(arg_size, Operand(2 * kPointerSize));
+ __ LoadP(r4, MemOperand(new_sp, 2 * -kSystemPointerSize)); // argumentsList
+ __ CmpP(arg_size, Operand(2 * kSystemPointerSize));
__ beq(&skip);
- __ LoadP(r5, MemOperand(new_sp, 3 * -kPointerSize)); // new.target
+ __ LoadP(r5, MemOperand(new_sp, 3 * -kSystemPointerSize)); // new.target
__ bind(&skip);
__ LoadRR(sp, new_sp);
}
@@ -1912,15 +1911,15 @@ static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
// Function
// ArgC as SMI
// Padding <--- New SP
- __ lay(sp, MemOperand(sp, -5 * kPointerSize));
+ __ lay(sp, MemOperand(sp, -5 * kSystemPointerSize));
// Cleanse the top nibble of 31-bit pointers.
__ CleanseP(r14);
- __ StoreP(r14, MemOperand(sp, 4 * kPointerSize));
- __ StoreP(fp, MemOperand(sp, 3 * kPointerSize));
- __ StoreP(r6, MemOperand(sp, 2 * kPointerSize));
- __ StoreP(r3, MemOperand(sp, 1 * kPointerSize));
- __ StoreP(r2, MemOperand(sp, 0 * kPointerSize));
+ __ StoreP(r14, MemOperand(sp, 4 * kSystemPointerSize));
+ __ StoreP(fp, MemOperand(sp, 3 * kSystemPointerSize));
+ __ StoreP(r6, MemOperand(sp, 2 * kSystemPointerSize));
+ __ StoreP(r3, MemOperand(sp, 1 * kSystemPointerSize));
+ __ StoreP(r2, MemOperand(sp, 0 * kSystemPointerSize));
__ Push(Smi::zero()); // Padding.
__ la(fp,
MemOperand(sp, ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp));
@@ -1933,7 +1932,7 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
// Get the number of arguments passed (as a smi), tear down the frame and
// then tear down the parameters.
__ LoadP(r3, MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
- int stack_adjustment = kPointerSize; // adjust for receiver
+ int stack_adjustment = kSystemPointerSize; // adjust for receiver
__ LeaveFrame(StackFrame::ARGUMENTS_ADAPTOR, stack_adjustment);
__ SmiToPtrArrayOffset(r3, r3);
__ lay(sp, MemOperand(sp, r3));
@@ -1981,12 +1980,13 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
Label loop, no_args, skip;
__ CmpP(r6, Operand::Zero());
__ beq(&no_args);
- __ AddP(r4, r4,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
+ __ AddP(
+ r4, r4,
+ Operand(FixedArray::kHeaderSize - kHeapObjectTag - kSystemPointerSize));
__ LoadRR(r1, r6);
__ bind(&loop);
- __ LoadP(scratch, MemOperand(r4, kPointerSize));
- __ la(r4, MemOperand(r4, kPointerSize));
+ __ LoadP(scratch, MemOperand(r4, kSystemPointerSize));
+ __ la(r4, MemOperand(r4, kSystemPointerSize));
__ CompareRoot(scratch, RootIndex::kTheHoleValue);
__ bne(&skip, Label::kNear);
__ LoadRoot(scratch, RootIndex::kUndefinedValue);
@@ -2070,11 +2070,11 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
// Forward the arguments from the caller frame.
{
Label loop;
- __ AddP(r6, r6, Operand(kPointerSize));
+ __ AddP(r6, r6, Operand(kSystemPointerSize));
__ AddP(r2, r2, r7);
__ bind(&loop);
{
- __ ShiftLeftP(scratch, r7, Operand(kPointerSizeLog2));
+ __ ShiftLeftP(scratch, r7, Operand(kSystemPointerSizeLog2));
__ LoadP(scratch, MemOperand(r6, scratch));
__ push(scratch);
__ SubP(r7, r7, Operand(1));
@@ -2132,7 +2132,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ LoadGlobalProxy(r5);
} else {
Label convert_to_object, convert_receiver;
- __ ShiftLeftP(r5, r2, Operand(kPointerSizeLog2));
+ __ ShiftLeftP(r5, r2, Operand(kSystemPointerSizeLog2));
__ LoadP(r5, MemOperand(sp, r5));
__ JumpIfSmi(r5, &convert_to_object);
STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
@@ -2169,7 +2169,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ LoadP(r4, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
__ bind(&convert_receiver);
}
- __ ShiftLeftP(r6, r2, Operand(kPointerSizeLog2));
+ __ ShiftLeftP(r6, r2, Operand(kSystemPointerSizeLog2));
__ StoreP(r5, MemOperand(sp, r6));
}
__ bind(&done_convert);
@@ -2226,7 +2226,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
{
Label done;
__ LoadRR(scratch, sp); // preserve previous stack pointer
- __ ShiftLeftP(r9, r6, Operand(kPointerSizeLog2));
+ __ ShiftLeftP(r9, r6, Operand(kSystemPointerSizeLog2));
__ SubP(sp, sp, r9);
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack
@@ -2256,7 +2256,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ bind(&loop);
__ LoadP(r0, MemOperand(scratch, r7));
__ StoreP(r0, MemOperand(sp, r7));
- __ AddP(r7, r7, Operand(kPointerSize));
+ __ AddP(r7, r7, Operand(kSystemPointerSize));
__ BranchOnCount(r1, &loop);
__ bind(&skip);
}
@@ -2268,10 +2268,10 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ AddP(r4, r4, r9);
__ LoadRR(r1, r6);
__ bind(&loop);
- __ LoadP(r0, MemOperand(r4, -kPointerSize));
- __ lay(r4, MemOperand(r4, -kPointerSize));
+ __ LoadP(r0, MemOperand(r4, -kSystemPointerSize));
+ __ lay(r4, MemOperand(r4, -kSystemPointerSize));
__ StoreP(r0, MemOperand(sp, r7));
- __ AddP(r7, r7, Operand(kPointerSize));
+ __ AddP(r7, r7, Operand(kSystemPointerSize));
__ BranchOnCount(r1, &loop);
__ AddP(r2, r2, r6);
}
@@ -2291,7 +2291,7 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
// Patch the receiver to [[BoundThis]].
__ LoadP(r5, FieldMemOperand(r3, JSBoundFunction::kBoundThisOffset));
- __ ShiftLeftP(r1, r2, Operand(kPointerSizeLog2));
+ __ ShiftLeftP(r1, r2, Operand(kSystemPointerSizeLog2));
__ StoreP(r5, MemOperand(sp, r1));
// Push the [[BoundArguments]] onto the stack.
@@ -2311,7 +2311,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// -- r3 : the target to call (can be any Object).
// -----------------------------------
- Label non_callable, non_function, non_smi;
+ Label non_callable, non_smi;
__ JumpIfSmi(r3, &non_callable);
__ bind(&non_smi);
__ CompareObjectType(r3, r6, r7, JS_FUNCTION_TYPE);
@@ -2328,14 +2328,12 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// Check if target is a proxy and call CallProxy external builtin
__ CmpP(r7, Operand(JS_PROXY_TYPE));
- __ bne(&non_function);
- __ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), RelocInfo::CODE_TARGET);
+ __ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), RelocInfo::CODE_TARGET, eq);
// 2. Call to something else, which might have a [[Call]] internal method (if
// not we raise an exception).
- __ bind(&non_function);
  // Overwrite the original receiver with the (original) target.
- __ ShiftLeftP(r7, r2, Operand(kPointerSizeLog2));
+ __ ShiftLeftP(r7, r2, Operand(kSystemPointerSizeLog2));
__ StoreP(r3, MemOperand(sp, r7));
// Let the "call_as_function_delegate" take care of the rest.
__ LoadNativeContextSlot(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, r3);
@@ -2449,7 +2447,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
__ bind(&non_proxy);
{
// Overwrite the original receiver with the (original) target.
- __ ShiftLeftP(r7, r2, Operand(kPointerSizeLog2));
+ __ ShiftLeftP(r7, r2, Operand(kSystemPointerSizeLog2));
__ StoreP(r3, MemOperand(sp, r7));
// Let the "call_as_constructor_delegate" take care of the rest.
__ LoadNativeContextSlot(Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, r3);
@@ -2504,8 +2502,8 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ SmiToPtrArrayOffset(r2, r2);
__ AddP(r2, fp);
// adjust for return address and receiver
- __ AddP(r2, r2, Operand(2 * kPointerSize));
- __ ShiftLeftP(r6, r4, Operand(kPointerSizeLog2));
+ __ AddP(r2, r2, Operand(2 * kSystemPointerSize));
+ __ ShiftLeftP(r6, r4, Operand(kSystemPointerSizeLog2));
__ SubP(r6, r2, r6);
// Copy the arguments (including the receiver) to the new stack frame.
@@ -2520,7 +2518,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ LoadP(r0, MemOperand(r2, 0));
__ push(r0);
__ CmpP(r2, r6); // Compare before moving to next argument.
- __ lay(r2, MemOperand(r2, -kPointerSize));
+ __ lay(r2, MemOperand(r2, -kSystemPointerSize));
__ bne(&copy);
__ b(&invoke);
@@ -2548,22 +2546,22 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
Label copy;
__ bind(&copy);
// Adjust load for return address and receiver.
- __ LoadP(r0, MemOperand(r2, 2 * kPointerSize));
+ __ LoadP(r0, MemOperand(r2, 2 * kSystemPointerSize));
__ push(r0);
__ CmpP(r2, fp); // Compare before moving to next argument.
- __ lay(r2, MemOperand(r2, -kPointerSize));
+ __ lay(r2, MemOperand(r2, -kSystemPointerSize));
__ bne(&copy);
// Fill the remaining expected arguments with undefined.
// r3: function
  // r4: expected number of arguments
__ LoadRoot(r0, RootIndex::kUndefinedValue);
- __ ShiftLeftP(r6, r4, Operand(kPointerSizeLog2));
+ __ ShiftLeftP(r6, r4, Operand(kSystemPointerSizeLog2));
__ SubP(r6, fp, r6);
// Adjust for frame.
__ SubP(r6, r6,
Operand(ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp +
- kPointerSize));
+ kSystemPointerSize));
Label fill;
__ bind(&fill);
@@ -2608,7 +2606,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Remove superfluous parameters from the stack.
__ SubP(r6, r2, r4);
__ lgr(r2, r4);
- __ ShiftLeftP(r6, r6, Operand(kPointerSizeLog2));
+ __ ShiftLeftP(r6, r6, Operand(kSystemPointerSizeLog2));
__ lay(sp, MemOperand(sp, r6));
__ b(&dont_adapt_arguments);
}
@@ -2708,8 +2706,8 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
__ LoadRR(r3, r4);
} else {
// Compute the argv pointer.
- __ ShiftLeftP(r3, r2, Operand(kPointerSizeLog2));
- __ lay(r3, MemOperand(r3, sp, -kPointerSize));
+ __ ShiftLeftP(r3, r2, Operand(kSystemPointerSizeLog2));
+ __ lay(r3, MemOperand(r3, sp, -kSystemPointerSize));
}
// Enter the exit frame that transitions from JavaScript to C++.
@@ -2751,7 +2749,8 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
// by one register each.
__ LoadRR(r4, r3);
__ LoadRR(r3, r2);
- __ la(r2, MemOperand(sp, (kStackFrameExtraParamSlot + 1) * kPointerSize));
+ __ la(r2,
+ MemOperand(sp, (kStackFrameExtraParamSlot + 1) * kSystemPointerSize));
isolate_reg = r5;
    // Clang doesn't preserve r2 (result buffer);
    // write to r8 (preserved) before entry.
@@ -2765,7 +2764,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
// If return value is on the stack, pop it to registers.
if (needs_return_buffer) {
__ LoadRR(r2, r8);
- __ LoadP(r3, MemOperand(r2, kPointerSize));
+ __ LoadP(r3, MemOperand(r2, kSystemPointerSize));
__ LoadP(r2, MemOperand(r2));
}
@@ -2870,7 +2869,7 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
__ Push(result_reg, scratch);
// Account for saved regs.
- int argument_offset = 2 * kPointerSize;
+ int argument_offset = 2 * kSystemPointerSize;
// Load double input.
__ LoadDouble(double_scratch, MemOperand(sp, argument_offset));
@@ -2884,7 +2883,7 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
__ Push(scratch_high, scratch_low);
// Account for saved regs.
- argument_offset += 2 * kPointerSize;
+ argument_offset += 2 * kSystemPointerSize;
__ LoadlW(scratch_high,
MemOperand(sp, argument_offset + Register::kExponentOffset));
@@ -2958,7 +2957,7 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
__ bind(&done);
__ Pop(scratch_high, scratch_low);
- argument_offset -= 2 * kPointerSize;
+ argument_offset -= 2 * kSystemPointerSize;
__ bind(&fastpath_done);
__ StoreP(result_reg, MemOperand(sp, argument_offset));
@@ -3159,33 +3158,33 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// Set up FunctionCallbackInfo's implicit_args on the stack as follows:
//
// Target state:
- // sp[0 * kPointerSize]: kHolder
- // sp[1 * kPointerSize]: kIsolate
- // sp[2 * kPointerSize]: undefined (kReturnValueDefaultValue)
- // sp[3 * kPointerSize]: undefined (kReturnValue)
- // sp[4 * kPointerSize]: kData
- // sp[5 * kPointerSize]: undefined (kNewTarget)
+ // sp[0 * kSystemPointerSize]: kHolder
+ // sp[1 * kSystemPointerSize]: kIsolate
+ // sp[2 * kSystemPointerSize]: undefined (kReturnValueDefaultValue)
+ // sp[3 * kSystemPointerSize]: undefined (kReturnValue)
+ // sp[4 * kSystemPointerSize]: kData
+ // sp[5 * kSystemPointerSize]: undefined (kNewTarget)
// Reserve space on the stack.
- __ lay(sp, MemOperand(sp, -(FCA::kArgsLength * kPointerSize)));
+ __ lay(sp, MemOperand(sp, -(FCA::kArgsLength * kSystemPointerSize)));
// kHolder.
- __ StoreP(holder, MemOperand(sp, 0 * kPointerSize));
+ __ StoreP(holder, MemOperand(sp, 0 * kSystemPointerSize));
// kIsolate.
__ Move(scratch, ExternalReference::isolate_address(masm->isolate()));
- __ StoreP(scratch, MemOperand(sp, 1 * kPointerSize));
+ __ StoreP(scratch, MemOperand(sp, 1 * kSystemPointerSize));
// kReturnValueDefaultValue and kReturnValue.
__ LoadRoot(scratch, RootIndex::kUndefinedValue);
- __ StoreP(scratch, MemOperand(sp, 2 * kPointerSize));
- __ StoreP(scratch, MemOperand(sp, 3 * kPointerSize));
+ __ StoreP(scratch, MemOperand(sp, 2 * kSystemPointerSize));
+ __ StoreP(scratch, MemOperand(sp, 3 * kSystemPointerSize));
// kData.
- __ StoreP(call_data, MemOperand(sp, 4 * kPointerSize));
+ __ StoreP(call_data, MemOperand(sp, 4 * kSystemPointerSize));
// kNewTarget.
- __ StoreP(scratch, MemOperand(sp, 5 * kPointerSize));
+ __ StoreP(scratch, MemOperand(sp, 5 * kSystemPointerSize));
// Keep a pointer to kHolder (= implicit_args) in a scratch register.
// We use it below to set up the FunctionCallbackInfo object.
@@ -3207,33 +3206,34 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// FunctionCallbackInfo::implicit_args_ (points at kHolder as set up above).
// Arguments are after the return address (pushed by EnterExitFrame()).
- __ StoreP(scratch,
- MemOperand(sp, (kStackFrameExtraParamSlot + 1) * kPointerSize));
+ __ StoreP(scratch, MemOperand(sp, (kStackFrameExtraParamSlot + 1) *
+ kSystemPointerSize));
// FunctionCallbackInfo::values_ (points at the first varargs argument passed
// on the stack).
- __ AddP(scratch, scratch, Operand((FCA::kArgsLength - 1) * kPointerSize));
- __ ShiftLeftP(r1, argc, Operand(kPointerSizeLog2));
+ __ AddP(scratch, scratch,
+ Operand((FCA::kArgsLength - 1) * kSystemPointerSize));
+ __ ShiftLeftP(r1, argc, Operand(kSystemPointerSizeLog2));
__ AddP(scratch, scratch, r1);
- __ StoreP(scratch,
- MemOperand(sp, (kStackFrameExtraParamSlot + 2) * kPointerSize));
+ __ StoreP(scratch, MemOperand(sp, (kStackFrameExtraParamSlot + 2) *
+ kSystemPointerSize));
// FunctionCallbackInfo::length_.
- __ StoreW(argc,
- MemOperand(sp, (kStackFrameExtraParamSlot + 3) * kPointerSize));
+ __ StoreW(argc, MemOperand(sp, (kStackFrameExtraParamSlot + 3) *
+ kSystemPointerSize));
// We also store the number of bytes to drop from the stack after returning
// from the API function here.
__ mov(scratch,
- Operand((FCA::kArgsLength + 1 /* receiver */) * kPointerSize));
- __ ShiftLeftP(r1, argc, Operand(kPointerSizeLog2));
+ Operand((FCA::kArgsLength + 1 /* receiver */) * kSystemPointerSize));
+ __ ShiftLeftP(r1, argc, Operand(kSystemPointerSizeLog2));
__ AddP(scratch, r1);
- __ StoreP(scratch,
- MemOperand(sp, (kStackFrameExtraParamSlot + 4) * kPointerSize));
+ __ StoreP(scratch, MemOperand(sp, (kStackFrameExtraParamSlot + 4) *
+ kSystemPointerSize));
// v8::InvocationCallback's argument.
__ lay(r2,
- MemOperand(sp, (kStackFrameExtraParamSlot + 1) * kPointerSize));
+ MemOperand(sp, (kStackFrameExtraParamSlot + 1) * kSystemPointerSize));
ExternalReference thunk_ref = ExternalReference::invoke_function_callback();
@@ -3241,11 +3241,11 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// TODO(jgruber): Document what these arguments are.
static constexpr int kStackSlotsAboveFCA = 2;
MemOperand return_value_operand(
- fp, (kStackSlotsAboveFCA + FCA::kReturnValueOffset) * kPointerSize);
+ fp, (kStackSlotsAboveFCA + FCA::kReturnValueOffset) * kSystemPointerSize);
static constexpr int kUseStackSpaceOperand = 0;
MemOperand stack_space_operand(
- sp, (kStackFrameExtraParamSlot + 4) * kPointerSize);
+ sp, (kStackFrameExtraParamSlot + 4) * kSystemPointerSize);
AllowExternalCallThatCantCauseGC scope(masm);
CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
@@ -3293,7 +3293,7 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
// Load address of v8::PropertyAccessorInfo::args_ array and name handle.
__ LoadRR(r2, sp); // r2 = Handle<Name>
- __ AddP(r3, r2, Operand(1 * kPointerSize)); // r3 = v8::PCI::args_
+ __ AddP(r3, r2, Operand(1 * kSystemPointerSize)); // r3 = v8::PCI::args_
// If ABI passes Handles (pointer-sized struct) in a register:
//
@@ -3321,14 +3321,14 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
if (!ABI_PASSES_HANDLES_IN_REGS) {
// pass 1st arg by reference
- __ StoreP(r2, MemOperand(sp, arg0Slot * kPointerSize));
- __ AddP(r2, sp, Operand(arg0Slot * kPointerSize));
+ __ StoreP(r2, MemOperand(sp, arg0Slot * kSystemPointerSize));
+ __ AddP(r2, sp, Operand(arg0Slot * kSystemPointerSize));
}
// Create v8::PropertyCallbackInfo object on the stack and initialize
  // its args_ field.
- __ StoreP(r3, MemOperand(sp, accessorInfoSlot * kPointerSize));
- __ AddP(r3, sp, Operand(accessorInfoSlot * kPointerSize));
+ __ StoreP(r3, MemOperand(sp, accessorInfoSlot * kSystemPointerSize));
+ __ AddP(r3, sp, Operand(accessorInfoSlot * kSystemPointerSize));
// r3 = v8::PropertyCallbackInfo&
ExternalReference thunk_ref =
@@ -3340,7 +3340,8 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
// +3 is to skip prolog, return address and name handle.
MemOperand return_value_operand(
- fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
+ fp,
+ (PropertyCallbackArguments::kReturnValueOffset + 3) * kSystemPointerSize);
MemOperand* const kUseStackSpaceConstant = nullptr;
CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
kStackUnwindSpace, kUseStackSpaceConstant,
diff --git a/deps/v8/src/builtins/setup-builtins-internal.cc b/deps/v8/src/builtins/setup-builtins-internal.cc
index e93d7008e4..e3f39a0906 100644
--- a/deps/v8/src/builtins/setup-builtins-internal.cc
+++ b/deps/v8/src/builtins/setup-builtins-internal.cc
@@ -264,22 +264,17 @@ void SetupIsolateDelegate::ReplacePlaceholders(Isolate* isolate) {
namespace {
Code GenerateBytecodeHandler(Isolate* isolate, int builtin_index,
- const char* name,
interpreter::OperandScale operand_scale,
interpreter::Bytecode bytecode) {
DCHECK(interpreter::Bytecodes::BytecodeHasHandler(bytecode, operand_scale));
Handle<Code> code = interpreter::GenerateBytecodeHandler(
- isolate, bytecode, operand_scale, builtin_index,
- BuiltinAssemblerOptions(isolate, builtin_index));
+ isolate, Builtins::name(builtin_index), bytecode, operand_scale,
+ builtin_index, BuiltinAssemblerOptions(isolate, builtin_index));
return *code;
}
} // namespace
-#ifdef _MSC_VER
-#pragma optimize( "", off )
-#endif
-
// static
void SetupIsolateDelegate::SetupBuiltinsInternal(Isolate* isolate) {
Builtins* builtins = isolate->builtins();
@@ -318,9 +313,8 @@ void SetupIsolateDelegate::SetupBuiltinsInternal(Isolate* isolate) {
CallDescriptors::InterfaceDescriptor, #Name); \
AddBuiltin(builtins, index++, code);
-#define BUILD_BCH(Name, OperandScale, Bytecode) \
- code = GenerateBytecodeHandler(isolate, index, Builtins::name(index), \
- OperandScale, Bytecode); \
+#define BUILD_BCH(Name, OperandScale, Bytecode) \
+ code = GenerateBytecodeHandler(isolate, index, OperandScale, Bytecode); \
AddBuiltin(builtins, index++, code);
#define BUILD_ASM(Name, InterfaceDescriptor) \
@@ -357,10 +351,5 @@ void SetupIsolateDelegate::SetupBuiltinsInternal(Isolate* isolate) {
builtins->MarkInitialized();
}
-#ifdef _MSC_VER
-#pragma optimize( "", on )
-#endif
-
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/builtins/string-endswith.tq b/deps/v8/src/builtins/string-endswith.tq
index c3cc7d949b..9590b853e7 100644
--- a/deps/v8/src/builtins/string-endswith.tq
+++ b/deps/v8/src/builtins/string-endswith.tq
@@ -41,7 +41,7 @@ namespace string {
// 3. Let isRegExp be ? IsRegExp(searchString).
// 4. If isRegExp is true, throw a TypeError exception.
- if (IsRegExp(searchString)) {
+ if (regexp::IsRegExp(searchString)) {
ThrowTypeError(kFirstArgumentNotRegExp, kBuiltinName);
}
diff --git a/deps/v8/src/builtins/string-iterator.tq b/deps/v8/src/builtins/string-iterator.tq
index d36a44fa97..b0bbb8d4a3 100644
--- a/deps/v8/src/builtins/string-iterator.tq
+++ b/deps/v8/src/builtins/string-iterator.tq
@@ -11,7 +11,7 @@ namespace string_iterator {
properties_or_hash: kEmptyFixedArray,
elements: kEmptyFixedArray,
string: string,
- next_index: nextIndex
+ index: nextIndex
};
}
@@ -31,7 +31,7 @@ namespace string_iterator {
kIncompatibleMethodReceiver, 'String Iterator.prototype.next',
receiver);
const string = iterator.string;
- const position: intptr = SmiUntag(iterator.next_index);
+ const position: intptr = SmiUntag(iterator.index);
const length: intptr = string.length_intptr;
if (position >= length) {
return AllocateJSIteratorResult(Undefined, True);
@@ -40,7 +40,7 @@ namespace string_iterator {
const encoding = UTF16;
const ch = string::LoadSurrogatePairAt(string, length, position, encoding);
const value: String = string::StringFromSingleUTF16EncodedCodePoint(ch);
- iterator.next_index = SmiTag(position + value.length_intptr);
+ iterator.index = SmiTag(position + value.length_intptr);
return AllocateJSIteratorResult(value, False);
}
}
diff --git a/deps/v8/src/builtins/string-slice.tq b/deps/v8/src/builtins/string-slice.tq
index 661cc264c5..b5ddbdb2cc 100644
--- a/deps/v8/src/builtins/string-slice.tq
+++ b/deps/v8/src/builtins/string-slice.tq
@@ -4,7 +4,8 @@
namespace string_slice {
- extern macro SubString(String, intptr, intptr): String;
+ extern macro StringBuiltinsAssembler::SubString(String, intptr, intptr):
+ String;
// ES6 #sec-string.prototype.slice ( start, end )
// https://tc39.github.io/ecma262/#sec-string.prototype.slice
diff --git a/deps/v8/src/builtins/string-startswith.tq b/deps/v8/src/builtins/string-startswith.tq
index 7fa7ec6d5c..3238f52b86 100644
--- a/deps/v8/src/builtins/string-startswith.tq
+++ b/deps/v8/src/builtins/string-startswith.tq
@@ -5,9 +5,6 @@
#include 'src/builtins/builtins-regexp-gen.h'
namespace string {
- extern macro RegExpBuiltinsAssembler::IsRegExp(implicit context:
- Context)(Object): bool;
-
// https://tc39.github.io/ecma262/#sec-string.prototype.startswith
transitioning javascript builtin StringPrototypeStartsWith(
js-implicit context: Context, receiver: JSAny)(...arguments): Boolean {
@@ -23,7 +20,7 @@ namespace string {
// 3. Let isRegExp be ? IsRegExp(searchString).
// 4. If isRegExp is true, throw a TypeError exception.
- if (IsRegExp(searchString)) {
+ if (regexp::IsRegExp(searchString)) {
ThrowTypeError(kFirstArgumentNotRegExp, kBuiltinName);
}
diff --git a/deps/v8/src/builtins/string-substring.tq b/deps/v8/src/builtins/string-substring.tq
index c97b294a34..813dc35ab2 100644
--- a/deps/v8/src/builtins/string-substring.tq
+++ b/deps/v8/src/builtins/string-substring.tq
@@ -4,7 +4,8 @@
namespace string_substring {
- extern macro SubString(String, intptr, intptr): String;
+ extern macro StringBuiltinsAssembler::SubString(String, intptr, intptr):
+ String;
transitioning macro ToSmiBetweenZeroAnd(implicit context: Context)(
value: JSAny, limit: Smi): Smi {
diff --git a/deps/v8/src/builtins/string.tq b/deps/v8/src/builtins/string.tq
index 7f007680e9..4f2c342fd5 100644
--- a/deps/v8/src/builtins/string.tq
+++ b/deps/v8/src/builtins/string.tq
@@ -21,7 +21,8 @@ namespace string {
extern macro StringBuiltinsAssembler::LoadSurrogatePairAt(
String, intptr, intptr, constexpr UnicodeEncoding): int32;
- extern macro StringFromSingleUTF16EncodedCodePoint(int32): String;
+ extern macro StringBuiltinsAssembler::StringFromSingleUTF16EncodedCodePoint(
+ int32): String;
// This function assumes StringPrimitiveWithNoCustomIteration is true.
transitioning builtin StringToList(implicit context: Context)(string: String):
@@ -187,4 +188,12 @@ namespace string {
left: String, right: JSAny): String {
return left + ToStringImpl(context, ToPrimitiveDefault(right));
}
+
+ builtin StringCharAt(implicit context: Context)(
+ receiver: String, position: intptr): String {
+ // Load the character code at the {position} from the {receiver}.
+ const code: int32 = StringCharCodeAt(receiver, position);
+    // And return the single-character string with only that {code}.
+ return StringFromSingleCharCode(code);
+ }
}
diff --git a/deps/v8/src/builtins/typed-array-createtypedarray.tq b/deps/v8/src/builtins/typed-array-createtypedarray.tq
index a476739861..a6bd445e34 100644
--- a/deps/v8/src/builtins/typed-array-createtypedarray.tq
+++ b/deps/v8/src/builtins/typed-array-createtypedarray.tq
@@ -27,21 +27,16 @@ namespace typed_array_createtypedarray {
isOnHeap: constexpr bool, map: Map, buffer: JSArrayBuffer,
byteOffset: uintptr, byteLength: uintptr, length: uintptr): JSTypedArray {
let elements: ByteArray;
- let externalPointer: RawPtr;
- let basePointer: ByteArray | Smi;
if constexpr (isOnHeap) {
elements = AllocateByteArray(byteLength);
- basePointer = elements;
- externalPointer = PointerConstant(kExternalPointerForOnHeapArray);
} else {
- basePointer = Convert<Smi>(0);
+ elements = kEmptyByteArray;
// The max byteOffset is 8 * MaxSmi on the particular platform. 32 bit
// platforms are self-limiting, because we can't allocate an array bigger
// than our 32-bit arithmetic range anyway. 64 bit platforms could
// theoretically have an offset up to 2^35 - 1.
- const backingStore: RawPtr = buffer.backing_store;
- externalPointer = backingStore + Convert<intptr>(byteOffset);
+ const backingStore: uintptr = Convert<uintptr>(buffer.backing_store);
// Assert no overflow has occurred. Only assert if the mock array buffer
// allocator is NOT used. When the mock array buffer is used, impossibly
@@ -49,9 +44,7 @@ namespace typed_array_createtypedarray {
// and this assertion to fail.
assert(
IsMockArrayBufferAllocatorFlag() ||
- Convert<uintptr>(externalPointer) >= Convert<uintptr>(backingStore));
-
- elements = kEmptyByteArray;
+ (backingStore + byteOffset) >= backingStore);
}
// We can't just build the new object with "new JSTypedArray" here because
@@ -64,8 +57,16 @@ namespace typed_array_createtypedarray {
typedArray.byte_offset = byteOffset;
typedArray.byte_length = byteLength;
typedArray.length = length;
- typedArray.external_pointer = externalPointer;
- typedArray.base_pointer = basePointer;
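+  // Initialize the data pointer through the dedicated on-heap/off-heap
+  // helpers declared in typed-array.tq.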
+ if constexpr (isOnHeap) {
+ typed_array::SetJSTypedArrayOnHeapDataPtr(
+ typedArray, elements, byteOffset);
+ } else {
+ typed_array::SetJSTypedArrayOffHeapDataPtr(
+ typedArray, buffer.backing_store, byteOffset);
+ assert(
+ typedArray.data_ptr ==
+ (buffer.backing_store + Convert<intptr>(byteOffset)));
+ }
SetupTypedArrayEmbedderFields(typedArray);
return typedArray;
}
diff --git a/deps/v8/src/builtins/typed-array-slice.tq b/deps/v8/src/builtins/typed-array-slice.tq
index dc13865590..d17ff4a375 100644
--- a/deps/v8/src/builtins/typed-array-slice.tq
+++ b/deps/v8/src/builtins/typed-array-slice.tq
@@ -23,7 +23,7 @@ namespace typed_array_slice {
// of src and result array are the same and they are not sharing the
// same buffer, use memmove.
if (srcKind != destInfo.kind) goto IfSlow;
- if (BitcastTaggedToWord(dest.buffer) == BitcastTaggedToWord(src.buffer)) {
+ if (dest.buffer == src.buffer) {
goto IfSlow;
}
diff --git a/deps/v8/src/builtins/typed-array.tq b/deps/v8/src/builtins/typed-array.tq
index 59100736a5..1c901abf75 100644
--- a/deps/v8/src/builtins/typed-array.tq
+++ b/deps/v8/src/builtins/typed-array.tq
@@ -71,12 +71,17 @@ namespace typed_array {
ElementsKind): bool;
extern macro LoadFixedTypedArrayElementAsTagged(
RawPtr, Smi, constexpr ElementsKind): Numeric;
- extern macro StoreJSTypedArrayElementFromTagged(
+ extern macro TypedArrayBuiltinsAssembler::StoreJSTypedArrayElementFromTagged(
Context, JSTypedArray, Smi, JSAny, constexpr ElementsKind);
type LoadFn = builtin(Context, JSTypedArray, Smi) => JSAny;
type StoreFn = builtin(Context, JSTypedArray, Smi, JSAny) => JSAny;
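+  // Helpers that update a JSTypedArray's data pointer for the on-heap and
+  // off-heap backing store cases.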
+ extern macro TypedArrayBuiltinsAssembler::SetJSTypedArrayOnHeapDataPtr(
+ JSTypedArray, ByteArray, uintptr): void;
+ extern macro TypedArrayBuiltinsAssembler::SetJSTypedArrayOffHeapDataPtr(
+ JSTypedArray, RawPtr, uintptr): void;
+
// AttachedJSTypedArray guards that the array's buffer is not detached.
transient type AttachedJSTypedArray extends JSTypedArray;
@@ -198,7 +203,7 @@ namespace typed_array {
builtin StoreFixedElement<T: type>(
context: Context, typedArray: JSTypedArray, index: Smi,
value: JSAny): JSAny {
- StoreJSTypedArrayElementFromTagged(
+ typed_array::StoreJSTypedArrayElementFromTagged(
context, typedArray, index, value, KindForArrayType<T>());
return Undefined;
}
diff --git a/deps/v8/src/builtins/x64/builtins-x64.cc b/deps/v8/src/builtins/x64/builtins-x64.cc
index b6b407fb33..9679237ff8 100644
--- a/deps/v8/src/builtins/x64/builtins-x64.cc
+++ b/deps/v8/src/builtins/x64/builtins-x64.cc
@@ -5,8 +5,9 @@
#if V8_TARGET_ARCH_X64
#include "src/api/api-arguments.h"
-#include "src/base/adapters.h"
+#include "src/base/iterator.h"
#include "src/codegen/code-factory.h"
+#include "src/codegen/x64/assembler-x64.h"
#include "src/deoptimizer/deoptimizer.h"
#include "src/execution/frame-constants.h"
#include "src/execution/frames.h"
@@ -401,13 +402,13 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
__ pushq(r13);
__ pushq(r14);
__ pushq(r15);
-#ifdef _WIN64
+#ifdef V8_TARGET_OS_WIN
__ pushq(rdi); // Only callee save in Win64 ABI, argument in AMD64 ABI.
__ pushq(rsi); // Only callee save in Win64 ABI, argument in AMD64 ABI.
#endif
__ pushq(rbx);
-#ifdef _WIN64
+#ifdef V8_TARGET_OS_WIN
// On Win64 XMM6-XMM15 are callee-save.
__ AllocateStackSpace(EntryFrameConstants::kXMMRegistersBlockSize);
__ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 0), xmm6);
@@ -507,7 +508,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
}
// Restore callee-saved registers (X64 conventions).
-#ifdef _WIN64
+#ifdef V8_TARGET_OS_WIN
// On Win64 XMM6-XMM15 are callee-save
__ movdqu(xmm6, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 0));
__ movdqu(xmm7, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 1));
@@ -523,7 +524,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
#endif
__ popq(rbx);
-#ifdef _WIN64
+#ifdef V8_TARGET_OS_WIN
  // Callee-saved in Win64 ABI, arguments/volatile in AMD64 ABI.
__ popq(rsi);
__ popq(rdi);
@@ -611,17 +612,17 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ Push(rdi);
__ Push(arg_reg_4);
-#ifdef _WIN64
+#ifdef V8_TARGET_OS_WIN
// Load the previous frame pointer to access C arguments on stack
__ movq(kScratchRegister, Operand(rbp, 0));
// Load the number of arguments and setup pointer to the arguments.
__ movq(rax, Operand(kScratchRegister, EntryFrameConstants::kArgcOffset));
__ movq(rbx, Operand(kScratchRegister, EntryFrameConstants::kArgvOffset));
-#else // _WIN64
+#else // V8_TARGET_OS_WIN
// Load the number of arguments and setup pointer to the arguments.
__ movq(rax, r8);
__ movq(rbx, r9);
-#endif // _WIN64
+#endif // V8_TARGET_OS_WIN
// Current stack contents:
// [rsp + 2 * kSystemPointerSize ... ] : Internal frame
@@ -851,10 +852,11 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// TODO(juliana): if we remove the code below then we don't need all
// the parameters.
-static void ReplaceClosureCodeWithOptimizedCode(
- MacroAssembler* masm, Register optimized_code, Register closure,
- Register scratch1, Register scratch2, Register scratch3) {
-
+static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm,
+ Register optimized_code,
+ Register closure,
+ Register scratch1,
+ Register scratch2) {
// Store the optimized code in the closure.
__ StoreTaggedField(FieldOperand(closure, JSFunction::kCodeOffset),
optimized_code);
@@ -895,104 +897,71 @@ static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
__ bind(&no_match);
}
-static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
- Register feedback_vector,
- Register scratch1, Register scratch2,
- Register scratch3) {
+static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
+ Register optimization_marker) {
// ----------- S t a t e -------------
// -- rdx : new target (preserved for callee if needed, and caller)
// -- rdi : target function (preserved for callee if needed, and caller)
// -- feedback vector (preserved for caller if needed)
+ // -- optimization_marker : a Smi containing a non-zero optimization marker.
// -----------------------------------
- DCHECK(!AreAliased(feedback_vector, rdx, rdi, scratch1, scratch2, scratch3));
-
- Label optimized_code_slot_is_weak_ref, fallthrough;
-
- Register closure = rdi;
- Register optimized_code_entry = scratch1;
- Register decompr_scratch = COMPRESS_POINTERS_BOOL ? scratch2 : no_reg;
-
- __ LoadAnyTaggedField(
- optimized_code_entry,
- FieldOperand(feedback_vector,
- FeedbackVector::kOptimizedCodeWeakOrSmiOffset),
- decompr_scratch);
-
- // Check if the code entry is a Smi. If yes, we interpret it as an
- // optimisation marker. Otherwise, interpret it as a weak reference to a code
- // object.
- __ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_weak_ref);
-
- {
- // Optimized code slot is a Smi optimization marker.
-
- // Fall through if no optimization trigger.
- __ SmiCompare(optimized_code_entry,
- Smi::FromEnum(OptimizationMarker::kNone));
- __ j(equal, &fallthrough);
-
- // TODO(v8:8394): The logging of first execution will break if
- // feedback vectors are not allocated. We need to find a different way of
- // logging these events if required.
- TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
- OptimizationMarker::kLogFirstExecution,
- Runtime::kFunctionFirstExecution);
- TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
- OptimizationMarker::kCompileOptimized,
- Runtime::kCompileOptimized_NotConcurrent);
- TailCallRuntimeIfMarkerEquals(
- masm, optimized_code_entry,
- OptimizationMarker::kCompileOptimizedConcurrent,
- Runtime::kCompileOptimized_Concurrent);
- {
- // Otherwise, the marker is InOptimizationQueue, so fall through hoping
- // that an interrupt will eventually update the slot with optimized code.
- if (FLAG_debug_code) {
- __ SmiCompare(optimized_code_entry,
- Smi::FromEnum(OptimizationMarker::kInOptimizationQueue));
- __ Assert(equal, AbortReason::kExpectedOptimizationSentinel);
- }
- __ jmp(&fallthrough);
- }
+ DCHECK(!AreAliased(feedback_vector, rdx, rdi, optimization_marker));
+
+ // TODO(v8:8394): The logging of first execution will break if
+ // feedback vectors are not allocated. We need to find a different way of
+ // logging these events if required.
+ TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
+ OptimizationMarker::kLogFirstExecution,
+ Runtime::kFunctionFirstExecution);
+ TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
+ OptimizationMarker::kCompileOptimized,
+ Runtime::kCompileOptimized_NotConcurrent);
+ TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
+ OptimizationMarker::kCompileOptimizedConcurrent,
+ Runtime::kCompileOptimized_Concurrent);
+
+ // Otherwise, the marker is InOptimizationQueue, so fall through hoping
+ // that an interrupt will eventually update the slot with optimized code.
+ if (FLAG_debug_code) {
+ __ SmiCompare(optimization_marker,
+ Smi::FromEnum(OptimizationMarker::kInOptimizationQueue));
+ __ Assert(equal, AbortReason::kExpectedOptimizationSentinel);
}
+}
- {
- // Optimized code slot is a weak reference.
- __ bind(&optimized_code_slot_is_weak_ref);
-
- __ LoadWeakValue(optimized_code_entry, &fallthrough);
+static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
+ Register optimized_code_entry,
+ Register scratch1, Register scratch2) {
+ // ----------- S t a t e -------------
+ // -- rdx : new target (preserved for callee if needed, and caller)
+ // -- rdi : target function (preserved for callee if needed, and caller)
+ // -----------------------------------
- // Check if the optimized code is marked for deopt. If it is, call the
- // runtime to clear it.
- Label found_deoptimized_code;
- __ LoadTaggedPointerField(
- scratch2,
- FieldOperand(optimized_code_entry, Code::kCodeDataContainerOffset));
- __ testl(
- FieldOperand(scratch2, CodeDataContainer::kKindSpecificFlagsOffset),
- Immediate(1 << Code::kMarkedForDeoptimizationBit));
- __ j(not_zero, &found_deoptimized_code);
-
- // Optimized code is good, get it into the closure and link the closure into
- // the optimized functions list, then tail call the optimized code.
- // The feedback vector is no longer used, so re-use it as a scratch
- // register.
- ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure,
- scratch2, scratch3, feedback_vector);
- static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
- __ Move(rcx, optimized_code_entry);
- __ JumpCodeObject(rcx);
+ Register closure = rdi;
- // Optimized code slot contains deoptimized code, evict it and re-enter the
- // closure's code.
- __ bind(&found_deoptimized_code);
- GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
- }
+ // Check if the optimized code is marked for deopt. If it is, call the
+ // runtime to clear it.
+ Label found_deoptimized_code;
+ __ LoadTaggedPointerField(
+ scratch1,
+ FieldOperand(optimized_code_entry, Code::kCodeDataContainerOffset));
+ __ testl(FieldOperand(scratch1, CodeDataContainer::kKindSpecificFlagsOffset),
+ Immediate(1 << Code::kMarkedForDeoptimizationBit));
+ __ j(not_zero, &found_deoptimized_code);
+
+ // Optimized code is good, get it into the closure and link the closure into
+ // the optimized functions list, then tail call the optimized code.
+ ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure,
+ scratch1, scratch2);
+ static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
+ __ Move(rcx, optimized_code_entry);
+ __ JumpCodeObject(rcx);
- // Fall-through if the optimized code cell is clear and there is no
- // optimization marker.
- __ bind(&fallthrough);
+ // Optimized code slot contains deoptimized code, evict it and re-enter the
+ // closure's code.
+ __ bind(&found_deoptimized_code);
+ GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
}
// Advance the current bytecode offset. This simulates what all bytecode
@@ -1019,20 +988,21 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
static_cast<int>(interpreter::Bytecode::kDebugBreakExtraWide));
__ cmpb(bytecode, Immediate(0x3));
__ j(above, &process_bytecode, Label::kNear);
+ // The code to load the next bytecode is common to both wide and extra wide.
+  // We can hoist it up here. incl has to happen before testb since it
+  // modifies the ZF flag.
+ __ incl(bytecode_offset);
__ testb(bytecode, Immediate(0x1));
+ __ movzxbq(bytecode, Operand(bytecode_array, bytecode_offset, times_1, 0));
__ j(not_equal, &extra_wide, Label::kNear);
- // Load the next bytecode and update table to the wide scaled table.
- __ incl(bytecode_offset);
- __ movzxbq(bytecode, Operand(bytecode_array, bytecode_offset, times_1, 0));
+ // Update table to the wide scaled table.
__ addq(bytecode_size_table,
Immediate(kIntSize * interpreter::Bytecodes::kBytecodeCount));
__ jmp(&process_bytecode, Label::kNear);
__ bind(&extra_wide);
- // Load the next bytecode and update table to the extra wide scaled table.
- __ incl(bytecode_offset);
- __ movzxbq(bytecode, Operand(bytecode_array, bytecode_offset, times_1, 0));
+ // Update table to the extra wide scaled table.
__ addq(bytecode_size_table,
Immediate(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount));
@@ -1101,7 +1071,23 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Read off the optimized code slot in the feedback vector, and if there
// is optimized code or an optimization marker, call that instead.
- MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, rcx, r11, r15);
+
+ Register optimized_code_entry = rcx;
+ Register decompr_scratch = COMPRESS_POINTERS_BOOL ? r11 : no_reg;
+
+ __ LoadAnyTaggedField(
+ optimized_code_entry,
+ FieldOperand(feedback_vector,
+ FeedbackVector::kOptimizedCodeWeakOrSmiOffset),
+ decompr_scratch);
+
+ // Check if the optimized code slot is not empty.
+ Label optimized_code_slot_not_empty;
+ __ Cmp(optimized_code_entry, Smi::FromEnum(OptimizationMarker::kNone));
+ __ j(not_equal, &optimized_code_slot_not_empty);
+
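+  // The deferred code at the end of this builtin jumps back here when there
+  // is no runnable optimized code.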
+ Label not_optimized;
+ __ bind(&not_optimized);
// Increment invocation count for the function.
__ incl(
@@ -1137,28 +1123,26 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Push(rcx);
// Allocate the local and temporary register file on the stack.
+ Label stack_overflow;
{
// Load frame size from the BytecodeArray object.
__ movl(rcx, FieldOperand(kInterpreterBytecodeArrayRegister,
BytecodeArray::kFrameSizeOffset));
// Do a stack check to ensure we don't go over the limit.
- Label ok;
__ movq(rax, rsp);
__ subq(rax, rcx);
__ cmpq(rax, RealStackLimitAsOperand(masm));
- __ j(above_equal, &ok, Label::kNear);
- __ CallRuntime(Runtime::kThrowStackOverflow);
- __ bind(&ok);
+ __ j(below, &stack_overflow);
// If ok, push undefined as the initial value for all register file entries.
Label loop_header;
Label loop_check;
- __ LoadRoot(rax, RootIndex::kUndefinedValue);
+ __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
__ j(always, &loop_check, Label::kNear);
__ bind(&loop_header);
// TODO(rmcilroy): Consider doing more than one push per loop iteration.
- __ Push(rax);
+ __ Push(kInterpreterAccumulatorRegister);
// Continue loop if not done.
__ bind(&loop_check);
__ subq(rcx, Immediate(kSystemPointerSize));
@@ -1169,16 +1153,15 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// register, initialize it with incoming value which was passed in rdx.
Label no_incoming_new_target_or_generator_register;
__ movsxlq(
- rax,
+ rcx,
FieldOperand(kInterpreterBytecodeArrayRegister,
BytecodeArray::kIncomingNewTargetOrGeneratorRegisterOffset));
- __ testl(rax, rax);
+ __ testl(rcx, rcx);
__ j(zero, &no_incoming_new_target_or_generator_register, Label::kNear);
- __ movq(Operand(rbp, rax, times_system_pointer_size, 0), rdx);
+ __ movq(Operand(rbp, rcx, times_system_pointer_size, 0), rdx);
__ bind(&no_incoming_new_target_or_generator_register);
- // Load accumulator with undefined.
- __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
+ // The accumulator is already loaded with undefined.
// Load the dispatch table into a register and dispatch to the bytecode
// handler at the current bytecode offset.
@@ -1201,10 +1184,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Get bytecode array and bytecode offset from the stack frame.
__ movq(kInterpreterBytecodeArrayRegister,
Operand(rbp, InterpreterFrameConstants::kBytecodeArrayFromFp));
- __ movq(kInterpreterBytecodeOffsetRegister,
- Operand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiUntag(kInterpreterBytecodeOffsetRegister,
- kInterpreterBytecodeOffsetRegister);
+ Operand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
// Either return, or advance to the next bytecode and dispatch.
Label do_return;
@@ -1223,6 +1204,25 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ bind(&compile_lazy);
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
__ int3(); // Should not return.
+
+ __ bind(&optimized_code_slot_not_empty);
+ Label maybe_has_optimized_code;
+ // Check if optimized code marker is actually a weak reference to the
+ // optimized code as opposed to an optimization marker.
+ __ JumpIfNotSmi(optimized_code_entry, &maybe_has_optimized_code);
+ MaybeOptimizeCode(masm, feedback_vector, optimized_code_entry);
+ // Fall through if there's no runnable optimized code.
+ __ jmp(&not_optimized);
+
+ __ bind(&maybe_has_optimized_code);
+  // Load the code entry from the weak reference; if it was cleared, resume
+  // execution of unoptimized code.
+ __ LoadWeakValue(optimized_code_entry, &not_optimized);
+ TailCallOptimizedCodeSlot(masm, optimized_code_entry, r11, r15);
+
+ __ bind(&stack_overflow);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
+ __ int3(); // Should not return.
}
static void Generate_InterpreterPushArgs(MacroAssembler* masm,
@@ -1425,10 +1425,8 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
}
// Get the target bytecode offset from the frame.
- __ movq(kInterpreterBytecodeOffsetRegister,
- Operand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiUntag(kInterpreterBytecodeOffsetRegister,
- kInterpreterBytecodeOffsetRegister);
+ Operand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
// Dispatch to the target bytecode.
__ movzxbq(r11, Operand(kInterpreterBytecodeArrayRegister,
@@ -1443,10 +1441,8 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
// Get bytecode array and bytecode offset from the stack frame.
__ movq(kInterpreterBytecodeArrayRegister,
Operand(rbp, InterpreterFrameConstants::kBytecodeArrayFromFp));
- __ movq(kInterpreterBytecodeOffsetRegister,
- Operand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiUntag(kInterpreterBytecodeOffsetRegister,
- kInterpreterBytecodeOffsetRegister);
+ Operand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
// Load the current bytecode.
__ movzxbq(rbx, Operand(kInterpreterBytecodeArrayRegister,
@@ -1459,8 +1455,9 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
&if_return);
  // Convert new bytecode offset to a Smi and save in the stack frame.
- __ SmiTag(rbx, kInterpreterBytecodeOffsetRegister);
- __ movq(Operand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp), rbx);
+ __ SmiTag(kInterpreterBytecodeOffsetRegister);
+ __ movq(Operand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp),
+ kInterpreterBytecodeOffsetRegister);
Generate_InterpreterEnterBytecode(masm);
@@ -1485,7 +1482,7 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
// Preserve argument count for later compare.
__ movq(rcx, rax);
// Push the number of arguments to the callee.
- __ SmiTag(rax, rax);
+ __ SmiTag(rax);
__ Push(rax);
// Push a copy of the target function and the new target.
__ Push(rdi);
@@ -1522,7 +1519,7 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
__ Drop(2);
__ Pop(rcx);
- __ SmiUntag(rcx, rcx);
+ __ SmiUntag(rcx);
scope.GenerateLeaveFrame();
__ PopReturnAddressTo(rbx);
@@ -1536,7 +1533,7 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
__ Pop(rdx);
__ Pop(rdi);
__ Pop(rax);
- __ SmiUntag(rax, rax);
+ __ SmiUntag(rax);
}
// On failure, tail call back to regular js by re-calling the function
  // which has been reset to the compile lazy builtin.
@@ -1563,7 +1560,7 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
int code = config->GetAllocatableGeneralCode(i);
__ popq(Register::from_code(code));
if (java_script_builtin && code == kJavaScriptCallArgCountRegister.code()) {
- __ SmiUntag(Register::from_code(code), Register::from_code(code));
+ __ SmiUntag(Register::from_code(code));
}
}
__ movq(
@@ -2274,7 +2271,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// TODO(bmeurer): Inline the allocation here to avoid building the frame
// in the fast case? (fall back to AllocateInNewSpace?)
FrameScope scope(masm, StackFrame::INTERNAL);
- __ SmiTag(rax, rax);
+ __ SmiTag(rax);
__ Push(rax);
__ Push(rdi);
__ movq(rax, rcx);
@@ -2285,7 +2282,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ movq(rcx, rax);
__ Pop(rdi);
__ Pop(rax);
- __ SmiUntag(rax, rax);
+ __ SmiUntag(rax);
}
__ LoadTaggedPointerField(
rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
@@ -2601,14 +2598,8 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
}
void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
- // Lookup the function in the JavaScript frame.
- __ movq(rax, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ movq(rax, Operand(rax, JavaScriptFrameConstants::kFunctionOffset));
-
{
FrameScope scope(masm, StackFrame::INTERNAL);
- // Pass function as argument.
- __ Push(rax);
__ CallRuntime(Runtime::kCompileForOnStackReplacement);
}
@@ -2647,7 +2638,7 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
// The function index was pushed to the stack by the caller as int32.
__ Pop(r11);
// Convert to Smi for the runtime call.
- __ SmiTag(r11, r11);
+ __ SmiTag(r11);
{
HardAbortScope hard_abort(masm); // Avoid calls to Abort.
FrameScope scope(masm, StackFrame::WASM_COMPILE_LAZY);
@@ -2716,7 +2707,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
// If argv_mode == kArgvInRegister:
// r15: pointer to the first argument
-#ifdef _WIN64
+#ifdef V8_TARGET_OS_WIN
// Windows 64-bit ABI passes arguments in rcx, rdx, r8, r9. It requires the
// stack to be aligned to 16 bytes. It only allows a single word to be
// returned in register rax. Larger return sizes must be written to an address
@@ -2738,7 +2729,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
const Register kCCallArg3 = rcx;
const int kArgExtraStackSpace = 0;
const int kMaxRegisterResultSize = 2;
-#endif // _WIN64
+#endif // V8_TARGET_OS_WIN
// Enter the exit frame that transitions from JavaScript to C++.
int arg_stack_space =
@@ -2809,7 +2800,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
IsolateAddressId::kPendingExceptionAddress, masm->isolate());
Operand pending_exception_operand =
masm->ExternalReferenceAsOperand(pending_exception_address);
- __ cmpq(r14, pending_exception_operand);
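+  // Compare tagged values; under pointer compression this is a 32-bit compare.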
+ __ cmp_tagged(r14, pending_exception_operand);
__ j(equal, &okay, Label::kNear);
__ int3();
__ bind(&okay);
diff --git a/deps/v8/src/codegen/OWNERS b/deps/v8/src/codegen/OWNERS
index feb2f62f78..64d2d7b97d 100644
--- a/deps/v8/src/codegen/OWNERS
+++ b/deps/v8/src/codegen/OWNERS
@@ -1,6 +1,6 @@
bbudge@chromium.org
bmeurer@chromium.org
-clemensh@chromium.org
+clemensb@chromium.org
gdeepti@chromium.org
ishell@chromium.org
jarin@chromium.org
diff --git a/deps/v8/src/codegen/arm/assembler-arm-inl.h b/deps/v8/src/codegen/arm/assembler-arm-inl.h
index 3fbd679104..45ec07a382 100644
--- a/deps/v8/src/codegen/arm/assembler-arm-inl.h
+++ b/deps/v8/src/codegen/arm/assembler-arm-inl.h
@@ -118,7 +118,8 @@ void RelocInfo::set_target_object(Heap* heap, HeapObject target,
DCHECK(IsCodeTarget(rmode_) || rmode_ == FULL_EMBEDDED_OBJECT);
Assembler::set_target_address_at(pc_, constant_pool_, target.ptr(),
icache_flush_mode);
- if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null()) {
+ if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null() &&
+ !FLAG_disable_write_barriers) {
WriteBarrierForCode(host(), this, target);
}
}
diff --git a/deps/v8/src/codegen/arm/assembler-arm.cc b/deps/v8/src/codegen/arm/assembler-arm.cc
index 9c46063537..6659960bb8 100644
--- a/deps/v8/src/codegen/arm/assembler-arm.cc
+++ b/deps/v8/src/codegen/arm/assembler-arm.cc
@@ -40,6 +40,7 @@
#include "src/base/bits.h"
#include "src/base/cpu.h"
+#include "src/base/overflowing-math.h"
#include "src/codegen/arm/assembler-arm-inl.h"
#include "src/codegen/assembler-inl.h"
#include "src/codegen/macro-assembler.h"
@@ -452,8 +453,8 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
Handle<HeapObject> object;
switch (request.kind()) {
case HeapObjectRequest::kHeapNumber:
- object = isolate->factory()->NewHeapNumber(request.heap_number(),
- AllocationType::kOld);
+ object = isolate->factory()->NewHeapNumber<AllocationType::kOld>(
+ request.heap_number());
break;
case HeapObjectRequest::kStringConstant: {
const StringConstantBase* str = request.string();
@@ -4802,15 +4803,17 @@ void Assembler::GrowBuffer() {
int rc_delta = (new_start + new_size) - (buffer_start_ + old_size);
size_t reloc_size = (buffer_start_ + old_size) - reloc_info_writer.pos();
MemMove(new_start, buffer_start_, pc_offset());
- MemMove(reloc_info_writer.pos() + rc_delta, reloc_info_writer.pos(),
- reloc_size);
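+  // Do the offset arithmetic on integral Address values rather than on raw
+  // pointers, keeping the intermediate values well-defined.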
+ byte* new_reloc_start = reinterpret_cast<byte*>(
+ reinterpret_cast<Address>(reloc_info_writer.pos()) + rc_delta);
+ MemMove(new_reloc_start, reloc_info_writer.pos(), reloc_size);
// Switch buffers.
buffer_ = std::move(new_buffer);
buffer_start_ = new_start;
- pc_ += pc_delta;
- reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
- reloc_info_writer.last_pc() + pc_delta);
+ pc_ = reinterpret_cast<byte*>(reinterpret_cast<Address>(pc_) + pc_delta);
+ byte* new_last_pc = reinterpret_cast<byte*>(
+ reinterpret_cast<Address>(reloc_info_writer.last_pc()) + pc_delta);
+ reloc_info_writer.Reposition(new_reloc_start, new_last_pc);
// None of our relocation types are pc relative pointing outside the code
// buffer nor pc absolute pointing inside the code buffer, so there is no need
@@ -4831,7 +4834,7 @@ void Assembler::dd(uint32_t data) {
// blocked before using dd.
DCHECK(is_const_pool_blocked() || pending_32_bit_constants_.empty());
CheckBuffer();
- *reinterpret_cast<uint32_t*>(pc_) = data;
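+  // pc_ is not necessarily aligned for uint32_t, so use an unaligned write.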
+ base::WriteUnalignedValue(reinterpret_cast<Address>(pc_), data);
pc_ += sizeof(uint32_t);
}
@@ -4840,7 +4843,7 @@ void Assembler::dq(uint64_t value) {
// blocked before using dq.
DCHECK(is_const_pool_blocked() || pending_32_bit_constants_.empty());
CheckBuffer();
- *reinterpret_cast<uint64_t*>(pc_) = value;
+ base::WriteUnalignedValue(reinterpret_cast<Address>(pc_), value);
pc_ += sizeof(uint64_t);
}
diff --git a/deps/v8/src/codegen/arm/assembler-arm.h b/deps/v8/src/codegen/arm/assembler-arm.h
index f669943f34..1d280e5555 100644
--- a/deps/v8/src/codegen/arm/assembler-arm.h
+++ b/deps/v8/src/codegen/arm/assembler-arm.h
@@ -41,6 +41,7 @@
#define V8_CODEGEN_ARM_ASSEMBLER_ARM_H_
#include <stdio.h>
+#include <memory>
#include <vector>
#include "src/codegen/arm/constants-arm.h"
@@ -305,9 +306,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
explicit Assembler(const AssemblerOptions&,
std::unique_ptr<AssemblerBuffer> = {});
- virtual ~Assembler();
+ ~Assembler() override;
- virtual void AbortedCodeGeneration() { pending_32_bit_constants_.clear(); }
+ void AbortedCodeGeneration() override { pending_32_bit_constants_.clear(); }
// GetCode emits any pending (non-emitted) code and fills the descriptor desc.
static constexpr int kNoHandlerTable = 0;
diff --git a/deps/v8/src/codegen/arm/macro-assembler-arm.cc b/deps/v8/src/codegen/arm/macro-assembler-arm.cc
index 7f6d82518e..6f1adfead2 100644
--- a/deps/v8/src/codegen/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/codegen/arm/macro-assembler-arm.cc
@@ -573,7 +573,7 @@ void MacroAssembler::Ubfx(Register dst, Register src1, int lsb, int width,
Condition cond) {
DCHECK_LT(lsb, 32);
if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
- int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
+ int mask = (1u << (width + lsb)) - 1u - ((1u << lsb) - 1u);
and_(dst, src1, Operand(mask), LeaveCC, cond);
if (lsb != 0) {
mov(dst, Operand(dst, LSR, lsb), LeaveCC, cond);
@@ -1602,57 +1602,43 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
}
}
-void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
- const ParameterCount& expected,
- const ParameterCount& actual) {
- Label skip_hook;
-
- ExternalReference debug_hook_active =
- ExternalReference::debug_hook_on_function_call_address(isolate());
- Move(r4, debug_hook_active);
- ldrsb(r4, MemOperand(r4));
- cmp(r4, Operand(0));
- b(eq, &skip_hook);
-
- {
- // Load receiver to pass it later to DebugOnFunctionCall hook.
- if (actual.is_reg()) {
- mov(r4, actual.reg());
- } else {
- mov(r4, Operand(actual.immediate()));
- }
- ldr(r4, MemOperand(sp, r4, LSL, kPointerSizeLog2));
- FrameScope frame(this,
- has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
- if (expected.is_reg()) {
- SmiTag(expected.reg());
- Push(expected.reg());
- }
- if (actual.is_reg()) {
- SmiTag(actual.reg());
- Push(actual.reg());
- }
- if (new_target.is_valid()) {
- Push(new_target);
- }
- Push(fun);
- Push(fun);
- Push(r4);
- CallRuntime(Runtime::kDebugOnFunctionCall);
- Pop(fun);
- if (new_target.is_valid()) {
- Pop(new_target);
- }
- if (actual.is_reg()) {
- Pop(actual.reg());
- SmiUntag(actual.reg());
- }
- if (expected.is_reg()) {
- Pop(expected.reg());
- SmiUntag(expected.reg());
- }
+void MacroAssembler::CallDebugOnFunctionCall(Register fun, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual) {
+ // Load receiver to pass it later to DebugOnFunctionCall hook.
+ if (actual.is_reg()) {
+ ldr(r4, MemOperand(sp, actual.reg(), LSL, kPointerSizeLog2));
+ } else {
+ ldr(r4, MemOperand(sp, actual.immediate() << kPointerSizeLog2));
+ }
+ FrameScope frame(this, has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
+ if (expected.is_reg()) {
+ SmiTag(expected.reg());
+ Push(expected.reg());
+ }
+ if (actual.is_reg()) {
+ SmiTag(actual.reg());
+ Push(actual.reg());
+ }
+ if (new_target.is_valid()) {
+ Push(new_target);
+ }
+ Push(fun);
+ Push(fun);
+ Push(r4);
+ CallRuntime(Runtime::kDebugOnFunctionCall);
+ Pop(fun);
+ if (new_target.is_valid()) {
+ Pop(new_target);
+ }
+ if (actual.is_reg()) {
+ Pop(actual.reg());
+ SmiUntag(actual.reg());
+ }
+ if (expected.is_reg()) {
+ Pop(expected.reg());
+ SmiUntag(expected.reg());
}
- bind(&skip_hook);
}
void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
@@ -1665,7 +1651,16 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
DCHECK_IMPLIES(new_target.is_valid(), new_target == r3);
// On function call, call into the debugger if necessary.
- CheckDebugHook(function, new_target, expected, actual);
+ Label debug_hook, continue_after_hook;
+ {
+ ExternalReference debug_hook_active =
+ ExternalReference::debug_hook_on_function_call_address(isolate());
+ Move(r4, debug_hook_active);
+ ldrsb(r4, MemOperand(r4));
+ cmp(r4, Operand(0));
+ b(ne, &debug_hook);
+ }
+ bind(&continue_after_hook);
// Clear the new.target register if not given.
if (!new_target.is_valid()) {
@@ -1687,11 +1682,17 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
DCHECK(flag == JUMP_FUNCTION);
JumpCodeObject(code);
}
-
- // Continue here if InvokePrologue does handle the invocation due to
- // mismatched parameter counts.
- bind(&done);
}
+ b(&done);
+
+ // Deferred debug hook.
+ bind(&debug_hook);
+ CallDebugOnFunctionCall(function, new_target, expected, actual);
+ b(&continue_after_hook);
+
+ // Continue here if InvokePrologue does handle the invocation due to
+ // mismatched parameter counts.
+ bind(&done);
}
void MacroAssembler::InvokeFunction(Register fun, Register new_target,
diff --git a/deps/v8/src/codegen/arm/macro-assembler-arm.h b/deps/v8/src/codegen/arm/macro-assembler-arm.h
index bbea40b9a6..4807a6d20d 100644
--- a/deps/v8/src/codegen/arm/macro-assembler-arm.h
+++ b/deps/v8/src/codegen/arm/macro-assembler-arm.h
@@ -633,10 +633,10 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
const ParameterCount& expected,
const ParameterCount& actual, InvokeFlag flag);
- // On function call, call into the debugger if necessary.
- void CheckDebugHook(Register fun, Register new_target,
- const ParameterCount& expected,
- const ParameterCount& actual);
+ // On function call, call into the debugger.
+ void CallDebugOnFunctionCall(Register fun, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual);
// Invoke the JavaScript function in the given register. Changes the
// current context to the context in the function before invoking.
diff --git a/deps/v8/src/codegen/arm64/assembler-arm64-inl.h b/deps/v8/src/codegen/arm64/assembler-arm64-inl.h
index baae106c1c..ce34da7dc2 100644
--- a/deps/v8/src/codegen/arm64/assembler-arm64-inl.h
+++ b/deps/v8/src/codegen/arm64/assembler-arm64-inl.h
@@ -54,14 +54,12 @@ inline bool CPURegister::IsSP() const {
}
inline void CPURegList::Combine(const CPURegList& other) {
- DCHECK(IsValid());
DCHECK(other.type() == type_);
DCHECK(other.RegisterSizeInBits() == size_);
list_ |= other.list();
}
inline void CPURegList::Remove(const CPURegList& other) {
- DCHECK(IsValid());
if (other.type() == type_) {
list_ &= ~other.list();
}
@@ -84,13 +82,12 @@ inline void CPURegList::Remove(const CPURegister& other1,
}
inline void CPURegList::Combine(int code) {
- DCHECK(IsValid());
DCHECK(CPURegister::Create(code, size_, type_).IsValid());
list_ |= (1ULL << code);
+ DCHECK(IsValid());
}
inline void CPURegList::Remove(int code) {
- DCHECK(IsValid());
DCHECK(CPURegister::Create(code, size_, type_).IsValid());
list_ &= ~(1ULL << code);
}
@@ -311,6 +308,18 @@ Operand Operand::ToExtendedRegister() const {
return Operand(reg_, reg_.Is64Bits() ? UXTX : UXTW, shift_amount_);
}
+Operand Operand::ToW() const {
+ if (IsShiftedRegister()) {
+ DCHECK(reg_.Is64Bits());
+ return Operand(reg_.W(), shift(), shift_amount());
+ } else if (IsExtendedRegister()) {
+ DCHECK(reg_.Is64Bits());
+ return Operand(reg_.W(), extend(), shift_amount());
+ }
+ DCHECK(IsImmediate());
+ return *this;
+}
+
Immediate Operand::immediate_for_heap_object_request() const {
DCHECK((heap_object_request().kind() == HeapObjectRequest::kHeapNumber &&
immediate_.rmode() == RelocInfo::FULL_EMBEDDED_OBJECT) ||
@@ -711,7 +720,8 @@ void RelocInfo::set_target_object(Heap* heap, HeapObject target,
Assembler::set_target_address_at(pc_, constant_pool_, target.ptr(),
icache_flush_mode);
}
- if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null()) {
+ if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null() &&
+ !FLAG_disable_write_barriers) {
WriteBarrierForCode(host(), this, target);
}
}
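
Operand::ToW() above rewrites a register operand to its 32-bit (W) view so the tagged-comparison helpers added later in this series can compare compressed values; immediates pass through unchanged. A rough stand-alone model of that logic, with simplified types rather than the real Register/Operand classes:

#include <cassert>

enum class Kind { kImmediate, kShiftedRegister, kExtendedRegister };

struct Reg {
  int code;
  bool is64;
  Reg W() const { return Reg{code, false}; }  // 32-bit view of an X register
};

struct Op {
  Kind kind;
  Reg reg;           // meaningful unless kind == kImmediate
  int shift_amount;

  Op ToW() const {
    if (kind != Kind::kImmediate) {
      assert(reg.is64);                       // only X registers narrow
      return Op{kind, reg.W(), shift_amount};
    }
    return *this;                             // immediates are width-agnostic
  }
};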
diff --git a/deps/v8/src/codegen/arm64/assembler-arm64.cc b/deps/v8/src/codegen/arm64/assembler-arm64.cc
index c798d3a8a0..ea2f4696bd 100644
--- a/deps/v8/src/codegen/arm64/assembler-arm64.cc
+++ b/deps/v8/src/codegen/arm64/assembler-arm64.cc
@@ -63,18 +63,16 @@ void CpuFeatures::PrintFeatures() {}
// CPURegList utilities.
CPURegister CPURegList::PopLowestIndex() {
- DCHECK(IsValid());
if (IsEmpty()) {
return NoCPUReg;
}
- int index = CountTrailingZeros(list_, kRegListSizeInBits);
+ int index = base::bits::CountTrailingZeros(list_);
DCHECK((1LL << index) & list_);
Remove(index);
return CPURegister::Create(index, size_, type_);
}
CPURegister CPURegList::PopHighestIndex() {
- DCHECK(IsValid());
if (IsEmpty()) {
return NoCPUReg;
}
@@ -369,8 +367,9 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
Address pc = reinterpret_cast<Address>(buffer_start_) + request.offset();
switch (request.kind()) {
case HeapObjectRequest::kHeapNumber: {
- Handle<HeapObject> object = isolate->factory()->NewHeapNumber(
- request.heap_number(), AllocationType::kOld);
+ Handle<HeapObject> object =
+ isolate->factory()->NewHeapNumber<AllocationType::kOld>(
+ request.heap_number());
EmbeddedObjectIndex index = AddEmbeddedObject(object);
set_embedded_object_index_referenced_from(pc, index);
break;
@@ -3967,19 +3966,24 @@ void Assembler::LoadStore(const CPURegister& rt, const MemOperand& addr,
bool Assembler::IsImmLSUnscaled(int64_t offset) { return is_int9(offset); }
bool Assembler::IsImmLSScaled(int64_t offset, unsigned size) {
- bool offset_is_size_multiple = (((offset >> size) << size) == offset);
+ bool offset_is_size_multiple =
+ (static_cast<int64_t>(static_cast<uint64_t>(offset >> size) << size) ==
+ offset);
return offset_is_size_multiple && is_uint12(offset >> size);
}
bool Assembler::IsImmLSPair(int64_t offset, unsigned size) {
- bool offset_is_size_multiple = (((offset >> size) << size) == offset);
+ bool offset_is_size_multiple =
+ (static_cast<int64_t>(static_cast<uint64_t>(offset >> size) << size) ==
+ offset);
return offset_is_size_multiple && is_int7(offset >> size);
}
bool Assembler::IsImmLLiteral(int64_t offset) {
int inst_size = static_cast<int>(kInstrSizeLog2);
bool offset_is_inst_multiple =
- (((offset >> inst_size) << inst_size) == offset);
+ (static_cast<int64_t>(static_cast<uint64_t>(offset >> inst_size)
+ << inst_size) == offset);
DCHECK_GT(offset, 0);
offset >>= kLoadLiteralScaleLog2;
return offset_is_inst_multiple && is_intn(offset, ImmLLiteral_width);
@@ -4178,9 +4182,9 @@ bool Assembler::IsImmLogical(uint64_t value, unsigned width, unsigned* n,
// 1110ss 4 UInt(ss)
// 11110s 2 UInt(s)
//
- // So we 'or' (-d << 1) with our computed s to form imms.
+ // So we 'or' (-d * 2) with our computed s to form imms.
*n = out_n;
- *imm_s = ((-d << 1) | (s - 1)) & 0x3F;
+ *imm_s = ((-d * 2) | (s - 1)) & 0x3F;
*imm_r = r;
return true;
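
Several hunks above replace signed left shifts with unsigned shifts or multiplication. The motivation is language-level: left-shifting a negative signed value is undefined behavior in C++ (before C++20), and these load/store and branch offsets can be negative. A self-contained version of the corrected multiple-of-2^size predicate:

#include <cstdint>

bool IsMultipleOfPow2(int64_t offset, unsigned size) {
  // The right shift of a negative value is implementation-defined but
  // arithmetic on every supported toolchain; the left shift, which would be
  // undefined on negatives, is done on the unsigned representation instead.
  return static_cast<int64_t>(static_cast<uint64_t>(offset >> size) << size) ==
         offset;
}

The same concern explains (-d << 1) becoming (-d * 2) in IsImmLogical: signed multiplication is defined whenever the result fits, while shifting the negative -d left is not.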
diff --git a/deps/v8/src/codegen/arm64/assembler-arm64.h b/deps/v8/src/codegen/arm64/assembler-arm64.h
index 04ee6d8b75..23e8acb1f9 100644
--- a/deps/v8/src/codegen/arm64/assembler-arm64.h
+++ b/deps/v8/src/codegen/arm64/assembler-arm64.h
@@ -8,6 +8,7 @@
#include <deque>
#include <list>
#include <map>
+#include <memory>
#include <vector>
#include "src/base/optional.h"
@@ -105,6 +106,9 @@ class Operand {
// which helps in the encoding of instructions that use the stack pointer.
inline Operand ToExtendedRegister() const;
+ // Returns new Operand adapted for using with W registers.
+ inline Operand ToW() const;
+
inline Immediate immediate() const;
inline int64_t ImmediateValue() const;
inline RelocInfo::Mode ImmediateRMode() const;
@@ -189,9 +193,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
explicit Assembler(const AssemblerOptions&,
std::unique_ptr<AssemblerBuffer> = {});
- virtual ~Assembler();
+ ~Assembler() override;
- virtual void AbortedCodeGeneration();
+ void AbortedCodeGeneration() override;
// System functions ---------------------------------------------------------
// Start generating code from the beginning of the buffer, discarding any code
@@ -375,7 +379,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Instruction set functions ------------------------------------------------
// Branch / Jump instructions.
- // For branches offsets are scaled, i.e. they in instrcutions not in bytes.
+ // For branches offsets are scaled, i.e. in instructions not in bytes.
// Branch to register.
void br(const Register& xn);
diff --git a/deps/v8/src/codegen/arm64/constants-arm64.h b/deps/v8/src/codegen/arm64/constants-arm64.h
index 914268644a..ccafae5e14 100644
--- a/deps/v8/src/codegen/arm64/constants-arm64.h
+++ b/deps/v8/src/codegen/arm64/constants-arm64.h
@@ -33,6 +33,7 @@ constexpr size_t kMaxPCRelativeCodeRangeInMB = 128;
constexpr uint8_t kInstrSize = 4;
constexpr uint8_t kInstrSizeLog2 = 2;
constexpr uint8_t kLoadLiteralScaleLog2 = 2;
+constexpr uint8_t kLoadLiteralScale = 1 << kLoadLiteralScaleLog2;
constexpr int kMaxLoadLiteralRange = 1 * MB;
const int kNumberOfRegisters = 32;
@@ -146,7 +147,8 @@ const unsigned kFloat16ExponentBias = 15;
// Actual value of root register is offset from the root array's start
// to take advantage of negative displacement values.
// TODO(sigurds): Choose best value.
-constexpr int kRootRegisterBias = 256;
+// TODO(ishell): Choose best value for ptr-compr.
+constexpr int kRootRegisterBias = kSystemPointerSize == kTaggedSize ? 256 : 0;
using float16 = uint16_t;
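
kRootRegisterBias offsets the root register from the start of the roots area so that negative as well as positive load displacements can reach roots; the hunk above turns the bias off when pointers are compressed (kSystemPointerSize != kTaggedSize), with tuning still to come per the TODO. A small sketch of the addressing arithmetic, with assumed values rather than authoritative ones:

#include <cstdint>

constexpr int kSystemPointerSize = 8;
constexpr int kTaggedSize = 4;  // assumption: pointer compression enabled
constexpr int kRootRegisterBias =
    kSystemPointerSize == kTaggedSize ? 256 : 0;

// If kRootRegister holds roots_start + kRootRegisterBias, a root at byte
// offset `off` is reached with displacement (off - kRootRegisterBias),
// which may be negative.
constexpr int64_t DisplacementFor(int64_t off) {
  return off - kRootRegisterBias;
}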
diff --git a/deps/v8/src/codegen/arm64/instructions-arm64.cc b/deps/v8/src/codegen/arm64/instructions-arm64.cc
index 05f3654da9..ab022affdd 100644
--- a/deps/v8/src/codegen/arm64/instructions-arm64.cc
+++ b/deps/v8/src/codegen/arm64/instructions-arm64.cc
@@ -71,6 +71,7 @@ static uint64_t RotateRight(uint64_t value, unsigned int rotate,
unsigned int width) {
DCHECK_LE(width, 64);
rotate &= 63;
+ if (rotate == 0) return value;
return ((value & ((1ULL << rotate) - 1ULL)) << (width - rotate)) |
(value >> rotate);
}
@@ -191,16 +192,16 @@ int64_t Instruction::ImmPCOffset() {
} else if (BranchType() != UnknownBranchType) {
// All PC-relative branches.
// Relative branch offsets are instruction-size-aligned.
- offset = ImmBranch() << kInstrSizeLog2;
+ offset = ImmBranch() * kInstrSize;
} else if (IsUnresolvedInternalReference()) {
// Internal references are always word-aligned.
- offset = ImmUnresolvedInternalReference() << kInstrSizeLog2;
+ offset = ImmUnresolvedInternalReference() * kInstrSize;
} else {
// Load literal (offset from PC).
DCHECK(IsLdrLiteral());
// The offset is always shifted by 2 bits, even for loads to 64-bits
// registers.
- offset = ImmLLiteral() << kInstrSizeLog2;
+ offset = ImmLLiteral() * kInstrSize;
}
return offset;
}
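
Two distinct fixes above. RotateRight gains an early return because with rotate == 0 the mask term would be shifted by width (up to 64), and shifting a 64-bit value by 64 is undefined behavior. The ImmPCOffset cases switch from << kInstrSizeLog2 to * kInstrSize because those immediates can be negative, and shifting negatives left is likewise undefined. The corrected rotate, stand-alone:

#include <cassert>
#include <cstdint>

uint64_t RotateRight(uint64_t value, unsigned rotate, unsigned width) {
  assert(width <= 64);
  rotate &= 63;
  if (rotate == 0) return value;  // otherwise (width - rotate) == width below
  return ((value & ((1ULL << rotate) - 1ULL)) << (width - rotate)) |
         (value >> rotate);
}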
diff --git a/deps/v8/src/codegen/arm64/instructions-arm64.h b/deps/v8/src/codegen/arm64/instructions-arm64.h
index 1132ba39db..7fe732e2ba 100644
--- a/deps/v8/src/codegen/arm64/instructions-arm64.h
+++ b/deps/v8/src/codegen/arm64/instructions-arm64.h
@@ -5,6 +5,7 @@
#ifndef V8_CODEGEN_ARM64_INSTRUCTIONS_ARM64_H_
#define V8_CODEGEN_ARM64_INSTRUCTIONS_ARM64_H_
+#include "src/base/memory.h"
#include "src/codegen/arm64/constants-arm64.h"
#include "src/codegen/arm64/register-arm64.h"
#include "src/codegen/arm64/utils-arm64.h"
@@ -82,11 +83,13 @@ enum Reg31Mode { Reg31IsStackPointer, Reg31IsZeroRegister };
class Instruction {
public:
V8_INLINE Instr InstructionBits() const {
- return *reinterpret_cast<const Instr*>(this);
+ // Usually this is aligned, but when de/serializing that's not guaranteed.
+ return base::ReadUnalignedValue<Instr>(reinterpret_cast<Address>(this));
}
V8_INLINE void SetInstructionBits(Instr new_instr) {
- *reinterpret_cast<Instr*>(this) = new_instr;
+ // Usually this is aligned, but when de/serializing that's not guaranteed.
+ base::WriteUnalignedValue(reinterpret_cast<Address>(this), new_instr);
}
int Bit(int pos) const { return (InstructionBits() >> pos) & 1; }
@@ -96,7 +99,9 @@ class Instruction {
}
int32_t SignedBits(int msb, int lsb) const {
- int32_t bits = *(reinterpret_cast<const int32_t*>(this));
+ // Usually this is aligned, but when de/serializing that's not guaranteed.
+ int32_t bits =
+ base::ReadUnalignedValue<int32_t>(reinterpret_cast<Address>(this));
return signed_bitextract_32(msb, lsb, bits);
}
@@ -125,7 +130,8 @@ class Instruction {
// formed from ImmPCRelLo and ImmPCRelHi.
int ImmPCRel() const {
DCHECK(IsPCRelAddressing());
- int offset = ((ImmPCRelHi() << ImmPCRelLo_width) | ImmPCRelLo());
+ int offset = (static_cast<uint32_t>(ImmPCRelHi()) << ImmPCRelLo_width) |
+ ImmPCRelLo();
int width = ImmPCRelLo_width + ImmPCRelHi_width;
return signed_bitextract_32(width - 1, 0, offset);
}
@@ -404,7 +410,7 @@ class Instruction {
void SetImmLLiteral(Instruction* source);
uintptr_t LiteralAddress() {
- int offset = ImmLLiteral() << kLoadLiteralScaleLog2;
+ int offset = ImmLLiteral() * kLoadLiteralScale;
return reinterpret_cast<uintptr_t>(this) + offset;
}
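
InstructionBits(), SetInstructionBits(), and SignedBits() now route through base::ReadUnalignedValue / WriteUnalignedValue because instruction words touched during de/serialization are not guaranteed to be 4-byte aligned, and dereferencing a misaligned pointer is undefined behavior. Those helpers are essentially memcpy wrappers, which is the portable pattern; a minimal equivalent, assuming Instr is a 32-bit word:

#include <cstdint>
#include <cstring>

using Instr = uint32_t;

Instr ReadUnaligned(const void* addr) {
  Instr r;
  std::memcpy(&r, addr, sizeof r);  // memcpy has no alignment requirement
  return r;
}

void WriteUnaligned(void* addr, Instr v) {
  std::memcpy(addr, &v, sizeof v);
}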
diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64-inl.h b/deps/v8/src/codegen/arm64/macro-assembler-arm64-inl.h
index 62bd9c26bf..261fd1e564 100644
--- a/deps/v8/src/codegen/arm64/macro-assembler-arm64-inl.h
+++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64-inl.h
@@ -93,6 +93,15 @@ void TurboAssembler::Ccmp(const Register& rn, const Operand& operand,
}
}
+void TurboAssembler::CcmpTagged(const Register& rn, const Operand& operand,
+ StatusFlags nzcv, Condition cond) {
+ if (COMPRESS_POINTERS_BOOL) {
+ Ccmp(rn.W(), operand.ToW(), nzcv, cond);
+ } else {
+ Ccmp(rn, operand, nzcv, cond);
+ }
+}
+
void MacroAssembler::Ccmn(const Register& rn, const Operand& operand,
StatusFlags nzcv, Condition cond) {
DCHECK(allow_macro_instructions());
@@ -157,6 +166,14 @@ void TurboAssembler::Cmp(const Register& rn, const Operand& operand) {
Subs(AppropriateZeroRegFor(rn), rn, operand);
}
+void TurboAssembler::CmpTagged(const Register& rn, const Operand& operand) {
+ if (COMPRESS_POINTERS_BOOL) {
+ Cmp(rn.W(), operand.ToW());
+ } else {
+ Cmp(rn, operand);
+ }
+}
+
void TurboAssembler::Neg(const Register& rd, const Operand& operand) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
@@ -982,7 +999,12 @@ void TurboAssembler::SmiUntag(Register dst, Register src) {
AssertSmi(src);
}
DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits());
- Asr(dst, src, kSmiShift);
+ if (COMPRESS_POINTERS_BOOL) {
+ Asr(dst.W(), src.W(), kSmiShift);
+ Sxtw(dst, dst);
+ } else {
+ Asr(dst, src, kSmiShift);
+ }
}
void TurboAssembler::SmiUntag(Register dst, const MemOperand& src) {
@@ -1002,11 +1024,11 @@ void TurboAssembler::SmiUntag(Register dst, const MemOperand& src) {
}
} else {
DCHECK(SmiValuesAre31Bits());
-#ifdef V8_COMPRESS_POINTERS
- Ldrsw(dst, src);
-#else
- Ldr(dst, src);
-#endif
+ if (COMPRESS_POINTERS_BOOL) {
+ Ldr(dst.W(), src);
+ } else {
+ Ldr(dst, src);
+ }
SmiUntag(dst);
}
}
@@ -1029,13 +1051,11 @@ void TurboAssembler::JumpIfSmi(Register value, Label* smi_label,
}
void TurboAssembler::JumpIfEqual(Register x, int32_t y, Label* dest) {
- Cmp(x, y);
- B(eq, dest);
+ CompareAndBranch(x, y, eq, dest);
}
void TurboAssembler::JumpIfLessThan(Register x, int32_t y, Label* dest) {
- Cmp(x, y);
- B(lt, dest);
+ CompareAndBranch(x, y, lt, dest);
}
void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label) {
@@ -1083,7 +1103,7 @@ void TurboAssembler::Claim(const Register& count, uint64_t unit_size) {
if (unit_size == 0) return;
DCHECK(base::bits::IsPowerOfTwo(unit_size));
- const int shift = CountTrailingZeros(unit_size, kXRegSizeInBits);
+ const int shift = base::bits::CountTrailingZeros(unit_size);
const Operand size(count, LSL, shift);
if (size.IsZero()) {
@@ -1136,7 +1156,7 @@ void TurboAssembler::Drop(const Register& count, uint64_t unit_size) {
if (unit_size == 0) return;
DCHECK(base::bits::IsPowerOfTwo(unit_size));
- const int shift = CountTrailingZeros(unit_size, kXRegSizeInBits);
+ const int shift = base::bits::CountTrailingZeros(unit_size);
const Operand size(count, LSL, shift);
if (size.IsZero()) {
@@ -1175,7 +1195,7 @@ void TurboAssembler::DropSlots(int64_t count) {
void TurboAssembler::PushArgument(const Register& arg) { Push(padreg, arg); }
-void MacroAssembler::CompareAndBranch(const Register& lhs, const Operand& rhs,
+void TurboAssembler::CompareAndBranch(const Register& lhs, const Operand& rhs,
Condition cond, Label* label) {
if (rhs.IsImmediate() && (rhs.ImmediateValue() == 0) &&
((cond == eq) || (cond == ne))) {
@@ -1190,6 +1210,16 @@ void MacroAssembler::CompareAndBranch(const Register& lhs, const Operand& rhs,
}
}
+void TurboAssembler::CompareTaggedAndBranch(const Register& lhs,
+ const Operand& rhs, Condition cond,
+ Label* label) {
+ if (COMPRESS_POINTERS_BOOL) {
+ CompareAndBranch(lhs.W(), rhs.ToW(), cond, label);
+ } else {
+ CompareAndBranch(lhs, rhs, cond, label);
+ }
+}
+
void TurboAssembler::TestAndBranchIfAnySet(const Register& reg,
const uint64_t bit_pattern,
Label* label) {
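
CmpTagged, CcmpTagged, and CompareTaggedAndBranch all encode one rule: under pointer compression a tagged value lives in the low 32 bits of the register, so comparisons must use the W views, and the upper half (which may hold stale bits after a compressed load) must not participate. The semantics in plain C++, with the flag value assumed for this sketch:

#include <cassert>
#include <cstdint>

constexpr bool kCompressPointers = true;  // assumption for the example

bool TaggedEqual(uint64_t a, uint64_t b) {
  if (kCompressPointers) {
    // W-register compare: only the compressed 32-bit halves matter.
    return static_cast<uint32_t>(a) == static_cast<uint32_t>(b);
  }
  return a == b;  // full X-register compare
}

int main() {
  // Upper bits can legitimately differ between two registers holding the
  // same tagged value; equality of the compressed halves is what counts.
  assert(TaggedEqual(0xDEADBEEF00000042ull, 0x0000000000000042ull));
}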
diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc b/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc
index 0a721b0647..892458fe8b 100644
--- a/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc
+++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc
@@ -295,7 +295,9 @@ void TurboAssembler::Mov(const Register& rd, const Operand& operand,
} else if (RelocInfo::IsEmbeddedObjectMode(operand.ImmediateRMode())) {
Handle<HeapObject> x(
reinterpret_cast<Address*>(operand.ImmediateValue()));
- IndirectLoadConstant(rd, x);
+ // TODO(v8:9706): Fix-it! This load will always uncompress the value
+ // even when we are loading a compressed embedded object.
+ IndirectLoadConstant(rd.X(), x);
return;
}
}
@@ -650,7 +652,14 @@ Operand TurboAssembler::MoveImmediateForShiftedOp(const Register& dst,
// The move was successful; nothing to do here.
} else {
// Pre-shift the immediate to the least-significant bits of the register.
- int shift_low = CountTrailingZeros(imm, reg_size);
+ int shift_low;
+ if (reg_size == 64) {
+ shift_low = base::bits::CountTrailingZeros(imm);
+ } else {
+ DCHECK_EQ(reg_size, 32);
+ shift_low = base::bits::CountTrailingZeros(static_cast<uint32_t>(imm));
+ }
+
if (mode == kLimitShiftForSP) {
// When applied to the stack pointer, the subsequent arithmetic operation
// can use the extend form to shift left by a maximum of four bits. Right
@@ -1456,15 +1465,6 @@ void TurboAssembler::LoadRoot(Register destination, RootIndex index) {
MemOperand(kRootRegister, RootRegisterOffsetForRootIndex(index)));
}
-void MacroAssembler::LoadObject(Register result, Handle<Object> object) {
- AllowDeferredHandleDereference heap_object_check;
- if (object->IsHeapObject()) {
- Mov(result, Handle<HeapObject>::cast(object));
- } else {
- Mov(result, Operand(Smi::cast(*object)));
- }
-}
-
void TurboAssembler::Move(Register dst, Smi src) { Mov(dst, src); }
void TurboAssembler::MovePair(Register dst0, Register src0, Register dst1,
@@ -1923,21 +1923,25 @@ void TurboAssembler::Call(ExternalReference target) {
}
void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) {
- STATIC_ASSERT(kSystemPointerSize == 8);
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kSmiTag == 0);
-
// The builtin_index register contains the builtin index as a Smi.
// Untagging is folded into the indexing operand below.
-#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
- STATIC_ASSERT(kSmiShiftSize == 0);
- Lsl(builtin_index, builtin_index, kSystemPointerSizeLog2 - kSmiShift);
-#else
- STATIC_ASSERT(kSmiShiftSize == 31);
- Asr(builtin_index, builtin_index, kSmiShift - kSystemPointerSizeLog2);
-#endif
- Add(builtin_index, builtin_index, IsolateData::builtin_entry_table_offset());
- Ldr(builtin_index, MemOperand(kRootRegister, builtin_index));
+ if (SmiValuesAre32Bits()) {
+ Asr(builtin_index, builtin_index, kSmiShift - kSystemPointerSizeLog2);
+ Add(builtin_index, builtin_index,
+ IsolateData::builtin_entry_table_offset());
+ Ldr(builtin_index, MemOperand(kRootRegister, builtin_index));
+ } else {
+ DCHECK(SmiValuesAre31Bits());
+ if (COMPRESS_POINTERS_BOOL) {
+ Add(builtin_index, kRootRegister,
+ Operand(builtin_index.W(), SXTW, kSystemPointerSizeLog2 - kSmiShift));
+ } else {
+ Add(builtin_index, kRootRegister,
+ Operand(builtin_index, LSL, kSystemPointerSizeLog2 - kSmiShift));
+ }
+ Ldr(builtin_index,
+ MemOperand(builtin_index, IsolateData::builtin_entry_table_offset()));
+ }
}
void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
@@ -2207,43 +2211,34 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
Bind(&regular_invoke);
}
-void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
- const ParameterCount& expected,
- const ParameterCount& actual) {
- Label skip_hook;
-
- Mov(x4, ExternalReference::debug_hook_on_function_call_address(isolate()));
- Ldrsb(x4, MemOperand(x4));
- Cbz(x4, &skip_hook);
-
- {
- // Load receiver to pass it later to DebugOnFunctionCall hook.
- Operand actual_op = actual.is_immediate() ? Operand(actual.immediate())
- : Operand(actual.reg());
- Mov(x4, actual_op);
- Ldr(x4, MemOperand(sp, x4, LSL, kSystemPointerSizeLog2));
- FrameScope frame(this,
- has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
+void MacroAssembler::CallDebugOnFunctionCall(Register fun, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual) {
+ // Load receiver to pass it later to DebugOnFunctionCall hook.
+ if (actual.is_reg()) {
+ Ldr(x4, MemOperand(sp, actual.reg(), LSL, kSystemPointerSizeLog2));
+ } else {
+ Ldr(x4, MemOperand(sp, actual.immediate() << kSystemPointerSizeLog2));
+ }
+ FrameScope frame(this, has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
- Register expected_reg = padreg;
- Register actual_reg = padreg;
- if (expected.is_reg()) expected_reg = expected.reg();
- if (actual.is_reg()) actual_reg = actual.reg();
- if (!new_target.is_valid()) new_target = padreg;
+ Register expected_reg = padreg;
+ Register actual_reg = padreg;
+ if (expected.is_reg()) expected_reg = expected.reg();
+ if (actual.is_reg()) actual_reg = actual.reg();
+ if (!new_target.is_valid()) new_target = padreg;
- // Save values on stack.
- SmiTag(expected_reg);
- SmiTag(actual_reg);
- Push(expected_reg, actual_reg, new_target, fun);
- Push(fun, x4);
- CallRuntime(Runtime::kDebugOnFunctionCall);
+ // Save values on stack.
+ SmiTag(expected_reg);
+ SmiTag(actual_reg);
+ Push(expected_reg, actual_reg, new_target, fun);
+ Push(fun, x4);
+ CallRuntime(Runtime::kDebugOnFunctionCall);
- // Restore values from stack.
- Pop(fun, new_target, actual_reg, expected_reg);
- SmiUntag(actual_reg);
- SmiUntag(expected_reg);
- }
- Bind(&skip_hook);
+ // Restore values from stack.
+ Pop(fun, new_target, actual_reg, expected_reg);
+ SmiUntag(actual_reg);
+ SmiUntag(expected_reg);
}
void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
@@ -2256,7 +2251,13 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
DCHECK_IMPLIES(new_target.is_valid(), new_target.is(x3));
// On function call, call into the debugger if necessary.
- CheckDebugHook(function, new_target, expected, actual);
+ Label debug_hook, continue_after_hook;
+ {
+ Mov(x4, ExternalReference::debug_hook_on_function_call_address(isolate()));
+ Ldrsb(x4, MemOperand(x4));
+ Cbnz(x4, &debug_hook);
+ }
+ bind(&continue_after_hook);
// Clear the new.target register if not given.
if (!new_target.is_valid()) {
@@ -2284,6 +2285,12 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
JumpCodeObject(code);
}
}
+ B(&done);
+
+ // Deferred debug hook.
+ bind(&debug_hook);
+ CallDebugOnFunctionCall(function, new_target, expected, actual);
+ B(&continue_after_hook);
// Continue here if InvokePrologue does handle the invocation due to
// mismatched parameter counts.
@@ -2636,7 +2643,7 @@ void MacroAssembler::CompareRoot(const Register& obj, RootIndex index) {
Register temp = temps.AcquireX();
DCHECK(!AreAliased(obj, temp));
LoadRoot(temp, index);
- Cmp(obj, temp);
+ CmpTagged(obj, temp);
}
void MacroAssembler::JumpIfRoot(const Register& obj, RootIndex index,
@@ -2669,20 +2676,20 @@ void MacroAssembler::JumpIfIsInRange(const Register& value,
void TurboAssembler::LoadTaggedPointerField(const Register& destination,
const MemOperand& field_operand) {
-#ifdef V8_COMPRESS_POINTERS
- DecompressTaggedPointer(destination, field_operand);
-#else
- Ldr(destination, field_operand);
-#endif
+ if (COMPRESS_POINTERS_BOOL) {
+ DecompressTaggedPointer(destination, field_operand);
+ } else {
+ Ldr(destination, field_operand);
+ }
}
void TurboAssembler::LoadAnyTaggedField(const Register& destination,
const MemOperand& field_operand) {
-#ifdef V8_COMPRESS_POINTERS
- DecompressAnyTagged(destination, field_operand);
-#else
- Ldr(destination, field_operand);
-#endif
+ if (COMPRESS_POINTERS_BOOL) {
+ DecompressAnyTagged(destination, field_operand);
+ } else {
+ Ldr(destination, field_operand);
+ }
}
void TurboAssembler::SmiUntagField(Register dst, const MemOperand& src) {
@@ -2691,33 +2698,31 @@ void TurboAssembler::SmiUntagField(Register dst, const MemOperand& src) {
void TurboAssembler::StoreTaggedField(const Register& value,
const MemOperand& dst_field_operand) {
-#ifdef V8_COMPRESS_POINTERS
- RecordComment("[ StoreTagged");
- Str(value.W(), dst_field_operand);
- RecordComment("]");
-#else
- Str(value, dst_field_operand);
-#endif
+ if (COMPRESS_POINTERS_BOOL) {
+ Str(value.W(), dst_field_operand);
+ } else {
+ Str(value, dst_field_operand);
+ }
}
void TurboAssembler::DecompressTaggedSigned(const Register& destination,
const MemOperand& field_operand) {
RecordComment("[ DecompressTaggedSigned");
- Ldrsw(destination, field_operand);
+ Ldr(destination.W(), field_operand);
RecordComment("]");
}
void TurboAssembler::DecompressTaggedSigned(const Register& destination,
const Register& source) {
RecordComment("[ DecompressTaggedSigned");
- Sxtw(destination, source);
+ Mov(destination.W(), source.W());
RecordComment("]");
}
void TurboAssembler::DecompressTaggedPointer(const Register& destination,
const MemOperand& field_operand) {
RecordComment("[ DecompressTaggedPointer");
- Ldrsw(destination, field_operand);
+ Ldr(destination.W(), field_operand);
Add(destination, kRootRegister, destination);
RecordComment("]");
}
@@ -2725,57 +2730,22 @@ void TurboAssembler::DecompressTaggedPointer(const Register& destination,
void TurboAssembler::DecompressTaggedPointer(const Register& destination,
const Register& source) {
RecordComment("[ DecompressTaggedPointer");
- Add(destination, kRootRegister, Operand(source, SXTW));
+ Add(destination, kRootRegister, Operand(source, UXTW));
RecordComment("]");
}
void TurboAssembler::DecompressAnyTagged(const Register& destination,
const MemOperand& field_operand) {
RecordComment("[ DecompressAnyTagged");
- Ldrsw(destination, field_operand);
- if (kUseBranchlessPtrDecompressionInGeneratedCode) {
- UseScratchRegisterScope temps(this);
- // Branchlessly compute |masked_root|:
- // masked_root = HAS_SMI_TAG(destination) ? 0 : kRootRegister;
- STATIC_ASSERT((kSmiTagSize == 1) && (kSmiTag == 0));
- Register masked_root = temps.AcquireX();
- // Sign extend tag bit to entire register.
- Sbfx(masked_root, destination, 0, kSmiTagSize);
- And(masked_root, masked_root, kRootRegister);
- // Now this add operation will either leave the value unchanged if it is a
- // smi or add the isolate root if it is a heap object.
- Add(destination, masked_root, destination);
- } else {
- Label done;
- JumpIfSmi(destination, &done);
- Add(destination, kRootRegister, destination);
- bind(&done);
- }
+ Ldr(destination.W(), field_operand);
+ Add(destination, kRootRegister, destination);
RecordComment("]");
}
void TurboAssembler::DecompressAnyTagged(const Register& destination,
const Register& source) {
RecordComment("[ DecompressAnyTagged");
- if (kUseBranchlessPtrDecompressionInGeneratedCode) {
- UseScratchRegisterScope temps(this);
- // Branchlessly compute |masked_root|:
- // masked_root = HAS_SMI_TAG(destination) ? 0 : kRootRegister;
- STATIC_ASSERT((kSmiTagSize == 1) && (kSmiTag == 0));
- Register masked_root = temps.AcquireX();
- // Sign extend tag bit to entire register.
- Sbfx(masked_root, source, 0, kSmiTagSize);
- And(masked_root, masked_root, kRootRegister);
- // Now this add operation will either leave the value unchanged if it is a
- // smi or add the isolate root if it is a heap object.
- Add(destination, masked_root, Operand(source, SXTW));
- } else {
- Label done;
- Sxtw(destination, source);
- JumpIfSmi(destination, &done);
- Add(destination, kRootRegister, destination);
- bind(&done);
- }
+ Add(destination, kRootRegister, Operand(source, UXTW));
RecordComment("]");
}
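
The decompression hunks drop sign-extension (Ldrsw/SXTW) and the branchless smi special case in favor of a plain zero-extension plus an add of the root register. This relies on the compression scheme keeping all smi information in the low 32 bits, so decompressing a smi as if it were a heap pointer only puts ignorable bits in the upper half. Schematic form, with hypothetical plain-integer types:

#include <cstdint>

// Full tagged pointer from its compressed 32-bit form: zero-extend (UXTW)
// and add the isolate root, unconditionally.
uint64_t DecompressTagged(uint64_t isolate_root, uint32_t compressed) {
  return isolate_root + static_cast<uint64_t>(compressed);
}

// Smi-only decompression needs no root at all; it is a plain
// zero-extension (the Ldr(destination.W(), ...) / Mov W,W above).
uint64_t DecompressTaggedSigned(uint32_t compressed) {
  return static_cast<uint64_t>(compressed);
}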
diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64.h b/deps/v8/src/codegen/arm64/macro-assembler-arm64.h
index 94091e8624..cb3b51eb52 100644
--- a/deps/v8/src/codegen/arm64/macro-assembler-arm64.h
+++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64.h
@@ -652,6 +652,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
const Operand& operand);
inline void Blr(const Register& xn);
inline void Cmp(const Register& rn, const Operand& operand);
+ inline void CmpTagged(const Register& rn, const Operand& operand);
inline void Subs(const Register& rd, const Register& rn,
const Operand& operand);
void Csel(const Register& rd, const Register& rn, const Operand& operand,
@@ -843,6 +844,13 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void CheckPageFlag(const Register& object, int mask, Condition cc,
Label* condition_met);
+ // Compare a register with an operand, and branch to label depending on the
+ // condition. May corrupt the status flags.
+ inline void CompareAndBranch(const Register& lhs, const Operand& rhs,
+ Condition cond, Label* label);
+ inline void CompareTaggedAndBranch(const Register& lhs, const Operand& rhs,
+ Condition cond, Label* label);
+
// Test the bits of register defined by bit_pattern, and branch if ANY of
// those bits are set. May corrupt the status flags.
inline void TestAndBranchIfAnySet(const Register& reg,
@@ -1006,6 +1014,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Conditional macros.
inline void Ccmp(const Register& rn, const Operand& operand, StatusFlags nzcv,
Condition cond);
+ inline void CcmpTagged(const Register& rn, const Operand& operand,
+ StatusFlags nzcv, Condition cond);
inline void Clz(const Register& rd, const Register& rn);
@@ -1597,8 +1607,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
tbx(vd, vn, vn2, vn3, vn4, vm);
}
- void LoadObject(Register result, Handle<Object> object);
-
inline void PushSizeRegList(
RegList registers, unsigned reg_size,
CPURegister::RegisterType type = CPURegister::kRegister) {
@@ -1643,11 +1651,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// be aligned to 16 bytes.
void PeekPair(const CPURegister& dst1, const CPURegister& dst2, int offset);
- // Compare a register with an operand, and branch to label depending on the
- // condition. May corrupt the status flags.
- inline void CompareAndBranch(const Register& lhs, const Operand& rhs,
- Condition cond, Label* label);
-
// Insert one or more instructions into the instruction stream that encode
// some caller-defined data. The instructions used will be executable with no
// side effects.
@@ -1767,10 +1770,10 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
const ParameterCount& actual, Label* done,
InvokeFlag flag, bool* definitely_mismatches);
- // On function call, call into the debugger if necessary.
- void CheckDebugHook(Register fun, Register new_target,
- const ParameterCount& expected,
- const ParameterCount& actual);
+ // On function call, call into the debugger.
+ void CallDebugOnFunctionCall(Register fun, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual);
void InvokeFunctionCode(Register function, Register new_target,
const ParameterCount& expected,
const ParameterCount& actual, InvokeFlag flag);
diff --git a/deps/v8/src/codegen/arm64/register-arm64.h b/deps/v8/src/codegen/arm64/register-arm64.h
index a782bf9cd8..2bdf0ceea0 100644
--- a/deps/v8/src/codegen/arm64/register-arm64.h
+++ b/deps/v8/src/codegen/arm64/register-arm64.h
@@ -105,7 +105,7 @@ class CPURegister : public RegisterBase<CPURegister, kRegAfterLast> {
enum RegisterType { kRegister, kVRegister, kNoRegister };
static constexpr CPURegister no_reg() {
- return CPURegister{0, 0, kNoRegister};
+ return CPURegister{kCode_no_reg, 0, kNoRegister};
}
template <int code, int size, RegisterType type>
@@ -567,8 +567,6 @@ using Simd128Register = VRegister;
// Lists of registers.
class V8_EXPORT_PRIVATE CPURegList {
public:
- CPURegList() = default;
-
template <typename... CPURegisters>
explicit CPURegList(CPURegister reg0, CPURegisters... regs)
: list_(CPURegister::ListOf(reg0, regs...)),
@@ -597,18 +595,16 @@ class V8_EXPORT_PRIVATE CPURegList {
}
CPURegister::RegisterType type() const {
- DCHECK(IsValid());
return type_;
}
RegList list() const {
- DCHECK(IsValid());
return list_;
}
inline void set_list(RegList new_list) {
- DCHECK(IsValid());
list_ = new_list;
+ DCHECK(IsValid());
}
// Combine another CPURegList into this one. Registers that already exist in
@@ -656,7 +652,6 @@ class V8_EXPORT_PRIVATE CPURegList {
static CPURegList GetSafepointSavedRegisters();
bool IsEmpty() const {
- DCHECK(IsValid());
return list_ == 0;
}
@@ -664,7 +659,6 @@ class V8_EXPORT_PRIVATE CPURegList {
const CPURegister& other2 = NoCPUReg,
const CPURegister& other3 = NoCPUReg,
const CPURegister& other4 = NoCPUReg) const {
- DCHECK(IsValid());
RegList list = 0;
if (!other1.IsNone() && (other1.type() == type_)) list |= other1.bit();
if (!other2.IsNone() && (other2.type() == type_)) list |= other2.bit();
@@ -674,12 +668,10 @@ class V8_EXPORT_PRIVATE CPURegList {
}
int Count() const {
- DCHECK(IsValid());
return CountSetBits(list_, kRegListSizeInBits);
}
int RegisterSizeInBits() const {
- DCHECK(IsValid());
return size_;
}
@@ -690,7 +682,6 @@ class V8_EXPORT_PRIVATE CPURegList {
}
int TotalSizeInBytes() const {
- DCHECK(IsValid());
return RegisterSizeInBytes() * Count();
}
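
CPURegList loses its default constructor and the DCHECK(IsValid()) calls scattered across its getters; instead validity is asserted once after each mutation (see set_list and Combine(int) above), which catches a malformed list where it is produced rather than where it is read. The discipline in miniature, with a placeholder invariant rather than V8's real one:

#include <cassert>
#include <cstdint>

class RegListSketch {
 public:
  explicit RegListSketch(uint64_t bits) : bits_(bits) { assert(IsValid()); }

  void Combine(int code) {
    bits_ |= (1ULL << code);
    assert(IsValid());  // check after the mutation, not in every getter
  }

  uint64_t list() const { return bits_; }  // getters now trust the invariant

 private:
  bool IsValid() const { return bits_ != 0; }  // placeholder invariant
  uint64_t bits_;
};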
diff --git a/deps/v8/src/codegen/arm64/utils-arm64.cc b/deps/v8/src/codegen/arm64/utils-arm64.cc
index 2f972ce502..dba2eeb7e1 100644
--- a/deps/v8/src/codegen/arm64/utils-arm64.cc
+++ b/deps/v8/src/codegen/arm64/utils-arm64.cc
@@ -89,15 +89,6 @@ int CountLeadingSignBits(int64_t value, int width) {
}
}
-int CountTrailingZeros(uint64_t value, int width) {
- DCHECK((width == 32) || (width == 64));
- if (width == 64) {
- return static_cast<int>(base::bits::CountTrailingZeros64(value));
- }
- return static_cast<int>(base::bits::CountTrailingZeros32(
- static_cast<uint32_t>(value & 0xFFFFFFFFF)));
-}
-
int CountSetBits(uint64_t value, int width) {
DCHECK((width == 32) || (width == 64));
if (width == 64) {
@@ -109,7 +100,7 @@ int CountSetBits(uint64_t value, int width) {
int LowestSetBitPosition(uint64_t value) {
DCHECK_NE(value, 0U);
- return CountTrailingZeros(value, 64) + 1;
+ return base::bits::CountTrailingZeros(value) + 1;
}
int HighestSetBitPosition(uint64_t value) {
@@ -118,12 +109,14 @@ int HighestSetBitPosition(uint64_t value) {
}
uint64_t LargestPowerOf2Divisor(uint64_t value) {
- return value & (-(int64_t)value);
+ // Simulate two's complement (instead of casting to signed and negating) to
+ // avoid undefined behavior on signed overflow.
+ return value & ((~value) + 1);
}
int MaskToBit(uint64_t mask) {
DCHECK_EQ(CountSetBits(mask, 64), 1);
- return CountTrailingZeros(mask, 64);
+ return base::bits::CountTrailingZeros(mask);
}
#undef __
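
The LargestPowerOf2Divisor change is another strict-UB fix: for value == 1ULL << 63, casting to int64_t and negating overflows (negating INT64_MIN is undefined), while computing the two's complement by hand stays entirely in unsigned arithmetic, where wraparound is defined. Stand-alone and checkable:

#include <cassert>
#include <cstdint>

uint64_t LargestPowerOf2Divisor(uint64_t value) {
  return value & (~value + 1);  // isolates the lowest set bit
}

int main() {
  assert(LargestPowerOf2Divisor(0b1011000) == 0b0001000);
  assert(LargestPowerOf2Divisor(1ULL << 63) == 1ULL << 63);  // no overflow
}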
diff --git a/deps/v8/src/codegen/arm64/utils-arm64.h b/deps/v8/src/codegen/arm64/utils-arm64.h
index 6bddce6fff..182d781d55 100644
--- a/deps/v8/src/codegen/arm64/utils-arm64.h
+++ b/deps/v8/src/codegen/arm64/utils-arm64.h
@@ -33,7 +33,6 @@ int float16classify(float16 value);
// Bit counting.
int CountLeadingZeros(uint64_t value, int width);
int CountLeadingSignBits(int64_t value, int width);
-V8_EXPORT_PRIVATE int CountTrailingZeros(uint64_t value, int width);
V8_EXPORT_PRIVATE int CountSetBits(uint64_t value, int width);
int LowestSetBitPosition(uint64_t value);
int HighestSetBitPosition(uint64_t value);
@@ -61,7 +60,7 @@ T ReverseBytes(T value, int block_bytes_log2) {
static const uint8_t permute_table[3][8] = {{6, 7, 4, 5, 2, 3, 0, 1},
{4, 5, 6, 7, 0, 1, 2, 3},
{0, 1, 2, 3, 4, 5, 6, 7}};
- T result = 0;
+ typename std::make_unsigned<T>::type result = 0;
for (int i = 0; i < 8; i++) {
result <<= 8;
result |= bytes[permute_table[block_bytes_log2 - 1][i]];
diff --git a/deps/v8/src/codegen/assembler.cc b/deps/v8/src/codegen/assembler.cc
index 498afb0320..4e354d9e54 100644
--- a/deps/v8/src/codegen/assembler.cc
+++ b/deps/v8/src/codegen/assembler.cc
@@ -92,7 +92,7 @@ class DefaultAssemblerBuffer : public AssemblerBuffer {
std::unique_ptr<AssemblerBuffer> Grow(int new_size) override {
DCHECK_LT(size(), new_size);
- return base::make_unique<DefaultAssemblerBuffer>(new_size);
+ return std::make_unique<DefaultAssemblerBuffer>(new_size);
}
private:
@@ -121,12 +121,12 @@ class ExternalAssemblerBufferImpl : public AssemblerBuffer {
std::unique_ptr<AssemblerBuffer> ExternalAssemblerBuffer(void* start,
int size) {
- return base::make_unique<ExternalAssemblerBufferImpl>(
+ return std::make_unique<ExternalAssemblerBufferImpl>(
reinterpret_cast<byte*>(start), size);
}
std::unique_ptr<AssemblerBuffer> NewAssemblerBuffer(int size) {
- return base::make_unique<DefaultAssemblerBuffer>(size);
+ return std::make_unique<DefaultAssemblerBuffer>(size);
}
// -----------------------------------------------------------------------------
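
base::make_unique was V8's pre-C++14 polyfill; these call sites move to the standard std::make_unique, which is also why assembler.h, in the next hunk, starts including <memory>. Usage is one-to-one; with a hypothetical Buffer type:

#include <memory>

struct Buffer {
  explicit Buffer(int size) : size(size) {}
  int size;
};

std::unique_ptr<Buffer> NewBuffer(int size) {
  return std::make_unique<Buffer>(size);  // was base::make_unique<Buffer>(size)
}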
diff --git a/deps/v8/src/codegen/assembler.h b/deps/v8/src/codegen/assembler.h
index 98639583d8..af70c4a48f 100644
--- a/deps/v8/src/codegen/assembler.h
+++ b/deps/v8/src/codegen/assembler.h
@@ -36,6 +36,7 @@
#define V8_CODEGEN_ASSEMBLER_H_
#include <forward_list>
+#include <memory>
#include <unordered_map>
#include "src/base/memory.h"
diff --git a/deps/v8/src/codegen/code-stub-assembler.cc b/deps/v8/src/codegen/code-stub-assembler.cc
index 7dad8cb95e..3051ce3662 100644
--- a/deps/v8/src/codegen/code-stub-assembler.cc
+++ b/deps/v8/src/codegen/code-stub-assembler.cc
@@ -7,9 +7,11 @@
#include "include/v8-internal.h"
#include "src/base/macros.h"
#include "src/codegen/code-factory.h"
+#include "src/codegen/tnode.h"
#include "src/common/globals.h"
#include "src/execution/frames-inl.h"
#include "src/execution/frames.h"
+#include "src/execution/protectors.h"
#include "src/heap/heap-inl.h" // For Page/MemoryChunk. TODO(jkummerow): Drop.
#include "src/logging/counters.h"
#include "src/objects/api-callbacks.h"
@@ -17,6 +19,7 @@
#include "src/objects/descriptor-array.h"
#include "src/objects/function-kind.h"
#include "src/objects/heap-number.h"
+#include "src/objects/js-generator.h"
#include "src/objects/oddball.h"
#include "src/objects/ordered-hash-table-inl.h"
#include "src/objects/property-cell.h"
@@ -26,10 +29,6 @@ namespace v8 {
namespace internal {
using compiler::Node;
-template <class T>
-using TNode = compiler::TNode<T>;
-template <class T>
-using SloppyTNode = compiler::SloppyTNode<T>;
CodeStubAssembler::CodeStubAssembler(compiler::CodeAssemblerState* state)
: compiler::CodeAssembler(state),
@@ -135,6 +134,148 @@ void CodeStubAssembler::Check(SloppyTNode<Word32T> condition_node,
Check(branch, message, file, line, extra_nodes);
}
+template <>
+TNode<Smi> CodeStubAssembler::IntPtrToParameter<Smi>(TNode<IntPtrT> value) {
+ return SmiTag(value);
+}
+template <>
+TNode<IntPtrT> CodeStubAssembler::IntPtrToParameter<IntPtrT>(
+ TNode<IntPtrT> value) {
+ return value;
+}
+
+void CodeStubAssembler::CollectCallableFeedback(
+ TNode<Object> maybe_target, TNode<Context> context,
+ TNode<FeedbackVector> feedback_vector, TNode<UintPtrT> slot_id) {
+ Label extra_checks(this, Label::kDeferred), done(this);
+
+ // Check if we have monomorphic {target} feedback already.
+ TNode<MaybeObject> feedback =
+ LoadFeedbackVectorSlot(feedback_vector, slot_id);
+ Comment("check if monomorphic");
+ TNode<BoolT> is_monomorphic = IsWeakReferenceToObject(feedback, maybe_target);
+ GotoIf(is_monomorphic, &done);
+
+ // Check if it is a megamorphic {target}.
+ Comment("check if megamorphic");
+ TNode<BoolT> is_megamorphic = TaggedEqual(
+ feedback, HeapConstant(FeedbackVector::MegamorphicSentinel(isolate())));
+ Branch(is_megamorphic, &done, &extra_checks);
+
+ BIND(&extra_checks);
+ {
+ Label initialize(this), mark_megamorphic(this);
+
+ Comment("check if weak reference");
+ TNode<BoolT> is_uninitialized = TaggedEqual(
+ feedback,
+ HeapConstant(FeedbackVector::UninitializedSentinel(isolate())));
+ GotoIf(is_uninitialized, &initialize);
+ CSA_ASSERT(this, IsWeakOrCleared(feedback));
+
+ // If the weak reference is cleared, we have a new chance to become
+ // monomorphic.
+ Comment("check if weak reference is cleared");
+ Branch(IsCleared(feedback), &initialize, &mark_megamorphic);
+
+ BIND(&initialize);
+ {
+ Comment("check if function in same native context");
+ GotoIf(TaggedIsSmi(maybe_target), &mark_megamorphic);
+ TNode<HeapObject> target = CAST(maybe_target);
+ // Check if the {target} is a JSFunction or JSBoundFunction
+ // in the current native context.
+ TVARIABLE(HeapObject, var_current, target);
+ Label loop(this, &var_current), done_loop(this);
+ Goto(&loop);
+ BIND(&loop);
+ {
+ Label if_boundfunction(this), if_function(this);
+ TNode<HeapObject> current = var_current.value();
+ TNode<Uint16T> current_instance_type = LoadInstanceType(current);
+ GotoIf(InstanceTypeEqual(current_instance_type, JS_BOUND_FUNCTION_TYPE),
+ &if_boundfunction);
+ Branch(InstanceTypeEqual(current_instance_type, JS_FUNCTION_TYPE),
+ &if_function, &mark_megamorphic);
+
+ BIND(&if_function);
+ {
+ // Check that the JSFunction {current} is in the current native
+ // context.
+ TNode<Context> current_context =
+ CAST(LoadObjectField(current, JSFunction::kContextOffset));
+ TNode<NativeContext> current_native_context =
+ LoadNativeContext(current_context);
+ Branch(
+ TaggedEqual(LoadNativeContext(context), current_native_context),
+ &done_loop, &mark_megamorphic);
+ }
+ BIND(&if_boundfunction);
+ {
+ // Continue with the [[BoundTargetFunction]] of {target}.
+ var_current = LoadObjectField<HeapObject>(
+ current, JSBoundFunction::kBoundTargetFunctionOffset);
+ Goto(&loop);
+ }
+ }
+ BIND(&done_loop);
+ StoreWeakReferenceInFeedbackVector(feedback_vector, slot_id, target);
+ ReportFeedbackUpdate(feedback_vector, slot_id, "Call:Initialize");
+ Goto(&done);
+ }
+
+ BIND(&mark_megamorphic);
+ {
+ // MegamorphicSentinel is an immortal immovable object so
+ // write-barrier is not needed.
+ Comment("transition to megamorphic");
+ DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kmegamorphic_symbol));
+ StoreFeedbackVectorSlot(
+ feedback_vector, slot_id,
+ HeapConstant(FeedbackVector::MegamorphicSentinel(isolate())),
+ SKIP_WRITE_BARRIER);
+ ReportFeedbackUpdate(feedback_vector, slot_id,
+ "Call:TransitionMegamorphic");
+ Goto(&done);
+ }
+ }
+
+ BIND(&done);
+}
+
+void CodeStubAssembler::CollectCallFeedback(
+ TNode<Object> maybe_target, TNode<Context> context,
+ TNode<HeapObject> maybe_feedback_vector, TNode<UintPtrT> slot_id) {
+ Label feedback_done(this);
+ // If feedback_vector is not valid, there is nothing to do.
+ GotoIf(IsUndefined(maybe_feedback_vector), &feedback_done);
+
+ // Increment the call count.
+ TNode<FeedbackVector> feedback_vector = CAST(maybe_feedback_vector);
+ IncrementCallCount(feedback_vector, slot_id);
+
+ // Collect the callable {target} feedback.
+ CollectCallableFeedback(maybe_target, context, feedback_vector, slot_id);
+ Goto(&feedback_done);
+
+ BIND(&feedback_done);
+}
+
+void CodeStubAssembler::IncrementCallCount(
+ TNode<FeedbackVector> feedback_vector, TNode<UintPtrT> slot_id) {
+ Comment("increment call count");
+ TNode<Smi> call_count =
+ CAST(LoadFeedbackVectorSlot(feedback_vector, slot_id, kTaggedSize));
+ // The lowest {FeedbackNexus::CallCountField::kShift} bits of the call
+ // count are used as flags. To increment the call count by 1 we hence
+ // have to increment by 1 << {FeedbackNexus::CallCountField::kShift}.
+ TNode<Smi> new_count = SmiAdd(
+ call_count, SmiConstant(1 << FeedbackNexus::CallCountField::kShift));
+ // Count is Smi, so we don't need a write barrier.
+ StoreFeedbackVectorSlot(feedback_vector, slot_id, new_count,
+ SKIP_WRITE_BARRIER, kTaggedSize);
+}
+
void CodeStubAssembler::FastCheck(TNode<BoolT> condition) {
Label ok(this), not_ok(this, Label::kDeferred);
Branch(condition, &ok, &not_ok);
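
CollectCallableFeedback above is a small state machine over a feedback slot: an uninitialized slot becomes monomorphic (a weak reference to the callee, provided the callee is a JSFunction or JSBoundFunction in the current native context), a cleared weak reference allows re-initialization, and every other case generalizes to the megamorphic sentinel, from which there is no way back. The lattice in plain C++, with stand-in types rather than the real FeedbackVector machinery:

#include <cstdint>

enum class Feedback : uint8_t { kUninitialized, kMonomorphic, kMegamorphic };

struct Slot {
  Feedback state = Feedback::kUninitialized;
  const void* target = nullptr;  // a weak reference in the real thing
  bool target_cleared = false;   // the weak reference died
};

void CollectCallableFeedback(Slot& slot, const void* callee,
                             bool same_native_context) {
  if (slot.state == Feedback::kMonomorphic && slot.target == callee) return;
  if (slot.state == Feedback::kMegamorphic) return;
  bool can_initialize =
      slot.state == Feedback::kUninitialized ||
      (slot.state == Feedback::kMonomorphic && slot.target_cleared);
  if (can_initialize && same_native_context) {
    slot.state = Feedback::kMonomorphic;  // remember this one callee, weakly
    slot.target = callee;
    slot.target_cleared = false;
    return;
  }
  slot.state = Feedback::kMegamorphic;  // anything else generalizes for good
}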
@@ -221,7 +362,7 @@ TNode<Object> CodeStubAssembler::NoContextConstant() {
}
#define HEAP_CONSTANT_ACCESSOR(rootIndexName, rootAccessorName, name) \
- compiler::TNode<std::remove_pointer<std::remove_reference<decltype( \
+ TNode<std::remove_pointer<std::remove_reference<decltype( \
std::declval<Heap>().rootAccessorName())>::type>::type> \
CodeStubAssembler::name##Constant() { \
return UncheckedCast<std::remove_pointer<std::remove_reference<decltype( \
@@ -232,7 +373,7 @@ HEAP_MUTABLE_IMMOVABLE_OBJECT_LIST(HEAP_CONSTANT_ACCESSOR)
#undef HEAP_CONSTANT_ACCESSOR
#define HEAP_CONSTANT_ACCESSOR(rootIndexName, rootAccessorName, name) \
- compiler::TNode<std::remove_pointer<std::remove_reference<decltype( \
+ TNode<std::remove_pointer<std::remove_reference<decltype( \
std::declval<ReadOnlyRoots>().rootAccessorName())>::type>::type> \
CodeStubAssembler::name##Constant() { \
return UncheckedCast<std::remove_pointer<std::remove_reference<decltype( \
@@ -242,14 +383,12 @@ HEAP_MUTABLE_IMMOVABLE_OBJECT_LIST(HEAP_CONSTANT_ACCESSOR)
HEAP_IMMUTABLE_IMMOVABLE_OBJECT_LIST(HEAP_CONSTANT_ACCESSOR)
#undef HEAP_CONSTANT_ACCESSOR
-#define HEAP_CONSTANT_TEST(rootIndexName, rootAccessorName, name) \
- compiler::TNode<BoolT> CodeStubAssembler::Is##name( \
- SloppyTNode<Object> value) { \
- return TaggedEqual(value, name##Constant()); \
- } \
- compiler::TNode<BoolT> CodeStubAssembler::IsNot##name( \
- SloppyTNode<Object> value) { \
- return TaggedNotEqual(value, name##Constant()); \
+#define HEAP_CONSTANT_TEST(rootIndexName, rootAccessorName, name) \
+ TNode<BoolT> CodeStubAssembler::Is##name(SloppyTNode<Object> value) { \
+ return TaggedEqual(value, name##Constant()); \
+ } \
+ TNode<BoolT> CodeStubAssembler::IsNot##name(SloppyTNode<Object> value) { \
+ return TaggedNotEqual(value, name##Constant()); \
}
HEAP_IMMOVABLE_OBJECT_LIST(HEAP_CONSTANT_TEST)
#undef HEAP_CONSTANT_TEST
@@ -264,6 +403,21 @@ TNode<BInt> CodeStubAssembler::BIntConstant(int value) {
#endif
}
+template <>
+TNode<Smi> CodeStubAssembler::IntPtrOrSmiConstant<Smi>(int value) {
+ return SmiConstant(value);
+}
+
+template <>
+TNode<IntPtrT> CodeStubAssembler::IntPtrOrSmiConstant<IntPtrT>(int value) {
+ return IntPtrConstant(value);
+}
+
+template <>
+TNode<RawPtrT> CodeStubAssembler::IntPtrOrSmiConstant<RawPtrT>(int value) {
+ return ReinterpretCast<RawPtrT>(IntPtrConstant(value));
+}
+
Node* CodeStubAssembler::IntPtrOrSmiConstant(int value, ParameterMode mode) {
if (mode == SMI_PARAMETERS) {
return SmiConstant(value);
@@ -273,41 +427,29 @@ Node* CodeStubAssembler::IntPtrOrSmiConstant(int value, ParameterMode mode) {
}
}
-TNode<BoolT> CodeStubAssembler::IntPtrOrSmiEqual(Node* left, Node* right,
- ParameterMode mode) {
- if (mode == SMI_PARAMETERS) {
- return SmiEqual(CAST(left), CAST(right));
- } else {
- DCHECK_EQ(INTPTR_PARAMETERS, mode);
- return IntPtrEqual(UncheckedCast<IntPtrT>(left),
- UncheckedCast<IntPtrT>(right));
+bool CodeStubAssembler::IsIntPtrOrSmiConstantZero(TNode<Smi> test) {
+ Smi smi_test;
+ if (ToSmiConstant(test, &smi_test) && smi_test.value() == 0) {
+ return true;
}
+ return false;
}
-TNode<BoolT> CodeStubAssembler::IntPtrOrSmiNotEqual(Node* left, Node* right,
- ParameterMode mode) {
- if (mode == SMI_PARAMETERS) {
- return SmiNotEqual(CAST(left), CAST(right));
- } else {
- DCHECK_EQ(INTPTR_PARAMETERS, mode);
- return WordNotEqual(UncheckedCast<IntPtrT>(left),
- UncheckedCast<IntPtrT>(right));
+bool CodeStubAssembler::IsIntPtrOrSmiConstantZero(TNode<IntPtrT> test) {
+ int32_t constant_test;
+ if (ToInt32Constant(test, &constant_test) && constant_test == 0) {
+ return true;
}
+ return false;
}
bool CodeStubAssembler::IsIntPtrOrSmiConstantZero(Node* test,
ParameterMode mode) {
- int32_t constant_test;
- Smi smi_test;
if (mode == INTPTR_PARAMETERS) {
- if (ToInt32Constant(test, &constant_test) && constant_test == 0) {
- return true;
- }
+ return IsIntPtrOrSmiConstantZero(UncheckedCast<IntPtrT>(test));
} else {
DCHECK_EQ(mode, SMI_PARAMETERS);
- if (ToSmiConstant(test, &smi_test) && smi_test.value() == 0) {
- return true;
- }
+ return IsIntPtrOrSmiConstantZero(UncheckedCast<Smi>(test));
}
return false;
}
@@ -352,6 +494,10 @@ Node* CodeStubAssembler::MatchesParameterMode(Node* value, ParameterMode mode) {
}
TNode<BoolT> CodeStubAssembler::WordIsPowerOfTwo(SloppyTNode<IntPtrT> value) {
+ intptr_t constant;
+ if (ToIntPtrConstant(value, &constant)) {
+ return BoolConstant(base::bits::IsPowerOfTwo(constant));
+ }
// value && !(value & (value - 1))
return IntPtrEqual(
Select<IntPtrT>(
@@ -578,21 +724,44 @@ TNode<Float64T> CodeStubAssembler::Float64Trunc(SloppyTNode<Float64T> x) {
TNode<BoolT> CodeStubAssembler::IsValidSmi(TNode<Smi> smi) {
if (SmiValuesAre32Bits() && kSystemPointerSize == kInt64Size) {
// Check that the Smi value is zero in the lower bits.
- TNode<IntPtrT> value = BitcastTaggedSignedToWord(smi);
+ TNode<IntPtrT> value = BitcastTaggedToWordForTagAndSmiBits(smi);
return Word32Equal(Int32Constant(0), TruncateIntPtrToInt32(value));
}
return Int32TrueConstant();
}
-Node* CodeStubAssembler::SmiShiftBitsConstant() {
- return IntPtrConstant(kSmiShiftSize + kSmiTagSize);
+TNode<BoolT> CodeStubAssembler::IsValidSmiIndex(TNode<Smi> smi) {
+ if (COMPRESS_POINTERS_BOOL) {
+ return WordEqual(
+ BitcastTaggedToWordForTagAndSmiBits(smi),
+ BitcastTaggedToWordForTagAndSmiBits(NormalizeSmiIndex(smi)));
+ }
+ return Int32TrueConstant();
+}
+
+TNode<Smi> CodeStubAssembler::NormalizeSmiIndex(TNode<Smi> smi_index) {
+ if (COMPRESS_POINTERS_BOOL) {
+ TNode<Int32T> raw =
+ TruncateWordToInt32(BitcastTaggedToWordForTagAndSmiBits(smi_index));
+ smi_index = BitcastWordToTaggedSigned(ChangeInt32ToIntPtr(raw));
+ }
+ return smi_index;
}
TNode<Smi> CodeStubAssembler::SmiFromInt32(SloppyTNode<Int32T> value) {
- TNode<IntPtrT> value_intptr = ChangeInt32ToIntPtr(value);
- TNode<Smi> smi =
- BitcastWordToTaggedSigned(WordShl(value_intptr, SmiShiftBitsConstant()));
- return smi;
+ if (COMPRESS_POINTERS_BOOL) {
+ static_assert(!COMPRESS_POINTERS_BOOL || (kSmiShiftSize + kSmiTagSize == 1),
+ "Use shifting instead of add");
+ return BitcastWordToTaggedSigned(
+ ChangeUint32ToWord(Int32Add(value, value)));
+ }
+ return SmiTag(ChangeInt32ToIntPtr(value));
+}
+
+TNode<Smi> CodeStubAssembler::SmiFromUint32(TNode<Uint32T> value) {
+ CSA_ASSERT(this, IntPtrLessThan(ChangeUint32ToWord(value),
+ IntPtrConstant(Smi::kMaxValue)));
+ return SmiFromInt32(Signed(value));
}
TNode<BoolT> CodeStubAssembler::IsValidPositiveSmi(TNode<IntPtrT> value) {
@@ -612,6 +781,9 @@ TNode<Smi> CodeStubAssembler::SmiTag(SloppyTNode<IntPtrT> value) {
if (ToInt32Constant(value, &constant_value) && Smi::IsValid(constant_value)) {
return SmiConstant(constant_value);
}
+ if (COMPRESS_POINTERS_BOOL) {
+ return SmiFromInt32(TruncateIntPtrToInt32(value));
+ }
TNode<Smi> smi =
BitcastWordToTaggedSigned(WordShl(value, SmiShiftBitsConstant()));
return smi;
@@ -622,11 +794,19 @@ TNode<IntPtrT> CodeStubAssembler::SmiUntag(SloppyTNode<Smi> value) {
if (ToIntPtrConstant(value, &constant_value)) {
return IntPtrConstant(constant_value >> (kSmiShiftSize + kSmiTagSize));
}
- return Signed(
- WordSar(BitcastTaggedSignedToWord(value), SmiShiftBitsConstant()));
+ if (COMPRESS_POINTERS_BOOL) {
+ return ChangeInt32ToIntPtr(SmiToInt32(value));
+ }
+ return Signed(WordSar(BitcastTaggedToWordForTagAndSmiBits(value),
+ SmiShiftBitsConstant()));
}
TNode<Int32T> CodeStubAssembler::SmiToInt32(SloppyTNode<Smi> value) {
+ if (COMPRESS_POINTERS_BOOL) {
+ return Signed(Word32Sar(
+ TruncateIntPtrToInt32(BitcastTaggedToWordForTagAndSmiBits(value)),
+ SmiShiftBitsConstant32()));
+ }
TNode<IntPtrT> result = SmiUntag(value);
return TruncateIntPtrToInt32(result);
}
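
Under pointer compression, a smi is a 31-bit payload over a zero tag bit in a 32-bit cell, so SmiTag/SmiFromInt32 reduce to one 32-bit left shift (written above as Int32Add(value, value)) followed by zero-extension, and SmiToInt32 is a 32-bit arithmetic right shift. A plain-integer model of the round trip:

#include <cassert>
#include <cstdint>

uint64_t SmiFromInt32(int32_t v) {
  uint32_t tagged = static_cast<uint32_t>(v) + static_cast<uint32_t>(v);
  return static_cast<uint64_t>(tagged);  // ChangeUint32ToWord: zero-extend
}

int32_t SmiToInt32(uint64_t smi) {
  // Word32Sar: arithmetic shift of the low 32 bits by the one-bit tag.
  return static_cast<int32_t>(static_cast<uint32_t>(smi)) >> 1;
}

int main() {
  assert(SmiToInt32(SmiFromInt32(42)) == 42);
  assert(SmiToInt32(SmiFromInt32(-7)) == -7);
}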
@@ -673,13 +853,13 @@ TNode<Smi> CodeStubAssembler::TrySmiAdd(TNode<Smi> lhs, TNode<Smi> rhs,
Label* if_overflow) {
if (SmiValuesAre32Bits()) {
return BitcastWordToTaggedSigned(
- TryIntPtrAdd(BitcastTaggedSignedToWord(lhs),
- BitcastTaggedSignedToWord(rhs), if_overflow));
+ TryIntPtrAdd(BitcastTaggedToWordForTagAndSmiBits(lhs),
+ BitcastTaggedToWordForTagAndSmiBits(rhs), if_overflow));
} else {
DCHECK(SmiValuesAre31Bits());
TNode<PairT<Int32T, BoolT>> pair = Int32AddWithOverflow(
- TruncateIntPtrToInt32(BitcastTaggedSignedToWord(lhs)),
- TruncateIntPtrToInt32(BitcastTaggedSignedToWord(rhs)));
+ TruncateIntPtrToInt32(BitcastTaggedToWordForTagAndSmiBits(lhs)),
+ TruncateIntPtrToInt32(BitcastTaggedToWordForTagAndSmiBits(rhs)));
TNode<BoolT> overflow = Projection<1>(pair);
GotoIf(overflow, if_overflow);
TNode<Int32T> result = Projection<0>(pair);
@@ -690,8 +870,9 @@ TNode<Smi> CodeStubAssembler::TrySmiAdd(TNode<Smi> lhs, TNode<Smi> rhs,
TNode<Smi> CodeStubAssembler::TrySmiSub(TNode<Smi> lhs, TNode<Smi> rhs,
Label* if_overflow) {
if (SmiValuesAre32Bits()) {
- TNode<PairT<IntPtrT, BoolT>> pair = IntPtrSubWithOverflow(
- BitcastTaggedSignedToWord(lhs), BitcastTaggedSignedToWord(rhs));
+ TNode<PairT<IntPtrT, BoolT>> pair =
+ IntPtrSubWithOverflow(BitcastTaggedToWordForTagAndSmiBits(lhs),
+ BitcastTaggedToWordForTagAndSmiBits(rhs));
TNode<BoolT> overflow = Projection<1>(pair);
GotoIf(overflow, if_overflow);
TNode<IntPtrT> result = Projection<0>(pair);
@@ -699,8 +880,8 @@ TNode<Smi> CodeStubAssembler::TrySmiSub(TNode<Smi> lhs, TNode<Smi> rhs,
} else {
DCHECK(SmiValuesAre31Bits());
TNode<PairT<Int32T, BoolT>> pair = Int32SubWithOverflow(
- TruncateIntPtrToInt32(BitcastTaggedSignedToWord(lhs)),
- TruncateIntPtrToInt32(BitcastTaggedSignedToWord(rhs)));
+ TruncateIntPtrToInt32(BitcastTaggedToWordForTagAndSmiBits(lhs)),
+ TruncateIntPtrToInt32(BitcastTaggedToWordForTagAndSmiBits(rhs)));
TNode<BoolT> overflow = Projection<1>(pair);
GotoIf(overflow, if_overflow);
TNode<Int32T> result = Projection<0>(pair);
@@ -878,7 +1059,7 @@ TNode<Number> CodeStubAssembler::SmiMul(TNode<Smi> a, TNode<Smi> b) {
}
BIND(&answer_zero);
{
- TNode<Word32T> or_result = Word32Or(lhs32, rhs32);
+ TNode<Int32T> or_result = Word32Or(lhs32, rhs32);
Label if_should_be_negative_zero(this), if_should_be_zero(this);
Branch(Int32LessThan(or_result, zero), &if_should_be_negative_zero,
&if_should_be_zero);
@@ -982,41 +1163,27 @@ TNode<Int32T> CodeStubAssembler::TruncateIntPtrToInt32(
return ReinterpretCast<Int32T>(value);
}
-TNode<BoolT> CodeStubAssembler::TaggedIsSmi(SloppyTNode<Object> a) {
- STATIC_ASSERT(kSmiTagMask < kMaxUInt32);
- return Word32Equal(Word32And(TruncateIntPtrToInt32(BitcastTaggedToWord(a)),
- Int32Constant(kSmiTagMask)),
- Int32Constant(0));
-}
-
TNode<BoolT> CodeStubAssembler::TaggedIsSmi(TNode<MaybeObject> a) {
STATIC_ASSERT(kSmiTagMask < kMaxUInt32);
return Word32Equal(
- Word32And(TruncateIntPtrToInt32(BitcastMaybeObjectToWord(a)),
+ Word32And(TruncateIntPtrToInt32(BitcastTaggedToWordForTagAndSmiBits(a)),
Int32Constant(kSmiTagMask)),
Int32Constant(0));
}
-TNode<BoolT> CodeStubAssembler::TaggedIsNotSmi(SloppyTNode<Object> a) {
- // Although BitcastTaggedSignedToWord is generally unsafe on HeapObjects, we
- // can nonetheless use it to inspect the Smi tag. The assumption here is that
- // the GC will not exchange Smis for HeapObjects or vice-versa.
- TNode<IntPtrT> a_bitcast = BitcastTaggedSignedToWord(UncheckedCast<Smi>(a));
- STATIC_ASSERT(kSmiTagMask < kMaxUInt32);
- return Word32NotEqual(
- Word32And(TruncateIntPtrToInt32(a_bitcast), Int32Constant(kSmiTagMask)),
- Int32Constant(0));
+TNode<BoolT> CodeStubAssembler::TaggedIsNotSmi(TNode<MaybeObject> a) {
+ return Word32BinaryNot(TaggedIsSmi(a));
}
TNode<BoolT> CodeStubAssembler::TaggedIsPositiveSmi(SloppyTNode<Object> a) {
#if defined(V8_HOST_ARCH_32_BIT) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
return Word32Equal(
Word32And(
- TruncateIntPtrToInt32(BitcastTaggedToWord(a)),
- Uint32Constant(kSmiTagMask | static_cast<int32_t>(kSmiSignMask))),
+ TruncateIntPtrToInt32(BitcastTaggedToWordForTagAndSmiBits(a)),
+ Uint32Constant(static_cast<uint32_t>(kSmiTagMask | kSmiSignMask))),
Int32Constant(0));
#else
- return WordEqual(WordAnd(BitcastTaggedToWord(a),
+ return WordEqual(WordAnd(BitcastTaggedToWordForTagAndSmiBits(a),
IntPtrConstant(kSmiTagMask | kSmiSignMask)),
IntPtrConstant(0));
#endif
@@ -1052,55 +1219,6 @@ TNode<Float64T> CodeStubAssembler::LoadDoubleWithHoleCheck(
INTPTR_PARAMETERS, if_hole);
}
-void CodeStubAssembler::BranchIfPrototypesHaveNoElements(
- Node* receiver_map, Label* definitely_no_elements,
- Label* possibly_elements) {
- CSA_SLOW_ASSERT(this, IsMap(receiver_map));
- VARIABLE(var_map, MachineRepresentation::kTagged, receiver_map);
- Label loop_body(this, &var_map);
- TNode<FixedArray> empty_fixed_array = EmptyFixedArrayConstant();
- TNode<NumberDictionary> empty_slow_element_dictionary =
- EmptySlowElementDictionaryConstant();
- Goto(&loop_body);
-
- BIND(&loop_body);
- {
- Node* map = var_map.value();
- TNode<HeapObject> prototype = LoadMapPrototype(map);
- GotoIf(IsNull(prototype), definitely_no_elements);
- TNode<Map> prototype_map = LoadMap(prototype);
- TNode<Uint16T> prototype_instance_type = LoadMapInstanceType(prototype_map);
-
- // Pessimistically assume elements if a Proxy, Special API Object,
- // or JSPrimitiveWrapper wrapper is found on the prototype chain. After this
- // instance type check, it's not necessary to check for interceptors or
- // access checks.
- Label if_custom(this, Label::kDeferred), if_notcustom(this);
- Branch(IsCustomElementsReceiverInstanceType(prototype_instance_type),
- &if_custom, &if_notcustom);
-
- BIND(&if_custom);
- {
- // For string JSPrimitiveWrapper wrappers we still support the checks as
- // long as they wrap the empty string.
- GotoIfNot(
- InstanceTypeEqual(prototype_instance_type, JS_PRIMITIVE_WRAPPER_TYPE),
- possibly_elements);
- Node* prototype_value = LoadJSPrimitiveWrapperValue(prototype);
- Branch(IsEmptyString(prototype_value), &if_notcustom, possibly_elements);
- }
-
- BIND(&if_notcustom);
- {
- TNode<FixedArrayBase> prototype_elements = LoadElements(CAST(prototype));
- var_map.Bind(prototype_map);
- GotoIf(TaggedEqual(prototype_elements, empty_fixed_array), &loop_body);
- Branch(TaggedEqual(prototype_elements, empty_slow_element_dictionary),
- &loop_body, possibly_elements);
- }
- }
-}
-
void CodeStubAssembler::BranchIfJSReceiver(SloppyTNode<Object> object,
Label* if_true, Label* if_false) {
GotoIf(TaggedIsSmi(object), if_false);
@@ -1118,19 +1236,6 @@ void CodeStubAssembler::GotoIfForceSlowPath(Label* if_true) {
#endif
}
-void CodeStubAssembler::GotoIfDebugExecutionModeChecksSideEffects(
- Label* if_true) {
- STATIC_ASSERT(sizeof(DebugInfo::ExecutionMode) >= sizeof(int32_t));
-
- TNode<ExternalReference> execution_mode_address = ExternalConstant(
- ExternalReference::debug_execution_mode_address(isolate()));
- TNode<Int32T> execution_mode =
- UncheckedCast<Int32T>(Load(MachineType::Int32(), execution_mode_address));
-
- GotoIf(Word32Equal(execution_mode, Int32Constant(DebugInfo::kSideEffects)),
- if_true);
-}
-
TNode<HeapObject> CodeStubAssembler::AllocateRaw(TNode<IntPtrT> size_in_bytes,
AllocationFlags flags,
TNode<RawPtrT> top_address,
@@ -1557,7 +1662,7 @@ void CodeStubAssembler::GotoIfMapHasSlowProperties(TNode<Map> map,
}
TNode<HeapObject> CodeStubAssembler::LoadFastProperties(
- SloppyTNode<JSObject> object) {
+ SloppyTNode<JSReceiver> object) {
CSA_SLOW_ASSERT(this, Word32BinaryNot(IsDictionaryMap(LoadMap(object))));
TNode<Object> properties = LoadJSReceiverPropertiesOrHash(object);
return Select<HeapObject>(
@@ -1566,7 +1671,7 @@ TNode<HeapObject> CodeStubAssembler::LoadFastProperties(
}
TNode<HeapObject> CodeStubAssembler::LoadSlowProperties(
- SloppyTNode<JSObject> object) {
+ SloppyTNode<JSReceiver> object) {
CSA_SLOW_ASSERT(this, IsDictionaryMap(LoadMap(object)));
TNode<Object> properties = LoadJSReceiverPropertiesOrHash(object);
return Select<HeapObject>(
@@ -1862,18 +1967,8 @@ TNode<Uint32T> CodeStubAssembler::LoadStringLengthAsWord32(
return LoadObjectField<Uint32T>(string, String::kLengthOffset);
}
-Node* CodeStubAssembler::PointerToSeqStringData(Node* seq_string) {
- CSA_ASSERT(this, IsString(seq_string));
- CSA_ASSERT(this,
- IsSequentialStringInstanceType(LoadInstanceType(seq_string)));
- STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
- return IntPtrAdd(
- BitcastTaggedToWord(seq_string),
- IntPtrConstant(SeqOneByteString::kHeaderSize - kHeapObjectTag));
-}
-
-Node* CodeStubAssembler::LoadJSPrimitiveWrapperValue(Node* object) {
- CSA_ASSERT(this, IsJSPrimitiveWrapper(object));
+TNode<Object> CodeStubAssembler::LoadJSPrimitiveWrapperValue(
+ TNode<JSPrimitiveWrapper> object) {
return LoadObjectField(object, JSPrimitiveWrapper::kValueOffset);
}
@@ -1887,15 +1982,9 @@ void CodeStubAssembler::DispatchMaybeObject(TNode<MaybeObject> maybe_object,
GotoIf(IsCleared(maybe_object), if_cleared);
- GotoIf(Word32Equal(Word32And(TruncateIntPtrToInt32(
- BitcastMaybeObjectToWord(maybe_object)),
- Int32Constant(kHeapObjectTagMask)),
- Int32Constant(kHeapObjectTag)),
- &inner_if_strong);
+ GotoIf(IsStrong(maybe_object), &inner_if_strong);
- *extracted =
- BitcastWordToTagged(WordAnd(BitcastMaybeObjectToWord(maybe_object),
- IntPtrConstant(~kWeakHeapObjectMask)));
+ *extracted = GetHeapObjectAssumeWeak(maybe_object);
Goto(if_weak);
BIND(&inner_if_smi);
@@ -1908,10 +1997,10 @@ void CodeStubAssembler::DispatchMaybeObject(TNode<MaybeObject> maybe_object,
}
TNode<BoolT> CodeStubAssembler::IsStrong(TNode<MaybeObject> value) {
- return Word32Equal(
- Word32And(TruncateIntPtrToInt32(BitcastMaybeObjectToWord(value)),
- Int32Constant(kHeapObjectTagMask)),
- Int32Constant(kHeapObjectTag));
+ return Word32Equal(Word32And(TruncateIntPtrToInt32(
+ BitcastTaggedToWordForTagAndSmiBits(value)),
+ Int32Constant(kHeapObjectTagMask)),
+ Int32Constant(kHeapObjectTag));
}
TNode<HeapObject> CodeStubAssembler::GetHeapObjectIfStrong(
@@ -1921,10 +2010,10 @@ TNode<HeapObject> CodeStubAssembler::GetHeapObjectIfStrong(
}
TNode<BoolT> CodeStubAssembler::IsWeakOrCleared(TNode<MaybeObject> value) {
- return Word32Equal(
- Word32And(TruncateIntPtrToInt32(BitcastMaybeObjectToWord(value)),
- Int32Constant(kHeapObjectTagMask)),
- Int32Constant(kWeakHeapObjectTag));
+ return Word32Equal(Word32And(TruncateIntPtrToInt32(
+ BitcastTaggedToWordForTagAndSmiBits(value)),
+ Int32Constant(kHeapObjectTagMask)),
+ Int32Constant(kWeakHeapObjectTag));
}
TNode<BoolT> CodeStubAssembler::IsCleared(TNode<MaybeObject> value) {
@@ -1932,11 +2021,6 @@ TNode<BoolT> CodeStubAssembler::IsCleared(TNode<MaybeObject> value) {
Int32Constant(kClearedWeakHeapObjectLower32));
}
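+// Tag-bit summary for the MaybeObject predicates around here (standard
+// assignments: kHeapObjectTag == 1, kWeakHeapObjectTag == 3,
+// kHeapObjectTagMask == 3):
+//   low bits x0 -> Smi
+//   low bits 01 -> strong HeapObject reference
+//   low bits 11 -> weak reference; "cleared" is the special weak value
+//                  whose low 32 bits are kClearedWeakHeapObjectLower32,
+//                  i.e. only the tag bits set and no payload.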
-TNode<BoolT> CodeStubAssembler::IsNotCleared(TNode<MaybeObject> value) {
- return Word32NotEqual(TruncateIntPtrToInt32(BitcastMaybeObjectToWord(value)),
- Int32Constant(kClearedWeakHeapObjectLower32));
-}
-
TNode<HeapObject> CodeStubAssembler::GetHeapObjectAssumeWeak(
TNode<MaybeObject> value) {
CSA_ASSERT(this, IsWeakOrCleared(value));
@@ -1951,43 +2035,41 @@ TNode<HeapObject> CodeStubAssembler::GetHeapObjectAssumeWeak(
return GetHeapObjectAssumeWeak(value);
}
-TNode<BoolT> CodeStubAssembler::IsWeakReferenceTo(TNode<MaybeObject> object,
- TNode<Object> value) {
-#if defined(V8_HOST_ARCH_32_BIT) || defined(V8_COMPRESS_POINTERS)
- STATIC_ASSERT(kTaggedSize == kInt32Size);
- return Word32Equal(
- Word32And(TruncateWordToInt32(BitcastMaybeObjectToWord(object)),
- Uint32Constant(
- static_cast<uint32_t>(~kWeakHeapObjectMask & kMaxUInt32))),
- TruncateWordToInt32(BitcastTaggedToWord(value)));
-#else
- return WordEqual(WordAnd(BitcastMaybeObjectToWord(object),
- IntPtrConstant(~kWeakHeapObjectMask)),
- BitcastTaggedToWord(value));
-
-#endif
-}
-
-TNode<BoolT> CodeStubAssembler::IsStrongReferenceTo(TNode<MaybeObject> object,
- TNode<Object> value) {
- return TaggedEqual(BitcastWordToTagged(BitcastMaybeObjectToWord(object)),
- value);
-}
-
-TNode<BoolT> CodeStubAssembler::IsNotWeakReferenceTo(TNode<MaybeObject> object,
- TNode<Object> value) {
-#if defined(V8_HOST_ARCH_32_BIT) || defined(V8_COMPRESS_POINTERS)
- return Word32NotEqual(
- Word32And(TruncateWordToInt32(BitcastMaybeObjectToWord(object)),
- Uint32Constant(
- static_cast<uint32_t>(~kWeakHeapObjectMask & kMaxUInt32))),
- TruncateWordToInt32(BitcastTaggedToWord(value)));
-#else
- return WordNotEqual(WordAnd(BitcastMaybeObjectToWord(object),
- IntPtrConstant(~kWeakHeapObjectMask)),
- BitcastTaggedToWord(value));
-
-#endif
+// This version generates
+//   (maybe_object & ~mask) == value
+// It works for a non-Smi |maybe_object| and for both Smi and HeapObject
+// values, but requires a big constant for ~mask.
+TNode<BoolT> CodeStubAssembler::IsWeakReferenceToObject(
+ TNode<MaybeObject> maybe_object, TNode<Object> value) {
+ CSA_ASSERT(this, TaggedIsNotSmi(maybe_object));
+ if (COMPRESS_POINTERS_BOOL) {
+ return Word32Equal(
+ Word32And(TruncateWordToInt32(BitcastMaybeObjectToWord(maybe_object)),
+ Uint32Constant(~static_cast<uint32_t>(kWeakHeapObjectMask))),
+ TruncateWordToInt32(BitcastTaggedToWord(value)));
+ } else {
+ return WordEqual(WordAnd(BitcastMaybeObjectToWord(maybe_object),
+ IntPtrConstant(~kWeakHeapObjectMask)),
+ BitcastTaggedToWord(value));
+ }
+}
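+// Worked example (64-bit, no pointer compression, kWeakHeapObjectMask == 2):
+// for a HeapObject at 0x1000 the strong word is 0x1001 and a weak reference
+// to it is 0x1003; (0x1003 & ~2) == 0x1001 == BitcastTaggedToWord(value),
+// so the equality holds. ~2 is the wide constant 0xfffffffffffffffd, hence
+// the "big constant" caveat above; the Smi assert matters because a Smi
+// |maybe_object| could alias a Smi |value| once bit 1 is masked away.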
+
+// This version generates
+//   maybe_object == (heap_object | mask)
+// It works for any |maybe_object| value and generates better code because
+// it uses a small constant for the mask.
+TNode<BoolT> CodeStubAssembler::IsWeakReferenceTo(
+ TNode<MaybeObject> maybe_object, TNode<HeapObject> heap_object) {
+ if (COMPRESS_POINTERS_BOOL) {
+ return Word32Equal(
+ TruncateWordToInt32(BitcastMaybeObjectToWord(maybe_object)),
+ Word32Or(TruncateWordToInt32(BitcastTaggedToWord(heap_object)),
+ Int32Constant(kWeakHeapObjectMask)));
+ } else {
+ return WordEqual(BitcastMaybeObjectToWord(maybe_object),
+ WordOr(BitcastTaggedToWord(heap_object),
+ IntPtrConstant(kWeakHeapObjectMask)));
+ }
}
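+// Worked example for this variant: with the same object, (0x1001 | 2) ==
+// 0x1003 matches the weak word without masking |maybe_object| at all. A
+// strong 0x1001 or any (even) Smi word simply compares unequal, which is
+// why no Smi precondition is needed; and OR-ing the small immediate 2
+// typically encodes more compactly than AND-ing a 64-bit ~2 mask.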
TNode<MaybeObject> CodeStubAssembler::MakeWeak(TNode<HeapObject> value) {
@@ -2123,16 +2205,27 @@ TNode<IntPtrT> CodeStubAssembler::LoadPropertyArrayLength(
return Signed(DecodeWord<PropertyArray::LengthField>(value));
}
-TNode<RawPtrT> CodeStubAssembler::LoadJSTypedArrayBackingStore(
+TNode<RawPtrT> CodeStubAssembler::LoadJSTypedArrayDataPtr(
TNode<JSTypedArray> typed_array) {
- // Backing store = external_pointer + base_pointer.
- Node* external_pointer =
- LoadObjectField(typed_array, JSTypedArray::kExternalPointerOffset,
- MachineType::Pointer());
- TNode<Object> base_pointer =
- LoadObjectField(typed_array, JSTypedArray::kBasePointerOffset);
- return UncheckedCast<RawPtrT>(
- IntPtrAdd(external_pointer, BitcastTaggedToWord(base_pointer)));
+ // Data pointer = external_pointer + static_cast<Tagged_t>(base_pointer).
+ TNode<RawPtrT> external_pointer = LoadObjectField<RawPtrT>(
+ typed_array, JSTypedArray::kExternalPointerOffset);
+
+ TNode<IntPtrT> base_pointer;
+ if (COMPRESS_POINTERS_BOOL) {
+ TNode<Int32T> compressed_base =
+ LoadObjectField<Int32T>(typed_array, JSTypedArray::kBasePointerOffset);
+    // Zero-extend TaggedT to WordT according to the current compression
+    // scheme so that the addition with |external_pointer| (which already
+    // contains the compensated offset value) below will decompress the
+    // tagged value. See
+    // JSTypedArray::ExternalPointerCompensationForOnHeapArray() for details.
+ base_pointer = Signed(ChangeUint32ToWord(compressed_base));
+ } else {
+ base_pointer =
+ LoadObjectField<IntPtrT>(typed_array, JSTypedArray::kBasePointerOffset);
+ }
+ return RawPtrAdd(external_pointer, base_pointer);
}
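+// Decompression sketch for the compressed-pointer branch above:
+//   data_ptr = external_pointer + ZeroExtend32To64(compressed_base)
+// For on-heap arrays, |external_pointer| was pre-biased (see the referenced
+// ExternalPointerCompensationForOnHeapArray()) so the sum reconstructs the
+// real element address; for off-heap arrays |base_pointer| is Smi zero and
+// the sum degenerates to |external_pointer| itself.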
TNode<BigInt> CodeStubAssembler::LoadFixedBigInt64ArrayElementAsTagged(
@@ -2267,8 +2360,7 @@ TNode<BigInt> CodeStubAssembler::BigIntFromInt64(TNode<IntPtrT> value) {
return var_result.value();
}
-compiler::TNode<BigInt>
-CodeStubAssembler::LoadFixedBigUint64ArrayElementAsTagged(
+TNode<BigInt> CodeStubAssembler::LoadFixedBigUint64ArrayElementAsTagged(
SloppyTNode<RawPtrT> data_pointer, SloppyTNode<IntPtrT> offset) {
Label if_zero(this), done(this);
if (Is64()) {
@@ -2416,59 +2508,30 @@ TNode<Numeric> CodeStubAssembler::LoadFixedTypedArrayElementAsTagged(
return var_result.value();
}
-void CodeStubAssembler::StoreJSTypedArrayElementFromTagged(
- TNode<Context> context, TNode<JSTypedArray> typed_array,
- TNode<Smi> index_node, TNode<Object> value, ElementsKind elements_kind) {
- TNode<RawPtrT> data_pointer = LoadJSTypedArrayBackingStore(typed_array);
- switch (elements_kind) {
- case UINT8_ELEMENTS:
- case UINT8_CLAMPED_ELEMENTS:
- case INT8_ELEMENTS:
- case UINT16_ELEMENTS:
- case INT16_ELEMENTS:
- StoreElement(data_pointer, elements_kind, index_node,
- SmiToInt32(CAST(value)), SMI_PARAMETERS);
- break;
- case UINT32_ELEMENTS:
- case INT32_ELEMENTS:
- StoreElement(data_pointer, elements_kind, index_node,
- TruncateTaggedToWord32(context, value), SMI_PARAMETERS);
- break;
- case FLOAT32_ELEMENTS:
- StoreElement(data_pointer, elements_kind, index_node,
- TruncateFloat64ToFloat32(LoadHeapNumberValue(CAST(value))),
- SMI_PARAMETERS);
- break;
- case FLOAT64_ELEMENTS:
- StoreElement(data_pointer, elements_kind, index_node,
- LoadHeapNumberValue(CAST(value)), SMI_PARAMETERS);
- break;
- case BIGUINT64_ELEMENTS:
- case BIGINT64_ELEMENTS:
- StoreElement(data_pointer, elements_kind, index_node,
- UncheckedCast<BigInt>(value), SMI_PARAMETERS);
- break;
- default:
- UNREACHABLE();
- }
-}
-
+template <typename TIndex>
TNode<MaybeObject> CodeStubAssembler::LoadFeedbackVectorSlot(
- Node* object, Node* slot_index_node, int additional_offset,
- ParameterMode parameter_mode) {
- CSA_SLOW_ASSERT(this, IsFeedbackVector(object));
- CSA_SLOW_ASSERT(this, MatchesParameterMode(slot_index_node, parameter_mode));
+ TNode<FeedbackVector> feedback_vector, TNode<TIndex> slot,
+ int additional_offset) {
int32_t header_size =
FeedbackVector::kFeedbackSlotsOffset + additional_offset - kHeapObjectTag;
- TNode<IntPtrT> offset = ElementOffsetFromIndex(
- slot_index_node, HOLEY_ELEMENTS, parameter_mode, header_size);
+ TNode<IntPtrT> offset =
+ ElementOffsetFromIndex(slot, HOLEY_ELEMENTS, header_size);
CSA_SLOW_ASSERT(
- this, IsOffsetInBounds(offset, LoadFeedbackVectorLength(CAST(object)),
+ this, IsOffsetInBounds(offset, LoadFeedbackVectorLength(feedback_vector),
FeedbackVector::kHeaderSize));
- return UncheckedCast<MaybeObject>(
- Load(MachineType::AnyTagged(), object, offset));
+ return Load<MaybeObject>(feedback_vector, offset);
}
+template TNode<MaybeObject> CodeStubAssembler::LoadFeedbackVectorSlot(
+ TNode<FeedbackVector> feedback_vector, TNode<Smi> slot,
+ int additional_offset);
+template TNode<MaybeObject> CodeStubAssembler::LoadFeedbackVectorSlot(
+ TNode<FeedbackVector> feedback_vector, TNode<IntPtrT> slot,
+ int additional_offset);
+template TNode<MaybeObject> CodeStubAssembler::LoadFeedbackVectorSlot(
+ TNode<FeedbackVector> feedback_vector, TNode<UintPtrT> slot,
+ int additional_offset);
+
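+// Byte-offset arithmetic used above, spelled out: for a slot index s,
+//   offset = FeedbackVector::kFeedbackSlotsOffset + additional_offset
+//            - kHeapObjectTag + s * kTaggedSize
+// (HOLEY_ELEMENTS scales indices by kTaggedSize), i.e. an untagged offset
+// suitable for a raw Load relative to the vector's tagged base pointer.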
template <typename Array>
TNode<Int32T> CodeStubAssembler::LoadAndUntagToWord32ArrayElement(
TNode<Array> object, int array_header_size, Node* index_node,
@@ -2617,6 +2680,13 @@ TNode<Float64T> CodeStubAssembler::LoadDoubleWithHoleCheck(
return UncheckedCast<Float64T>(Load(machine_type, base, offset));
}
+TNode<BoolT> CodeStubAssembler::LoadContextHasExtensionField(
+ SloppyTNode<Context> context) {
+ TNode<IntPtrT> value =
+ LoadAndUntagObjectField(context, Context::kLengthOffset);
+ return IsSetWord<Context::HasExtensionField>(value);
+}
+
TNode<Object> CodeStubAssembler::LoadContextElement(
SloppyTNode<Context> context, int slot_index) {
int offset = Context::SlotOffset(slot_index);
@@ -2626,15 +2696,15 @@ TNode<Object> CodeStubAssembler::LoadContextElement(
TNode<Object> CodeStubAssembler::LoadContextElement(
SloppyTNode<Context> context, SloppyTNode<IntPtrT> slot_index) {
- TNode<IntPtrT> offset = ElementOffsetFromIndex(
- slot_index, PACKED_ELEMENTS, INTPTR_PARAMETERS, Context::SlotOffset(0));
+ TNode<IntPtrT> offset = ElementOffsetFromIndex(slot_index, PACKED_ELEMENTS,
+ Context::SlotOffset(0));
return UncheckedCast<Object>(Load(MachineType::AnyTagged(), context, offset));
}
TNode<Object> CodeStubAssembler::LoadContextElement(TNode<Context> context,
TNode<Smi> slot_index) {
- TNode<IntPtrT> offset = ElementOffsetFromIndex(
- slot_index, PACKED_ELEMENTS, SMI_PARAMETERS, Context::SlotOffset(0));
+ TNode<IntPtrT> offset = ElementOffsetFromIndex(slot_index, PACKED_ELEMENTS,
+ Context::SlotOffset(0));
return UncheckedCast<Object>(Load(MachineType::AnyTagged(), context, offset));
}
@@ -2949,33 +3019,30 @@ void CodeStubAssembler::StoreFixedDoubleArrayElement(
StoreNoWriteBarrier(rep, object, offset, value_silenced);
}
-void CodeStubAssembler::StoreFeedbackVectorSlot(Node* object,
- Node* slot_index_node,
- Node* value,
- WriteBarrierMode barrier_mode,
- int additional_offset,
- ParameterMode parameter_mode) {
- CSA_SLOW_ASSERT(this, IsFeedbackVector(object));
- CSA_SLOW_ASSERT(this, MatchesParameterMode(slot_index_node, parameter_mode));
+void CodeStubAssembler::StoreFeedbackVectorSlot(
+ TNode<FeedbackVector> feedback_vector, TNode<UintPtrT> slot,
+ TNode<AnyTaggedT> value, WriteBarrierMode barrier_mode,
+ int additional_offset) {
DCHECK(IsAligned(additional_offset, kTaggedSize));
DCHECK(barrier_mode == SKIP_WRITE_BARRIER ||
barrier_mode == UNSAFE_SKIP_WRITE_BARRIER ||
barrier_mode == UPDATE_WRITE_BARRIER);
int header_size =
FeedbackVector::kFeedbackSlotsOffset + additional_offset - kHeapObjectTag;
- TNode<IntPtrT> offset = ElementOffsetFromIndex(
- slot_index_node, HOLEY_ELEMENTS, parameter_mode, header_size);
- // Check that slot_index_node <= object.length.
+ TNode<IntPtrT> offset =
+ ElementOffsetFromIndex(Signed(slot), HOLEY_ELEMENTS, header_size);
+ // Check that slot <= feedback_vector.length.
CSA_ASSERT(this,
- IsOffsetInBounds(offset, LoadFeedbackVectorLength(CAST(object)),
+ IsOffsetInBounds(offset, LoadFeedbackVectorLength(feedback_vector),
FeedbackVector::kHeaderSize));
if (barrier_mode == SKIP_WRITE_BARRIER) {
- StoreNoWriteBarrier(MachineRepresentation::kTagged, object, offset, value);
+ StoreNoWriteBarrier(MachineRepresentation::kTagged, feedback_vector, offset,
+ value);
} else if (barrier_mode == UNSAFE_SKIP_WRITE_BARRIER) {
- UnsafeStoreNoWriteBarrier(MachineRepresentation::kTagged, object, offset,
- value);
+ UnsafeStoreNoWriteBarrier(MachineRepresentation::kTagged, feedback_vector,
+ offset, value);
} else {
- Store(object, offset, value);
+ Store(feedback_vector, offset, value);
}
}
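+// Barrier-mode note (a rough guide, not a complete specification):
+// SKIP_WRITE_BARRIER is for stores that provably need no barrier (e.g. Smi
+// values, or a target object known to be freshly allocated in new space);
+// UNSAFE_SKIP_WRITE_BARRIER additionally opts out of the debug-mode
+// write-barrier verification; UPDATE_WRITE_BARRIER is the safe default.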
@@ -3045,33 +3112,29 @@ TNode<Smi> CodeStubAssembler::BuildAppendJSArray(ElementsKind kind,
Label success(this);
TVARIABLE(Smi, var_tagged_length);
ParameterMode mode = OptimalParameterMode();
- VARIABLE(var_length, OptimalParameterRepresentation(),
- TaggedToParameter(LoadFastJSArrayLength(array), mode));
- VARIABLE(var_elements, MachineRepresentation::kTagged, LoadElements(array));
+ TVARIABLE(BInt, var_length, SmiToBInt(LoadFastJSArrayLength(array)));
+ TVARIABLE(FixedArrayBase, var_elements, LoadElements(array));
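+  // BInt is the build's preferred index representation (roughly: Smi on
+  // 32-bit targets and under pointer compression, IntPtrT otherwise), so
+  // keeping |var_length| as BInt avoids repeated tag/untag round-trips in
+  // the push loop below.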
// Resize the capacity of the fixed array if it doesn't fit.
TNode<IntPtrT> first = arg_index->value();
- Node* growth = IntPtrToParameter(
- IntPtrSub(UncheckedCast<IntPtrT>(args->GetLength(INTPTR_PARAMETERS)),
- first),
- mode);
+ TNode<BInt> growth = IntPtrToBInt(IntPtrSub(args->GetLength(), first));
PossiblyGrowElementsCapacity(mode, kind, array, var_length.value(),
&var_elements, growth, &pre_bailout);
// Push each argument onto the end of the array now that there is enough
// capacity.
CodeStubAssembler::VariableList push_vars({&var_length}, zone());
- Node* elements = var_elements.value();
+ TNode<FixedArrayBase> elements = var_elements.value();
args->ForEach(
push_vars,
- [this, kind, mode, elements, &var_length, &pre_bailout](Node* arg) {
+ [&](TNode<Object> arg) {
TryStoreArrayElement(kind, mode, &pre_bailout, elements,
var_length.value(), arg);
- Increment(&var_length, 1, mode);
+ Increment(&var_length);
},
- first, nullptr);
+ first);
{
- TNode<Smi> length = ParameterToTagged(var_length.value(), mode);
+ TNode<Smi> length = BIntToSmi(var_length.value());
var_tagged_length = length;
StoreObjectFieldNoWriteBarrier(array, JSArray::kLengthOffset, length);
Goto(&success);
@@ -3111,8 +3174,7 @@ void CodeStubAssembler::BuildAppendJSArray(ElementsKind kind, Node* array,
CSA_SLOW_ASSERT(this, IsJSArray(array));
Comment("BuildAppendJSArray: ", ElementsKindToString(kind));
ParameterMode mode = OptimalParameterMode();
- VARIABLE(var_length, OptimalParameterRepresentation(),
- TaggedToParameter(LoadFastJSArrayLength(array), mode));
+ TVARIABLE(BInt, var_length, SmiToBInt(LoadFastJSArrayLength(array)));
VARIABLE(var_elements, MachineRepresentation::kTagged, LoadElements(array));
// Resize the capacity of the fixed array if it doesn't fit.
@@ -3124,9 +3186,9 @@ void CodeStubAssembler::BuildAppendJSArray(ElementsKind kind, Node* array,
// capacity.
TryStoreArrayElement(kind, mode, bailout, var_elements.value(),
var_length.value(), value);
- Increment(&var_length, 1, mode);
+ Increment(&var_length);
- TNode<Smi> length = ParameterToTagged(var_length.value(), mode);
+ TNode<Smi> length = BIntToSmi(var_length.value());
StoreObjectFieldNoWriteBarrier(array, JSArray::kLengthOffset, length);
}
@@ -3138,7 +3200,7 @@ Node* CodeStubAssembler::AllocateCellWithValue(Node* value,
return result;
}
-Node* CodeStubAssembler::LoadCellValue(Node* cell) {
+TNode<Object> CodeStubAssembler::LoadCellValue(Node* cell) {
CSA_SLOW_ASSERT(this, HasInstanceType(cell, CELL_TYPE));
return LoadObjectField(cell, Cell::kValueOffset);
}
@@ -3278,7 +3340,8 @@ TNode<ByteArray> CodeStubAssembler::AllocateByteArray(TNode<UintPtrT> length,
TNode<IntPtrT> raw_size =
GetArrayAllocationSize(Signed(length), UINT8_ELEMENTS, INTPTR_PARAMETERS,
ByteArray::kHeaderSize + kObjectAlignmentMask);
- TNode<WordT> size = WordAnd(raw_size, IntPtrConstant(~kObjectAlignmentMask));
+ TNode<IntPtrT> size =
+ WordAnd(raw_size, IntPtrConstant(~kObjectAlignmentMask));
Branch(IntPtrLessThanOrEqual(size, IntPtrConstant(kMaxRegularHeapObjectSize)),
&if_sizeissmall, &if_notsizeissmall);
@@ -3352,7 +3415,8 @@ TNode<String> CodeStubAssembler::AllocateSeqOneByteString(
TNode<IntPtrT> raw_size = GetArrayAllocationSize(
Signed(ChangeUint32ToWord(length)), UINT8_ELEMENTS, INTPTR_PARAMETERS,
SeqOneByteString::kHeaderSize + kObjectAlignmentMask);
- TNode<WordT> size = WordAnd(raw_size, IntPtrConstant(~kObjectAlignmentMask));
+ TNode<IntPtrT> size =
+ WordAnd(raw_size, IntPtrConstant(~kObjectAlignmentMask));
Branch(IntPtrLessThanOrEqual(size, IntPtrConstant(kMaxRegularHeapObjectSize)),
&if_sizeissmall, &if_notsizeissmall);
@@ -3423,7 +3487,8 @@ TNode<String> CodeStubAssembler::AllocateSeqTwoByteString(
TNode<IntPtrT> raw_size = GetArrayAllocationSize(
Signed(ChangeUint32ToWord(length)), UINT16_ELEMENTS, INTPTR_PARAMETERS,
SeqOneByteString::kHeaderSize + kObjectAlignmentMask);
- TNode<WordT> size = WordAnd(raw_size, IntPtrConstant(~kObjectAlignmentMask));
+ TNode<IntPtrT> size =
+ WordAnd(raw_size, IntPtrConstant(~kObjectAlignmentMask));
Branch(IntPtrLessThanOrEqual(size, IntPtrConstant(kMaxRegularHeapObjectSize)),
&if_sizeissmall, &if_notsizeissmall);
@@ -3496,35 +3561,6 @@ TNode<String> CodeStubAssembler::AllocateSlicedTwoByteString(
offset);
}
-TNode<String> CodeStubAssembler::AllocateConsString(TNode<Uint32T> length,
- TNode<String> left,
- TNode<String> right) {
- // Added string can be a cons string.
- Comment("Allocating ConsString");
- TNode<Int32T> left_instance_type = LoadInstanceType(left);
- TNode<Int32T> right_instance_type = LoadInstanceType(right);
-
- // Determine the resulting ConsString map to use depending on whether
- // any of {left} or {right} has two byte encoding.
- STATIC_ASSERT(kOneByteStringTag != 0);
- STATIC_ASSERT(kTwoByteStringTag == 0);
- TNode<Int32T> combined_instance_type =
- Word32And(left_instance_type, right_instance_type);
- TNode<Map> result_map = CAST(Select<Object>(
- IsSetWord32(combined_instance_type, kStringEncodingMask),
- [=] { return ConsOneByteStringMapConstant(); },
- [=] { return ConsStringMapConstant(); }));
- TNode<HeapObject> result = AllocateInNewSpace(ConsString::kSize);
- StoreMapNoWriteBarrier(result, result_map);
- StoreObjectFieldNoWriteBarrier(result, ConsString::kLengthOffset, length,
- MachineRepresentation::kWord32);
- StoreObjectFieldNoWriteBarrier(result, ConsString::kHashFieldOffset,
- Int32Constant(String::kEmptyHashField),
- MachineRepresentation::kWord32);
- StoreObjectFieldNoWriteBarrier(result, ConsString::kFirstOffset, left);
- StoreObjectFieldNoWriteBarrier(result, ConsString::kSecondOffset, right);
- return CAST(result);
-}
TNode<NameDictionary> CodeStubAssembler::AllocateNameDictionary(
int at_least_space_for) {
@@ -3762,106 +3798,26 @@ template V8_EXPORT_PRIVATE TNode<SmallOrderedHashSet>
CodeStubAssembler::AllocateSmallOrderedHashTable<SmallOrderedHashSet>(
TNode<IntPtrT> capacity);
-template <typename CollectionType>
-void CodeStubAssembler::FindOrderedHashTableEntry(
- Node* table, Node* hash,
- const std::function<void(TNode<Object>, Label*, Label*)>& key_compare,
- Variable* entry_start_position, Label* entry_found, Label* not_found) {
- // Get the index of the bucket.
- TNode<IntPtrT> const number_of_buckets =
- SmiUntag(CAST(UnsafeLoadFixedArrayElement(
- CAST(table), CollectionType::NumberOfBucketsIndex())));
- TNode<WordT> const bucket =
- WordAnd(hash, IntPtrSub(number_of_buckets, IntPtrConstant(1)));
- TNode<IntPtrT> const first_entry = SmiUntag(CAST(UnsafeLoadFixedArrayElement(
- CAST(table), bucket,
- CollectionType::HashTableStartIndex() * kTaggedSize)));
-
- // Walk the bucket chain.
- TNode<IntPtrT> entry_start;
- Label if_key_found(this);
- {
- TVARIABLE(IntPtrT, var_entry, first_entry);
- Label loop(this, {&var_entry, entry_start_position}),
- continue_next_entry(this);
- Goto(&loop);
- BIND(&loop);
-
- // If the entry index is the not-found sentinel, we are done.
- GotoIf(IntPtrEqual(var_entry.value(),
- IntPtrConstant(CollectionType::kNotFound)),
- not_found);
-
- // Make sure the entry index is within range.
- CSA_ASSERT(
- this,
- UintPtrLessThan(
- var_entry.value(),
- SmiUntag(SmiAdd(
- CAST(UnsafeLoadFixedArrayElement(
- CAST(table), CollectionType::NumberOfElementsIndex())),
- CAST(UnsafeLoadFixedArrayElement(
- CAST(table),
- CollectionType::NumberOfDeletedElementsIndex()))))));
-
- // Compute the index of the entry relative to kHashTableStartIndex.
- entry_start =
- IntPtrAdd(IntPtrMul(var_entry.value(),
- IntPtrConstant(CollectionType::kEntrySize)),
- number_of_buckets);
-
- // Load the key from the entry.
- TNode<Object> const candidate_key = UnsafeLoadFixedArrayElement(
- CAST(table), entry_start,
- CollectionType::HashTableStartIndex() * kTaggedSize);
-
- key_compare(candidate_key, &if_key_found, &continue_next_entry);
-
- BIND(&continue_next_entry);
- // Load the index of the next entry in the bucket chain.
- var_entry = SmiUntag(CAST(UnsafeLoadFixedArrayElement(
- CAST(table), entry_start,
- (CollectionType::HashTableStartIndex() + CollectionType::kChainOffset) *
- kTaggedSize)));
-
- Goto(&loop);
- }
-
- BIND(&if_key_found);
- entry_start_position->Bind(entry_start);
- Goto(entry_found);
-}
-
-template void CodeStubAssembler::FindOrderedHashTableEntry<OrderedHashMap>(
- Node* table, Node* hash,
- const std::function<void(TNode<Object>, Label*, Label*)>& key_compare,
- Variable* entry_start_position, Label* entry_found, Label* not_found);
-template void CodeStubAssembler::FindOrderedHashTableEntry<OrderedHashSet>(
- Node* table, Node* hash,
- const std::function<void(TNode<Object>, Label*, Label*)>& key_compare,
- Variable* entry_start_position, Label* entry_found, Label* not_found);
-
Node* CodeStubAssembler::AllocateStruct(Node* map, AllocationFlags flags) {
Comment("AllocateStruct");
CSA_ASSERT(this, IsMap(map));
TNode<IntPtrT> size = TimesTaggedSize(LoadMapInstanceSizeInWords(map));
TNode<HeapObject> object = Allocate(size, flags);
StoreMapNoWriteBarrier(object, map);
- InitializeStructBody(object, map, size, Struct::kHeaderSize);
+ InitializeStructBody(object, size, Struct::kHeaderSize);
return object;
}
-void CodeStubAssembler::InitializeStructBody(Node* object, Node* map,
- Node* size, int start_offset) {
- CSA_SLOW_ASSERT(this, IsMap(map));
+void CodeStubAssembler::InitializeStructBody(TNode<HeapObject> object,
+ TNode<IntPtrT> size,
+ int start_offset) {
Comment("InitializeStructBody");
TNode<Oddball> filler = UndefinedConstant();
// Calculate the untagged field addresses.
- object = BitcastTaggedToWord(object);
- TNode<WordT> start_address =
- IntPtrAdd(object, IntPtrConstant(start_offset - kHeapObjectTag));
- TNode<WordT> end_address =
- IntPtrSub(IntPtrAdd(object, size), IntPtrConstant(kHeapObjectTag));
+ TNode<IntPtrT> start_address =
+ IntPtrAdd(BitcastTaggedToWord(object),
+ IntPtrConstant(start_offset - kHeapObjectTag));
+  TNode<IntPtrT> end_address = IntPtrSub(
+      IntPtrAdd(BitcastTaggedToWord(object), size),
+      IntPtrConstant(kHeapObjectTag));
StoreFieldsNoWriteBarrier(start_address, end_address, filler);
}
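+// Address sketch (64-bit, kHeapObjectTag == 1, kTaggedSize == 8, for
+// illustration): an object at 0x1000 has tagged pointer 0x1001; with
+// size == 0x20 and start_offset == Struct::kHeaderSize == 8 the filler loop
+// covers the untagged range [0x1008, 0x1020), i.e. every field after the
+// map word up to object + size.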
@@ -3883,8 +3839,9 @@ TNode<JSObject> CodeStubAssembler::AllocateJSObjectFromMap(
}
void CodeStubAssembler::InitializeJSObjectFromMap(
- Node* object, Node* map, Node* instance_size, Node* properties,
- Node* elements, SlackTrackingMode slack_tracking_mode) {
+ SloppyTNode<HeapObject> object, SloppyTNode<Map> map,
+ SloppyTNode<IntPtrT> instance_size, Node* properties, Node* elements,
+ SlackTrackingMode slack_tracking_mode) {
CSA_SLOW_ASSERT(this, IsMap(map));
// This helper assumes that the object is in new-space, as guarded by the
// check in AllocatedJSObjectFromMap.
@@ -3915,7 +3872,8 @@ void CodeStubAssembler::InitializeJSObjectFromMap(
}
void CodeStubAssembler::InitializeJSObjectBodyNoSlackTracking(
- Node* object, Node* map, Node* instance_size, int start_offset) {
+ SloppyTNode<HeapObject> object, SloppyTNode<Map> map,
+ SloppyTNode<IntPtrT> instance_size, int start_offset) {
STATIC_ASSERT(Map::kNoSlackTracking == 0);
CSA_ASSERT(
this, IsClearWord32<Map::ConstructionCounterBits>(LoadMapBitField3(map)));
@@ -3924,8 +3882,8 @@ void CodeStubAssembler::InitializeJSObjectBodyNoSlackTracking(
}
void CodeStubAssembler::InitializeJSObjectBodyWithSlackTracking(
- Node* object, Node* map, Node* instance_size) {
- CSA_SLOW_ASSERT(this, IsMap(map));
+ SloppyTNode<HeapObject> object, SloppyTNode<Map> map,
+ SloppyTNode<IntPtrT> instance_size) {
Comment("InitializeJSObjectBodyNoSlackTracking");
// Perform in-object slack tracking if requested.
@@ -3953,9 +3911,9 @@ void CodeStubAssembler::InitializeJSObjectBodyWithSlackTracking(
    // The object still has in-object slack, therefore the |used_or_unused|
    // field contains the "used" value.
- TNode<UintPtrT> used_size = TimesTaggedSize(ChangeUint32ToWord(
+ TNode<IntPtrT> used_size = Signed(TimesTaggedSize(ChangeUint32ToWord(
LoadObjectField(map, Map::kUsedOrUnusedInstanceSizeInWordsOffset,
- MachineType::Uint8())));
+ MachineType::Uint8()))));
Comment("iInitialize filler fields");
InitializeFieldsWithRoot(object, used_size, instance_size,
@@ -3984,19 +3942,19 @@ void CodeStubAssembler::InitializeJSObjectBodyWithSlackTracking(
BIND(&end);
}
-void CodeStubAssembler::StoreFieldsNoWriteBarrier(Node* start_address,
- Node* end_address,
- Node* value) {
+void CodeStubAssembler::StoreFieldsNoWriteBarrier(TNode<IntPtrT> start_address,
+ TNode<IntPtrT> end_address,
+ TNode<Object> value) {
Comment("StoreFieldsNoWriteBarrier");
CSA_ASSERT(this, WordIsAligned(start_address, kTaggedSize));
CSA_ASSERT(this, WordIsAligned(end_address, kTaggedSize));
- BuildFastLoop(
+ BuildFastLoop<IntPtrT>(
start_address, end_address,
- [this, value](Node* current) {
+ [=](TNode<IntPtrT> current) {
UnsafeStoreNoWriteBarrier(MachineRepresentation::kTagged, current,
value);
},
- kTaggedSize, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
+ kTaggedSize, IndexAdvanceMode::kPost);
}
TNode<BoolT> CodeStubAssembler::IsValidFastJSArrayCapacity(
@@ -4008,12 +3966,12 @@ TNode<BoolT> CodeStubAssembler::IsValidFastJSArrayCapacity(
TNode<JSArray> CodeStubAssembler::AllocateJSArray(
TNode<Map> array_map, TNode<FixedArrayBase> elements, TNode<Smi> length,
- Node* allocation_site, int array_header_size) {
+ TNode<AllocationSite> allocation_site, int array_header_size) {
Comment("begin allocation of JSArray passing in elements");
CSA_SLOW_ASSERT(this, TaggedIsPositiveSmi(length));
int base_size = array_header_size;
- if (allocation_site != nullptr) {
+ if (!allocation_site.is_null()) {
base_size += AllocationMemento::kSize;
}
@@ -4027,8 +3985,9 @@ TNode<JSArray> CodeStubAssembler::AllocateJSArray(
std::pair<TNode<JSArray>, TNode<FixedArrayBase>>
CodeStubAssembler::AllocateUninitializedJSArrayWithElements(
ElementsKind kind, TNode<Map> array_map, TNode<Smi> length,
- Node* allocation_site, Node* capacity, ParameterMode capacity_mode,
- AllocationFlags allocation_flags, int array_header_size) {
+ TNode<AllocationSite> allocation_site, Node* capacity,
+ ParameterMode capacity_mode, AllocationFlags allocation_flags,
+ int array_header_size) {
Comment("begin allocation of JSArray with elements");
CHECK_EQ(allocation_flags & ~kAllowLargeObjectAllocation, 0);
CSA_SLOW_ASSERT(this, TaggedIsPositiveSmi(length));
@@ -4065,7 +4024,9 @@ CodeStubAssembler::AllocateUninitializedJSArrayWithElements(
BIND(&nonempty);
{
int base_size = array_header_size;
- if (allocation_site != nullptr) base_size += AllocationMemento::kSize;
+ if (!allocation_site.is_null()) {
+ base_size += AllocationMemento::kSize;
+ }
const int elements_offset = base_size;
@@ -4138,8 +4099,8 @@ CodeStubAssembler::AllocateUninitializedJSArrayWithElements(
}
TNode<JSArray> CodeStubAssembler::AllocateUninitializedJSArray(
- TNode<Map> array_map, TNode<Smi> length, Node* allocation_site,
- TNode<IntPtrT> size_in_bytes) {
+ TNode<Map> array_map, TNode<Smi> length,
+ TNode<AllocationSite> allocation_site, TNode<IntPtrT> size_in_bytes) {
CSA_SLOW_ASSERT(this, TaggedIsPositiveSmi(length));
// Allocate space for the JSArray and the elements FixedArray in one go.
@@ -4150,7 +4111,7 @@ TNode<JSArray> CodeStubAssembler::AllocateUninitializedJSArray(
StoreObjectFieldRoot(array, JSArray::kPropertiesOrHashOffset,
RootIndex::kEmptyFixedArray);
- if (allocation_site != nullptr) {
+ if (!allocation_site.is_null()) {
InitializeAllocationMemento(array, IntPtrConstant(JSArray::kSize),
allocation_site);
}
@@ -4160,7 +4121,7 @@ TNode<JSArray> CodeStubAssembler::AllocateUninitializedJSArray(
TNode<JSArray> CodeStubAssembler::AllocateJSArray(
ElementsKind kind, TNode<Map> array_map, Node* capacity, TNode<Smi> length,
- Node* allocation_site, ParameterMode capacity_mode,
+ TNode<AllocationSite> allocation_site, ParameterMode capacity_mode,
AllocationFlags allocation_flags) {
CSA_SLOW_ASSERT(this, TaggedIsPositiveSmi(length));
CSA_SLOW_ASSERT(this, MatchesParameterMode(capacity, capacity_mode));
@@ -4189,10 +4150,9 @@ TNode<JSArray> CodeStubAssembler::AllocateJSArray(
return array;
}
-Node* CodeStubAssembler::ExtractFastJSArray(Node* context, Node* array,
- Node* begin, Node* count,
- ParameterMode mode, Node* capacity,
- Node* allocation_site) {
+Node* CodeStubAssembler::ExtractFastJSArray(
+ TNode<Context> context, TNode<JSArray> array, Node* begin, Node* count,
+ ParameterMode mode, Node* capacity, TNode<AllocationSite> allocation_site) {
TNode<Map> original_array_map = LoadMap(array);
TNode<Int32T> elements_kind = LoadMapElementsKind(original_array_map);
@@ -4209,18 +4169,16 @@ Node* CodeStubAssembler::ExtractFastJSArray(Node* context, Node* array,
return result;
}
-Node* CodeStubAssembler::CloneFastJSArray(Node* context, Node* array,
- ParameterMode mode,
- Node* allocation_site,
- HoleConversionMode convert_holes) {
+TNode<JSArray> CodeStubAssembler::CloneFastJSArray(
+ TNode<Context> context, TNode<JSArray> array, ParameterMode mode,
+ TNode<AllocationSite> allocation_site, HoleConversionMode convert_holes) {
// TODO(dhai): we should be able to assert IsFastJSArray(array) here, but this
// function is also used to copy boilerplates even when the no-elements
// protector is invalid. This function should be renamed to reflect its uses.
- CSA_ASSERT(this, IsJSArray(array));
TNode<Number> length = LoadJSArrayLength(array);
- Node* new_elements = nullptr;
- VARIABLE(var_new_elements, MachineRepresentation::kTagged);
+ TNode<FixedArrayBase> new_elements;
+ TVARIABLE(FixedArrayBase, var_new_elements);
TVARIABLE(Int32T, var_elements_kind, LoadMapElementsKind(LoadMap(array)));
Label allocate_jsarray(this), holey_extract(this),
@@ -4240,7 +4198,7 @@ Node* CodeStubAssembler::CloneFastJSArray(Node* context, Node* array,
TaggedToParameter(CAST(length), mode), nullptr,
ExtractFixedArrayFlag::kAllFixedArraysDontCopyCOW, mode,
nullptr, var_elements_kind.value());
- var_new_elements.Bind(new_elements);
+ var_new_elements = new_elements;
Goto(&allocate_jsarray);
if (need_conversion) {
@@ -4257,7 +4215,7 @@ Node* CodeStubAssembler::CloneFastJSArray(Node* context, Node* array,
LoadElements(array), IntPtrOrSmiConstant(0, mode),
TaggedToParameter(CAST(length), mode), nullptr,
ExtractFixedArrayFlag::kAllFixedArrays, mode, &var_holes_converted);
- var_new_elements.Bind(new_elements);
+ var_new_elements = new_elements;
// If the array type didn't change, use the original elements kind.
GotoIfNot(var_holes_converted.value(), &allocate_jsarray);
// Otherwise use PACKED_ELEMENTS for the target's elements kind.
@@ -4283,8 +4241,8 @@ Node* CodeStubAssembler::CloneFastJSArray(Node* context, Node* array,
TNode<Map> array_map =
LoadJSArrayElementsMap(var_elements_kind.value(), native_context);
- TNode<JSArray> result = AllocateJSArray(
- array_map, CAST(var_new_elements.value()), CAST(length), allocation_site);
+ TNode<JSArray> result = AllocateJSArray(array_map, var_new_elements.value(),
+ CAST(length), allocation_site);
return result;
}
@@ -4555,14 +4513,14 @@ TNode<FixedArrayBase> CodeStubAssembler::ExtractFixedDoubleArrayFillingHoles(
const int first_element_offset = FixedArray::kHeaderSize - kHeapObjectTag;
TNode<IntPtrT> first_from_element_offset =
ElementOffsetFromIndex(first, kind, mode, 0);
- TNode<WordT> limit_offset = IntPtrAdd(first_from_element_offset,
- IntPtrConstant(first_element_offset));
+ TNode<IntPtrT> limit_offset = IntPtrAdd(first_from_element_offset,
+ IntPtrConstant(first_element_offset));
TVARIABLE(IntPtrT, var_from_offset,
ElementOffsetFromIndex(IntPtrOrSmiAdd(first, count, mode), kind,
mode, first_element_offset));
Label decrement(this, {&var_from_offset}), done(this);
- TNode<WordT> to_array_adjusted =
+ TNode<IntPtrT> to_array_adjusted =
IntPtrSub(BitcastTaggedToWord(to_elements), first_from_element_offset);
Branch(WordEqual(var_from_offset.value(), limit_offset), &done, &decrement);
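+  // |to_array_adjusted| folds the source bias into the destination base so
+  // that one decrementing offset can drive both arrays:
+  //   to_elements + (from_offset - first_from_element_offset)
+  //     == to_array_adjusted + from_offset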
@@ -4908,12 +4866,10 @@ void CodeStubAssembler::MoveElements(ElementsKind kind,
TNode<IntPtrT> elements_intptr = BitcastTaggedToWord(elements);
TNode<IntPtrT> target_data_ptr =
IntPtrAdd(elements_intptr,
- ElementOffsetFromIndex(dst_index, kind, INTPTR_PARAMETERS,
- fa_base_data_offset));
+ ElementOffsetFromIndex(dst_index, kind, fa_base_data_offset));
TNode<IntPtrT> source_data_ptr =
IntPtrAdd(elements_intptr,
- ElementOffsetFromIndex(src_index, kind, INTPTR_PARAMETERS,
- fa_base_data_offset));
+ ElementOffsetFromIndex(src_index, kind, fa_base_data_offset));
TNode<ExternalReference> memmove =
ExternalConstant(ExternalReference::libc_memmove_function());
CallCFunction(memmove, MachineType::Pointer(),
@@ -4997,10 +4953,10 @@ void CodeStubAssembler::CopyElements(ElementsKind kind,
IntPtrMul(length, IntPtrConstant(ElementsKindToByteSize(kind)));
static const int32_t fa_base_data_offset =
FixedArrayBase::kHeaderSize - kHeapObjectTag;
- TNode<IntPtrT> src_offset_start = ElementOffsetFromIndex(
- src_index, kind, INTPTR_PARAMETERS, fa_base_data_offset);
- TNode<IntPtrT> dst_offset_start = ElementOffsetFromIndex(
- dst_index, kind, INTPTR_PARAMETERS, fa_base_data_offset);
+ TNode<IntPtrT> src_offset_start =
+ ElementOffsetFromIndex(src_index, kind, fa_base_data_offset);
+ TNode<IntPtrT> dst_offset_start =
+ ElementOffsetFromIndex(dst_index, kind, fa_base_data_offset);
TNode<IntPtrT> src_elements_intptr = BitcastTaggedToWord(src_elements);
TNode<IntPtrT> source_data_ptr =
IntPtrAdd(src_elements_intptr, src_offset_start);
@@ -5283,65 +5239,6 @@ void CodeStubAssembler::CopyPropertyArrayValues(Node* from_array,
Comment("] CopyPropertyArrayValues");
}
-void CodeStubAssembler::CopyStringCharacters(Node* from_string, Node* to_string,
- TNode<IntPtrT> from_index,
- TNode<IntPtrT> to_index,
- TNode<IntPtrT> character_count,
- String::Encoding from_encoding,
- String::Encoding to_encoding) {
- // Cannot assert IsString(from_string) and IsString(to_string) here because
- // CSA::SubString can pass in faked sequential strings when handling external
- // subject strings.
- bool from_one_byte = from_encoding == String::ONE_BYTE_ENCODING;
- bool to_one_byte = to_encoding == String::ONE_BYTE_ENCODING;
- DCHECK_IMPLIES(to_one_byte, from_one_byte);
- Comment("CopyStringCharacters ",
- from_one_byte ? "ONE_BYTE_ENCODING" : "TWO_BYTE_ENCODING", " -> ",
- to_one_byte ? "ONE_BYTE_ENCODING" : "TWO_BYTE_ENCODING");
-
- ElementsKind from_kind = from_one_byte ? UINT8_ELEMENTS : UINT16_ELEMENTS;
- ElementsKind to_kind = to_one_byte ? UINT8_ELEMENTS : UINT16_ELEMENTS;
- STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
- int header_size = SeqOneByteString::kHeaderSize - kHeapObjectTag;
- TNode<IntPtrT> from_offset = ElementOffsetFromIndex(
- from_index, from_kind, INTPTR_PARAMETERS, header_size);
- TNode<IntPtrT> to_offset =
- ElementOffsetFromIndex(to_index, to_kind, INTPTR_PARAMETERS, header_size);
- TNode<IntPtrT> byte_count =
- ElementOffsetFromIndex(character_count, from_kind, INTPTR_PARAMETERS);
- TNode<WordT> limit_offset = IntPtrAdd(from_offset, byte_count);
-
- // Prepare the fast loop
- MachineType type =
- from_one_byte ? MachineType::Uint8() : MachineType::Uint16();
- MachineRepresentation rep = to_one_byte ? MachineRepresentation::kWord8
- : MachineRepresentation::kWord16;
- int from_increment = 1 << ElementsKindToShiftSize(from_kind);
- int to_increment = 1 << ElementsKindToShiftSize(to_kind);
-
- VARIABLE(current_to_offset, MachineType::PointerRepresentation(), to_offset);
- VariableList vars({&current_to_offset}, zone());
- int to_index_constant = 0, from_index_constant = 0;
- bool index_same = (from_encoding == to_encoding) &&
- (from_index == to_index ||
- (ToInt32Constant(from_index, &from_index_constant) &&
- ToInt32Constant(to_index, &to_index_constant) &&
- from_index_constant == to_index_constant));
- BuildFastLoop(
- vars, from_offset, limit_offset,
- [this, from_string, to_string, &current_to_offset, to_increment, type,
- rep, index_same](Node* offset) {
- Node* value = Load(type, from_string, offset);
- StoreNoWriteBarrier(rep, to_string,
- index_same ? offset : current_to_offset.value(),
- value);
- if (!index_same) {
- Increment(&current_to_offset, to_increment);
- }
- },
- from_increment, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
-}
-
Node* CodeStubAssembler::LoadElementAndPrepareForStore(Node* array,
Node* offset,
ElementsKind from_kind,
@@ -5381,9 +5278,9 @@ Node* CodeStubAssembler::CalculateNewElementsCapacity(Node* old_capacity,
return IntPtrOrSmiAdd(new_capacity, padding, mode);
}
-Node* CodeStubAssembler::TryGrowElementsCapacity(Node* object, Node* elements,
- ElementsKind kind, Node* key,
- Label* bailout) {
+TNode<FixedArrayBase> CodeStubAssembler::TryGrowElementsCapacity(
+ Node* object, Node* elements, ElementsKind kind, Node* key,
+ Label* bailout) {
CSA_SLOW_ASSERT(this, TaggedIsNotSmi(object));
CSA_SLOW_ASSERT(this, IsFixedArrayWithKindOrEmpty(elements, kind));
CSA_SLOW_ASSERT(this, TaggedIsSmi(key));
@@ -5395,11 +5292,9 @@ Node* CodeStubAssembler::TryGrowElementsCapacity(Node* object, Node* elements,
TaggedToParameter(capacity, mode), mode, bailout);
}
-Node* CodeStubAssembler::TryGrowElementsCapacity(Node* object, Node* elements,
- ElementsKind kind, Node* key,
- Node* capacity,
- ParameterMode mode,
- Label* bailout) {
+TNode<FixedArrayBase> CodeStubAssembler::TryGrowElementsCapacity(
+ Node* object, Node* elements, ElementsKind kind, Node* key, Node* capacity,
+ ParameterMode mode, Label* bailout) {
Comment("TryGrowElementsCapacity");
CSA_SLOW_ASSERT(this, TaggedIsNotSmi(object));
CSA_SLOW_ASSERT(this, IsFixedArrayWithKindOrEmpty(elements, kind));
@@ -5418,7 +5313,7 @@ Node* CodeStubAssembler::TryGrowElementsCapacity(Node* object, Node* elements,
new_capacity, mode, bailout);
}
-Node* CodeStubAssembler::GrowElementsCapacity(
+TNode<FixedArrayBase> CodeStubAssembler::GrowElementsCapacity(
Node* object, Node* elements, ElementsKind from_kind, ElementsKind to_kind,
Node* capacity, Node* new_capacity, ParameterMode mode, Label* bailout) {
Comment("[ GrowElementsCapacity");
@@ -5471,45 +5366,22 @@ void CodeStubAssembler::InitializeAllocationMemento(Node* base,
Comment("]");
}
-Node* CodeStubAssembler::TryTaggedToFloat64(Node* value,
- Label* if_valueisnotnumber) {
- Label out(this);
- VARIABLE(var_result, MachineRepresentation::kFloat64);
-
- // Check if the {value} is a Smi or a HeapObject.
- Label if_valueissmi(this), if_valueisnotsmi(this);
- Branch(TaggedIsSmi(value), &if_valueissmi, &if_valueisnotsmi);
-
- BIND(&if_valueissmi);
- {
- // Convert the Smi {value}.
- var_result.Bind(SmiToFloat64(value));
- Goto(&out);
- }
-
- BIND(&if_valueisnotsmi);
- {
- // Check if {value} is a HeapNumber.
- Label if_valueisheapnumber(this);
- Branch(IsHeapNumber(value), &if_valueisheapnumber, if_valueisnotnumber);
-
- BIND(&if_valueisheapnumber);
- {
- // Load the floating point value.
- var_result.Bind(LoadHeapNumberValue(value));
- Goto(&out);
- }
- }
- BIND(&out);
- return var_result.value();
+TNode<Float64T> CodeStubAssembler::TryTaggedToFloat64(
+ TNode<Object> value, Label* if_valueisnotnumber) {
+ return Select<Float64T>(
+ TaggedIsSmi(value), [&]() { return SmiToFloat64(CAST(value)); },
+ [&]() {
+ GotoIfNot(IsHeapNumber(CAST(value)), if_valueisnotnumber);
+ return LoadHeapNumberValue(CAST(value));
+ });
}
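+// Equivalent scalar logic (a pseudocode sketch of the diamond that Select
+// builds):
+//   double TryTaggedToFloat64(Tagged value) {
+//     if (IsSmi(value)) return SmiToDouble(value);
+//     if (!IsHeapNumber(value)) goto if_valueisnotnumber;
+//     return HeapNumberValue(value);
+//   }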
-Node* CodeStubAssembler::TruncateTaggedToFloat64(Node* context, Node* value) {
+TNode<Float64T> CodeStubAssembler::TruncateTaggedToFloat64(
+ SloppyTNode<Context> context, SloppyTNode<Object> value) {
// We might need to loop once due to ToNumber conversion.
- VARIABLE(var_value, MachineRepresentation::kTagged);
- VARIABLE(var_result, MachineRepresentation::kFloat64);
+ TVARIABLE(Object, var_value, value);
+ TVARIABLE(Float64T, var_result);
Label loop(this, &var_value), done_loop(this, &var_result);
- var_value.Bind(value);
Goto(&loop);
BIND(&loop);
{
@@ -5520,14 +5392,13 @@ Node* CodeStubAssembler::TruncateTaggedToFloat64(Node* context, Node* value) {
// Convert {value} to Float64 if it is a number and convert it to a number
// otherwise.
- Node* const result = TryTaggedToFloat64(value, &if_valueisnotnumber);
- var_result.Bind(result);
+ var_result = TryTaggedToFloat64(value, &if_valueisnotnumber);
Goto(&done_loop);
BIND(&if_valueisnotnumber);
{
// Convert the {value} to a Number first.
- var_value.Bind(CallBuiltin(Builtins::kNonNumberToNumber, context, value));
+ var_value = CallBuiltin(Builtins::kNonNumberToNumber, context, value);
Goto(&loop);
}
}
@@ -5535,8 +5406,9 @@ Node* CodeStubAssembler::TruncateTaggedToFloat64(Node* context, Node* value) {
return var_result.value();
}
-Node* CodeStubAssembler::TruncateTaggedToWord32(Node* context, Node* value) {
- VARIABLE(var_result, MachineRepresentation::kWord32);
+TNode<Word32T> CodeStubAssembler::TruncateTaggedToWord32(
+ SloppyTNode<Context> context, SloppyTNode<Object> value) {
+ TVARIABLE(Word32T, var_result);
Label done(this);
TaggedToWord32OrBigIntImpl<Object::Conversion::kToNumber>(context, value,
&done, &var_result);
@@ -5546,38 +5418,33 @@ Node* CodeStubAssembler::TruncateTaggedToWord32(Node* context, Node* value) {
// Truncate {value} to word32 and jump to {if_number} if it is a Number,
// or find that it is a BigInt and jump to {if_bigint}.
-void CodeStubAssembler::TaggedToWord32OrBigInt(Node* context, Node* value,
- Label* if_number,
- Variable* var_word32,
- Label* if_bigint,
- Variable* var_bigint) {
+void CodeStubAssembler::TaggedToWord32OrBigInt(
+ TNode<Context> context, TNode<Object> value, Label* if_number,
+ TVariable<Word32T>* var_word32, Label* if_bigint,
+ TVariable<Object>* var_maybe_bigint) {
TaggedToWord32OrBigIntImpl<Object::Conversion::kToNumeric>(
- context, value, if_number, var_word32, if_bigint, var_bigint);
+ context, value, if_number, var_word32, if_bigint, var_maybe_bigint);
}
// Truncate {value} to word32 and jump to {if_number} if it is a Number,
// or find that it is a BigInt and jump to {if_bigint}. In either case,
// store the type feedback in {var_feedback}.
void CodeStubAssembler::TaggedToWord32OrBigIntWithFeedback(
- Node* context, Node* value, Label* if_number, Variable* var_word32,
- Label* if_bigint, Variable* var_bigint, Variable* var_feedback) {
+ TNode<Context> context, TNode<Object> value, Label* if_number,
+ TVariable<Word32T>* var_word32, Label* if_bigint,
+ TVariable<Object>* var_maybe_bigint, TVariable<Smi>* var_feedback) {
TaggedToWord32OrBigIntImpl<Object::Conversion::kToNumeric>(
- context, value, if_number, var_word32, if_bigint, var_bigint,
+ context, value, if_number, var_word32, if_bigint, var_maybe_bigint,
var_feedback);
}
template <Object::Conversion conversion>
void CodeStubAssembler::TaggedToWord32OrBigIntImpl(
- Node* context, Node* value, Label* if_number, Variable* var_word32,
- Label* if_bigint, Variable* var_bigint, Variable* var_feedback) {
- DCHECK(var_word32->rep() == MachineRepresentation::kWord32);
- DCHECK(var_bigint == nullptr ||
- var_bigint->rep() == MachineRepresentation::kTagged);
- DCHECK(var_feedback == nullptr ||
- var_feedback->rep() == MachineRepresentation::kTaggedSigned);
-
+ TNode<Context> context, TNode<Object> value, Label* if_number,
+ TVariable<Word32T>* var_word32, Label* if_bigint,
+ TVariable<Object>* var_maybe_bigint, TVariable<Smi>* var_feedback) {
// We might need to loop after conversion.
- VARIABLE(var_value, MachineRepresentation::kTagged, value);
+ TVARIABLE(Object, var_value, value);
OverwriteFeedback(var_feedback, BinaryOperationFeedback::kNone);
Variable* loop_vars[] = {&var_value, var_feedback};
int num_vars =
@@ -5592,12 +5459,13 @@ void CodeStubAssembler::TaggedToWord32OrBigIntImpl(
GotoIf(TaggedIsNotSmi(value), &not_smi);
// {value} is a Smi.
- var_word32->Bind(SmiToInt32(value));
+ *var_word32 = SmiToInt32(CAST(value));
CombineFeedback(var_feedback, BinaryOperationFeedback::kSignedSmall);
Goto(if_number);
BIND(&not_smi);
- TNode<Map> map = LoadMap(value);
+ TNode<HeapObject> value_heap_object = CAST(value);
+ TNode<Map> map = LoadMap(value_heap_object);
GotoIf(IsHeapNumberMap(map), &is_heap_number);
TNode<Uint16T> instance_type = LoadMapInstanceType(map);
if (conversion == Object::Conversion::kToNumeric) {
@@ -5610,7 +5478,7 @@ void CodeStubAssembler::TaggedToWord32OrBigIntImpl(
// We do not require an Or with earlier feedback here because once we
// convert the value to a Numeric, we cannot reach this path. We can
// only reach this path on the first pass when the feedback is kNone.
- CSA_ASSERT(this, SmiEqual(CAST(var_feedback->value()),
+ CSA_ASSERT(this, SmiEqual(var_feedback->value(),
SmiConstant(BinaryOperationFeedback::kNone)));
}
GotoIf(InstanceTypeEqual(instance_type, ODDBALL_TYPE), &is_oddball);
@@ -5618,25 +5486,25 @@ void CodeStubAssembler::TaggedToWord32OrBigIntImpl(
auto builtin = conversion == Object::Conversion::kToNumeric
? Builtins::kNonNumberToNumeric
: Builtins::kNonNumberToNumber;
- var_value.Bind(CallBuiltin(builtin, context, value));
+ var_value = CallBuiltin(builtin, context, value);
OverwriteFeedback(var_feedback, BinaryOperationFeedback::kAny);
Goto(&loop);
BIND(&is_oddball);
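+    // Oddballs (undefined, null, true, false) cache their ToNumber result
+    // in a dedicated field, so this conversion is a single load rather
+    // than a builtin call, and the next loop iteration is guaranteed to
+    // hit the Smi or HeapNumber path.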
- var_value.Bind(LoadObjectField(value, Oddball::kToNumberOffset));
+ var_value = LoadObjectField(value_heap_object, Oddball::kToNumberOffset);
OverwriteFeedback(var_feedback,
BinaryOperationFeedback::kNumberOrOddball);
Goto(&loop);
}
BIND(&is_heap_number);
- var_word32->Bind(TruncateHeapNumberValueToWord32(CAST(value)));
+ *var_word32 = TruncateHeapNumberValueToWord32(CAST(value));
CombineFeedback(var_feedback, BinaryOperationFeedback::kNumber);
Goto(if_number);
if (conversion == Object::Conversion::kToNumeric) {
BIND(&is_bigint);
- var_bigint->Bind(value);
+ *var_maybe_bigint = value;
CombineFeedback(var_feedback, BinaryOperationFeedback::kBigInt);
Goto(if_bigint);
}
@@ -5650,14 +5518,14 @@ TNode<Int32T> CodeStubAssembler::TruncateHeapNumberValueToWord32(
}
void CodeStubAssembler::TryHeapNumberToSmi(TNode<HeapNumber> number,
- TVariable<Smi>& var_result_smi,
+ TVariable<Smi>* var_result_smi,
Label* if_smi) {
TNode<Float64T> value = LoadHeapNumberValue(number);
TryFloat64ToSmi(value, var_result_smi, if_smi);
}
void CodeStubAssembler::TryFloat64ToSmi(TNode<Float64T> value,
- TVariable<Smi>& var_result_smi,
+ TVariable<Smi>* var_result_smi,
Label* if_smi) {
TNode<Int32T> value32 = RoundFloat64ToInt32(value);
TNode<Float64T> value64 = ChangeInt32ToFloat64(value32);
@@ -5674,13 +5542,13 @@ void CodeStubAssembler::TryFloat64ToSmi(TNode<Float64T> value,
BIND(&if_int32);
{
if (SmiValuesAre32Bits()) {
- var_result_smi = SmiTag(ChangeInt32ToIntPtr(value32));
+ *var_result_smi = SmiTag(ChangeInt32ToIntPtr(value32));
} else {
DCHECK(SmiValuesAre31Bits());
TNode<PairT<Int32T, BoolT>> pair = Int32AddWithOverflow(value32, value32);
TNode<BoolT> overflow = Projection<1>(pair);
GotoIf(overflow, &if_heap_number);
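+      // The self-add doubles as the Smi tagging shift: value32 + value32
+      // == value32 << 1 (tag bit 0 in the low bit), and the add's overflow
+      // flag is exactly the "does not fit in 31 bits" test. A plain C++
+      // sketch of the same idea (GCC/Clang builtin):
+      //   int32_t tagged;
+      //   if (__builtin_add_overflow(value32, value32, &tagged))
+      //     goto if_heap_number;  // value needs a HeapNumber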
- var_result_smi =
+ *var_result_smi =
BitcastWordToTaggedSigned(ChangeInt32ToIntPtr(Projection<0>(pair)));
}
Goto(if_smi);
@@ -5693,7 +5561,7 @@ TNode<Number> CodeStubAssembler::ChangeFloat64ToTagged(
Label if_smi(this), done(this);
TVARIABLE(Smi, var_smi_result);
TVARIABLE(Number, var_result);
- TryFloat64ToSmi(value, var_smi_result, &if_smi);
+ TryFloat64ToSmi(value, &var_smi_result, &if_smi);
var_result = AllocateHeapNumberWithValue(value);
Goto(&done);
@@ -6144,42 +6012,42 @@ TNode<BoolT> CodeStubAssembler::IsUndetectableMap(SloppyTNode<Map> map) {
}
TNode<BoolT> CodeStubAssembler::IsNoElementsProtectorCellInvalid() {
- TNode<Smi> invalid = SmiConstant(Isolate::kProtectorInvalid);
+ TNode<Smi> invalid = SmiConstant(Protectors::kProtectorInvalid);
TNode<PropertyCell> cell = NoElementsProtectorConstant();
TNode<Object> cell_value = LoadObjectField(cell, PropertyCell::kValueOffset);
return TaggedEqual(cell_value, invalid);
}
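+// Protector pattern shared by these predicates: each protector is a cell
+// whose value starts out as Protectors::kProtectorValid and is flipped to
+// kProtectorInvalid once user code breaks the guarded invariant (e.g.
+// installing elements on Array.prototype for the no-elements protector);
+// the fast-path check is then a single tagged compare against the invalid
+// sentinel.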
TNode<BoolT> CodeStubAssembler::IsArrayIteratorProtectorCellInvalid() {
- TNode<Smi> invalid = SmiConstant(Isolate::kProtectorInvalid);
+ TNode<Smi> invalid = SmiConstant(Protectors::kProtectorInvalid);
TNode<PropertyCell> cell = ArrayIteratorProtectorConstant();
TNode<Object> cell_value = LoadObjectField(cell, PropertyCell::kValueOffset);
return TaggedEqual(cell_value, invalid);
}
TNode<BoolT> CodeStubAssembler::IsPromiseResolveProtectorCellInvalid() {
- TNode<Smi> invalid = SmiConstant(Isolate::kProtectorInvalid);
- TNode<Cell> cell = PromiseResolveProtectorConstant();
- TNode<Object> cell_value = LoadObjectField(cell, Cell::kValueOffset);
+ TNode<Smi> invalid = SmiConstant(Protectors::kProtectorInvalid);
+ TNode<PropertyCell> cell = PromiseResolveProtectorConstant();
+ TNode<Object> cell_value = LoadObjectField(cell, PropertyCell::kValueOffset);
return TaggedEqual(cell_value, invalid);
}
TNode<BoolT> CodeStubAssembler::IsPromiseThenProtectorCellInvalid() {
- TNode<Smi> invalid = SmiConstant(Isolate::kProtectorInvalid);
+ TNode<Smi> invalid = SmiConstant(Protectors::kProtectorInvalid);
TNode<PropertyCell> cell = PromiseThenProtectorConstant();
TNode<Object> cell_value = LoadObjectField(cell, PropertyCell::kValueOffset);
return TaggedEqual(cell_value, invalid);
}
TNode<BoolT> CodeStubAssembler::IsArraySpeciesProtectorCellInvalid() {
- TNode<Smi> invalid = SmiConstant(Isolate::kProtectorInvalid);
+ TNode<Smi> invalid = SmiConstant(Protectors::kProtectorInvalid);
TNode<PropertyCell> cell = ArraySpeciesProtectorConstant();
TNode<Object> cell_value = LoadObjectField(cell, PropertyCell::kValueOffset);
return TaggedEqual(cell_value, invalid);
}
TNode<BoolT> CodeStubAssembler::IsTypedArraySpeciesProtectorCellInvalid() {
- TNode<Smi> invalid = SmiConstant(Isolate::kProtectorInvalid);
+ TNode<Smi> invalid = SmiConstant(Protectors::kProtectorInvalid);
TNode<PropertyCell> cell = TypedArraySpeciesProtectorConstant();
TNode<Object> cell_value = LoadObjectField(cell, PropertyCell::kValueOffset);
return TaggedEqual(cell_value, invalid);
@@ -6190,12 +6058,12 @@ TNode<BoolT> CodeStubAssembler::IsRegExpSpeciesProtectorCellInvalid(
TNode<PropertyCell> cell = CAST(LoadContextElement(
native_context, Context::REGEXP_SPECIES_PROTECTOR_INDEX));
TNode<Object> cell_value = LoadObjectField(cell, PropertyCell::kValueOffset);
- TNode<Smi> invalid = SmiConstant(Isolate::kProtectorInvalid);
+ TNode<Smi> invalid = SmiConstant(Protectors::kProtectorInvalid);
return TaggedEqual(cell_value, invalid);
}
TNode<BoolT> CodeStubAssembler::IsPromiseSpeciesProtectorCellInvalid() {
- TNode<Smi> invalid = SmiConstant(Isolate::kProtectorInvalid);
+ TNode<Smi> invalid = SmiConstant(Protectors::kProtectorInvalid);
TNode<PropertyCell> cell = PromiseSpeciesProtectorConstant();
TNode<Object> cell_value = LoadObjectField(cell, PropertyCell::kValueOffset);
return TaggedEqual(cell_value, invalid);
@@ -6394,6 +6262,10 @@ TNode<BoolT> CodeStubAssembler::IsJSGlobalProxy(
return IsJSGlobalProxyMap(LoadMap(object));
}
+TNode<BoolT> CodeStubAssembler::IsJSGeneratorMap(TNode<Map> map) {
+ return InstanceTypeEqual(LoadMapInstanceType(map), JS_GENERATOR_OBJECT_TYPE);
+}
+
TNode<BoolT> CodeStubAssembler::IsJSObjectInstanceType(
SloppyTNode<Int32T> instance_type) {
STATIC_ASSERT(LAST_JS_OBJECT_TYPE == LAST_TYPE);
@@ -6428,6 +6300,11 @@ TNode<BoolT> CodeStubAssembler::IsJSStringIterator(
return HasInstanceType(object, JS_STRING_ITERATOR_TYPE);
}
+TNode<BoolT> CodeStubAssembler::IsJSRegExpStringIterator(
+ SloppyTNode<HeapObject> object) {
+ return HasInstanceType(object, JS_REG_EXP_STRING_ITERATOR_TYPE);
+}
+
TNode<BoolT> CodeStubAssembler::IsMap(SloppyTNode<HeapObject> map) {
return IsMetaMap(LoadMap(map));
}
@@ -6656,7 +6533,7 @@ TNode<BoolT> CodeStubAssembler::IsBigInt(SloppyTNode<HeapObject> object) {
TNode<BoolT> CodeStubAssembler::IsPrimitiveInstanceType(
SloppyTNode<Int32T> instance_type) {
return Int32LessThanOrEqual(instance_type,
- Int32Constant(LAST_PRIMITIVE_TYPE));
+ Int32Constant(LAST_PRIMITIVE_HEAP_OBJECT_TYPE));
}
TNode<BoolT> CodeStubAssembler::IsPrivateSymbol(
@@ -6716,8 +6593,7 @@ TNode<BoolT> CodeStubAssembler::IsNumberDictionary(
return HasInstanceType(object, NUMBER_DICTIONARY_TYPE);
}
-TNode<BoolT> CodeStubAssembler::IsJSGeneratorObject(
- SloppyTNode<HeapObject> object) {
+TNode<BoolT> CodeStubAssembler::IsJSGeneratorObject(TNode<HeapObject> object) {
return HasInstanceType(object, JS_GENERATOR_OBJECT_TYPE);
}
@@ -6762,7 +6638,7 @@ TNode<BoolT> CodeStubAssembler::IsJSDataView(TNode<HeapObject> object) {
}
TNode<BoolT> CodeStubAssembler::IsJSRegExp(SloppyTNode<HeapObject> object) {
- return HasInstanceType(object, JS_REGEXP_TYPE);
+ return HasInstanceType(object, JS_REG_EXP_TYPE);
}
TNode<BoolT> CodeStubAssembler::IsNumber(SloppyTNode<Object> object) {
@@ -7011,189 +6887,6 @@ TNode<String> CodeStubAssembler::StringFromSingleCharCode(TNode<Int32T> code) {
return CAST(var_result.value());
}
-// A wrapper around CopyStringCharacters which determines the correct string
-// encoding, allocates a corresponding sequential string, and then copies the
-// given character range using CopyStringCharacters.
-// |from_string| must be a sequential string.
-// 0 <= |from_index| <= |from_index| + |character_count| < from_string.length.
-TNode<String> CodeStubAssembler::AllocAndCopyStringCharacters(
- Node* from, Node* from_instance_type, TNode<IntPtrT> from_index,
- TNode<IntPtrT> character_count) {
- Label end(this), one_byte_sequential(this), two_byte_sequential(this);
- TVARIABLE(String, var_result);
-
- Branch(IsOneByteStringInstanceType(from_instance_type), &one_byte_sequential,
- &two_byte_sequential);
-
- // The subject string is a sequential one-byte string.
- BIND(&one_byte_sequential);
- {
- TNode<String> result = AllocateSeqOneByteString(
- Unsigned(TruncateIntPtrToInt32(character_count)));
- CopyStringCharacters(from, result, from_index, IntPtrConstant(0),
- character_count, String::ONE_BYTE_ENCODING,
- String::ONE_BYTE_ENCODING);
- var_result = result;
- Goto(&end);
- }
-
- // The subject string is a sequential two-byte string.
- BIND(&two_byte_sequential);
- {
- TNode<String> result = AllocateSeqTwoByteString(
- Unsigned(TruncateIntPtrToInt32(character_count)));
- CopyStringCharacters(from, result, from_index, IntPtrConstant(0),
- character_count, String::TWO_BYTE_ENCODING,
- String::TWO_BYTE_ENCODING);
- var_result = result;
- Goto(&end);
- }
-
- BIND(&end);
- return var_result.value();
-}
-
-TNode<String> CodeStubAssembler::SubString(TNode<String> string,
- TNode<IntPtrT> from,
- TNode<IntPtrT> to) {
- TVARIABLE(String, var_result);
- ToDirectStringAssembler to_direct(state(), string);
- Label end(this), runtime(this);
-
- TNode<IntPtrT> const substr_length = IntPtrSub(to, from);
- TNode<IntPtrT> const string_length = LoadStringLengthAsWord(string);
-
- // Begin dispatching based on substring length.
-
- Label original_string_or_invalid_length(this);
- GotoIf(UintPtrGreaterThanOrEqual(substr_length, string_length),
- &original_string_or_invalid_length);
-
- // A real substring (substr_length < string_length).
- Label empty(this);
- GotoIf(IntPtrEqual(substr_length, IntPtrConstant(0)), &empty);
-
- Label single_char(this);
- GotoIf(IntPtrEqual(substr_length, IntPtrConstant(1)), &single_char);
-
- // Deal with different string types: update the index if necessary
- // and extract the underlying string.
-
- TNode<String> direct_string = to_direct.TryToDirect(&runtime);
- TNode<IntPtrT> offset = IntPtrAdd(from, to_direct.offset());
- TNode<Int32T> const instance_type = to_direct.instance_type();
-
- // The subject string can only be an external or sequential string of
- // either encoding at this point.
- Label external_string(this);
- {
- if (FLAG_string_slices) {
- Label next(this);
-
- // Short slice. Copy instead of slicing.
- GotoIf(IntPtrLessThan(substr_length,
- IntPtrConstant(SlicedString::kMinLength)),
- &next);
-
- // Allocate new sliced string.
-
- Counters* counters = isolate()->counters();
- IncrementCounter(counters->sub_string_native(), 1);
-
- Label one_byte_slice(this), two_byte_slice(this);
- Branch(IsOneByteStringInstanceType(to_direct.instance_type()),
- &one_byte_slice, &two_byte_slice);
-
- BIND(&one_byte_slice);
- {
- var_result = AllocateSlicedOneByteString(
- Unsigned(TruncateIntPtrToInt32(substr_length)), direct_string,
- SmiTag(offset));
- Goto(&end);
- }
-
- BIND(&two_byte_slice);
- {
- var_result = AllocateSlicedTwoByteString(
- Unsigned(TruncateIntPtrToInt32(substr_length)), direct_string,
- SmiTag(offset));
- Goto(&end);
- }
-
- BIND(&next);
- }
-
- // The subject string can only be an external or sequential string of
- // either encoding at this point.
- GotoIf(to_direct.is_external(), &external_string);
-
- var_result = AllocAndCopyStringCharacters(direct_string, instance_type,
- offset, substr_length);
-
- Counters* counters = isolate()->counters();
- IncrementCounter(counters->sub_string_native(), 1);
-
- Goto(&end);
- }
-
- // Handle external string.
- BIND(&external_string);
- {
- TNode<RawPtrT> const fake_sequential_string =
- to_direct.PointerToString(&runtime);
-
- var_result = AllocAndCopyStringCharacters(
- fake_sequential_string, instance_type, offset, substr_length);
-
- Counters* counters = isolate()->counters();
- IncrementCounter(counters->sub_string_native(), 1);
-
- Goto(&end);
- }
-
- BIND(&empty);
- {
- var_result = EmptyStringConstant();
- Goto(&end);
- }
-
- // Substrings of length 1 are generated through CharCodeAt and FromCharCode.
- BIND(&single_char);
- {
- TNode<Int32T> char_code = StringCharCodeAt(string, from);
- var_result = StringFromSingleCharCode(char_code);
- Goto(&end);
- }
-
- BIND(&original_string_or_invalid_length);
- {
- CSA_ASSERT(this, IntPtrEqual(substr_length, string_length));
-
- // Equal length - check if {from, to} == {0, str.length}.
- GotoIf(UintPtrGreaterThan(from, IntPtrConstant(0)), &runtime);
-
- // Return the original string (substr_length == string_length).
-
- Counters* counters = isolate()->counters();
- IncrementCounter(counters->sub_string_native(), 1);
-
- var_result = string;
- Goto(&end);
- }
-
- // Fall back to a runtime call.
- BIND(&runtime);
- {
- var_result =
- CAST(CallRuntime(Runtime::kStringSubstring, NoContextConstant(), string,
- SmiTag(from), SmiTag(to)));
- Goto(&end);
- }
-
- BIND(&end);
- return var_result.value();
-}
-
ToDirectStringAssembler::ToDirectStringAssembler(
compiler::CodeAssemblerState* state, TNode<String> string, Flags flags)
: CodeStubAssembler(state),
@@ -7204,8 +6897,7 @@ ToDirectStringAssembler::ToDirectStringAssembler(
flags_(flags) {}
TNode<String> ToDirectStringAssembler::TryToDirect(Label* if_bailout) {
- VariableList vars({&var_string_, &var_offset_, &var_instance_type_}, zone());
- Label dispatch(this, vars);
+ Label dispatch(this, {&var_string_, &var_offset_, &var_instance_type_});
Label if_iscons(this);
Label if_isexternal(this);
Label if_issliced(this);
@@ -7333,232 +7025,6 @@ TNode<RawPtrT> ToDirectStringAssembler::TryToSequential(
return var_result.value();
}
-void CodeStubAssembler::BranchIfCanDerefIndirectString(
- TNode<String> string, TNode<Int32T> instance_type, Label* can_deref,
- Label* cannot_deref) {
- TNode<Int32T> representation =
- Word32And(instance_type, Int32Constant(kStringRepresentationMask));
- GotoIf(Word32Equal(representation, Int32Constant(kThinStringTag)), can_deref);
- GotoIf(Word32NotEqual(representation, Int32Constant(kConsStringTag)),
- cannot_deref);
- // Cons string.
- TNode<String> rhs =
- LoadObjectField<String>(string, ConsString::kSecondOffset);
- GotoIf(IsEmptyString(rhs), can_deref);
- Goto(cannot_deref);
-}
-
-TNode<String> CodeStubAssembler::DerefIndirectString(
- TNode<String> string, TNode<Int32T> instance_type, Label* cannot_deref) {
- Label deref(this);
- BranchIfCanDerefIndirectString(string, instance_type, &deref, cannot_deref);
- BIND(&deref);
- STATIC_ASSERT(static_cast<int>(ThinString::kActualOffset) ==
- static_cast<int>(ConsString::kFirstOffset));
- return LoadObjectField<String>(string, ThinString::kActualOffset);
-}
-
-void CodeStubAssembler::DerefIndirectString(TVariable<String>* var_string,
- TNode<Int32T> instance_type) {
-#ifdef DEBUG
- Label can_deref(this), cannot_deref(this);
- BranchIfCanDerefIndirectString(var_string->value(), instance_type, &can_deref,
- &cannot_deref);
- BIND(&cannot_deref);
- DebugBreak(); // Should be able to dereference string.
- Goto(&can_deref);
- BIND(&can_deref);
-#endif // DEBUG
-
- STATIC_ASSERT(static_cast<int>(ThinString::kActualOffset) ==
- static_cast<int>(ConsString::kFirstOffset));
- *var_string =
- LoadObjectField<String>(var_string->value(), ThinString::kActualOffset);
-}
-
-void CodeStubAssembler::MaybeDerefIndirectString(TVariable<String>* var_string,
- TNode<Int32T> instance_type,
- Label* did_deref,
- Label* cannot_deref) {
- Label deref(this);
- BranchIfCanDerefIndirectString(var_string->value(), instance_type, &deref,
- cannot_deref);
-
- BIND(&deref);
- {
- DerefIndirectString(var_string, instance_type);
- Goto(did_deref);
- }
-}
-
-void CodeStubAssembler::MaybeDerefIndirectStrings(
- TVariable<String>* var_left, TNode<Int32T> left_instance_type,
- TVariable<String>* var_right, TNode<Int32T> right_instance_type,
- Label* did_something) {
- Label did_nothing_left(this), did_something_left(this),
- didnt_do_anything(this);
- MaybeDerefIndirectString(var_left, left_instance_type, &did_something_left,
- &did_nothing_left);
-
- BIND(&did_something_left);
- {
- MaybeDerefIndirectString(var_right, right_instance_type, did_something,
- did_something);
- }
-
- BIND(&did_nothing_left);
- {
- MaybeDerefIndirectString(var_right, right_instance_type, did_something,
- &didnt_do_anything);
- }
-
- BIND(&didnt_do_anything);
- // Fall through if neither string was an indirect string.
-}
-
-TNode<String> CodeStubAssembler::StringAdd(Node* context, TNode<String> left,
- TNode<String> right) {
- TVARIABLE(String, result);
- Label check_right(this), runtime(this, Label::kDeferred), cons(this),
- done(this, &result), done_native(this, &result);
- Counters* counters = isolate()->counters();
-
- TNode<Uint32T> left_length = LoadStringLengthAsWord32(left);
- GotoIfNot(Word32Equal(left_length, Uint32Constant(0)), &check_right);
- result = right;
- Goto(&done_native);
-
- BIND(&check_right);
- TNode<Uint32T> right_length = LoadStringLengthAsWord32(right);
- GotoIfNot(Word32Equal(right_length, Uint32Constant(0)), &cons);
- result = left;
- Goto(&done_native);
-
- BIND(&cons);
- {
- TNode<Uint32T> new_length = Uint32Add(left_length, right_length);
-
- // If the new length is greater than String::kMaxLength, go to the runtime
- // to throw. Note: we also need to invalidate the string length protector,
- // so we can't just throw here directly.
- GotoIf(Uint32GreaterThan(new_length, Uint32Constant(String::kMaxLength)),
- &runtime);
-
- TVARIABLE(String, var_left, left);
- TVARIABLE(String, var_right, right);
- Variable* input_vars[2] = {&var_left, &var_right};
- Label non_cons(this, 2, input_vars);
- Label slow(this, Label::kDeferred);
- GotoIf(Uint32LessThan(new_length, Uint32Constant(ConsString::kMinLength)),
- &non_cons);
-
- result =
- AllocateConsString(new_length, var_left.value(), var_right.value());
- Goto(&done_native);
-
- BIND(&non_cons);
-
- Comment("Full string concatenate");
- TNode<Int32T> left_instance_type = LoadInstanceType(var_left.value());
- TNode<Int32T> right_instance_type = LoadInstanceType(var_right.value());
- // Compute intersection and difference of instance types.
-
- TNode<Int32T> ored_instance_types =
- Word32Or(left_instance_type, right_instance_type);
- TNode<Word32T> xored_instance_types =
- Word32Xor(left_instance_type, right_instance_type);
-
- // Check if both strings have the same encoding and both are sequential.
- GotoIf(IsSetWord32(xored_instance_types, kStringEncodingMask), &runtime);
- GotoIf(IsSetWord32(ored_instance_types, kStringRepresentationMask), &slow);
-
- TNode<IntPtrT> word_left_length = Signed(ChangeUint32ToWord(left_length));
- TNode<IntPtrT> word_right_length = Signed(ChangeUint32ToWord(right_length));
-
- Label two_byte(this);
- GotoIf(Word32Equal(Word32And(ored_instance_types,
- Int32Constant(kStringEncodingMask)),
- Int32Constant(kTwoByteStringTag)),
- &two_byte);
- // One-byte sequential string case
- result = AllocateSeqOneByteString(new_length);
- CopyStringCharacters(var_left.value(), result.value(), IntPtrConstant(0),
- IntPtrConstant(0), word_left_length,
- String::ONE_BYTE_ENCODING, String::ONE_BYTE_ENCODING);
- CopyStringCharacters(var_right.value(), result.value(), IntPtrConstant(0),
- word_left_length, word_right_length,
- String::ONE_BYTE_ENCODING, String::ONE_BYTE_ENCODING);
- Goto(&done_native);
-
- BIND(&two_byte);
- {
- // Two-byte sequential string case
- result = AllocateSeqTwoByteString(new_length);
- CopyStringCharacters(var_left.value(), result.value(), IntPtrConstant(0),
- IntPtrConstant(0), word_left_length,
- String::TWO_BYTE_ENCODING,
- String::TWO_BYTE_ENCODING);
- CopyStringCharacters(var_right.value(), result.value(), IntPtrConstant(0),
- word_left_length, word_right_length,
- String::TWO_BYTE_ENCODING,
- String::TWO_BYTE_ENCODING);
- Goto(&done_native);
- }
-
- BIND(&slow);
- {
- // Try to unwrap indirect strings, restart the above attempt on success.
- MaybeDerefIndirectStrings(&var_left, left_instance_type, &var_right,
- right_instance_type, &non_cons);
- Goto(&runtime);
- }
- }
- BIND(&runtime);
- {
- result = CAST(CallRuntime(Runtime::kStringAdd, context, left, right));
- Goto(&done);
- }
-
- BIND(&done_native);
- {
- IncrementCounter(counters->string_add_native(), 1);
- Goto(&done);
- }
-
- BIND(&done);
- return result.value();
-}
-
-TNode<String> CodeStubAssembler::StringFromSingleUTF16EncodedCodePoint(
- TNode<Int32T> codepoint) {
- VARIABLE(var_result, MachineRepresentation::kTagged, EmptyStringConstant());
-
- Label if_isword16(this), if_isword32(this), return_result(this);
-
- Branch(Uint32LessThan(codepoint, Int32Constant(0x10000)), &if_isword16,
- &if_isword32);
-
- BIND(&if_isword16);
- {
- var_result.Bind(StringFromSingleCharCode(codepoint));
- Goto(&return_result);
- }
-
- BIND(&if_isword32);
- {
- TNode<String> value = AllocateSeqTwoByteString(2);
- StoreNoWriteBarrier(
- MachineRepresentation::kWord32, value,
- IntPtrConstant(SeqTwoByteString::kHeaderSize - kHeapObjectTag),
- codepoint);
- var_result.Bind(value);
- Goto(&return_result);
- }
-
- BIND(&return_result);
- return CAST(var_result.value());
-}
-
TNode<Number> CodeStubAssembler::StringToNumber(TNode<String> input) {
Label runtime(this, Label::kDeferred);
Label end(this);
@@ -7585,22 +7051,22 @@ TNode<Number> CodeStubAssembler::StringToNumber(TNode<String> input) {
return var_result.value();
}
-TNode<String> CodeStubAssembler::NumberToString(TNode<Number> input) {
+TNode<String> CodeStubAssembler::NumberToString(TNode<Number> input,
+ Label* bailout) {
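+  // Cache-only fast path: on any number-string-cache miss this jumps to
+  // |bailout|, leaving the caller to decide how to materialize the string.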
TVARIABLE(String, result);
TVARIABLE(Smi, smi_input);
- Label runtime(this, Label::kDeferred), if_smi(this), if_heap_number(this),
- done(this, &result);
+ Label if_smi(this), if_heap_number(this), done(this, &result);
// Load the number string cache.
TNode<FixedArray> number_string_cache = NumberStringCacheConstant();
// Make the hash mask from the length of the number string cache. It
// contains two elements (number and string) for each cache entry.
- // TODO(ishell): cleanup mask handling.
- TNode<IntPtrT> mask =
- BitcastTaggedSignedToWord(LoadFixedArrayBaseLength(number_string_cache));
- TNode<IntPtrT> one = IntPtrConstant(1);
- mask = IntPtrSub(mask, one);
+ TNode<IntPtrT> number_string_cache_length =
+ LoadAndUntagFixedArrayBaseLength(number_string_cache);
+ TNode<Int32T> one = Int32Constant(1);
+ TNode<Word32T> mask = Int32Sub(
+ Word32Shr(TruncateWordToInt32(number_string_cache_length), one), one);
GotoIfNot(TaggedIsSmi(input), &if_heap_number);
smi_input = CAST(input);
@@ -7611,36 +7077,35 @@ TNode<String> CodeStubAssembler::NumberToString(TNode<Number> input) {
Comment("NumberToString - HeapNumber");
TNode<HeapNumber> heap_number_input = CAST(input);
// Try normalizing the HeapNumber.
- TryHeapNumberToSmi(heap_number_input, smi_input, &if_smi);
+ TryHeapNumberToSmi(heap_number_input, &smi_input, &if_smi);
// Make a hash from the two 32-bit values of the double.
TNode<Int32T> low =
LoadObjectField<Int32T>(heap_number_input, HeapNumber::kValueOffset);
TNode<Int32T> high = LoadObjectField<Int32T>(
heap_number_input, HeapNumber::kValueOffset + kIntSize);
- TNode<Word32T> hash = Word32Xor(low, high);
- TNode<IntPtrT> word_hash = WordShl(ChangeInt32ToIntPtr(hash), one);
- TNode<WordT> index =
- WordAnd(word_hash, WordSar(mask, SmiShiftBitsConstant()));
+ TNode<Word32T> hash = Word32And(Word32Xor(low, high), mask);
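+    // Each cache entry spans two consecutive slots (number, string), so the
+    // key's element index is twice the hash value.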
+ TNode<IntPtrT> entry_index =
+ Signed(ChangeUint32ToWord(Int32Add(hash, hash)));
// Cache entry's key must be a heap number.
TNode<Object> number_key =
- UnsafeLoadFixedArrayElement(number_string_cache, index);
- GotoIf(TaggedIsSmi(number_key), &runtime);
+ UnsafeLoadFixedArrayElement(number_string_cache, entry_index);
+ GotoIf(TaggedIsSmi(number_key), bailout);
TNode<HeapObject> number_key_heap_object = CAST(number_key);
- GotoIfNot(IsHeapNumber(number_key_heap_object), &runtime);
+ GotoIfNot(IsHeapNumber(number_key_heap_object), bailout);
// Cache entry's key must match the heap number value we're looking for.
TNode<Int32T> low_compare = LoadObjectField<Int32T>(
number_key_heap_object, HeapNumber::kValueOffset);
TNode<Int32T> high_compare = LoadObjectField<Int32T>(
number_key_heap_object, HeapNumber::kValueOffset + kIntSize);
- GotoIfNot(Word32Equal(low, low_compare), &runtime);
- GotoIfNot(Word32Equal(high, high_compare), &runtime);
+ GotoIfNot(Word32Equal(low, low_compare), bailout);
+ GotoIfNot(Word32Equal(high, high_compare), bailout);
// Heap number match, return value from cache entry.
- result = CAST(
- UnsafeLoadFixedArrayElement(number_string_cache, index, kTaggedSize));
+ result = CAST(UnsafeLoadFixedArrayElement(number_string_cache, entry_index,
+ kTaggedSize));
Goto(&done);
}
@@ -7648,17 +7113,28 @@ TNode<String> CodeStubAssembler::NumberToString(TNode<Number> input) {
{
Comment("NumberToString - Smi");
// Load the smi key, make sure it matches the smi we're looking for.
- TNode<Object> smi_index = BitcastWordToTagged(WordAnd(
- WordShl(BitcastTaggedSignedToWord(smi_input.value()), one), mask));
+ TNode<Word32T> hash = Word32And(SmiToInt32(smi_input.value()), mask);
+ TNode<IntPtrT> entry_index =
+ Signed(ChangeUint32ToWord(Int32Add(hash, hash)));
TNode<Object> smi_key = UnsafeLoadFixedArrayElement(
- number_string_cache, smi_index, 0, SMI_PARAMETERS);
- GotoIf(TaggedNotEqual(smi_key, smi_input.value()), &runtime);
+ number_string_cache, entry_index, 0, INTPTR_PARAMETERS);
+ GotoIf(TaggedNotEqual(smi_key, smi_input.value()), bailout);
// Smi match, return value from cache entry.
- result = CAST(UnsafeLoadFixedArrayElement(number_string_cache, smi_index,
- kTaggedSize, SMI_PARAMETERS));
+ result = CAST(UnsafeLoadFixedArrayElement(number_string_cache, entry_index,
+ kTaggedSize, INTPTR_PARAMETERS));
Goto(&done);
}
+ BIND(&done);
+ return result.value();
+}
+
+TNode<String> CodeStubAssembler::NumberToString(TNode<Number> input) {
+ TVARIABLE(String, result);
+ Label runtime(this, Label::kDeferred), done(this, &result);
+
+ result = NumberToString(input, &runtime);
+ Goto(&done);
BIND(&runtime);
{
@@ -8290,102 +7766,129 @@ void CodeStubAssembler::DecrementCounter(StatsCounter* counter, int delta) {
}
}
-void CodeStubAssembler::Increment(Variable* variable, int value,
- ParameterMode mode) {
- DCHECK_IMPLIES(mode == INTPTR_PARAMETERS,
- variable->rep() == MachineType::PointerRepresentation());
- DCHECK_IMPLIES(mode == SMI_PARAMETERS, CanBeTaggedSigned(variable->rep()));
- variable->Bind(IntPtrOrSmiAdd(variable->value(),
- IntPtrOrSmiConstant(value, mode), mode));
+template <typename TIndex>
+void CodeStubAssembler::Increment(TVariable<TIndex>* variable, int value) {
+ *variable =
+ IntPtrOrSmiAdd(variable->value(), IntPtrOrSmiConstant<TIndex>(value));
}
+// Instantiate Increment for Smi, IntPtrT and RawPtrT.
+// TODO(v8:9708): Consider renaming to [Smi|IntPtrT|RawPtrT]Increment.
+template void CodeStubAssembler::Increment<Smi>(TVariable<Smi>* variable,
+ int value);
+template void CodeStubAssembler::Increment<IntPtrT>(
+ TVariable<IntPtrT>* variable, int value);
+template void CodeStubAssembler::Increment<RawPtrT>(
+ TVariable<RawPtrT>* variable, int value);
+
void CodeStubAssembler::Use(Label* label) {
GotoIf(Word32Equal(Int32Constant(0), Int32Constant(1)), label);
}
-void CodeStubAssembler::TryToName(Node* key, Label* if_keyisindex,
- Variable* var_index, Label* if_keyisunique,
- Variable* var_unique, Label* if_bailout,
+void CodeStubAssembler::TryToName(SloppyTNode<Object> key, Label* if_keyisindex,
+ TVariable<IntPtrT>* var_index,
+ Label* if_keyisunique,
+ TVariable<Name>* var_unique,
+ Label* if_bailout,
Label* if_notinternalized) {
- DCHECK_EQ(MachineType::PointerRepresentation(), var_index->rep());
- DCHECK_EQ(MachineRepresentation::kTagged, var_unique->rep());
Comment("TryToName");
- Label if_hascachedindex(this), if_keyisnotindex(this), if_thinstring(this),
- if_keyisother(this, Label::kDeferred);
+ Label if_keyisnotindex(this);
// Handle Smi and HeapNumber keys.
- var_index->Bind(TryToIntptr(key, &if_keyisnotindex));
+ *var_index = TryToIntptr(key, &if_keyisnotindex);
Goto(if_keyisindex);
BIND(&if_keyisnotindex);
- TNode<Map> key_map = LoadMap(key);
- var_unique->Bind(key);
- // Symbols are unique.
- GotoIf(IsSymbolMap(key_map), if_keyisunique);
- TNode<Uint16T> key_instance_type = LoadMapInstanceType(key_map);
- // Miss if |key| is not a String.
- STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE);
- GotoIfNot(IsStringInstanceType(key_instance_type), &if_keyisother);
-
- // |key| is a String. Check if it has a cached array index.
- TNode<Uint32T> hash = LoadNameHashField(key);
- GotoIf(IsClearWord32(hash, Name::kDoesNotContainCachedArrayIndexMask),
- &if_hascachedindex);
- // No cached array index. If the string knows that it contains an index,
- // then it must be an uncacheable index. Handle this case in the runtime.
- GotoIf(IsClearWord32(hash, Name::kIsNotArrayIndexMask), if_bailout);
- // Check if we have a ThinString.
- GotoIf(InstanceTypeEqual(key_instance_type, THIN_STRING_TYPE),
- &if_thinstring);
- GotoIf(InstanceTypeEqual(key_instance_type, THIN_ONE_BYTE_STRING_TYPE),
- &if_thinstring);
- // Finally, check if |key| is internalized.
- STATIC_ASSERT(kNotInternalizedTag != 0);
- GotoIf(IsSetWord32(key_instance_type, kIsNotInternalizedMask),
- if_notinternalized != nullptr ? if_notinternalized : if_bailout);
- Goto(if_keyisunique);
+ {
+ Label if_symbol(this), if_string(this),
+ if_keyisother(this, Label::kDeferred);
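+    // All Smi keys were already routed to |if_keyisindex| above, so |key|
+    // must be a HeapObject here.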
+ TNode<HeapObject> key_heap_object = CAST(key);
+ TNode<Map> key_map = LoadMap(key_heap_object);
- BIND(&if_thinstring);
- var_unique->Bind(
- LoadObjectField<String>(CAST(key), ThinString::kActualOffset));
- Goto(if_keyisunique);
+ GotoIf(IsSymbolMap(key_map), &if_symbol);
- BIND(&if_hascachedindex);
- var_index->Bind(DecodeWordFromWord32<Name::ArrayIndexValueBits>(hash));
- Goto(if_keyisindex);
+ // Miss if |key| is not a String.
+ STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE);
+ TNode<Uint16T> key_instance_type = LoadMapInstanceType(key_map);
+ Branch(IsStringInstanceType(key_instance_type), &if_string, &if_keyisother);
+
+ // Symbols are unique.
+ BIND(&if_symbol);
+ {
+ *var_unique = CAST(key);
+ Goto(if_keyisunique);
+ }
- BIND(&if_keyisother);
- GotoIfNot(InstanceTypeEqual(key_instance_type, ODDBALL_TYPE), if_bailout);
- var_unique->Bind(LoadObjectField(key, Oddball::kToStringOffset));
- Goto(if_keyisunique);
+ BIND(&if_string);
+ {
+ Label if_hascachedindex(this), if_thinstring(this);
+
+ // |key| is a String. Check if it has a cached array index.
+ TNode<String> key_string = CAST(key);
+ TNode<Uint32T> hash = LoadNameHashField(key_string);
+ GotoIf(IsClearWord32(hash, Name::kDoesNotContainCachedArrayIndexMask),
+ &if_hascachedindex);
+ // No cached array index. If the string knows that it contains an index,
+ // then it must be an uncacheable index. Handle this case in the runtime.
+ GotoIf(IsClearWord32(hash, Name::kIsNotArrayIndexMask), if_bailout);
+ // Check if we have a ThinString.
+ GotoIf(InstanceTypeEqual(key_instance_type, THIN_STRING_TYPE),
+ &if_thinstring);
+ GotoIf(InstanceTypeEqual(key_instance_type, THIN_ONE_BYTE_STRING_TYPE),
+ &if_thinstring);
+ // Finally, check if |key| is internalized.
+ STATIC_ASSERT(kNotInternalizedTag != 0);
+ GotoIf(IsSetWord32(key_instance_type, kIsNotInternalizedMask),
+ if_notinternalized != nullptr ? if_notinternalized : if_bailout);
+
+ *var_unique = key_string;
+ Goto(if_keyisunique);
+
+ BIND(&if_thinstring);
+ *var_unique =
+ LoadObjectField<String>(key_string, ThinString::kActualOffset);
+ Goto(if_keyisunique);
+
+ BIND(&if_hascachedindex);
+ *var_index =
+ Signed(DecodeWordFromWord32<Name::ArrayIndexValueBits>(hash));
+ Goto(if_keyisindex);
+ }
+
+ BIND(&if_keyisother);
+ {
+ GotoIfNot(InstanceTypeEqual(key_instance_type, ODDBALL_TYPE), if_bailout);
+ *var_unique =
+ LoadObjectField<String>(key_heap_object, Oddball::kToStringOffset);
+ Goto(if_keyisunique);
+ }
+ }
}
void CodeStubAssembler::TryInternalizeString(
- Node* string, Label* if_index, Variable* var_index, Label* if_internalized,
- Variable* var_internalized, Label* if_not_internalized, Label* if_bailout) {
- DCHECK(var_index->rep() == MachineType::PointerRepresentation());
- DCHECK_EQ(var_internalized->rep(), MachineRepresentation::kTagged);
- CSA_SLOW_ASSERT(this, IsString(string));
+ SloppyTNode<String> string, Label* if_index, TVariable<IntPtrT>* var_index,
+ Label* if_internalized, TVariable<Name>* var_internalized,
+ Label* if_not_internalized, Label* if_bailout) {
TNode<ExternalReference> function =
ExternalConstant(ExternalReference::try_internalize_string_function());
TNode<ExternalReference> const isolate_ptr =
ExternalConstant(ExternalReference::isolate_address(isolate()));
- Node* result =
- CallCFunction(function, MachineType::AnyTagged(),
- std::make_pair(MachineType::Pointer(), isolate_ptr),
- std::make_pair(MachineType::AnyTagged(), string));
+ TNode<Object> result =
+ CAST(CallCFunction(function, MachineType::AnyTagged(),
+ std::make_pair(MachineType::Pointer(), isolate_ptr),
+ std::make_pair(MachineType::AnyTagged(), string)));
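+  // A Smi result encodes either an array index or a ResultSentinel value;
+  // any non-Smi result is the internalized string itself.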
Label internalized(this);
GotoIf(TaggedIsNotSmi(result), &internalized);
- TNode<IntPtrT> word_result = SmiUntag(result);
+ TNode<IntPtrT> word_result = SmiUntag(CAST(result));
GotoIf(IntPtrEqual(word_result, IntPtrConstant(ResultSentinel::kNotFound)),
if_not_internalized);
GotoIf(IntPtrEqual(word_result, IntPtrConstant(ResultSentinel::kUnsupported)),
if_bailout);
- var_index->Bind(word_result);
+ *var_index = word_result;
Goto(if_index);
BIND(&internalized);
- var_internalized->Bind(result);
+ *var_internalized = CAST(result);
Goto(if_internalized);
}
@@ -8712,31 +8215,6 @@ TNode<Object> CodeStubAssembler::BasicLoadNumberDictionaryElement(
return LoadValueByKeyIndex<NumberDictionary>(dictionary, index);
}
-void CodeStubAssembler::BasicStoreNumberDictionaryElement(
- TNode<NumberDictionary> dictionary, TNode<IntPtrT> intptr_index,
- TNode<Object> value, Label* not_data, Label* if_hole, Label* read_only) {
- TVARIABLE(IntPtrT, var_entry);
- Label if_found(this);
- NumberDictionaryLookup(dictionary, intptr_index, &if_found, &var_entry,
- if_hole);
- BIND(&if_found);
-
- // Check that the value is a data property.
- TNode<IntPtrT> index = EntryToIndex<NumberDictionary>(var_entry.value());
- TNode<Uint32T> details =
- LoadDetailsByKeyIndex<NumberDictionary>(dictionary, index);
- TNode<Uint32T> kind = DecodeWord32<PropertyDetails::KindField>(details);
- // TODO(jkummerow): Support accessors without missing?
- GotoIfNot(Word32Equal(kind, Int32Constant(kData)), not_data);
-
- // Check that the property is writeable.
- GotoIf(IsSetWord32(details, PropertyDetails::kAttributesReadOnlyMask),
- read_only);
-
- // Finally, store the value.
- StoreValueByKeyIndex<NumberDictionary>(dictionary, index, value);
-}
-
template <class Dictionary>
void CodeStubAssembler::FindInsertionEntry(TNode<Dictionary> dictionary,
TNode<Name> key,
@@ -8858,16 +8336,16 @@ void CodeStubAssembler::LookupLinear(TNode<Name> unique_name,
first_inclusive,
IntPtrMul(ChangeInt32ToIntPtr(number_of_valid_entries), factor));
- BuildFastLoop(
+ BuildFastLoop<IntPtrT>(
last_exclusive, first_inclusive,
- [=](SloppyTNode<IntPtrT> name_index) {
+ [=](TNode<IntPtrT> name_index) {
TNode<MaybeObject> element =
LoadArrayElement(array, Array::kHeaderSize, name_index);
TNode<Name> candidate_name = CAST(element);
*var_name_index = name_index;
GotoIf(TaggedEqual(candidate_name, unique_name), if_found);
},
- -Array::kEntrySize, INTPTR_PARAMETERS, IndexAdvanceMode::kPre);
+ -Array::kEntrySize, IndexAdvanceMode::kPre);
Goto(if_not_found);
}
@@ -9029,7 +8507,7 @@ void CodeStubAssembler::ForEachEnumerableOwnProperty(
TNode<Uint16T> type = LoadMapInstanceType(map);
TNode<Uint32T> bit_field3 = EnsureOnlyHasSimpleProperties(map, type, bailout);
- TNode<DescriptorArray> descriptors = LoadMapDescriptors(map);
+ TVARIABLE(DescriptorArray, var_descriptors, LoadMapDescriptors(map));
TNode<Uint32T> nof_descriptors =
DecodeWord32<Map::NumberOfOwnDescriptorsBits>(bit_field3);
@@ -9044,25 +8522,23 @@ void CodeStubAssembler::ForEachEnumerableOwnProperty(
// Note: var_end_key_index is exclusive for the loop
TVARIABLE(IntPtrT, var_end_key_index,
ToKeyIndex<DescriptorArray>(nof_descriptors));
- VariableList list(
- {&var_stable, &var_has_symbol, &var_is_symbol_processing_loop,
- &var_start_key_index, &var_end_key_index},
- zone());
+ VariableList list({&var_descriptors, &var_stable, &var_has_symbol,
+ &var_is_symbol_processing_loop, &var_start_key_index,
+ &var_end_key_index},
+ zone());
Label descriptor_array_loop(
- this, {&var_stable, &var_has_symbol, &var_is_symbol_processing_loop,
- &var_start_key_index, &var_end_key_index});
+ this, {&var_descriptors, &var_stable, &var_has_symbol,
+ &var_is_symbol_processing_loop, &var_start_key_index,
+ &var_end_key_index});
Goto(&descriptor_array_loop);
BIND(&descriptor_array_loop);
- BuildFastLoop(
+ BuildFastLoop<IntPtrT>(
list, var_start_key_index.value(), var_end_key_index.value(),
- [=, &var_stable, &var_has_symbol, &var_is_symbol_processing_loop,
- &var_start_key_index, &var_end_key_index](Node* index) {
- TNode<IntPtrT> descriptor_key_index =
- TNode<IntPtrT>::UncheckedCast(index);
+ [&](TNode<IntPtrT> descriptor_key_index) {
TNode<Name> next_key =
- LoadKeyByKeyIndex(descriptors, descriptor_key_index);
+ LoadKeyByKeyIndex(var_descriptors.value(), descriptor_key_index);
TVARIABLE(Object, var_value, SmiConstant(0));
Label callback(this), next_iteration(this);
@@ -9117,7 +8593,7 @@ void CodeStubAssembler::ForEachEnumerableOwnProperty(
// Directly decode from the descriptor array if |object| did not
// change shape.
var_map = map;
- var_meta_storage = descriptors;
+ var_meta_storage = var_descriptors.value();
var_entry = Signed(descriptor_key_index);
Goto(&if_found_fast);
}
@@ -9183,19 +8659,21 @@ void CodeStubAssembler::ForEachEnumerableOwnProperty(
BIND(&callback);
body(next_key, var_value.value());
- // Check if |object| is still stable, i.e. we can proceed using
- // property details from preloaded |descriptors|.
- var_stable = Select<BoolT>(
- var_stable.value(),
- [=] { return TaggedEqual(LoadMap(object), map); },
- [=] { return Int32FalseConstant(); });
+ // Check if |object| is still stable, i.e. whether the preloaded
+ // |descriptors| are still valid modulo in-place representation changes.
+ GotoIfNot(var_stable.value(), &next_iteration);
+ var_stable = TaggedEqual(LoadMap(object), map);
+ // Reload the descriptors just in case the actual array changed, and
+ // any of the field representations changed in-place.
+ var_descriptors = LoadMapDescriptors(map);
Goto(&next_iteration);
}
}
BIND(&next_iteration);
},
- DescriptorArray::kEntrySize, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
+ DescriptorArray::kEntrySize, IndexAdvanceMode::kPost);
if (mode == kEnumerationOrder) {
Label done(this);
@@ -9205,14 +8683,73 @@ void CodeStubAssembler::ForEachEnumerableOwnProperty(
var_is_symbol_processing_loop = Int32TrueConstant();
// Add DescriptorArray::kEntrySize to make the var_end_key_index exclusive
// as BuildFastLoop() expects.
- Increment(&var_end_key_index, DescriptorArray::kEntrySize,
- INTPTR_PARAMETERS);
+ Increment(&var_end_key_index, DescriptorArray::kEntrySize);
Goto(&descriptor_array_loop);
BIND(&done);
}
}
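+// Walks the Map::kConstructorOrBackPointerOffset chain: back pointers are
+// maps, so the first non-map value reached is the stored constructor.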
+TNode<Object> CodeStubAssembler::GetConstructor(TNode<Map> map) {
+ TVARIABLE(HeapObject, var_maybe_constructor);
+ var_maybe_constructor = map;
+ Label loop(this, &var_maybe_constructor), done(this);
+ GotoIfNot(IsMap(var_maybe_constructor.value()), &done);
+ Goto(&loop);
+
+ BIND(&loop);
+ {
+ var_maybe_constructor = CAST(LoadObjectField(
+ var_maybe_constructor.value(), Map::kConstructorOrBackPointerOffset));
+ GotoIf(IsMap(var_maybe_constructor.value()), &loop);
+ Goto(&done);
+ }
+
+ BIND(&done);
+ return var_maybe_constructor.value();
+}
+
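+// Finds the native context |receiver| was created in by locating its
+// constructor (or, for generator objects, the generator function) and
+// reading that function's context.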
+TNode<NativeContext> CodeStubAssembler::GetCreationContext(
+ TNode<JSReceiver> receiver, Label* if_bailout) {
+ TNode<Map> receiver_map = LoadMap(receiver);
+ TNode<Object> constructor = GetConstructor(receiver_map);
+
+ TVARIABLE(JSFunction, var_function);
+
+ Label done(this), if_jsfunction(this), if_jsgenerator(this);
+ GotoIf(TaggedIsSmi(constructor), if_bailout);
+
+ TNode<Map> function_map = LoadMap(CAST(constructor));
+ GotoIf(IsJSFunctionMap(function_map), &if_jsfunction);
+ GotoIf(IsJSGeneratorMap(function_map), &if_jsgenerator);
+ // Remote objects don't have a creation context.
+ GotoIf(IsFunctionTemplateInfoMap(function_map), if_bailout);
+
+ CSA_ASSERT(this, IsJSFunctionMap(receiver_map));
+ var_function = CAST(receiver);
+ Goto(&done);
+
+ BIND(&if_jsfunction);
+ {
+ var_function = CAST(constructor);
+ Goto(&done);
+ }
+
+ BIND(&if_jsgenerator);
+ {
+ var_function = LoadJSGeneratorObjectFunction(CAST(receiver));
+ Goto(&done);
+ }
+
+ BIND(&done);
+ TNode<Context> context = LoadJSFunctionContext(var_function.value());
+
+ GotoIfNot(IsContext(context), if_bailout);
+
+ TNode<NativeContext> native_context = LoadNativeContext(context);
+ return native_context;
+}
+
void CodeStubAssembler::DescriptorLookup(
SloppyTNode<Name> unique_name, SloppyTNode<DescriptorArray> descriptors,
SloppyTNode<Uint32T> bitfield3, Label* if_found,
@@ -9302,7 +8839,7 @@ void CodeStubAssembler::TryLookupPropertyInSimpleObject(
}
void CodeStubAssembler::TryLookupProperty(
- SloppyTNode<JSObject> object, SloppyTNode<Map> map,
+ SloppyTNode<JSReceiver> object, SloppyTNode<Map> map,
SloppyTNode<Int32T> instance_type, SloppyTNode<Name> unique_name,
Label* if_found_fast, Label* if_found_dict, Label* if_found_global,
TVariable<HeapObject>* var_meta_storage, TVariable<IntPtrT>* var_name_index,
@@ -9310,7 +8847,7 @@ void CodeStubAssembler::TryLookupProperty(
Label if_objectisspecial(this);
GotoIf(IsSpecialReceiverInstanceType(instance_type), &if_objectisspecial);
- TryLookupPropertyInSimpleObject(object, map, unique_name, if_found_fast,
+ TryLookupPropertyInSimpleObject(CAST(object), map, unique_name, if_found_fast,
if_found_dict, var_meta_storage,
var_name_index, if_not_found);
@@ -9547,25 +9084,44 @@ TNode<Object> CodeStubAssembler::CallGetterIfAccessor(
// AccessorPair case.
{
if (mode == kCallJSGetter) {
+ Label if_callable(this), if_function_template_info(this);
Node* accessor_pair = value;
TNode<HeapObject> getter =
CAST(LoadObjectField(accessor_pair, AccessorPair::kGetterOffset));
TNode<Map> getter_map = LoadMap(getter);
- TNode<Uint16T> instance_type = LoadMapInstanceType(getter_map);
- // FunctionTemplateInfo getters are not supported yet.
- GotoIf(InstanceTypeEqual(instance_type, FUNCTION_TEMPLATE_INFO_TYPE),
- if_bailout);
+
+ GotoIf(IsCallableMap(getter_map), &if_callable);
+ GotoIf(IsFunctionTemplateInfoMap(getter_map), &if_function_template_info);
// Return undefined if the {getter} is not callable.
var_value.Bind(UndefinedConstant());
- GotoIfNot(IsCallableMap(getter_map), &done);
+ Goto(&done);
+
+ BIND(&if_callable);
+ {
+ // Call the accessor.
+ Callable callable = CodeFactory::Call(isolate());
+ Node* result = CallJS(callable, context, getter, receiver);
+ var_value.Bind(result);
+ Goto(&done);
+ }
- // Call the accessor.
- Callable callable = CodeFactory::Call(isolate());
- Node* result = CallJS(callable, context, getter, receiver);
- var_value.Bind(result);
+ BIND(&if_function_template_info);
+ {
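+        // The getter is a FunctionTemplateInfo; handle it inline only when
+        // it has no cached property name, calling through the builtin that
+        // re-checks access and receiver compatibility.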
+ TNode<HeapObject> cached_property_name = LoadObjectField<HeapObject>(
+ getter, FunctionTemplateInfo::kCachedPropertyNameOffset);
+ GotoIfNot(IsTheHole(cached_property_name), if_bailout);
+
+ TNode<NativeContext> creation_context =
+ GetCreationContext(CAST(receiver), if_bailout);
+ var_value.Bind(CallBuiltin(
+ Builtins::kCallFunctionTemplate_CheckAccessAndCompatibleReceiver,
+ creation_context, getter, IntPtrConstant(0), receiver));
+ Goto(&done);
+ }
+ } else {
+ Goto(&done);
}
- Goto(&done);
}
// AccessorInfo case.
@@ -9617,10 +9173,11 @@ TNode<Object> CodeStubAssembler::CallGetterIfAccessor(
GotoIfNot(IsLengthString(
LoadObjectField(accessor_info, AccessorInfo::kNameOffset)),
if_bailout);
- Node* receiver_value = LoadJSPrimitiveWrapperValue(receiver);
+ TNode<Object> receiver_value =
+ LoadJSPrimitiveWrapperValue(CAST(receiver));
GotoIfNot(TaggedIsNotSmi(receiver_value), if_bailout);
- GotoIfNot(IsString(receiver_value), if_bailout);
- var_value.Bind(LoadStringLengthAsSmi(receiver_value));
+ GotoIfNot(IsString(CAST(receiver_value)), if_bailout);
+ var_value.Bind(LoadStringLengthAsSmi(CAST(receiver_value)));
Goto(&done);
}
}
@@ -9808,18 +9365,14 @@ void CodeStubAssembler::TryLookupElement(Node* object, Node* map,
}
BIND(&if_isfaststringwrapper);
{
- CSA_ASSERT(this, HasInstanceType(object, JS_PRIMITIVE_WRAPPER_TYPE));
- Node* string = LoadJSPrimitiveWrapperValue(object);
- CSA_ASSERT(this, IsString(string));
+ TNode<String> string = CAST(LoadJSPrimitiveWrapperValue(CAST(object)));
TNode<IntPtrT> length = LoadStringLengthAsWord(string);
GotoIf(UintPtrLessThan(intptr_index, length), if_found);
Goto(&if_isobjectorsmi);
}
BIND(&if_isslowstringwrapper);
{
- CSA_ASSERT(this, HasInstanceType(object, JS_PRIMITIVE_WRAPPER_TYPE));
- Node* string = LoadJSPrimitiveWrapperValue(object);
- CSA_ASSERT(this, IsString(string));
+ TNode<String> string = CAST(LoadJSPrimitiveWrapperValue(CAST(object)));
TNode<IntPtrT> length = LoadStringLengthAsWord(string);
GotoIf(UintPtrLessThan(intptr_index, length), if_found);
Goto(&if_isdictionary);
@@ -9892,8 +9445,8 @@ void CodeStubAssembler::TryPrototypeChainLookup(
GotoIf(InstanceTypeEqual(instance_type, JS_PROXY_TYPE), if_proxy);
}
- VARIABLE(var_index, MachineType::PointerRepresentation());
- VARIABLE(var_unique, MachineRepresentation::kTagged);
+ TVARIABLE(IntPtrT, var_index);
+ TVARIABLE(Name, var_unique);
Label if_keyisindex(this), if_iskeyunique(this);
TryToName(key, &if_keyisindex, &var_index, &if_iskeyunique, &var_unique,
@@ -9905,9 +9458,7 @@ void CodeStubAssembler::TryPrototypeChainLookup(
TVARIABLE(Map, var_holder_map, map);
TVARIABLE(Int32T, var_holder_instance_type, instance_type);
- VariableList merged_variables(
- {&var_holder, &var_holder_map, &var_holder_instance_type}, zone());
- Label loop(this, merged_variables);
+ Label loop(this, {&var_holder, &var_holder_map, &var_holder_instance_type});
Goto(&loop);
BIND(&loop);
{
@@ -9950,9 +9501,7 @@ void CodeStubAssembler::TryPrototypeChainLookup(
TVARIABLE(Map, var_holder_map, map);
TVARIABLE(Int32T, var_holder_instance_type, instance_type);
- VariableList merged_variables(
- {&var_holder, &var_holder_map, &var_holder_instance_type}, zone());
- Label loop(this, merged_variables);
+ Label loop(this, {&var_holder, &var_holder_map, &var_holder_instance_type});
Goto(&loop);
BIND(&loop);
{
@@ -9978,22 +9527,22 @@ void CodeStubAssembler::TryPrototypeChainLookup(
}
}
-Node* CodeStubAssembler::HasInPrototypeChain(Node* context, Node* object,
- SloppyTNode<Object> prototype) {
- CSA_ASSERT(this, TaggedIsNotSmi(object));
- VARIABLE(var_result, MachineRepresentation::kTagged);
+TNode<Oddball> CodeStubAssembler::HasInPrototypeChain(TNode<Context> context,
+ TNode<HeapObject> object,
+ TNode<Object> prototype) {
+ TVARIABLE(Oddball, var_result);
Label return_false(this), return_true(this),
return_runtime(this, Label::kDeferred), return_result(this);
// Loop through the prototype chain looking for the {prototype}.
- VARIABLE(var_object_map, MachineRepresentation::kTagged, LoadMap(object));
+ TVARIABLE(Map, var_object_map, LoadMap(object));
Label loop(this, &var_object_map);
Goto(&loop);
BIND(&loop);
{
// Check if we can determine the prototype directly from the {object_map}.
Label if_objectisdirect(this), if_objectisspecial(this, Label::kDeferred);
- Node* object_map = var_object_map.value();
+ TNode<Map> object_map = var_object_map.value();
TNode<Uint16T> object_instance_type = LoadMapInstanceType(object_map);
Branch(IsSpecialReceiverInstanceType(object_instance_type),
&if_objectisspecial, &if_objectisdirect);
@@ -10018,22 +9567,22 @@ Node* CodeStubAssembler::HasInPrototypeChain(Node* context, Node* object,
// Continue with the prototype.
CSA_ASSERT(this, TaggedIsNotSmi(object_prototype));
- var_object_map.Bind(LoadMap(object_prototype));
+ var_object_map = LoadMap(object_prototype);
Goto(&loop);
}
BIND(&return_true);
- var_result.Bind(TrueConstant());
+ var_result = TrueConstant();
Goto(&return_result);
BIND(&return_false);
- var_result.Bind(FalseConstant());
+ var_result = FalseConstant();
Goto(&return_result);
BIND(&return_runtime);
{
// Fallback to the runtime implementation.
- var_result.Bind(
+ var_result = CAST(
CallRuntime(Runtime::kHasInPrototypeChain, context, object, prototype));
}
Goto(&return_result);
@@ -10042,63 +9591,67 @@ Node* CodeStubAssembler::HasInPrototypeChain(Node* context, Node* object,
return var_result.value();
}
-Node* CodeStubAssembler::OrdinaryHasInstance(Node* context, Node* callable,
- Node* object) {
- VARIABLE(var_result, MachineRepresentation::kTagged);
+TNode<Oddball> CodeStubAssembler::OrdinaryHasInstance(
+ TNode<Context> context, TNode<Object> callable_maybe_smi,
+ TNode<Object> object_maybe_smi) {
+ TVARIABLE(Oddball, var_result);
Label return_runtime(this, Label::kDeferred), return_result(this);
GotoIfForceSlowPath(&return_runtime);
// Goto runtime if {object} is a Smi.
- GotoIf(TaggedIsSmi(object), &return_runtime);
+ GotoIf(TaggedIsSmi(object_maybe_smi), &return_runtime);
// Goto runtime if {callable} is a Smi.
- GotoIf(TaggedIsSmi(callable), &return_runtime);
-
- // Load map of {callable}.
- TNode<Map> callable_map = LoadMap(callable);
-
- // Goto runtime if {callable} is not a JSFunction.
- TNode<Uint16T> callable_instance_type = LoadMapInstanceType(callable_map);
- GotoIfNot(InstanceTypeEqual(callable_instance_type, JS_FUNCTION_TYPE),
- &return_runtime);
+ GotoIf(TaggedIsSmi(callable_maybe_smi), &return_runtime);
- GotoIfPrototypeRequiresRuntimeLookup(CAST(callable), callable_map,
- &return_runtime);
-
- // Get the "prototype" (or initial map) of the {callable}.
- TNode<HeapObject> callable_prototype = LoadObjectField<HeapObject>(
- CAST(callable), JSFunction::kPrototypeOrInitialMapOffset);
{
- Label no_initial_map(this), walk_prototype_chain(this);
- TVARIABLE(HeapObject, var_callable_prototype, callable_prototype);
+ // Load map of {callable}.
+ TNode<HeapObject> object = CAST(object_maybe_smi);
+ TNode<HeapObject> callable = CAST(callable_maybe_smi);
+ TNode<Map> callable_map = LoadMap(callable);
- // Resolve the "prototype" if the {callable} has an initial map.
- GotoIfNot(IsMap(callable_prototype), &no_initial_map);
- var_callable_prototype =
- LoadObjectField<HeapObject>(callable_prototype, Map::kPrototypeOffset);
- Goto(&walk_prototype_chain);
+ // Goto runtime if {callable} is not a JSFunction.
+ TNode<Uint16T> callable_instance_type = LoadMapInstanceType(callable_map);
+ GotoIfNot(InstanceTypeEqual(callable_instance_type, JS_FUNCTION_TYPE),
+ &return_runtime);
- BIND(&no_initial_map);
- // {callable_prototype} is the hole if the "prototype" property hasn't been
- // requested so far.
- Branch(TaggedEqual(callable_prototype, TheHoleConstant()), &return_runtime,
- &walk_prototype_chain);
+ GotoIfPrototypeRequiresRuntimeLookup(CAST(callable), callable_map,
+ &return_runtime);
- BIND(&walk_prototype_chain);
- callable_prototype = var_callable_prototype.value();
- }
+ // Get the "prototype" (or initial map) of the {callable}.
+ TNode<HeapObject> callable_prototype = LoadObjectField<HeapObject>(
+ callable, JSFunction::kPrototypeOrInitialMapOffset);
+ {
+ Label no_initial_map(this), walk_prototype_chain(this);
+ TVARIABLE(HeapObject, var_callable_prototype, callable_prototype);
+
+ // Resolve the "prototype" if the {callable} has an initial map.
+ GotoIfNot(IsMap(callable_prototype), &no_initial_map);
+ var_callable_prototype = LoadObjectField<HeapObject>(
+ callable_prototype, Map::kPrototypeOffset);
+ Goto(&walk_prototype_chain);
+
+ BIND(&no_initial_map);
+ // {callable_prototype} is the hole if the "prototype" property hasn't
+ // been requested so far.
+ Branch(TaggedEqual(callable_prototype, TheHoleConstant()),
+ &return_runtime, &walk_prototype_chain);
+
+ BIND(&walk_prototype_chain);
+ callable_prototype = var_callable_prototype.value();
+ }
- // Loop through the prototype chain looking for the {callable} prototype.
- CSA_ASSERT(this, IsJSReceiver(callable_prototype));
- var_result.Bind(HasInPrototypeChain(context, object, callable_prototype));
- Goto(&return_result);
+ // Loop through the prototype chain looking for the {callable} prototype.
+ var_result = HasInPrototypeChain(context, object, callable_prototype);
+ Goto(&return_result);
+ }
BIND(&return_runtime);
{
// Fallback to the runtime implementation.
- var_result.Bind(
- CallRuntime(Runtime::kOrdinaryHasInstance, context, callable, object));
+ var_result = CAST(CallRuntime(Runtime::kOrdinaryHasInstance, context,
+ callable_maybe_smi, object_maybe_smi));
}
Goto(&return_result);
@@ -10111,34 +9664,72 @@ TNode<IntPtrT> CodeStubAssembler::ElementOffsetFromIndex(Node* index_node,
ParameterMode mode,
int base_size) {
CSA_SLOW_ASSERT(this, MatchesParameterMode(index_node, mode));
+ if (mode == SMI_PARAMETERS) {
+ return ElementOffsetFromIndex(ReinterpretCast<Smi>(index_node), kind,
+ base_size);
+ } else {
+ DCHECK(mode == INTPTR_PARAMETERS);
+ return ElementOffsetFromIndex(ReinterpretCast<IntPtrT>(index_node), kind,
+ base_size);
+ }
+}
+
+template <typename TIndex>
+TNode<IntPtrT> CodeStubAssembler::ElementOffsetFromIndex(
+ TNode<TIndex> index_node, ElementsKind kind, int base_size) {
+ // TODO(v8:9708): Remove IntPtrT variant in favor of UintPtrT.
+ static_assert(std::is_same<TIndex, Smi>::value ||
+ std::is_same<TIndex, IntPtrT>::value ||
+ std::is_same<TIndex, UintPtrT>::value,
+ "Only Smi, UintPtrT or IntPtrT index nodes are allowed");
int element_size_shift = ElementsKindToShiftSize(kind);
int element_size = 1 << element_size_shift;
int const kSmiShiftBits = kSmiShiftSize + kSmiTagSize;
intptr_t index = 0;
+ TNode<IntPtrT> intptr_index_node;
bool constant_index = false;
- if (mode == SMI_PARAMETERS) {
+ if (std::is_same<TIndex, Smi>::value) {
+ TNode<Smi> smi_index_node = ReinterpretCast<Smi>(index_node);
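+    // A Smi index is already shifted left by the tag bits, so fold that
+    // shift into the element size shift instead of untagging first.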
element_size_shift -= kSmiShiftBits;
Smi smi_index;
- constant_index = ToSmiConstant(index_node, &smi_index);
- if (constant_index) index = smi_index.value();
- index_node = BitcastTaggedSignedToWord(index_node);
+ constant_index = ToSmiConstant(smi_index_node, &smi_index);
+ if (constant_index) {
+ index = smi_index.value();
+ } else {
+ if (COMPRESS_POINTERS_BOOL) {
+ smi_index_node = NormalizeSmiIndex(smi_index_node);
+ }
+ }
+ intptr_index_node = BitcastTaggedToWordForTagAndSmiBits(smi_index_node);
} else {
- DCHECK(mode == INTPTR_PARAMETERS);
- constant_index = ToIntPtrConstant(index_node, &index);
+ intptr_index_node = ReinterpretCast<IntPtrT>(index_node);
+ constant_index = ToIntPtrConstant(intptr_index_node, &index);
}
if (constant_index) {
return IntPtrConstant(base_size + element_size * index);
}
- TNode<WordT> shifted_index =
+ TNode<IntPtrT> shifted_index =
(element_size_shift == 0)
- ? UncheckedCast<WordT>(index_node)
+ ? intptr_index_node
: ((element_size_shift > 0)
- ? WordShl(index_node, IntPtrConstant(element_size_shift))
- : WordSar(index_node, IntPtrConstant(-element_size_shift)));
+ ? WordShl(intptr_index_node,
+ IntPtrConstant(element_size_shift))
+ : WordSar(intptr_index_node,
+ IntPtrConstant(-element_size_shift)));
return IntPtrAdd(IntPtrConstant(base_size), Signed(shifted_index));
}
+// Instantiate ElementOffsetFromIndex for Smi and IntPtrT.
+template V8_EXPORT_PRIVATE TNode<IntPtrT>
+CodeStubAssembler::ElementOffsetFromIndex<Smi>(TNode<Smi> index_node,
+ ElementsKind kind,
+ int base_size);
+template V8_EXPORT_PRIVATE TNode<IntPtrT>
+CodeStubAssembler::ElementOffsetFromIndex<IntPtrT>(TNode<IntPtrT> index_node,
+ ElementsKind kind,
+ int base_size);
+
TNode<BoolT> CodeStubAssembler::IsOffsetInBounds(SloppyTNode<IntPtrT> offset,
SloppyTNode<IntPtrT> length,
int header_size,
@@ -10146,8 +9737,7 @@ TNode<BoolT> CodeStubAssembler::IsOffsetInBounds(SloppyTNode<IntPtrT> offset,
// Make sure we point to the last field.
int element_size = 1 << ElementsKindToShiftSize(kind);
int correction = header_size - kHeapObjectTag - element_size;
- TNode<IntPtrT> last_offset =
- ElementOffsetFromIndex(length, kind, INTPTR_PARAMETERS, correction);
+ TNode<IntPtrT> last_offset = ElementOffsetFromIndex(length, kind, correction);
return IntPtrLessThanOrEqual(offset, last_offset);
}
@@ -10203,8 +9793,9 @@ TNode<FeedbackVector> CodeStubAssembler::LoadFeedbackVectorForStub() {
return CAST(LoadFeedbackVector(function));
}
-void CodeStubAssembler::UpdateFeedback(Node* feedback, Node* maybe_vector,
- Node* slot_id) {
+void CodeStubAssembler::UpdateFeedback(TNode<Smi> feedback,
+ TNode<HeapObject> maybe_vector,
+ TNode<UintPtrT> slot_id) {
Label end(this);
// If feedback_vector is not valid, then nothing to do.
GotoIf(IsUndefined(maybe_vector), &end);
@@ -10216,7 +9807,7 @@ void CodeStubAssembler::UpdateFeedback(Node* feedback, Node* maybe_vector,
TNode<MaybeObject> feedback_element =
LoadFeedbackVectorSlot(feedback_vector, slot_id);
TNode<Smi> previous_feedback = CAST(feedback_element);
- TNode<Smi> combined_feedback = SmiOr(previous_feedback, CAST(feedback));
+ TNode<Smi> combined_feedback = SmiOr(previous_feedback, feedback);
GotoIf(SmiEqual(previous_feedback, combined_feedback), &end);
{
@@ -10230,7 +9821,7 @@ void CodeStubAssembler::UpdateFeedback(Node* feedback, Node* maybe_vector,
}
void CodeStubAssembler::ReportFeedbackUpdate(
- SloppyTNode<FeedbackVector> feedback_vector, SloppyTNode<IntPtrT> slot_id,
+ TNode<FeedbackVector> feedback_vector, SloppyTNode<UintPtrT> slot_id,
const char* reason) {
// Reset profiler ticks.
StoreObjectFieldNoWriteBarrier(
@@ -10241,7 +9832,7 @@ void CodeStubAssembler::ReportFeedbackUpdate(
// Trace the update.
CallRuntime(Runtime::kInterpreterTraceUpdateFeedback, NoContextConstant(),
LoadFromParentFrame(JavaScriptFrameConstants::kFunctionOffset),
- SmiTag(slot_id), StringConstant(reason));
+ SmiTag(Signed(slot_id)), StringConstant(reason));
#endif // V8_TRACE_FEEDBACK_UPDATES
}
@@ -10285,14 +9876,16 @@ TNode<Map> CodeStubAssembler::LoadReceiverMap(SloppyTNode<Object> receiver) {
[=] { return LoadMap(UncheckedCast<HeapObject>(receiver)); });
}
-TNode<IntPtrT> CodeStubAssembler::TryToIntptr(Node* key, Label* miss) {
+TNode<IntPtrT> CodeStubAssembler::TryToIntptr(SloppyTNode<Object> key,
+ Label* miss) {
TVARIABLE(IntPtrT, var_intptr_key);
Label done(this, &var_intptr_key), key_is_smi(this);
GotoIf(TaggedIsSmi(key), &key_is_smi);
+
// Try to convert a heap number to a Smi.
- GotoIfNot(IsHeapNumber(key), miss);
+ GotoIfNot(IsHeapNumber(CAST(key)), miss);
{
- TNode<Float64T> value = LoadHeapNumberValue(key);
+ TNode<Float64T> value = LoadHeapNumberValue(CAST(key));
TNode<Int32T> int_value = RoundFloat64ToInt32(value);
GotoIfNot(Float64Equal(value, ChangeInt32ToFloat64(int_value)), miss);
var_intptr_key = ChangeInt32ToIntPtr(int_value);
@@ -10301,7 +9894,7 @@ TNode<IntPtrT> CodeStubAssembler::TryToIntptr(Node* key, Label* miss) {
BIND(&key_is_smi);
{
- var_intptr_key = SmiUntag(key);
+ var_intptr_key = SmiUntag(CAST(key));
Goto(&done);
}
@@ -10354,7 +9947,7 @@ Node* CodeStubAssembler::EmitKeyedSloppyArguments(
}
Label if_mapped(this), if_unmapped(this), end(this, &var_result);
TNode<IntPtrT> intptr_two = IntPtrConstant(2);
- TNode<WordT> adjusted_length = IntPtrSub(elements_length, intptr_two);
+ TNode<IntPtrT> adjusted_length = IntPtrSub(elements_length, intptr_two);
GotoIf(UintPtrGreaterThanOrEqual(key, adjusted_length), &if_unmapped);
@@ -10510,33 +10103,35 @@ void CodeStubAssembler::StoreElement(Node* elements, ElementsKind kind,
}
}
-Node* CodeStubAssembler::Int32ToUint8Clamped(Node* int32_value) {
+TNode<Uint8T> CodeStubAssembler::Int32ToUint8Clamped(
+ TNode<Int32T> int32_value) {
Label done(this);
TNode<Int32T> int32_zero = Int32Constant(0);
TNode<Int32T> int32_255 = Int32Constant(255);
- VARIABLE(var_value, MachineRepresentation::kWord32, int32_value);
+ TVARIABLE(Word32T, var_value, int32_value);
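+  // The unsigned comparison settles the common 0..255 case in one branch;
+  // negative inputs reinterpret as large unsigned values and fall through.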
GotoIf(Uint32LessThanOrEqual(int32_value, int32_255), &done);
- var_value.Bind(int32_zero);
+ var_value = int32_zero;
GotoIf(Int32LessThan(int32_value, int32_zero), &done);
- var_value.Bind(int32_255);
+ var_value = int32_255;
Goto(&done);
BIND(&done);
- return var_value.value();
+ return UncheckedCast<Uint8T>(var_value.value());
}
-Node* CodeStubAssembler::Float64ToUint8Clamped(Node* float64_value) {
+TNode<Uint8T> CodeStubAssembler::Float64ToUint8Clamped(
+ TNode<Float64T> float64_value) {
Label done(this);
- VARIABLE(var_value, MachineRepresentation::kWord32, Int32Constant(0));
+ TVARIABLE(Word32T, var_value, Int32Constant(0));
GotoIf(Float64LessThanOrEqual(float64_value, Float64Constant(0.0)), &done);
- var_value.Bind(Int32Constant(255));
+ var_value = Int32Constant(255);
GotoIf(Float64LessThanOrEqual(Float64Constant(255.0), float64_value), &done);
{
TNode<Float64T> rounded_value = Float64RoundToEven(float64_value);
- var_value.Bind(TruncateFloat64ToWord32(rounded_value));
+ var_value = TruncateFloat64ToWord32(rounded_value);
Goto(&done);
}
BIND(&done);
- return var_value.value();
+ return UncheckedCast<Uint8T>(var_value.value());
}
Node* CodeStubAssembler::PrepareValueForWriteToTypedArray(
@@ -10716,8 +10311,8 @@ void CodeStubAssembler::EmitElementStore(Node* object, Node* key, Node* value,
GotoIfNot(UintPtrLessThan(intptr_key, length), &update_value_and_bailout);
}
- TNode<RawPtrT> backing_store = LoadJSTypedArrayBackingStore(CAST(object));
- StoreElement(backing_store, elements_kind, intptr_key, converted_value,
+ TNode<RawPtrT> data_ptr = LoadJSTypedArrayDataPtr(CAST(object));
+ StoreElement(data_ptr, elements_kind, intptr_key, converted_value,
parameter_mode);
Goto(&done);
@@ -10807,7 +10402,7 @@ void CodeStubAssembler::EmitElementStore(Node* object, Node* key, Node* value,
if (IsSmiElementsKind(elements_kind)) {
GotoIfNot(TaggedIsSmi(value), bailout);
} else if (IsDoubleElementsKind(elements_kind)) {
- value = TryTaggedToFloat64(value, bailout);
+ value = TryTaggedToFloat64(CAST(value), bailout);
}
if (IsGrowStoreMode(store_mode) &&
@@ -11047,7 +10642,7 @@ TNode<IntPtrT> CodeStubAssembler::PageFromAddress(TNode<IntPtrT> address) {
}
TNode<AllocationSite> CodeStubAssembler::CreateAllocationSiteInFeedbackVector(
- SloppyTNode<FeedbackVector> feedback_vector, TNode<Smi> slot) {
+ TNode<FeedbackVector> feedback_vector, TNode<UintPtrT> slot) {
TNode<IntPtrT> size = IntPtrConstant(AllocationSite::kSizeWithWeakNext);
TNode<HeapObject> site = Allocate(size, CodeStubAssembler::kPretenured);
StoreMapNoWriteBarrier(site, RootIndex::kAllocationSiteWithWeakNextMap);
@@ -11090,19 +10685,16 @@ TNode<AllocationSite> CodeStubAssembler::CreateAllocationSiteInFeedbackVector(
StoreObjectField(site, AllocationSite::kWeakNextOffset, next_site);
StoreFullTaggedNoWriteBarrier(site_list, site);
- StoreFeedbackVectorSlot(feedback_vector, slot, site, UPDATE_WRITE_BARRIER, 0,
- SMI_PARAMETERS);
+ StoreFeedbackVectorSlot(feedback_vector, slot, site);
return CAST(site);
}
TNode<MaybeObject> CodeStubAssembler::StoreWeakReferenceInFeedbackVector(
- SloppyTNode<FeedbackVector> feedback_vector, Node* slot,
- SloppyTNode<HeapObject> value, int additional_offset,
- ParameterMode parameter_mode) {
+ TNode<FeedbackVector> feedback_vector, TNode<UintPtrT> slot,
+ TNode<HeapObject> value, int additional_offset) {
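+  // Wrap |value| in a weak reference before storing, so the GC is free to
+  // clear the slot; the weak wrapper is returned to the caller.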
TNode<MaybeObject> weak_value = MakeWeak(value);
StoreFeedbackVectorSlot(feedback_vector, slot, weak_value,
- UPDATE_WRITE_BARRIER, additional_offset,
- parameter_mode);
+ UPDATE_WRITE_BARRIER, additional_offset);
return weak_value;
}
@@ -11135,14 +10727,14 @@ TNode<Int32T> CodeStubAssembler::LoadElementsKind(
return elements_kind;
}
-Node* CodeStubAssembler::BuildFastLoop(
- const CodeStubAssembler::VariableList& vars, Node* start_index,
- Node* end_index, const FastLoopBody& body, int increment,
- ParameterMode parameter_mode, IndexAdvanceMode advance_mode) {
- CSA_SLOW_ASSERT(this, MatchesParameterMode(start_index, parameter_mode));
- CSA_SLOW_ASSERT(this, MatchesParameterMode(end_index, parameter_mode));
- MachineRepresentation index_rep = ParameterRepresentation(parameter_mode);
- VARIABLE(var, index_rep, start_index);
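+// Iterates from |start_index| to |end_index| in steps of |increment|,
+// invoking |body| with the current index; |advance_mode| selects whether the
+// index is advanced before or after the body runs.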
+template <typename TIndex>
+TNode<TIndex> CodeStubAssembler::BuildFastLoop(const VariableList& vars,
+ TNode<TIndex> start_index,
+ TNode<TIndex> end_index,
+ const FastLoopBody<TIndex>& body,
+ int increment,
+ IndexAdvanceMode advance_mode) {
+ TVARIABLE(TIndex, var, start_index);
VariableList vars_copy(vars.begin(), vars.end(), zone());
vars_copy.push_back(&var);
Label loop(this, vars_copy);
@@ -11154,8 +10746,7 @@ Node* CodeStubAssembler::BuildFastLoop(
// to force the loop header check at the end of the loop and branch forward to
// it from the pre-header). The extra branch is slower in the case that the
// loop actually iterates.
- TNode<BoolT> first_check =
- IntPtrOrSmiEqual(var.value(), end_index, parameter_mode);
+ TNode<BoolT> first_check = IntPtrOrSmiEqual(var.value(), end_index);
int32_t first_check_val;
if (ToInt32Constant(first_check, &first_check_val)) {
if (first_check_val) return var.value();
@@ -11167,19 +10758,28 @@ Node* CodeStubAssembler::BuildFastLoop(
BIND(&loop);
{
if (advance_mode == IndexAdvanceMode::kPre) {
- Increment(&var, increment, parameter_mode);
+ Increment(&var, increment);
}
body(var.value());
if (advance_mode == IndexAdvanceMode::kPost) {
- Increment(&var, increment, parameter_mode);
+ Increment(&var, increment);
}
- Branch(IntPtrOrSmiNotEqual(var.value(), end_index, parameter_mode), &loop,
- &after_loop);
+ Branch(IntPtrOrSmiNotEqual(var.value(), end_index), &loop, &after_loop);
}
BIND(&after_loop);
return var.value();
}
+// Instantiate BuildFastLoop for Smi and IntPtrT.
+template TNode<Smi> CodeStubAssembler::BuildFastLoop<Smi>(
+ const VariableList& vars, TNode<Smi> start_index, TNode<Smi> end_index,
+ const FastLoopBody<Smi>& body, int increment,
+ IndexAdvanceMode advance_mode);
+template TNode<IntPtrT> CodeStubAssembler::BuildFastLoop<IntPtrT>(
+ const VariableList& vars, TNode<IntPtrT> start_index,
+ TNode<IntPtrT> end_index, const FastLoopBody<IntPtrT>& body, int increment,
+ IndexAdvanceMode advance_mode);
+
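// Editor's sketch (not part of this patch): caller-side usage of the
// templated BuildFastLoop instantiated above. The index type is now carried
// by the template argument instead of a ParameterMode flag; 'start' and
// 'end' are hypothetical locals inside a CodeStubAssembler method.
//
//   TNode<IntPtrT> start = IntPtrConstant(0);
//   TNode<IntPtrT> end = IntPtrConstant(8);
//   BuildFastLoop<IntPtrT>(
//       start, end,
//       [&](TNode<IntPtrT> index) {
//         // The body receives a typed index; no CAST or mode checks needed.
//       },
//       1, IndexAdvanceMode::kPost);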
void CodeStubAssembler::BuildFastFixedArrayForEach(
const CodeStubAssembler::VariableList& vars, Node* fixed_array,
ElementsKind kind, Node* first_element_inclusive,
@@ -11201,17 +10801,15 @@ void CodeStubAssembler::BuildFastFixedArrayForEach(
if (direction == ForEachDirection::kForward) {
for (int i = first_val; i < last_val; ++i) {
TNode<IntPtrT> index = IntPtrConstant(i);
- TNode<IntPtrT> offset =
- ElementOffsetFromIndex(index, kind, INTPTR_PARAMETERS,
- FixedArray::kHeaderSize - kHeapObjectTag);
+ TNode<IntPtrT> offset = ElementOffsetFromIndex(
+ index, kind, FixedArray::kHeaderSize - kHeapObjectTag);
body(fixed_array, offset);
}
} else {
for (int i = last_val - 1; i >= first_val; --i) {
TNode<IntPtrT> index = IntPtrConstant(i);
- TNode<IntPtrT> offset =
- ElementOffsetFromIndex(index, kind, INTPTR_PARAMETERS,
- FixedArray::kHeaderSize - kHeapObjectTag);
+ TNode<IntPtrT> offset = ElementOffsetFromIndex(
+ index, kind, FixedArray::kHeaderSize - kHeapObjectTag);
body(fixed_array, offset);
}
}
@@ -11228,11 +10826,10 @@ void CodeStubAssembler::BuildFastFixedArrayForEach(
if (direction == ForEachDirection::kReverse) std::swap(start, limit);
int increment = IsDoubleElementsKind(kind) ? kDoubleSize : kTaggedSize;
- BuildFastLoop(
+ BuildFastLoop<IntPtrT>(
vars, start, limit,
- [fixed_array, &body](Node* offset) { body(fixed_array, offset); },
+ [&](TNode<IntPtrT> offset) { body(fixed_array, offset); },
direction == ForEachDirection::kReverse ? -increment : increment,
- INTPTR_PARAMETERS,
direction == ForEachDirection::kReverse ? IndexAdvanceMode::kPre
: IndexAdvanceMode::kPost);
}
@@ -11243,22 +10840,21 @@ void CodeStubAssembler::GotoIfFixedArraySizeDoesntFitInNewSpace(
doesnt_fit);
}
-void CodeStubAssembler::InitializeFieldsWithRoot(Node* object,
- Node* start_offset,
- Node* end_offset,
+void CodeStubAssembler::InitializeFieldsWithRoot(TNode<HeapObject> object,
+ TNode<IntPtrT> start_offset,
+ TNode<IntPtrT> end_offset,
RootIndex root_index) {
CSA_SLOW_ASSERT(this, TaggedIsNotSmi(object));
start_offset = IntPtrAdd(start_offset, IntPtrConstant(-kHeapObjectTag));
end_offset = IntPtrAdd(end_offset, IntPtrConstant(-kHeapObjectTag));
TNode<Object> root_value = LoadRoot(root_index);
- BuildFastLoop(
+ BuildFastLoop<IntPtrT>(
end_offset, start_offset,
- [this, object, root_value](Node* current) {
+ [=](TNode<IntPtrT> current) {
StoreNoWriteBarrier(MachineRepresentation::kTagged, object, current,
root_value);
},
- -kTaggedSize, INTPTR_PARAMETERS,
- CodeStubAssembler::IndexAdvanceMode::kPre);
+ -kTaggedSize, CodeStubAssembler::IndexAdvanceMode::kPre);
}
void CodeStubAssembler::BranchIfNumberRelationalComparison(
@@ -11384,11 +10980,9 @@ Operation Reverse(Operation op) {
}
} // anonymous namespace
-Node* CodeStubAssembler::RelationalComparison(Operation op,
- SloppyTNode<Object> left,
- SloppyTNode<Object> right,
- SloppyTNode<Context> context,
- Variable* var_type_feedback) {
+TNode<Oddball> CodeStubAssembler::RelationalComparison(
+ Operation op, TNode<Object> left, TNode<Object> right,
+ TNode<Context> context, TVariable<Smi>* var_type_feedback) {
Label return_true(this), return_false(this), do_float_comparison(this),
end(this);
TVARIABLE(Oddball, var_result); // Actually only "true" or "false".
@@ -11403,7 +10997,7 @@ Node* CodeStubAssembler::RelationalComparison(Operation op,
if (var_type_feedback != nullptr) {
// Initialize the type feedback to None. The current feedback is combined
// with the previous feedback.
- var_type_feedback->Bind(SmiConstant(CompareOperationFeedback::kNone));
+ *var_type_feedback = SmiConstant(CompareOperationFeedback::kNone);
loop_variable_list.push_back(var_type_feedback);
}
Label loop(this, loop_variable_list);
@@ -11914,17 +11508,17 @@ void CodeStubAssembler::GenerateEqual_Same(SloppyTNode<Object> value,
}
// ES6 section 7.2.12 Abstract Equality Comparison
-Node* CodeStubAssembler::Equal(SloppyTNode<Object> left,
- SloppyTNode<Object> right,
- SloppyTNode<Context> context,
- Variable* var_type_feedback) {
+TNode<Oddball> CodeStubAssembler::Equal(SloppyTNode<Object> left,
+ SloppyTNode<Object> right,
+ SloppyTNode<Context> context,
+ TVariable<Smi>* var_type_feedback) {
// This is a slightly optimized version of Object::Equals. Whenever you
// change something functionality-wise in here, remember to update the
// Object::Equals method as well.
Label if_equal(this), if_notequal(this), do_float_comparison(this),
do_right_stringtonumber(this, Label::kDeferred), end(this);
- VARIABLE(result, MachineRepresentation::kTagged);
+ TVARIABLE(Oddball, result);
TVARIABLE(Float64T, var_left_float);
TVARIABLE(Float64T, var_right_float);
@@ -11984,7 +11578,7 @@ Node* CodeStubAssembler::Equal(SloppyTNode<Object> left,
GotoIf(IsHeapNumberMap(right_map), &if_right_heapnumber);
// {left} is Smi and {right} is not HeapNumber or Smi.
if (var_type_feedback != nullptr) {
- var_type_feedback->Bind(SmiConstant(CompareOperationFeedback::kAny));
+ *var_type_feedback = SmiConstant(CompareOperationFeedback::kAny);
}
GotoIf(IsBooleanMap(right_map), &if_right_boolean);
TNode<Uint16T> right_type = LoadMapInstanceType(right_map);
@@ -12009,8 +11603,8 @@ Node* CodeStubAssembler::Equal(SloppyTNode<Object> left,
BIND(&if_right_bigint);
{
- result.Bind(CallRuntime(Runtime::kBigIntEqualToNumber,
- NoContextConstant(), right, left));
+ result = CAST(CallRuntime(Runtime::kBigIntEqualToNumber,
+ NoContextConstant(), right, left));
Goto(&end);
}
@@ -12046,7 +11640,8 @@ Node* CodeStubAssembler::Equal(SloppyTNode<Object> left,
BIND(&if_left_string);
{
GotoIfNot(IsStringInstanceType(right_type), &use_symmetry);
- result.Bind(CallBuiltin(Builtins::kStringEqual, context, left, right));
+ result =
+ CAST(CallBuiltin(Builtins::kStringEqual, context, left, right));
CombineFeedback(var_type_feedback,
SmiOr(CollectFeedbackForString(left_type),
CollectFeedbackForString(right_type)));
@@ -12067,8 +11662,7 @@ Node* CodeStubAssembler::Equal(SloppyTNode<Object> left,
{
Label if_right_boolean(this);
if (var_type_feedback != nullptr) {
- var_type_feedback->Bind(
- SmiConstant(CompareOperationFeedback::kAny));
+ *var_type_feedback = SmiConstant(CompareOperationFeedback::kAny);
}
GotoIf(IsStringInstanceType(right_type), &do_right_stringtonumber);
GotoIf(IsBooleanMap(right_map), &if_right_boolean);
@@ -12098,38 +11692,35 @@ Node* CodeStubAssembler::Equal(SloppyTNode<Object> left,
BIND(&if_right_heapnumber);
{
if (var_type_feedback != nullptr) {
- var_type_feedback->Bind(
- SmiConstant(CompareOperationFeedback::kAny));
+ *var_type_feedback = SmiConstant(CompareOperationFeedback::kAny);
}
- result.Bind(CallRuntime(Runtime::kBigIntEqualToNumber,
- NoContextConstant(), left, right));
+ result = CAST(CallRuntime(Runtime::kBigIntEqualToNumber,
+ NoContextConstant(), left, right));
Goto(&end);
}
BIND(&if_right_bigint);
{
CombineFeedback(var_type_feedback, CompareOperationFeedback::kBigInt);
- result.Bind(CallRuntime(Runtime::kBigIntEqualToBigInt,
- NoContextConstant(), left, right));
+ result = CAST(CallRuntime(Runtime::kBigIntEqualToBigInt,
+ NoContextConstant(), left, right));
Goto(&end);
}
BIND(&if_right_string);
{
if (var_type_feedback != nullptr) {
- var_type_feedback->Bind(
- SmiConstant(CompareOperationFeedback::kAny));
+ *var_type_feedback = SmiConstant(CompareOperationFeedback::kAny);
}
- result.Bind(CallRuntime(Runtime::kBigIntEqualToString,
- NoContextConstant(), left, right));
+ result = CAST(CallRuntime(Runtime::kBigIntEqualToString,
+ NoContextConstant(), left, right));
Goto(&end);
}
BIND(&if_right_boolean);
{
if (var_type_feedback != nullptr) {
- var_type_feedback->Bind(
- SmiConstant(CompareOperationFeedback::kAny));
+ *var_type_feedback = SmiConstant(CompareOperationFeedback::kAny);
}
var_right = LoadObjectField(CAST(right), Oddball::kToNumberOffset);
Goto(&loop);
@@ -12154,8 +11745,8 @@ Node* CodeStubAssembler::Equal(SloppyTNode<Object> left,
if (var_type_feedback != nullptr) {
// If {right} is undetectable, it must also be either
// Null or Undefined, or a Receiver (aka document.all).
- var_type_feedback->Bind(SmiConstant(
- CompareOperationFeedback::kReceiverOrNullOrUndefined));
+ *var_type_feedback = SmiConstant(
+ CompareOperationFeedback::kReceiverOrNullOrUndefined);
}
Goto(&if_equal);
}
@@ -12164,12 +11755,11 @@ Node* CodeStubAssembler::Equal(SloppyTNode<Object> left,
{
if (var_type_feedback != nullptr) {
// Track whether {right} is Null, Undefined or Receiver.
- var_type_feedback->Bind(SmiConstant(
- CompareOperationFeedback::kReceiverOrNullOrUndefined));
+ *var_type_feedback = SmiConstant(
+ CompareOperationFeedback::kReceiverOrNullOrUndefined);
GotoIf(IsJSReceiverInstanceType(right_type), &if_notequal);
GotoIfNot(IsBooleanMap(right_map), &if_notequal);
- var_type_feedback->Bind(
- SmiConstant(CompareOperationFeedback::kAny));
+ *var_type_feedback = SmiConstant(CompareOperationFeedback::kAny);
}
Goto(&if_notequal);
}
@@ -12178,8 +11768,7 @@ Node* CodeStubAssembler::Equal(SloppyTNode<Object> left,
BIND(&if_left_boolean);
{
if (var_type_feedback != nullptr) {
- var_type_feedback->Bind(
- SmiConstant(CompareOperationFeedback::kAny));
+ *var_type_feedback = SmiConstant(CompareOperationFeedback::kAny);
}
// If {right} is a Boolean too, it must be a different Boolean.
@@ -12200,7 +11789,7 @@ Node* CodeStubAssembler::Equal(SloppyTNode<Object> left,
if (var_type_feedback != nullptr) {
Label if_right_symbol(this);
GotoIf(IsSymbolInstanceType(right_type), &if_right_symbol);
- var_type_feedback->Bind(SmiConstant(CompareOperationFeedback::kAny));
+ *var_type_feedback = SmiConstant(CompareOperationFeedback::kAny);
Goto(&if_notequal);
BIND(&if_right_symbol);
@@ -12218,8 +11807,7 @@ Node* CodeStubAssembler::Equal(SloppyTNode<Object> left,
// {left} is a Primitive and {right} is a JSReceiver, so swapping
// the order is not observable.
if (var_type_feedback != nullptr) {
- var_type_feedback->Bind(
- SmiConstant(CompareOperationFeedback::kAny));
+ *var_type_feedback = SmiConstant(CompareOperationFeedback::kAny);
}
Goto(&use_symmetry);
}
@@ -12254,8 +11842,8 @@ Node* CodeStubAssembler::Equal(SloppyTNode<Object> left,
// When we get here, {right} must be either Null or Undefined.
CSA_ASSERT(this, IsNullOrUndefined(right));
if (var_type_feedback != nullptr) {
- var_type_feedback->Bind(SmiConstant(
- CompareOperationFeedback::kReceiverOrNullOrUndefined));
+ *var_type_feedback = SmiConstant(
+ CompareOperationFeedback::kReceiverOrNullOrUndefined);
}
Branch(IsUndetectableMap(left_map), &if_equal, &if_notequal);
}
@@ -12265,8 +11853,7 @@ Node* CodeStubAssembler::Equal(SloppyTNode<Object> left,
// {right} is a Primitive, and neither Null nor Undefined;
// convert {left} to Primitive too.
if (var_type_feedback != nullptr) {
- var_type_feedback->Bind(
- SmiConstant(CompareOperationFeedback::kAny));
+ *var_type_feedback = SmiConstant(CompareOperationFeedback::kAny);
}
Callable callable = CodeFactory::NonPrimitiveToPrimitive(isolate());
var_left = CallStub(callable, context, left);
@@ -12298,13 +11885,13 @@ Node* CodeStubAssembler::Equal(SloppyTNode<Object> left,
BIND(&if_equal);
{
- result.Bind(TrueConstant());
+ result = TrueConstant();
Goto(&end);
}
BIND(&if_notequal);
{
- result.Bind(FalseConstant());
+ result = FalseConstant();
Goto(&end);
}
@@ -12312,9 +11899,9 @@ Node* CodeStubAssembler::Equal(SloppyTNode<Object> left,
return result.value();
}
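// Editor's note (sketch, not part of the patch): the mechanical change
// repeated throughout this hunk is the move from untyped CSA variables to
// typed ones:
//
//   VARIABLE(result, MachineRepresentation::kTagged);   // before
//   result.Bind(TrueConstant());
//
//   TVARIABLE(Oddball, result);                         // after
//   result = TrueConstant();
//
// Assignment through operator= is statically checked against the
// TVariable's type, which is what lets CAST/UncheckedCast disappear at
// most use sites.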
-TNode<Oddball> CodeStubAssembler::StrictEqual(SloppyTNode<Object> lhs,
- SloppyTNode<Object> rhs,
- Variable* var_type_feedback) {
+TNode<Oddball> CodeStubAssembler::StrictEqual(
+ SloppyTNode<Object> lhs, SloppyTNode<Object> rhs,
+ TVariable<Smi>* var_type_feedback) {
// Pseudo-code for the algorithm below:
//
// if (lhs == rhs) {
@@ -12482,7 +12069,7 @@ TNode<Oddball> CodeStubAssembler::StrictEqual(SloppyTNode<Object> lhs,
CollectFeedbackForString(lhs_instance_type);
TNode<Smi> rhs_feedback =
CollectFeedbackForString(rhs_instance_type);
- var_type_feedback->Bind(SmiOr(lhs_feedback, rhs_feedback));
+ *var_type_feedback = SmiOr(lhs_feedback, rhs_feedback);
}
result = CAST(CallBuiltin(Builtins::kStringEqual,
NoContextConstant(), lhs, rhs));
@@ -12556,7 +12143,7 @@ TNode<Oddball> CodeStubAssembler::StrictEqual(SloppyTNode<Object> lhs,
BIND(&if_lhsisoddball);
{
- STATIC_ASSERT(LAST_PRIMITIVE_TYPE == ODDBALL_TYPE);
+ STATIC_ASSERT(LAST_PRIMITIVE_HEAP_OBJECT_TYPE == ODDBALL_TYPE);
GotoIf(IsBooleanMap(rhs_map), &if_not_equivalent_types);
GotoIf(Int32LessThan(rhs_instance_type,
Int32Constant(ODDBALL_TYPE)),
@@ -12855,8 +12442,8 @@ TNode<Oddball> CodeStubAssembler::HasProperty(SloppyTNode<Context> context,
return result.value();
}
-Node* CodeStubAssembler::Typeof(Node* value) {
- VARIABLE(result_var, MachineRepresentation::kTagged);
+TNode<String> CodeStubAssembler::Typeof(SloppyTNode<Object> value) {
+ TVARIABLE(String, result_var);
Label return_number(this, Label::kDeferred), if_oddball(this),
return_function(this), return_undefined(this), return_object(this),
@@ -12864,7 +12451,8 @@ Node* CodeStubAssembler::Typeof(Node* value) {
GotoIf(TaggedIsSmi(value), &return_number);
- TNode<Map> map = LoadMap(value);
+ TNode<HeapObject> value_heap_object = CAST(value);
+ TNode<Map> map = LoadMap(value_heap_object);
GotoIf(IsHeapNumberMap(map), &return_number);
@@ -12890,49 +12478,50 @@ Node* CodeStubAssembler::Typeof(Node* value) {
GotoIf(IsBigIntInstanceType(instance_type), &return_bigint);
CSA_ASSERT(this, InstanceTypeEqual(instance_type, SYMBOL_TYPE));
- result_var.Bind(HeapConstant(isolate()->factory()->symbol_string()));
+ result_var = HeapConstant(isolate()->factory()->symbol_string());
Goto(&return_result);
BIND(&return_number);
{
- result_var.Bind(HeapConstant(isolate()->factory()->number_string()));
+ result_var = HeapConstant(isolate()->factory()->number_string());
Goto(&return_result);
}
BIND(&if_oddball);
{
- TNode<Object> type = LoadObjectField(value, Oddball::kTypeOfOffset);
- result_var.Bind(type);
+ TNode<String> type =
+ CAST(LoadObjectField(value_heap_object, Oddball::kTypeOfOffset));
+ result_var = type;
Goto(&return_result);
}
BIND(&return_function);
{
- result_var.Bind(HeapConstant(isolate()->factory()->function_string()));
+ result_var = HeapConstant(isolate()->factory()->function_string());
Goto(&return_result);
}
BIND(&return_undefined);
{
- result_var.Bind(HeapConstant(isolate()->factory()->undefined_string()));
+ result_var = HeapConstant(isolate()->factory()->undefined_string());
Goto(&return_result);
}
BIND(&return_object);
{
- result_var.Bind(HeapConstant(isolate()->factory()->object_string()));
+ result_var = HeapConstant(isolate()->factory()->object_string());
Goto(&return_result);
}
BIND(&return_string);
{
- result_var.Bind(HeapConstant(isolate()->factory()->string_string()));
+ result_var = HeapConstant(isolate()->factory()->string_string());
Goto(&return_result);
}
BIND(&return_bigint);
{
- result_var.Bind(HeapConstant(isolate()->factory()->bigint_string()));
+ result_var = HeapConstant(isolate()->factory()->bigint_string());
Goto(&return_result);
}
@@ -12941,7 +12530,7 @@ Node* CodeStubAssembler::Typeof(Node* value) {
}
TNode<Object> CodeStubAssembler::GetSuperConstructor(
- SloppyTNode<Context> context, SloppyTNode<JSFunction> active_function) {
+ TNode<Context> context, TNode<JSFunction> active_function) {
Label is_not_constructor(this, Label::kDeferred), out(this);
TVARIABLE(Object, result);
@@ -13004,9 +12593,10 @@ TNode<JSReceiver> CodeStubAssembler::SpeciesConstructor(
return var_result.value();
}
-Node* CodeStubAssembler::InstanceOf(Node* object, Node* callable,
- Node* context) {
- VARIABLE(var_result, MachineRepresentation::kTagged);
+TNode<Oddball> CodeStubAssembler::InstanceOf(TNode<Object> object,
+ TNode<Object> callable,
+ TNode<Context> context) {
+ TVARIABLE(Oddball, var_result);
Label if_notcallable(this, Label::kDeferred),
if_notreceiver(this, Label::kDeferred), if_otherhandler(this),
if_nohandler(this, Label::kDeferred), return_true(this),
@@ -13014,7 +12604,7 @@ Node* CodeStubAssembler::InstanceOf(Node* object, Node* callable,
// Ensure that the {callable} is actually a JSReceiver.
GotoIf(TaggedIsSmi(callable), &if_notreceiver);
- GotoIfNot(IsJSReceiver(callable), &if_notreceiver);
+ GotoIfNot(IsJSReceiver(CAST(callable)), &if_notreceiver);
// Load the @@hasInstance property from {callable}.
TNode<Object> inst_of_handler =
@@ -13032,8 +12622,8 @@ Node* CodeStubAssembler::InstanceOf(Node* object, Node* callable,
// Call to Function.prototype[@@hasInstance] directly.
Callable builtin(BUILTIN_CODE(isolate(), FunctionPrototypeHasInstance),
CallTrampolineDescriptor{});
- Node* result = CallJS(builtin, context, inst_of_handler, callable, object);
- var_result.Bind(result);
+ var_result =
+ CAST(CallJS(builtin, context, inst_of_handler, callable, object));
Goto(&return_result);
}
@@ -13055,12 +12645,11 @@ Node* CodeStubAssembler::InstanceOf(Node* object, Node* callable,
BIND(&if_nohandler);
{
// Ensure that the {callable} is actually Callable.
- GotoIfNot(IsCallable(callable), &if_notcallable);
+ GotoIfNot(IsCallable(CAST(callable)), &if_notcallable);
// Use the OrdinaryHasInstance algorithm.
- TNode<Object> result =
- CallBuiltin(Builtins::kOrdinaryHasInstance, context, callable, object);
- var_result.Bind(result);
+ var_result = CAST(
+ CallBuiltin(Builtins::kOrdinaryHasInstance, context, callable, object));
Goto(&return_result);
}
@@ -13071,11 +12660,11 @@ Node* CodeStubAssembler::InstanceOf(Node* object, Node* callable,
{ ThrowTypeError(context, MessageTemplate::kNonObjectInInstanceOfCheck); }
BIND(&return_true);
- var_result.Bind(TrueConstant());
+ var_result = TrueConstant();
Goto(&return_result);
BIND(&return_false);
- var_result.Bind(FalseConstant());
+ var_result = FalseConstant();
Goto(&return_result);
BIND(&return_result);
@@ -13294,9 +12883,8 @@ TNode<JSObject> CodeStubAssembler::AllocateJSIteratorResult(
return CAST(result);
}
-Node* CodeStubAssembler::AllocateJSIteratorResultForEntry(Node* context,
- Node* key,
- Node* value) {
+TNode<JSObject> CodeStubAssembler::AllocateJSIteratorResultForEntry(
+ TNode<Context> context, TNode<Object> key, SloppyTNode<Object> value) {
TNode<NativeContext> native_context = LoadNativeContext(context);
TNode<Smi> length = SmiConstant(2);
int const elements_size = FixedArray::SizeFor(2);
@@ -13326,7 +12914,7 @@ Node* CodeStubAssembler::AllocateJSIteratorResultForEntry(Node* context,
StoreObjectFieldNoWriteBarrier(result, JSIteratorResult::kValueOffset, array);
StoreObjectFieldRoot(result, JSIteratorResult::kDoneOffset,
RootIndex::kFalseValue);
- return result;
+ return CAST(result);
}
TNode<JSReceiver> CodeStubAssembler::ArraySpeciesCreate(TNode<Context> context,
@@ -13393,21 +12981,19 @@ TNode<UintPtrT> CodeStubAssembler::LoadJSTypedArrayLength(
return LoadObjectField<UintPtrT>(typed_array, JSTypedArray::kLengthOffset);
}
-CodeStubArguments::CodeStubArguments(
- CodeStubAssembler* assembler, Node* argc, Node* fp,
- CodeStubAssembler::ParameterMode param_mode, ReceiverMode receiver_mode)
+CodeStubArguments::CodeStubArguments(CodeStubAssembler* assembler,
+ TNode<IntPtrT> argc, TNode<RawPtrT> fp,
+ ReceiverMode receiver_mode)
: assembler_(assembler),
- argc_mode_(param_mode),
receiver_mode_(receiver_mode),
argc_(argc),
base_(),
fp_(fp != nullptr ? fp : assembler_->LoadFramePointer()) {
TNode<IntPtrT> offset = assembler_->ElementOffsetFromIndex(
- argc_, SYSTEM_POINTER_ELEMENTS, param_mode,
+ argc_, SYSTEM_POINTER_ELEMENTS,
(StandardFrameConstants::kFixedSlotCountAboveFp - 1) *
kSystemPointerSize);
- base_ =
- assembler_->UncheckedCast<RawPtrT>(assembler_->IntPtrAdd(fp_, offset));
+ base_ = assembler_->RawPtrAdd(fp_, offset);
}
TNode<Object> CodeStubArguments::GetReceiver() const {
@@ -13422,24 +13008,18 @@ void CodeStubArguments::SetReceiver(TNode<Object> object) const {
base_, assembler_->IntPtrConstant(kSystemPointerSize), object);
}
-TNode<WordT> CodeStubArguments::AtIndexPtr(
- Node* index, CodeStubAssembler::ParameterMode mode) const {
- using Node = compiler::Node;
- Node* negated_index = assembler_->IntPtrOrSmiSub(
- assembler_->IntPtrOrSmiConstant(0, mode), index, mode);
+TNode<RawPtrT> CodeStubArguments::AtIndexPtr(TNode<IntPtrT> index) const {
+ TNode<IntPtrT> negated_index =
+ assembler_->IntPtrOrSmiSub(assembler_->IntPtrConstant(0), index);
TNode<IntPtrT> offset = assembler_->ElementOffsetFromIndex(
- negated_index, SYSTEM_POINTER_ELEMENTS, mode, 0);
- return assembler_->IntPtrAdd(assembler_->UncheckedCast<IntPtrT>(base_),
- offset);
+ negated_index, SYSTEM_POINTER_ELEMENTS, 0);
+ return assembler_->RawPtrAdd(base_, offset);
}
-TNode<Object> CodeStubArguments::AtIndex(
- Node* index, CodeStubAssembler::ParameterMode mode) const {
- DCHECK_EQ(argc_mode_, mode);
- CSA_ASSERT(assembler_,
- assembler_->UintPtrOrSmiLessThan(index, GetLength(mode), mode));
+TNode<Object> CodeStubArguments::AtIndex(TNode<IntPtrT> index) const {
+ CSA_ASSERT(assembler_, assembler_->UintPtrOrSmiLessThan(index, GetLength()));
return assembler_->UncheckedCast<Object>(
- assembler_->LoadFullTagged(AtIndexPtr(index, mode)));
+ assembler_->LoadFullTagged(AtIndexPtr(index)));
}
TNode<Object> CodeStubArguments::AtIndex(int index) const {
@@ -13452,9 +13032,8 @@ TNode<Object> CodeStubArguments::GetOptionalArgumentValue(
CodeStubAssembler::Label argument_missing(assembler_),
argument_done(assembler_, &result);
- assembler_->GotoIf(assembler_->UintPtrOrSmiGreaterThanOrEqual(
- assembler_->IntPtrOrSmiConstant(index, argc_mode_),
- argc_, argc_mode_),
+ assembler_->GotoIf(assembler_->UintPtrGreaterThanOrEqual(
+ assembler_->IntPtrConstant(index), argc_),
&argument_missing);
result = AtIndex(index);
assembler_->Goto(&argument_done);
@@ -13473,10 +13052,8 @@ TNode<Object> CodeStubArguments::GetOptionalArgumentValue(
CodeStubAssembler::Label argument_missing(assembler_),
argument_done(assembler_, &result);
- assembler_->GotoIf(
- assembler_->UintPtrOrSmiGreaterThanOrEqual(
- assembler_->IntPtrToParameter(index, argc_mode_), argc_, argc_mode_),
- &argument_missing);
+ assembler_->GotoIf(assembler_->UintPtrGreaterThanOrEqual(index, argc_),
+ &argument_missing);
result = AtIndex(index);
assembler_->Goto(&argument_done);
@@ -13490,43 +13067,38 @@ TNode<Object> CodeStubArguments::GetOptionalArgumentValue(
void CodeStubArguments::ForEach(
const CodeStubAssembler::VariableList& vars,
- const CodeStubArguments::ForEachBodyFunction& body, Node* first, Node* last,
- CodeStubAssembler::ParameterMode mode) {
+ const CodeStubArguments::ForEachBodyFunction& body, TNode<IntPtrT> first,
+ TNode<IntPtrT> last) const {
assembler_->Comment("CodeStubArguments::ForEach");
if (first == nullptr) {
- first = assembler_->IntPtrOrSmiConstant(0, mode);
+ first = assembler_->IntPtrConstant(0);
}
if (last == nullptr) {
- DCHECK_EQ(mode, argc_mode_);
last = argc_;
}
- TNode<IntPtrT> start = assembler_->IntPtrSub(
- assembler_->UncheckedCast<IntPtrT>(base_),
- assembler_->ElementOffsetFromIndex(first, SYSTEM_POINTER_ELEMENTS, mode));
- TNode<IntPtrT> end = assembler_->IntPtrSub(
- assembler_->UncheckedCast<IntPtrT>(base_),
- assembler_->ElementOffsetFromIndex(last, SYSTEM_POINTER_ELEMENTS, mode));
- assembler_->BuildFastLoop(
+ TNode<RawPtrT> start = assembler_->RawPtrSub(
+ base_,
+ assembler_->ElementOffsetFromIndex(first, SYSTEM_POINTER_ELEMENTS));
+ TNode<RawPtrT> end = assembler_->RawPtrSub(
+ base_, assembler_->ElementOffsetFromIndex(last, SYSTEM_POINTER_ELEMENTS));
+ assembler_->BuildFastLoop<RawPtrT>(
vars, start, end,
- [this, &body](Node* current) {
- Node* arg = assembler_->Load(MachineType::AnyTagged(), current);
+ [&](TNode<RawPtrT> current) {
+ TNode<Object> arg = assembler_->Load<Object>(current);
body(arg);
},
- -kSystemPointerSize, CodeStubAssembler::INTPTR_PARAMETERS,
- CodeStubAssembler::IndexAdvanceMode::kPost);
+ -kSystemPointerSize, CodeStubAssembler::IndexAdvanceMode::kPost);
}
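// Editor's sketch (not part of this patch): a standalone C++ model of the
// argument walk above. Arguments sit below base_, so the loop steps by a
// negative pointer-sized stride from base - first*ptr down to base - last*ptr.
#include <cstdint>
#include <functional>

void ForEachArgModel(std::uintptr_t base, int first, int last,
                     const std::function<void(std::uintptr_t)>& body) {
  std::uintptr_t start = base - first * sizeof(void*);
  std::uintptr_t end = base - last * sizeof(void*);
  for (std::uintptr_t cur = start; cur != end; cur -= sizeof(void*)) {
    body(cur);  // cur addresses one tagged argument slot
  }
}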
void CodeStubArguments::PopAndReturn(Node* value) {
- Node* pop_count;
+ TNode<IntPtrT> pop_count;
if (receiver_mode_ == ReceiverMode::kHasReceiver) {
- pop_count = assembler_->IntPtrOrSmiAdd(
- argc_, assembler_->IntPtrOrSmiConstant(1, argc_mode_), argc_mode_);
+ pop_count = assembler_->IntPtrAdd(argc_, assembler_->IntPtrConstant(1));
} else {
pop_count = argc_;
}
- assembler_->PopAndReturn(assembler_->ParameterToIntPtr(pop_count, argc_mode_),
- value);
+ assembler_->PopAndReturn(pop_count, value);
}
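// Editor's sketch: the pop count rule above as a standalone one-liner.
// With a receiver on the stack, one slot beyond argc is popped.
int PopCountModel(int argc, bool has_receiver) {
  return has_receiver ? argc + 1 : argc;
}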
TNode<BoolT> CodeStubAssembler::IsFastElementsKind(
@@ -13642,21 +13214,15 @@ Node* CodeStubAssembler::
}
TNode<Code> CodeStubAssembler::LoadBuiltin(TNode<Smi> builtin_id) {
- CSA_ASSERT(this, SmiGreaterThanOrEqual(builtin_id, SmiConstant(0)));
- CSA_ASSERT(this,
- SmiLessThan(builtin_id, SmiConstant(Builtins::builtin_count)));
+ CSA_ASSERT(this, SmiBelow(builtin_id, SmiConstant(Builtins::builtin_count)));
- int const kSmiShiftBits = kSmiShiftSize + kSmiTagSize;
- int index_shift = kSystemPointerSizeLog2 - kSmiShiftBits;
- TNode<WordT> table_index =
- index_shift >= 0
- ? WordShl(BitcastTaggedSignedToWord(builtin_id), index_shift)
- : WordSar(BitcastTaggedSignedToWord(builtin_id), -index_shift);
-
- return CAST(
- Load(MachineType::TaggedPointer(),
+ TNode<IntPtrT> offset =
+ ElementOffsetFromIndex(SmiToBInt(builtin_id), SYSTEM_POINTER_ELEMENTS);
+
+ return CAST(BitcastWordToTagged(
+ Load(MachineType::Pointer(),
ExternalConstant(ExternalReference::builtins_address(isolate())),
- table_index));
+ offset)));
}
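// Editor's sketch (illustrative, hypothetical table): the rewritten
// LoadBuiltin reduces to a pointer-sized table load; bounds are guarded by
// a single unsigned comparison, as the SmiBelow assert above does.
#include <cstddef>

const void* LoadBuiltinModel(const void* const* builtins_table, int id,
                             int builtin_count) {
  if (static_cast<unsigned>(id) >= static_cast<unsigned>(builtin_count))
    return nullptr;  // models the CSA_ASSERT(SmiBelow(...)) bound
  return builtins_table[static_cast<std::size_t>(id)];
}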
TNode<Code> CodeStubAssembler::GetSharedFunctionInfoCode(
@@ -13765,11 +13331,9 @@ TNode<Code> CodeStubAssembler::GetSharedFunctionInfoCode(
return sfi_code.value();
}
-Node* CodeStubAssembler::AllocateFunctionWithMapAndContext(Node* map,
- Node* shared_info,
- Node* context) {
- CSA_SLOW_ASSERT(this, IsMap(map));
-
+TNode<JSFunction> CodeStubAssembler::AllocateFunctionWithMapAndContext(
+ TNode<Map> map, TNode<SharedFunctionInfo> shared_info,
+ TNode<Context> context) {
TNode<Code> const code = GetSharedFunctionInfoCode(shared_info);
// TODO(ishell): All the callers of this function pass map loaded from
@@ -13790,7 +13354,7 @@ Node* CodeStubAssembler::AllocateFunctionWithMapAndContext(Node* map,
shared_info);
StoreObjectFieldNoWriteBarrier(fun, JSFunction::kContextOffset, context);
StoreObjectFieldNoWriteBarrier(fun, JSFunction::kCodeOffset, code);
- return fun;
+ return CAST(fun);
}
void CodeStubAssembler::CheckPrototypeEnumCache(Node* receiver,
@@ -13839,8 +13403,9 @@ void CodeStubAssembler::CheckPrototypeEnumCache(Node* receiver,
}
}
-Node* CodeStubAssembler::CheckEnumCache(Node* receiver, Label* if_empty,
- Label* if_runtime) {
+TNode<Map> CodeStubAssembler::CheckEnumCache(TNode<HeapObject> receiver,
+ Label* if_empty,
+ Label* if_runtime) {
Label if_fast(this), if_cache(this), if_no_cache(this, Label::kDeferred);
TNode<Map> receiver_map = LoadMap(receiver);
@@ -13855,7 +13420,7 @@ Node* CodeStubAssembler::CheckEnumCache(Node* receiver, Label* if_empty,
{
// Avoid runtime-call for empty dictionary receivers.
GotoIfNot(IsDictionaryMap(receiver_map), if_runtime);
- TNode<NameDictionary> properties = CAST(LoadSlowProperties(receiver));
+ TNode<NameDictionary> properties = CAST(LoadSlowProperties(CAST(receiver)));
TNode<Smi> length = GetNumberOfElements(properties);
GotoIfNot(TaggedEqual(length, SmiConstant(0)), if_runtime);
// Check that there are no elements on the {receiver} and its prototype
@@ -13881,8 +13446,7 @@ TNode<Object> CodeStubAssembler::GetArgumentValue(TorqueStructArguments args,
TorqueStructArguments CodeStubAssembler::GetFrameArguments(
TNode<RawPtrT> frame, TNode<IntPtrT> argc) {
- return CodeStubArguments(this, argc, frame, INTPTR_PARAMETERS)
- .GetTorqueArguments();
+ return CodeStubArguments(this, argc, frame).GetTorqueArguments();
}
void CodeStubAssembler::Print(const char* s) {
@@ -13976,9 +13540,8 @@ TNode<JSArray> CodeStubAssembler::ArrayCreate(TNode<Context> context,
// TODO(delphick): Consider using
// AllocateUninitializedJSArrayWithElements to avoid initializing an
// array and then writing over it.
- array =
- AllocateJSArray(PACKED_SMI_ELEMENTS, array_map, length, SmiConstant(0),
- nullptr, ParameterMode::SMI_PARAMETERS);
+ array = AllocateJSArray(PACKED_SMI_ELEMENTS, array_map, length,
+ SmiConstant(0), {}, ParameterMode::SMI_PARAMETERS);
Goto(&done);
BIND(&done);
diff --git a/deps/v8/src/codegen/code-stub-assembler.h b/deps/v8/src/codegen/code-stub-assembler.h
index 9884d04e66..eee3e7a376 100644
--- a/deps/v8/src/codegen/code-stub-assembler.h
+++ b/deps/v8/src/codegen/code-stub-assembler.h
@@ -97,6 +97,7 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
V(iterator_symbol, iterator_symbol, IteratorSymbol) \
V(length_string, length_string, LengthString) \
V(ManyClosuresCellMap, many_closures_cell_map, ManyClosuresCellMap) \
+ V(match_symbol, match_symbol, MatchSymbol) \
V(megamorphic_symbol, megamorphic_symbol, MegamorphicSymbol) \
V(MetaMap, meta_map, MetaMap) \
V(MinusZeroValue, minus_zero_value, MinusZero) \
@@ -114,7 +115,6 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
V(object_to_string, object_to_string, ObjectToString) \
V(OneClosureCellMap, one_closure_cell_map, OneClosureCellMap) \
V(OnePointerFillerMap, one_pointer_filler_map, OnePointerFillerMap) \
- V(premonomorphic_symbol, premonomorphic_symbol, PremonomorphicSymbol) \
V(PreparseDataMap, preparse_data_map, PreparseDataMap) \
V(PromiseCapabilityMap, promise_capability_map, PromiseCapabilityMap) \
V(PromiseFulfillReactionJobTaskMap, promise_fulfill_reaction_job_task_map, \
@@ -157,11 +157,11 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
HEAP_IMMUTABLE_IMMOVABLE_OBJECT_LIST(V)
#ifdef DEBUG
-#define CSA_CHECK(csa, x) \
- (csa)->Check( \
- [&]() -> compiler::Node* { \
- return implicit_cast<compiler::SloppyTNode<Word32T>>(x); \
- }, \
+#define CSA_CHECK(csa, x) \
+ (csa)->Check( \
+ [&]() -> compiler::Node* { \
+ return implicit_cast<SloppyTNode<Word32T>>(x); \
+ }, \
#x, __FILE__, __LINE__)
#else
#define CSA_CHECK(csa, x) (csa)->FastCheck(x)
@@ -255,10 +255,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
public TorqueGeneratedExportedMacrosAssembler {
public:
using Node = compiler::Node;
- template <class T>
- using TNode = compiler::TNode<T>;
- template <class T>
- using SloppyTNode = compiler::SloppyTNode<T>;
template <typename T>
using LazyNode = std::function<TNode<T>()>;
@@ -303,11 +299,17 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
return ParameterRepresentation(OptimalParameterMode());
}
+ TNode<IntPtrT> ParameterToIntPtr(TNode<Smi> value) { return SmiUntag(value); }
+ TNode<IntPtrT> ParameterToIntPtr(TNode<IntPtrT> value) { return value; }
+ // TODO(v8:9708): remove once all uses are ported.
TNode<IntPtrT> ParameterToIntPtr(Node* value, ParameterMode mode) {
if (mode == SMI_PARAMETERS) value = SmiUntag(value);
return UncheckedCast<IntPtrT>(value);
}
+ template <typename TIndex>
+ TNode<TIndex> IntPtrToParameter(TNode<IntPtrT> value);
+
Node* IntPtrToParameter(SloppyTNode<IntPtrT> value, ParameterMode mode) {
if (mode == SMI_PARAMETERS) return SmiTag(value);
return value;
@@ -364,6 +366,16 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
#error Unknown architecture.
#endif
+ // Pointer compression specific. Returns true if the upper 32 bits of a Smi
+  // contain the sign of the lower 32 bits (i.e. not corrupted) so that the
+  // Smi
+ // can be directly used as an index in element offset computation.
+ TNode<BoolT> IsValidSmiIndex(TNode<Smi> smi);
+
+ // Pointer compression specific. Ensures that the upper 32 bits of a Smi
+  // contain the sign of the lower 32 bits so that the Smi can be directly
+  // used
+ // as an index in element offset computation.
+ TNode<Smi> NormalizeSmiIndex(TNode<Smi> smi_index);
+
TNode<Smi> TaggedToSmi(TNode<Object> value, Label* fail) {
GotoIf(TaggedIsNotSmi(value), fail);
return UncheckedCast<Smi>(value);
@@ -443,18 +455,52 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
Node* MatchesParameterMode(Node* value, ParameterMode mode);
-#define PARAMETER_BINOP(OpName, IntPtrOpName, SmiOpName) \
- Node* OpName(Node* a, Node* b, ParameterMode mode) { \
- if (mode == SMI_PARAMETERS) { \
- return SmiOpName(CAST(a), CAST(b)); \
- } else { \
- DCHECK_EQ(INTPTR_PARAMETERS, mode); \
- return IntPtrOpName(a, b); \
- } \
- }
+#define PARAMETER_BINOP(OpName, IntPtrOpName, SmiOpName) \
+ /* TODO(v8:9708): remove once all uses are ported. */ \
+ Node* OpName(Node* a, Node* b, ParameterMode mode) { \
+ if (mode == SMI_PARAMETERS) { \
+ return SmiOpName(CAST(a), CAST(b)); \
+ } else { \
+ DCHECK_EQ(INTPTR_PARAMETERS, mode); \
+ return IntPtrOpName(UncheckedCast<IntPtrT>(a), \
+ UncheckedCast<IntPtrT>(b)); \
+ } \
+ } \
+ TNode<Smi> OpName(TNode<Smi> a, TNode<Smi> b) { return SmiOpName(a, b); } \
+ TNode<IntPtrT> OpName(TNode<IntPtrT> a, TNode<IntPtrT> b) { \
+ return IntPtrOpName(a, b); \
+ } \
+ TNode<RawPtrT> OpName(TNode<RawPtrT> a, TNode<RawPtrT> b) { \
+ return ReinterpretCast<RawPtrT>(IntPtrOpName( \
+ ReinterpretCast<IntPtrT>(a), ReinterpretCast<IntPtrT>(b))); \
+ }
+ // TODO(v8:9708): Define BInt operations once all uses are ported.
PARAMETER_BINOP(IntPtrOrSmiMin, IntPtrMin, SmiMin)
PARAMETER_BINOP(IntPtrOrSmiAdd, IntPtrAdd, SmiAdd)
PARAMETER_BINOP(IntPtrOrSmiSub, IntPtrSub, SmiSub)
+#undef PARAMETER_BINOP
+
+#define PARAMETER_BINOP(OpName, IntPtrOpName, SmiOpName) \
+ /* TODO(v8:9708): remove once all uses are ported. */ \
+ TNode<BoolT> OpName(Node* a, Node* b, ParameterMode mode) { \
+ if (mode == SMI_PARAMETERS) { \
+ return SmiOpName(CAST(a), CAST(b)); \
+ } else { \
+ DCHECK_EQ(INTPTR_PARAMETERS, mode); \
+ return IntPtrOpName(UncheckedCast<IntPtrT>(a), \
+ UncheckedCast<IntPtrT>(b)); \
+ } \
+ } \
+ TNode<BoolT> OpName(TNode<Smi> a, TNode<Smi> b) { return SmiOpName(a, b); } \
+ TNode<BoolT> OpName(TNode<IntPtrT> a, TNode<IntPtrT> b) { \
+ return IntPtrOpName(a, b); \
+ } \
+ TNode<BoolT> OpName(TNode<RawPtrT> a, TNode<RawPtrT> b) { \
+ return IntPtrOpName(a, b); \
+ }
+ // TODO(v8:9708): Define BInt operations once all uses are ported.
+ PARAMETER_BINOP(IntPtrOrSmiEqual, WordEqual, SmiEqual)
+ PARAMETER_BINOP(IntPtrOrSmiNotEqual, WordNotEqual, SmiNotEqual)
PARAMETER_BINOP(IntPtrOrSmiLessThan, IntPtrLessThan, SmiLessThan)
PARAMETER_BINOP(IntPtrOrSmiLessThanOrEqual, IntPtrLessThanOrEqual,
SmiLessThanOrEqual)
@@ -473,31 +519,30 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
intptr_t ConstexprWordNot(intptr_t a) { return ~a; }
uintptr_t ConstexprWordNot(uintptr_t a) { return ~a; }
- TNode<BoolT> TaggedEqual(TNode<UnionT<Object, MaybeObject>> a,
- TNode<UnionT<Object, MaybeObject>> b) {
- // In pointer-compressed architectures, the instruction selector will narrow
- // this comparison to a 32-bit one.
+ TNode<BoolT> TaggedEqual(TNode<AnyTaggedT> a, TNode<AnyTaggedT> b) {
+#ifdef V8_COMPRESS_POINTERS
+ return Word32Equal(ChangeTaggedToCompressed(a),
+ ChangeTaggedToCompressed(b));
+#else
return WordEqual(ReinterpretCast<WordT>(a), ReinterpretCast<WordT>(b));
+#endif
}
- TNode<BoolT> TaggedNotEqual(TNode<UnionT<Object, MaybeObject>> a,
- TNode<UnionT<Object, MaybeObject>> b) {
- // In pointer-compressed architectures, the instruction selector will narrow
- // this comparison to a 32-bit one.
- return WordNotEqual(ReinterpretCast<WordT>(a), ReinterpretCast<WordT>(b));
+ TNode<BoolT> TaggedNotEqual(TNode<AnyTaggedT> a, TNode<AnyTaggedT> b) {
+ return Word32BinaryNot(TaggedEqual(a, b));
}
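// Editor's sketch: why a 32-bit comparison suffices under pointer
// compression (assumes the compressed form is exactly the low 32 bits of
// the tagged word, as ChangeTaggedToCompressed above implies).
#include <cstdint>

bool TaggedEqualModel(std::uint64_t a, std::uint64_t b) {
  return static_cast<std::uint32_t>(a) == static_cast<std::uint32_t>(b);
}

bool TaggedNotEqualModel(std::uint64_t a, std::uint64_t b) {
  return !TaggedEqualModel(a, b);  // mirrors Word32BinaryNot(TaggedEqual(...))
}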
TNode<Object> NoContextConstant();
#define HEAP_CONSTANT_ACCESSOR(rootIndexName, rootAccessorName, name) \
- compiler::TNode<std::remove_pointer<std::remove_reference<decltype( \
+ TNode<std::remove_pointer<std::remove_reference<decltype( \
std::declval<ReadOnlyRoots>().rootAccessorName())>::type>::type> \
name##Constant();
HEAP_IMMUTABLE_IMMOVABLE_OBJECT_LIST(HEAP_CONSTANT_ACCESSOR)
#undef HEAP_CONSTANT_ACCESSOR
#define HEAP_CONSTANT_ACCESSOR(rootIndexName, rootAccessorName, name) \
- compiler::TNode<std::remove_pointer<std::remove_reference<decltype( \
+ TNode<std::remove_pointer<std::remove_reference<decltype( \
std::declval<Heap>().rootAccessorName())>::type>::type> \
name##Constant();
HEAP_MUTABLE_IMMOVABLE_OBJECT_LIST(HEAP_CONSTANT_ACCESSOR)
@@ -511,11 +556,16 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<BInt> BIntConstant(int value);
+ template <typename TIndex>
+ TNode<TIndex> IntPtrOrSmiConstant(int value);
+ // TODO(v8:9708): remove once all uses are ported.
Node* IntPtrOrSmiConstant(int value, ParameterMode mode);
- TNode<BoolT> IntPtrOrSmiEqual(Node* left, Node* right, ParameterMode mode);
- TNode<BoolT> IntPtrOrSmiNotEqual(Node* left, Node* right, ParameterMode mode);
+ bool IsIntPtrOrSmiConstantZero(TNode<Smi> test);
+ bool IsIntPtrOrSmiConstantZero(TNode<IntPtrT> test);
+ // TODO(v8:9708): remove once all uses are ported.
bool IsIntPtrOrSmiConstantZero(Node* test, ParameterMode mode);
+
bool TryGetIntPtrOrSmiConstantValue(Node* maybe_constant, int* value,
ParameterMode mode);
@@ -557,25 +607,27 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Float64T> SmiToFloat64(SloppyTNode<Smi> value);
TNode<Smi> SmiFromIntPtr(SloppyTNode<IntPtrT> value) { return SmiTag(value); }
TNode<Smi> SmiFromInt32(SloppyTNode<Int32T> value);
+ TNode<Smi> SmiFromUint32(TNode<Uint32T> value);
TNode<IntPtrT> SmiToIntPtr(SloppyTNode<Smi> value) { return SmiUntag(value); }
TNode<Int32T> SmiToInt32(SloppyTNode<Smi> value);
// Smi operations.
-#define SMI_ARITHMETIC_BINOP(SmiOpName, IntPtrOpName, Int32OpName) \
- TNode<Smi> SmiOpName(TNode<Smi> a, TNode<Smi> b) { \
- if (SmiValuesAre32Bits()) { \
- return BitcastWordToTaggedSigned(IntPtrOpName( \
- BitcastTaggedSignedToWord(a), BitcastTaggedSignedToWord(b))); \
- } else { \
- DCHECK(SmiValuesAre31Bits()); \
- if (kSystemPointerSize == kInt64Size) { \
- CSA_ASSERT(this, IsValidSmi(a)); \
- CSA_ASSERT(this, IsValidSmi(b)); \
- } \
- return BitcastWordToTaggedSigned(ChangeInt32ToIntPtr( \
- Int32OpName(TruncateIntPtrToInt32(BitcastTaggedSignedToWord(a)), \
- TruncateIntPtrToInt32(BitcastTaggedSignedToWord(b))))); \
- } \
+#define SMI_ARITHMETIC_BINOP(SmiOpName, IntPtrOpName, Int32OpName) \
+ TNode<Smi> SmiOpName(TNode<Smi> a, TNode<Smi> b) { \
+ if (SmiValuesAre32Bits()) { \
+ return BitcastWordToTaggedSigned( \
+ IntPtrOpName(BitcastTaggedToWordForTagAndSmiBits(a), \
+ BitcastTaggedToWordForTagAndSmiBits(b))); \
+ } else { \
+ DCHECK(SmiValuesAre31Bits()); \
+ if (kSystemPointerSize == kInt64Size) { \
+ CSA_ASSERT(this, IsValidSmi(a)); \
+ CSA_ASSERT(this, IsValidSmi(b)); \
+ } \
+ return BitcastWordToTaggedSigned(ChangeInt32ToIntPtr(Int32OpName( \
+ TruncateIntPtrToInt32(BitcastTaggedToWordForTagAndSmiBits(a)), \
+ TruncateIntPtrToInt32(BitcastTaggedToWordForTagAndSmiBits(b))))); \
+ } \
}
SMI_ARITHMETIC_BINOP(SmiAdd, IntPtrAdd, Int32Add)
SMI_ARITHMETIC_BINOP(SmiSub, IntPtrSub, Int32Sub)
@@ -595,38 +647,40 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Smi> SmiShl(TNode<Smi> a, int shift) {
return BitcastWordToTaggedSigned(
- WordShl(BitcastTaggedSignedToWord(a), shift));
+ WordShl(BitcastTaggedToWordForTagAndSmiBits(a), shift));
}
TNode<Smi> SmiShr(TNode<Smi> a, int shift) {
if (kTaggedSize == kInt64Size) {
return BitcastWordToTaggedSigned(
- WordAnd(WordShr(BitcastTaggedSignedToWord(a), shift),
- BitcastTaggedSignedToWord(SmiConstant(-1))));
+ WordAnd(WordShr(BitcastTaggedToWordForTagAndSmiBits(a), shift),
+ BitcastTaggedToWordForTagAndSmiBits(SmiConstant(-1))));
} else {
// For pointer-compressed Smis, we want to make sure that we truncate to
// int32 before shifting, to prevent the values of the top 32 bits from
// leaking into the sign bit of the smi.
return BitcastWordToTaggedSigned(WordAnd(
ChangeInt32ToIntPtr(Word32Shr(
- TruncateWordToInt32(BitcastTaggedSignedToWord(a)), shift)),
- BitcastTaggedSignedToWord(SmiConstant(-1))));
+ TruncateWordToInt32(BitcastTaggedToWordForTagAndSmiBits(a)),
+ shift)),
+ BitcastTaggedToWordForTagAndSmiBits(SmiConstant(-1))));
}
}
TNode<Smi> SmiSar(TNode<Smi> a, int shift) {
if (kTaggedSize == kInt64Size) {
return BitcastWordToTaggedSigned(
- WordAnd(WordSar(BitcastTaggedSignedToWord(a), shift),
- BitcastTaggedSignedToWord(SmiConstant(-1))));
+ WordAnd(WordSar(BitcastTaggedToWordForTagAndSmiBits(a), shift),
+ BitcastTaggedToWordForTagAndSmiBits(SmiConstant(-1))));
} else {
// For pointer-compressed Smis, we want to make sure that we truncate to
// int32 before shifting, to prevent the values of the top 32 bits from
// changing the sign bit of the smi.
return BitcastWordToTaggedSigned(WordAnd(
ChangeInt32ToIntPtr(Word32Sar(
- TruncateWordToInt32(BitcastTaggedSignedToWord(a)), shift)),
- BitcastTaggedSignedToWord(SmiConstant(-1))));
+ TruncateWordToInt32(BitcastTaggedToWordForTagAndSmiBits(a)),
+ shift)),
+ BitcastTaggedToWordForTagAndSmiBits(SmiConstant(-1))));
}
}
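// Editor's sketch of the truncate-before-shift rule the comments above
// describe (assumes a 31-bit Smi payload with tag bit 0; illustrative,
// not the V8 implementation).
#include <cstdint>

std::int64_t SmiShrModel(std::int64_t word, int shift) {
  // Truncate first so stale upper-32 bits cannot shift into the payload.
  std::uint32_t low = static_cast<std::uint32_t>(word);
  std::uint32_t shifted = low >> shift;
  // Clear the tag bit again, as the WordAnd with SmiConstant(-1) does.
  return static_cast<std::int64_t>(
      static_cast<std::int32_t>(shifted & ~std::uint32_t{1}));
}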
@@ -648,21 +702,22 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
}
}
-#define SMI_COMPARISON_OP(SmiOpName, IntPtrOpName, Int32OpName) \
- TNode<BoolT> SmiOpName(TNode<Smi> a, TNode<Smi> b) { \
- if (kTaggedSize == kInt64Size) { \
- return IntPtrOpName(BitcastTaggedSignedToWord(a), \
- BitcastTaggedSignedToWord(b)); \
- } else { \
- DCHECK_EQ(kTaggedSize, kInt32Size); \
- DCHECK(SmiValuesAre31Bits()); \
- if (kSystemPointerSize == kInt64Size) { \
- CSA_ASSERT(this, IsValidSmi(a)); \
- CSA_ASSERT(this, IsValidSmi(b)); \
- } \
- return Int32OpName(TruncateIntPtrToInt32(BitcastTaggedSignedToWord(a)), \
- TruncateIntPtrToInt32(BitcastTaggedSignedToWord(b))); \
- } \
+#define SMI_COMPARISON_OP(SmiOpName, IntPtrOpName, Int32OpName) \
+ TNode<BoolT> SmiOpName(TNode<Smi> a, TNode<Smi> b) { \
+ if (kTaggedSize == kInt64Size) { \
+ return IntPtrOpName(BitcastTaggedToWordForTagAndSmiBits(a), \
+ BitcastTaggedToWordForTagAndSmiBits(b)); \
+ } else { \
+ DCHECK_EQ(kTaggedSize, kInt32Size); \
+ DCHECK(SmiValuesAre31Bits()); \
+ if (kSystemPointerSize == kInt64Size) { \
+ CSA_ASSERT(this, IsValidSmi(a)); \
+ CSA_ASSERT(this, IsValidSmi(b)); \
+ } \
+ return Int32OpName( \
+ TruncateIntPtrToInt32(BitcastTaggedToWordForTagAndSmiBits(a)), \
+ TruncateIntPtrToInt32(BitcastTaggedToWordForTagAndSmiBits(b))); \
+ } \
}
SMI_COMPARISON_OP(SmiEqual, WordEqual, Word32Equal)
SMI_COMPARISON_OP(SmiNotEqual, WordNotEqual, Word32NotEqual)
@@ -856,9 +911,14 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Int32T> TruncateIntPtrToInt32(SloppyTNode<IntPtrT> value);
// Check a value for smi-ness
- TNode<BoolT> TaggedIsSmi(SloppyTNode<Object> a);
TNode<BoolT> TaggedIsSmi(TNode<MaybeObject> a);
- TNode<BoolT> TaggedIsNotSmi(SloppyTNode<Object> a);
+ TNode<BoolT> TaggedIsSmi(SloppyTNode<Object> a) {
+ return TaggedIsSmi(UncheckedCast<MaybeObject>(a));
+ }
+ TNode<BoolT> TaggedIsNotSmi(TNode<MaybeObject> a);
+ TNode<BoolT> TaggedIsNotSmi(SloppyTNode<Object> a) {
+ return TaggedIsNotSmi(UncheckedCast<MaybeObject>(a));
+ }
// Check that the value is a non-negative smi.
TNode<BoolT> TaggedIsPositiveSmi(SloppyTNode<Object> a);
// Check that a word has a word-aligned address.
@@ -918,9 +978,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// Works only with the V8_ENABLE_FORCE_SLOW_PATH compile-time flag. No-op
// otherwise.
void GotoIfForceSlowPath(Label* if_true);
- // Branches to {if_true} when Debug::ExecutionMode is DebugInfo::kSideEffect.
- void GotoIfDebugExecutionModeChecksSideEffects(Label* if_true);
-
// Load value from current parent frame by given offset in bytes.
Node* LoadFromParentFrame(int offset,
MachineType type = MachineType::AnyTagged());
@@ -1060,9 +1117,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Word32T> IsStringWrapperElementsKind(TNode<Map> map);
void GotoIfMapHasSlowProperties(TNode<Map> map, Label* if_slow);
- // Load the properties backing store of a JSObject.
- TNode<HeapObject> LoadSlowProperties(SloppyTNode<JSObject> object);
- TNode<HeapObject> LoadFastProperties(SloppyTNode<JSObject> object);
+ // Load the properties backing store of a JSReceiver.
+ TNode<HeapObject> LoadSlowProperties(SloppyTNode<JSReceiver> object);
+ TNode<HeapObject> LoadFastProperties(SloppyTNode<JSReceiver> object);
// Load the elements backing store of a JSObject.
TNode<FixedArrayBase> LoadElements(SloppyTNode<JSObject> object) {
return LoadJSObjectElements(object);
@@ -1148,10 +1205,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<IntPtrT> LoadStringLengthAsWord(SloppyTNode<String> string);
// Load length field of a String object as uint32_t value.
TNode<Uint32T> LoadStringLengthAsWord32(SloppyTNode<String> string);
- // Loads a pointer to the sequential String char array.
- Node* PointerToSeqStringData(Node* seq_string);
// Load value field of a JSPrimitiveWrapper object.
- Node* LoadJSPrimitiveWrapperValue(Node* object);
+ TNode<Object> LoadJSPrimitiveWrapperValue(TNode<JSPrimitiveWrapper> object);
// Figures out whether the value of maybe_object is:
// - a SMI (jump to "if_smi", "extracted" will be the SMI value)
@@ -1175,7 +1230,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<BoolT> IsWeakOrCleared(TNode<MaybeObject> value);
TNode<BoolT> IsCleared(TNode<MaybeObject> value);
- TNode<BoolT> IsNotCleared(TNode<MaybeObject> value);
+ TNode<BoolT> IsNotCleared(TNode<MaybeObject> value) {
+ return Word32BinaryNot(IsCleared(value));
+ }
// Removes the weak bit + asserts it was set.
TNode<HeapObject> GetHeapObjectAssumeWeak(TNode<MaybeObject> value);
@@ -1183,12 +1240,15 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<HeapObject> GetHeapObjectAssumeWeak(TNode<MaybeObject> value,
Label* if_cleared);
- TNode<BoolT> IsWeakReferenceTo(TNode<MaybeObject> object,
- TNode<Object> value);
- TNode<BoolT> IsNotWeakReferenceTo(TNode<MaybeObject> object,
- TNode<Object> value);
- TNode<BoolT> IsStrongReferenceTo(TNode<MaybeObject> object,
- TNode<Object> value);
+ // Checks if |maybe_object| is a weak reference to the given |heap_object|.
+ // Works with any tagged |maybe_object| value.
+ TNode<BoolT> IsWeakReferenceTo(TNode<MaybeObject> maybe_object,
+ TNode<HeapObject> heap_object);
+ // Returns true if |object| is a HeapObject and |maybe_object| is a weak
+ // reference to |object|.
+ // |maybe_object| must not be a Smi.
+ TNode<BoolT> IsWeakReferenceToObject(TNode<MaybeObject> maybe_object,
+ TNode<Object> object);
TNode<MaybeObject> MakeWeak(TNode<HeapObject> value);
@@ -1341,9 +1401,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Int32T> elements_kind, Label* if_accessor, Label* if_hole);
// Load a feedback slot from a FeedbackVector.
+ template <typename TIndex>
TNode<MaybeObject> LoadFeedbackVectorSlot(
- Node* object, Node* index, int additional_offset = 0,
- ParameterMode parameter_mode = INTPTR_PARAMETERS);
+ TNode<FeedbackVector> feedback_vector, TNode<TIndex> slot,
+ int additional_offset = 0);
TNode<IntPtrT> LoadFeedbackVectorLength(TNode<FeedbackVector>);
TNode<Float64T> LoadDoubleWithHoleCheck(TNode<FixedDoubleArray> array,
@@ -1383,13 +1444,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<BigInt> BigIntFromInt32Pair(TNode<IntPtrT> low, TNode<IntPtrT> high);
TNode<BigInt> BigIntFromUint32Pair(TNode<UintPtrT> low, TNode<UintPtrT> high);
- void StoreJSTypedArrayElementFromTagged(TNode<Context> context,
- TNode<JSTypedArray> typed_array,
- TNode<Smi> index_node,
- TNode<Object> value,
- ElementsKind elements_kind);
-
// Context manipulation
+ TNode<BoolT> LoadContextHasExtensionField(SloppyTNode<Context> context);
TNode<Object> LoadContextElement(SloppyTNode<Context> context,
int slot_index);
TNode<Object> LoadContextElement(SloppyTNode<Context> context,
@@ -1608,10 +1664,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
}
void StoreFeedbackVectorSlot(
- Node* object, Node* index, Node* value,
+ TNode<FeedbackVector> feedback_vector, TNode<UintPtrT> slot,
+ TNode<AnyTaggedT> value,
WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER,
- int additional_offset = 0,
- ParameterMode parameter_mode = INTPTR_PARAMETERS);
+ int additional_offset = 0);
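// Editor's note (sketch): with the typed signature above, a store that
// previously threaded a ParameterMode now passes an untagged slot index and
// relies on the defaulted arguments, e.g.
//
//   before: StoreFeedbackVectorSlot(vector, slot, site,
//                                   UPDATE_WRITE_BARRIER, 0, SMI_PARAMETERS);
//   after:  StoreFeedbackVectorSlot(vector, slot, site);
//
// (Compare the CreateAllocationSiteInFeedbackVector hunk in the .cc file
// above.)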
void EnsureArrayLengthWritable(TNode<Map> map, Label* bailout);
@@ -1633,8 +1689,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
void BuildAppendJSArray(ElementsKind kind, Node* array, Node* value,
Label* bailout);
- void StoreFieldsNoWriteBarrier(Node* start_address, Node* end_address,
- Node* value);
+ void StoreFieldsNoWriteBarrier(TNode<IntPtrT> start_address,
+ TNode<IntPtrT> end_address,
+ TNode<Object> value);
Node* AllocateCellWithValue(Node* value,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
@@ -1642,7 +1699,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
return AllocateCellWithValue(SmiConstant(value), SKIP_WRITE_BARRIER);
}
- Node* LoadCellValue(Node* cell);
+ TNode<Object> LoadCellValue(Node* cell);
void StoreCellValue(Node* cell, Node* value,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
@@ -1698,11 +1755,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<String> parent,
TNode<Smi> offset);
- // Allocate an appropriate one- or two-byte ConsString with the first and
- // second parts specified by |left| and |right|.
- TNode<String> AllocateConsString(TNode<Uint32T> length, TNode<String> left,
- TNode<String> right);
-
TNode<NameDictionary> AllocateNameDictionary(int at_least_space_for);
TNode<NameDictionary> AllocateNameDictionary(
TNode<IntPtrT> at_least_space_for, AllocationFlags = kNone);
@@ -1714,26 +1766,11 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
template <typename CollectionType>
Node* AllocateOrderedHashTable();
- // Builds code that finds OrderedHashTable entry for a key with hash code
- // {hash} with using the comparison code generated by {key_compare}. The code
- // jumps to {entry_found} if the key is found, or to {not_found} if the key
- // was not found. In the {entry_found} branch, the variable
- // entry_start_position will be bound to the index of the entry (relative to
- // OrderedHashTable::kHashTableStartIndex).
- //
- // The {CollectionType} template parameter stands for the particular instance
- // of OrderedHashTable, it should be OrderedHashMap or OrderedHashSet.
- template <typename CollectionType>
- void FindOrderedHashTableEntry(
- Node* table, Node* hash,
- const std::function<void(TNode<Object>, Label*, Label*)>& key_compare,
- Variable* entry_start_position, Label* entry_found, Label* not_found);
-
template <typename CollectionType>
TNode<CollectionType> AllocateSmallOrderedHashTable(TNode<IntPtrT> capacity);
Node* AllocateStruct(Node* map, AllocationFlags flags = kNone);
- void InitializeStructBody(Node* object, Node* map, Node* size,
+ void InitializeStructBody(TNode<HeapObject> object, TNode<IntPtrT> size,
int start_offset = Struct::kHeaderSize);
TNode<JSObject> AllocateJSObjectFromMap(
@@ -1742,14 +1779,17 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
SlackTrackingMode slack_tracking_mode = kNoSlackTracking);
void InitializeJSObjectFromMap(
- Node* object, Node* map, Node* instance_size, Node* properties = nullptr,
+ SloppyTNode<HeapObject> object, SloppyTNode<Map> map,
+ SloppyTNode<IntPtrT> instance_size, Node* properties = nullptr,
Node* elements = nullptr,
SlackTrackingMode slack_tracking_mode = kNoSlackTracking);
- void InitializeJSObjectBodyWithSlackTracking(Node* object, Node* map,
- Node* instance_size);
+ void InitializeJSObjectBodyWithSlackTracking(
+ SloppyTNode<HeapObject> object, SloppyTNode<Map> map,
+ SloppyTNode<IntPtrT> instance_size);
void InitializeJSObjectBodyNoSlackTracking(
- Node* object, Node* map, Node* instance_size,
+ SloppyTNode<HeapObject> object, SloppyTNode<Map> map,
+ SloppyTNode<IntPtrT> instance_size,
int start_offset = JSObject::kHeaderSize);
TNode<BoolT> IsValidFastJSArrayCapacity(Node* capacity,
@@ -1762,7 +1802,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
std::pair<TNode<JSArray>, TNode<FixedArrayBase>>
AllocateUninitializedJSArrayWithElements(
ElementsKind kind, TNode<Map> array_map, TNode<Smi> length,
- Node* allocation_site, Node* capacity,
+ TNode<AllocationSite> allocation_site, Node* capacity,
ParameterMode capacity_mode = INTPTR_PARAMETERS,
AllocationFlags allocation_flags = kNone,
int array_header_size = JSArray::kSize);
@@ -1771,20 +1811,20 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// The ParameterMode argument is only used for the capacity parameter.
TNode<JSArray> AllocateJSArray(
ElementsKind kind, TNode<Map> array_map, Node* capacity,
- TNode<Smi> length, Node* allocation_site = nullptr,
+ TNode<Smi> length, TNode<AllocationSite> allocation_site = {},
ParameterMode capacity_mode = INTPTR_PARAMETERS,
AllocationFlags allocation_flags = kNone);
TNode<JSArray> AllocateJSArray(ElementsKind kind, TNode<Map> array_map,
TNode<Smi> capacity, TNode<Smi> length) {
- return AllocateJSArray(kind, array_map, capacity, length, nullptr,
+ return AllocateJSArray(kind, array_map, capacity, length, {},
SMI_PARAMETERS);
}
TNode<JSArray> AllocateJSArray(ElementsKind kind, TNode<Map> array_map,
TNode<IntPtrT> capacity, TNode<Smi> length,
AllocationFlags allocation_flags = kNone) {
- return AllocateJSArray(kind, array_map, capacity, length, nullptr,
+ return AllocateJSArray(kind, array_map, capacity, length, {},
INTPTR_PARAMETERS, allocation_flags);
}
@@ -1792,7 +1832,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<JSArray> AllocateJSArray(TNode<Map> array_map,
TNode<FixedArrayBase> elements,
TNode<Smi> length,
- Node* allocation_site = nullptr,
+ TNode<AllocationSite> allocation_site = {},
int array_header_size = JSArray::kSize);
enum class HoleConversionMode { kDontConvert, kConvertToUndefined };
@@ -1806,15 +1846,17 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// If |convert_holes| is set to kDontConvert, holes are also copied to the
// resulting array, which will have the same elements kind as |array|. The
// function generates significantly less code in this case.
- Node* CloneFastJSArray(
- Node* context, Node* array, ParameterMode mode = INTPTR_PARAMETERS,
- Node* allocation_site = nullptr,
+ TNode<JSArray> CloneFastJSArray(
+ TNode<Context> context, TNode<JSArray> array,
+ ParameterMode mode = INTPTR_PARAMETERS,
+ TNode<AllocationSite> allocation_site = {},
HoleConversionMode convert_holes = HoleConversionMode::kDontConvert);
- Node* ExtractFastJSArray(Node* context, Node* array, Node* begin, Node* count,
+ Node* ExtractFastJSArray(TNode<Context> context, TNode<JSArray> array,
+ Node* begin, Node* count,
ParameterMode mode = INTPTR_PARAMETERS,
Node* capacity = nullptr,
- Node* allocation_site = nullptr);
+ TNode<AllocationSite> allocation_site = {});
TNode<FixedArrayBase> AllocateFixedArray(
ElementsKind kind, Node* capacity, ParameterMode mode = INTPTR_PARAMETERS,
@@ -1828,6 +1870,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
fixed_array_map);
}
+ TNode<NativeContext> GetCreationContext(TNode<JSReceiver> receiver,
+ Label* if_bailout);
+ TNode<Object> GetConstructor(TNode<Map> map);
+
TNode<Map> GetStructMap(InstanceType instance_type);
TNode<FixedArray> AllocateUninitializedFixedArray(intptr_t capacity) {
@@ -1879,10 +1925,15 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Object> object,
IterationKind mode);
+ // TODO(v8:9722): Return type should be JSIteratorResult
TNode<JSObject> AllocateJSIteratorResult(SloppyTNode<Context> context,
SloppyTNode<Object> value,
SloppyTNode<Oddball> done);
- Node* AllocateJSIteratorResultForEntry(Node* context, Node* key, Node* value);
+
+ // TODO(v8:9722): Return type should be JSIteratorResult
+ TNode<JSObject> AllocateJSIteratorResultForEntry(TNode<Context> context,
+ TNode<Object> key,
+ SloppyTNode<Object> value);
TNode<JSReceiver> ArraySpeciesCreate(TNode<Context> context,
TNode<Object> originalArray,
@@ -1904,6 +1955,25 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
enum class DestroySource { kNo, kYes };
+ // Collect feedback for the callable |maybe_target|, for either a CALL_IC
+ // or an INSTANCEOF_IC, in the |feedback_vector| at |slot_id|.
+ void CollectCallableFeedback(TNode<Object> maybe_target,
+ TNode<Context> context,
+ TNode<FeedbackVector> feedback_vector,
+ TNode<UintPtrT> slot_id);
+
+ // Collect CALL_IC feedback for the |maybe_target| function in the
+ // |feedback_vector| at |slot_id|, and update the call count in
+ // the |feedback_vector| at |slot_id+1|.
+ void CollectCallFeedback(TNode<Object> maybe_target, TNode<Context> context,
+ TNode<HeapObject> maybe_feedback_vector,
+ TNode<UintPtrT> slot_id);
+
+ // Increment the call count for a CALL_IC or construct call.
+ // The call count is located at feedback_vector[slot_id + 1].
+ void IncrementCallCount(TNode<FeedbackVector> feedback_vector,
+ TNode<UintPtrT> slot_id);
+
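
A rough sketch of how these pieces relate (hypothetical call site, not from
this patch): the target feedback lives at |slot_id| and the call count in the
following slot, so the two helpers always address adjacent entries.

  // |slot_id| comes from the bytecode operand; the vector may be absent,
  // which is why CollectCallFeedback accepts a TNode<HeapObject>.
  CollectCallFeedback(maybe_target, context, maybe_feedback_vector, slot_id);
  // With a known-valid vector, the count at slot_id + 1 can be bumped
  // directly:
  IncrementCallCount(feedback_vector, slot_id);
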
// Specify DestroySource::kYes if {from_array} is being supplanted by
// {to_array}. This offers a slight performance benefit by simply copying the
// array word by word. The source may be destroyed at the end of this macro.
@@ -2152,27 +2222,15 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// kAllFixedArrays, the generated code is more compact and efficient if the
// caller can specify whether only FixedArrays or FixedDoubleArrays will be
// passed as the |source| parameter.
- Node* CloneFixedArray(Node* source,
- ExtractFixedArrayFlags flags =
- ExtractFixedArrayFlag::kAllFixedArraysDontCopyCOW) {
+ TNode<FixedArrayBase> CloneFixedArray(
+ TNode<FixedArrayBase> source,
+ ExtractFixedArrayFlags flags =
+ ExtractFixedArrayFlag::kAllFixedArraysDontCopyCOW) {
ParameterMode mode = OptimalParameterMode();
return ExtractFixedArray(source, IntPtrOrSmiConstant(0, mode), nullptr,
nullptr, flags, mode);
}
- // Copies |character_count| elements from |from_string| to |to_string|
- // starting at the |from_index|'th character. |from_string| and |to_string|
- // can either be one-byte strings or two-byte strings, although if
- // |from_string| is two-byte, then |to_string| must be two-byte.
- // |from_index|, |to_index| and |character_count| must be intptr_ts s.t. 0 <=
- // |from_index| <= |from_index| + |character_count| <= from_string.length and
- // 0 <= |to_index| <= |to_index| + |character_count| <= to_string.length.
- void CopyStringCharacters(Node* from_string, Node* to_string,
- TNode<IntPtrT> from_index, TNode<IntPtrT> to_index,
- TNode<IntPtrT> character_count,
- String::Encoding from_encoding,
- String::Encoding to_encoding);
-
// Loads an element from |array| of |from_kind| elements by given |offset|
// (NOTE: not index!), does a hole check if |if_hole| is provided and
// converts the value so that it becomes ready for storing to array of
@@ -2194,21 +2252,26 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// Tries to grow the |elements| array of given |object| to store the |key|
// or bails out if the growing gap is too big. Returns new elements.
- Node* TryGrowElementsCapacity(Node* object, Node* elements, ElementsKind kind,
- Node* key, Label* bailout);
+ TNode<FixedArrayBase> TryGrowElementsCapacity(Node* object, Node* elements,
+ ElementsKind kind, Node* key,
+ Label* bailout);
// Tries to grow the |capacity|-length |elements| array of given |object|
// to store the |key| or bails out if the growing gap is too big. Returns
// new elements.
- Node* TryGrowElementsCapacity(Node* object, Node* elements, ElementsKind kind,
- Node* key, Node* capacity, ParameterMode mode,
- Label* bailout);
+ TNode<FixedArrayBase> TryGrowElementsCapacity(Node* object, Node* elements,
+ ElementsKind kind, Node* key,
+ Node* capacity,
+ ParameterMode mode,
+ Label* bailout);
// Grows elements capacity of given object. Returns new elements.
- Node* GrowElementsCapacity(Node* object, Node* elements,
- ElementsKind from_kind, ElementsKind to_kind,
- Node* capacity, Node* new_capacity,
- ParameterMode mode, Label* bailout);
+ TNode<FixedArrayBase> GrowElementsCapacity(Node* object, Node* elements,
+ ElementsKind from_kind,
+ ElementsKind to_kind,
+ Node* capacity, Node* new_capacity,
+ ParameterMode mode,
+ Label* bailout);
// Given a need to grow by |growth|, allocate an appropriate new capacity
// if necessary, and return a new elements FixedArray object. Label |bailout|
@@ -2223,25 +2286,30 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
Node* base_allocation_size,
Node* allocation_site);
- Node* TryTaggedToFloat64(Node* value, Label* if_valueisnotnumber);
- Node* TruncateTaggedToFloat64(Node* context, Node* value);
- Node* TruncateTaggedToWord32(Node* context, Node* value);
- void TaggedToWord32OrBigInt(Node* context, Node* value, Label* if_number,
- Variable* var_word32, Label* if_bigint,
- Variable* var_bigint);
- void TaggedToWord32OrBigIntWithFeedback(
- Node* context, Node* value, Label* if_number, Variable* var_word32,
- Label* if_bigint, Variable* var_bigint, Variable* var_feedback);
+ TNode<Float64T> TryTaggedToFloat64(TNode<Object> value,
+ Label* if_valueisnotnumber);
+ TNode<Float64T> TruncateTaggedToFloat64(SloppyTNode<Context> context,
+ SloppyTNode<Object> value);
+ TNode<Word32T> TruncateTaggedToWord32(SloppyTNode<Context> context,
+ SloppyTNode<Object> value);
+ void TaggedToWord32OrBigInt(TNode<Context> context, TNode<Object> value,
+ Label* if_number, TVariable<Word32T>* var_word32,
+ Label* if_bigint,
+ TVariable<Object>* var_maybe_bigint);
+ void TaggedToWord32OrBigIntWithFeedback(TNode<Context> context,
+ TNode<Object> value, Label* if_number,
+ TVariable<Word32T>* var_word32,
+ Label* if_bigint,
+ TVariable<Object>* var_maybe_bigint,
+ TVariable<Smi>* var_feedback);
// Truncate the floating point value of a HeapNumber to an Int32.
TNode<Int32T> TruncateHeapNumberValueToWord32(TNode<HeapNumber> object);
// Conversions.
- void TryHeapNumberToSmi(TNode<HeapNumber> number,
- TVariable<Smi>& output, // NOLINT(runtime/references)
+ void TryHeapNumberToSmi(TNode<HeapNumber> number, TVariable<Smi>* output,
Label* if_smi);
- void TryFloat64ToSmi(TNode<Float64T> number,
- TVariable<Smi>& output, // NOLINT(runtime/references)
+ void TryFloat64ToSmi(TNode<Float64T> number, TVariable<Smi>* output,
Label* if_smi);
TNode<Number> ChangeFloat64ToTagged(SloppyTNode<Float64T> value);
TNode<Number> ChangeInt32ToTagged(SloppyTNode<Int32T> value);
@@ -2377,7 +2445,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<BoolT> IsAllocationSiteInstanceType(SloppyTNode<Int32T> instance_type);
TNode<BoolT> IsJSFunctionMap(SloppyTNode<Map> map);
TNode<BoolT> IsJSFunction(SloppyTNode<HeapObject> object);
- TNode<BoolT> IsJSGeneratorObject(SloppyTNode<HeapObject> object);
+ TNode<BoolT> IsJSGeneratorObject(TNode<HeapObject> object);
TNode<BoolT> IsJSGlobalProxyInstanceType(SloppyTNode<Int32T> instance_type);
TNode<BoolT> IsJSGlobalProxyMap(SloppyTNode<Map> map);
TNode<BoolT> IsJSGlobalProxy(SloppyTNode<HeapObject> object);
@@ -2388,6 +2456,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<BoolT> IsJSPromise(SloppyTNode<HeapObject> object);
TNode<BoolT> IsJSProxy(SloppyTNode<HeapObject> object);
TNode<BoolT> IsJSStringIterator(SloppyTNode<HeapObject> object);
+ TNode<BoolT> IsJSRegExpStringIterator(SloppyTNode<HeapObject> object);
TNode<BoolT> IsJSReceiverInstanceType(SloppyTNode<Int32T> instance_type);
TNode<BoolT> IsJSReceiverMap(SloppyTNode<Map> map);
TNode<BoolT> IsJSReceiver(SloppyTNode<HeapObject> object);
@@ -2395,6 +2464,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<BoolT> IsJSTypedArrayInstanceType(SloppyTNode<Int32T> instance_type);
TNode<BoolT> IsJSTypedArrayMap(SloppyTNode<Map> map);
TNode<BoolT> IsJSTypedArray(SloppyTNode<HeapObject> object);
+ TNode<BoolT> IsJSGeneratorMap(TNode<Map> map);
TNode<BoolT> IsJSPrimitiveWrapperInstanceType(
SloppyTNode<Int32T> instance_type);
TNode<BoolT> IsJSPrimitiveWrapperMap(SloppyTNode<Map> map);
@@ -2537,47 +2607,14 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// Return the single-character string containing only {code}.
TNode<String> StringFromSingleCharCode(TNode<Int32T> code);
- // Return a new string object which holds a substring containing the range
- // [from,to[ of string.
- TNode<String> SubString(TNode<String> string, TNode<IntPtrT> from,
- TNode<IntPtrT> to);
-
- // Return a new string object produced by concatenating |first| with |second|.
- TNode<String> StringAdd(Node* context, TNode<String> first,
- TNode<String> second);
-
- // Check if |string| is an indirect (thin or flat cons) string type that can
- // be dereferenced by DerefIndirectString.
- void BranchIfCanDerefIndirectString(TNode<String> string,
- TNode<Int32T> instance_type,
- Label* can_deref, Label* cannot_deref);
- // Unpack an indirect (thin or flat cons) string type.
- void DerefIndirectString(TVariable<String>* var_string,
- TNode<Int32T> instance_type);
- // Check if |var_string| has an indirect (thin or flat cons) string type,
- // and unpack it if so.
- void MaybeDerefIndirectString(TVariable<String>* var_string,
- TNode<Int32T> instance_type, Label* did_deref,
- Label* cannot_deref);
- // Check if |var_left| or |var_right| has an indirect (thin or flat cons)
- // string type, and unpack it/them if so. Fall through if nothing was done.
- void MaybeDerefIndirectStrings(TVariable<String>* var_left,
- TNode<Int32T> left_instance_type,
- TVariable<String>* var_right,
- TNode<Int32T> right_instance_type,
- Label* did_something);
- TNode<String> DerefIndirectString(TNode<String> string,
- TNode<Int32T> instance_type,
- Label* cannot_deref);
-
- TNode<String> StringFromSingleUTF16EncodedCodePoint(TNode<Int32T> codepoint);
-
// Type conversion helpers.
enum class BigIntHandling { kConvertToNumber, kThrow };
// Convert a String to a Number.
TNode<Number> StringToNumber(TNode<String> input);
// Convert a Number to a String.
TNode<String> NumberToString(TNode<Number> input);
+ TNode<String> NumberToString(TNode<Number> input, Label* bailout);
+
// Convert a Non-Number object to a Number.
TNode<Number> NonNumberToNumber(
SloppyTNode<Context> context, SloppyTNode<HeapObject> input,
@@ -2715,6 +2752,16 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
return Word32Equal(Word32And(word32, const_mask), const_mask);
}
+ // Returns true if the bit field |BitField| in |word32| is equal to a given
+ // constant |value|. Avoids a shift compared to using DecodeWord32.
+ template <typename BitField>
+ TNode<BoolT> IsEqualInWord32(TNode<Word32T> word32,
+ typename BitField::FieldType value) {
+ TNode<Word32T> masked_word32 =
+ Word32And(word32, Int32Constant(BitField::kMask));
+ return Word32Equal(masked_word32, Int32Constant(BitField::encode(value)));
+ }
+
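
To make the saved shift concrete, take a hypothetical two-bit field at offset
3, e.g. a `BitField<int, 3, 2>`-style `Foo` with mask 0b11000:

  // Decode-then-compare costs shift + and + compare:
  //   DecodeWord32<Foo>(w) == v        i.e.  ((w >> 3) & 0b11) == v
  // IsEqualInWord32 costs and + compare; the shift folds into the constant:
  //   IsEqualInWord32<Foo>(w, v)       i.e.  (w & 0b11000) == (v << 3)
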
// Returns true if any of the |T|'s bits in given |word| are set.
template <typename T>
TNode<BoolT> IsSetWord(SloppyTNode<WordT> word) {
@@ -2730,9 +2777,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// Smi-encoding of the mask is performed implicitly!
TNode<BoolT> IsSetSmi(SloppyTNode<Smi> smi, int untagged_mask) {
intptr_t mask_word = bit_cast<intptr_t>(Smi::FromInt(untagged_mask));
- return WordNotEqual(
- WordAnd(BitcastTaggedSignedToWord(smi), IntPtrConstant(mask_word)),
- IntPtrConstant(0));
+ return WordNotEqual(WordAnd(BitcastTaggedToWordForTagAndSmiBits(smi),
+ IntPtrConstant(mask_word)),
+ IntPtrConstant(0));
}
// Returns true if all of the |T|'s bits in given |word32| are clear.
@@ -2762,11 +2809,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
void IncrementCounter(StatsCounter* counter, int delta);
void DecrementCounter(StatsCounter* counter, int delta);
- void Increment(Variable* variable, int value = 1,
- ParameterMode mode = INTPTR_PARAMETERS);
- void Decrement(Variable* variable, int value = 1,
- ParameterMode mode = INTPTR_PARAMETERS) {
- Increment(variable, -value, mode);
+ template <typename TIndex>
+ void Increment(TVariable<TIndex>* variable, int value = 1);
+
+ template <typename TIndex>
+ void Decrement(TVariable<TIndex>* variable, int value = 1) {
+ Increment(variable, -value);
}
// Generates "if (false) goto label" code. Useful for marking a label as
@@ -2780,8 +2828,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// Note: If |key| does not yet have a hash, |if_notinternalized| will be taken
// even if |key| is an array index. |if_keyisunique| will never
// be taken for array indices.
- void TryToName(Node* key, Label* if_keyisindex, Variable* var_index,
- Label* if_keyisunique, Variable* var_unique, Label* if_bailout,
+ void TryToName(SloppyTNode<Object> key, Label* if_keyisindex,
+ TVariable<IntPtrT>* var_index, Label* if_keyisunique,
+ TVariable<Name>* var_unique, Label* if_bailout,
Label* if_notinternalized = nullptr);
// Performs a hash computation and string table lookup for the given string,
@@ -2793,8 +2842,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// - |if_not_internalized| if the string is not in the string table (but
// does not add it).
// - |if_bailout| for unsupported cases (e.g. uncacheable array index).
- void TryInternalizeString(Node* string, Label* if_index, Variable* var_index,
- Label* if_internalized, Variable* var_internalized,
+ void TryInternalizeString(SloppyTNode<String> string, Label* if_index,
+ TVariable<IntPtrT>* var_index,
+ Label* if_internalized,
+ TVariable<Name>* var_internalized,
Label* if_not_internalized, Label* if_bailout);
// Calculates array index for given dictionary entry and entry field.
@@ -2938,10 +2989,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Object> BasicLoadNumberDictionaryElement(
TNode<NumberDictionary> dictionary, TNode<IntPtrT> intptr_index,
Label* not_data, Label* if_hole);
- void BasicStoreNumberDictionaryElement(TNode<NumberDictionary> dictionary,
- TNode<IntPtrT> intptr_index,
- TNode<Object> value, Label* not_data,
- Label* if_hole, Label* read_only);
template <class Dictionary>
void FindInsertionEntry(TNode<Dictionary> dictionary, TNode<Name> key,
@@ -3053,7 +3100,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
//
// Note: this code does not check if the global dictionary points to a
// deleted entry! This has to be done by the caller.
- void TryLookupProperty(SloppyTNode<JSObject> object, SloppyTNode<Map> map,
+ void TryLookupProperty(SloppyTNode<JSReceiver> object, SloppyTNode<Map> map,
SloppyTNode<Int32T> instance_type,
SloppyTNode<Name> unique_name, Label* if_found_fast,
Label* if_found_dict, Label* if_found_global,
@@ -3113,10 +3160,13 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// Returns true if {object} has {prototype} somewhere in its prototype
// chain; otherwise false is returned. Might cause arbitrary side effects
// due to [[GetPrototypeOf]] invocations.
- Node* HasInPrototypeChain(Node* context, Node* object,
- SloppyTNode<Object> prototype);
+ TNode<Oddball> HasInPrototypeChain(TNode<Context> context,
+ TNode<HeapObject> object,
+ TNode<Object> prototype);
// ES6 section 7.3.19 OrdinaryHasInstance (C, O)
- Node* OrdinaryHasInstance(Node* context, Node* callable, Node* object);
+ TNode<Oddball> OrdinaryHasInstance(TNode<Context> context,
+ TNode<Object> callable,
+ TNode<Object> object);
// Load type feedback vector from the stub caller's frame.
TNode<FeedbackVector> LoadFeedbackVectorForStub();
@@ -3137,12 +3187,14 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
SloppyTNode<JSFunction> closure);
// Update the type feedback vector.
- void UpdateFeedback(Node* feedback, Node* feedback_vector, Node* slot_id);
+ void UpdateFeedback(TNode<Smi> feedback,
+ TNode<HeapObject> maybe_feedback_vector,
+ TNode<UintPtrT> slot_id);
// Report that there was a feedback update, performing any tasks that should
// be done after a feedback update.
- void ReportFeedbackUpdate(SloppyTNode<FeedbackVector> feedback_vector,
- SloppyTNode<IntPtrT> slot_id, const char* reason);
+ void ReportFeedbackUpdate(TNode<FeedbackVector> feedback_vector,
+ SloppyTNode<UintPtrT> slot_id, const char* reason);
// Combine the new feedback with the existing_feedback. Do nothing if
// existing_feedback is nullptr.
@@ -3185,8 +3237,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Context> LoadScriptContext(TNode<Context> context,
TNode<IntPtrT> context_index);
- Node* Int32ToUint8Clamped(Node* int32_value);
- Node* Float64ToUint8Clamped(Node* float64_value);
+ TNode<Uint8T> Int32ToUint8Clamped(TNode<Int32T> int32_value);
+ TNode<Uint8T> Float64ToUint8Clamped(TNode<Float64T> float64_value);
Node* PrepareValueForWriteToTypedArray(TNode<Object> input,
ElementsKind elements_kind,
@@ -3229,13 +3281,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// Store a weak in-place reference into the FeedbackVector.
TNode<MaybeObject> StoreWeakReferenceInFeedbackVector(
- SloppyTNode<FeedbackVector> feedback_vector, Node* slot,
- SloppyTNode<HeapObject> value, int additional_offset = 0,
- ParameterMode parameter_mode = INTPTR_PARAMETERS);
+ TNode<FeedbackVector> feedback_vector, TNode<UintPtrT> slot,
+ TNode<HeapObject> value, int additional_offset = 0);
// Create a new AllocationSite and install it into a feedback vector.
TNode<AllocationSite> CreateAllocationSiteInFeedbackVector(
- SloppyTNode<FeedbackVector> feedback_vector, TNode<Smi> slot);
+ TNode<FeedbackVector> feedback_vector, TNode<UintPtrT> slot);
// TODO(ishell, cbruni): Change to HasBoilerplate.
TNode<BoolT> NotHasBoilerplate(TNode<Object> maybe_literal_site);
@@ -3245,19 +3296,22 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
enum class IndexAdvanceMode { kPre, kPost };
- using FastLoopBody = std::function<void(Node* index)>;
+ template <typename TIndex>
+ using FastLoopBody = std::function<void(TNode<TIndex> index)>;
- Node* BuildFastLoop(const VariableList& var_list, Node* start_index,
- Node* end_index, const FastLoopBody& body, int increment,
- ParameterMode parameter_mode,
- IndexAdvanceMode advance_mode = IndexAdvanceMode::kPre);
+ template <typename TIndex>
+ TNode<TIndex> BuildFastLoop(
+ const VariableList& var_list, TNode<TIndex> start_index,
+ TNode<TIndex> end_index, const FastLoopBody<TIndex>& body, int increment,
+ IndexAdvanceMode advance_mode = IndexAdvanceMode::kPre);
- Node* BuildFastLoop(Node* start_index, Node* end_index,
- const FastLoopBody& body, int increment,
- ParameterMode parameter_mode,
- IndexAdvanceMode advance_mode = IndexAdvanceMode::kPre) {
+ template <typename TIndex>
+ TNode<TIndex> BuildFastLoop(
+ TNode<TIndex> start_index, TNode<TIndex> end_index,
+ const FastLoopBody<TIndex>& body, int increment,
+ IndexAdvanceMode advance_mode = IndexAdvanceMode::kPre) {
return BuildFastLoop(VariableList(0, zone()), start_index, end_index, body,
- increment, parameter_mode, advance_mode);
+ increment, advance_mode);
}
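
A minimal sketch of the typed form (hypothetical body; `length` assumed): the
index type is carried by the template parameter, so no ParameterMode needs to
be threaded through.

  BuildFastLoop<IntPtrT>(
      IntPtrConstant(0), length,
      [&](TNode<IntPtrT> index) {
        // ... per-iteration work using the typed index ...
      },
      1 /* increment */, IndexAdvanceMode::kPost);
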
enum class ForEachDirection { kForward, kReverse };
@@ -3304,13 +3358,13 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
Label* doesnt_fit, int base_size,
ParameterMode mode);
- void InitializeFieldsWithRoot(Node* object, Node* start_offset,
- Node* end_offset, RootIndex root);
+ void InitializeFieldsWithRoot(TNode<HeapObject> object,
+ TNode<IntPtrT> start_offset,
+ TNode<IntPtrT> end_offset, RootIndex root);
- Node* RelationalComparison(Operation op, SloppyTNode<Object> left,
- SloppyTNode<Object> right,
- SloppyTNode<Context> context,
- Variable* var_type_feedback = nullptr);
+ TNode<Oddball> RelationalComparison(
+ Operation op, TNode<Object> left, TNode<Object> right,
+ TNode<Context> context, TVariable<Smi>* var_type_feedback = nullptr);
void BranchIfNumberRelationalComparison(Operation op,
SloppyTNode<Number> left,
@@ -3360,12 +3414,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
void GotoIfNumberGreaterThanOrEqual(Node* left, Node* right, Label* if_false);
- Node* Equal(SloppyTNode<Object> lhs, SloppyTNode<Object> rhs,
- SloppyTNode<Context> context,
- Variable* var_type_feedback = nullptr);
+ TNode<Oddball> Equal(SloppyTNode<Object> lhs, SloppyTNode<Object> rhs,
+ SloppyTNode<Context> context,
+ TVariable<Smi>* var_type_feedback = nullptr);
TNode<Oddball> StrictEqual(SloppyTNode<Object> lhs, SloppyTNode<Object> rhs,
- Variable* var_type_feedback = nullptr);
+ TVariable<Smi>* var_type_feedback = nullptr);
// ECMA#sec-samevalue
// Similar to StrictEqual except that NaNs are treated as equal and minus zero
@@ -3395,16 +3449,17 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
HasPropertyLookupMode::kHasProperty);
}
- Node* Typeof(Node* value);
+ TNode<String> Typeof(SloppyTNode<Object> value);
- TNode<Object> GetSuperConstructor(SloppyTNode<Context> context,
- SloppyTNode<JSFunction> active_function);
+ TNode<Object> GetSuperConstructor(TNode<Context> context,
+ TNode<JSFunction> active_function);
TNode<JSReceiver> SpeciesConstructor(
SloppyTNode<Context> context, SloppyTNode<Object> object,
SloppyTNode<JSReceiver> default_constructor);
- Node* InstanceOf(Node* object, Node* callable, Node* context);
+ TNode<Oddball> InstanceOf(TNode<Object> object, TNode<Object> callable,
+ TNode<Context> context);
// Debug helpers
Node* IsDebugActive();
@@ -3431,8 +3486,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// JSTypedArray helpers
TNode<UintPtrT> LoadJSTypedArrayLength(TNode<JSTypedArray> typed_array);
- TNode<RawPtrT> LoadJSTypedArrayBackingStore(TNode<JSTypedArray> typed_array);
+ TNode<RawPtrT> LoadJSTypedArrayDataPtr(TNode<JSTypedArray> typed_array);
+ template <typename TIndex>
+ TNode<IntPtrT> ElementOffsetFromIndex(TNode<TIndex> index, ElementsKind kind,
+ int base_size = 0);
+ // TODO(v8:9708): remove once all uses are ported.
TNode<IntPtrT> ElementOffsetFromIndex(Node* index, ElementsKind kind,
ParameterMode mode, int base_size = 0);
@@ -3451,8 +3510,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
SloppyTNode<SharedFunctionInfo> shared_info,
Label* if_compile_lazy = nullptr);
- Node* AllocateFunctionWithMapAndContext(Node* map, Node* shared_info,
- Node* context);
+ TNode<JSFunction> AllocateFunctionWithMapAndContext(
+ TNode<Map> map, TNode<SharedFunctionInfo> shared_info,
+ TNode<Context> context);
// Promise helpers
Node* IsPromiseHookEnabled();
@@ -3463,7 +3523,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// for..in helpers
void CheckPrototypeEnumCache(Node* receiver, Node* receiver_map,
Label* if_fast, Label* if_slow);
- Node* CheckEnumCache(Node* receiver, Label* if_empty, Label* if_runtime);
+ TNode<Map> CheckEnumCache(TNode<HeapObject> receiver, Label* if_empty,
+ Label* if_runtime);
TNode<Object> GetArgumentValue(TorqueStructArguments args,
TNode<IntPtrT> index);
@@ -3620,11 +3681,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
Node* receiver, Label* if_bailout,
GetOwnPropertyMode mode = kCallJSGetter);
- TNode<IntPtrT> TryToIntptr(Node* key, Label* miss);
-
- void BranchIfPrototypesHaveNoElements(Node* receiver_map,
- Label* definitely_no_elements,
- Label* possibly_elements);
+ TNode<IntPtrT> TryToIntptr(SloppyTNode<Object> key, Label* miss);
void InitializeFunctionContext(Node* native_context, Node* context,
int slots);
@@ -3655,13 +3712,18 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// Allocate and return a JSArray of given total size in bytes with header
// fields initialized.
- TNode<JSArray> AllocateUninitializedJSArray(TNode<Map> array_map,
- TNode<Smi> length,
- Node* allocation_site,
- TNode<IntPtrT> size_in_bytes);
+ TNode<JSArray> AllocateUninitializedJSArray(
+ TNode<Map> array_map, TNode<Smi> length,
+ TNode<AllocationSite> allocation_site, TNode<IntPtrT> size_in_bytes);
TNode<BoolT> IsValidSmi(TNode<Smi> smi);
- Node* SmiShiftBitsConstant();
+
+ TNode<IntPtrT> SmiShiftBitsConstant() {
+ return IntPtrConstant(kSmiShiftSize + kSmiTagSize);
+ }
+ TNode<Int32T> SmiShiftBitsConstant32() {
+ return Int32Constant(kSmiShiftSize + kSmiTagSize);
+ }
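
For intuition, the constant works out as follows (configuration-dependent):
with 31-bit Smis on a 64-bit target, kSmiShiftSize + kSmiTagSize = 31 + 1 =
32, so a Smi untags with a 32-bit shift; on 32-bit targets it is 0 + 1 = 1.

  // Hypothetical untagging sketch built on the helper above:
  // WordSar(BitcastTaggedToWordForTagAndSmiBits(smi), SmiShiftBitsConstant())
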
// Emits keyed sloppy arguments load if the |value| is nullptr or store
// otherwise. Returns either the loaded value or |value|.
@@ -3689,10 +3751,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
void GenerateEqual_Same(SloppyTNode<Object> value, Label* if_equal,
Label* if_notequal,
Variable* var_type_feedback = nullptr);
- TNode<String> AllocAndCopyStringCharacters(Node* from,
- Node* from_instance_type,
- TNode<IntPtrT> from_index,
- TNode<IntPtrT> character_count);
static const int kElementLoopUnrollThreshold = 8;
@@ -3705,11 +3763,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
Variable* var_numeric, Variable* var_feedback);
template <Object::Conversion conversion>
- void TaggedToWord32OrBigIntImpl(Node* context, Node* value, Label* if_number,
- Variable* var_word32,
+ void TaggedToWord32OrBigIntImpl(TNode<Context> context, TNode<Object> value,
+ Label* if_number,
+ TVariable<Word32T>* var_word32,
Label* if_bigint = nullptr,
- Variable* var_bigint = nullptr,
- Variable* var_feedback = nullptr);
+ TVariable<Object>* var_maybe_bigint = nullptr,
+ TVariable<Smi>* var_feedback = nullptr);
private:
// Low-level accessors for Descriptor arrays.
@@ -3727,36 +3786,48 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
}
};
+// template <typename TIndex>
class V8_EXPORT_PRIVATE CodeStubArguments {
public:
using Node = compiler::Node;
- template <class T>
- using TNode = compiler::TNode<T>;
- template <class T>
- using SloppyTNode = compiler::SloppyTNode<T>;
enum ReceiverMode { kHasReceiver, kNoReceiver };
- // |argc| is an intptr value which specifies the number of arguments passed
- // to the builtin excluding the receiver. The arguments will include a
- // receiver iff |receiver_mode| is kHasReceiver.
- CodeStubArguments(CodeStubAssembler* assembler, Node* argc,
+ // |argc| specifies the number of arguments passed to the builtin excluding
+ // the receiver. The arguments will include a receiver iff |receiver_mode|
+ // is kHasReceiver.
+ CodeStubArguments(CodeStubAssembler* assembler, TNode<IntPtrT> argc,
ReceiverMode receiver_mode = ReceiverMode::kHasReceiver)
- : CodeStubArguments(assembler, argc, nullptr,
- CodeStubAssembler::INTPTR_PARAMETERS, receiver_mode) {
- }
+ : CodeStubArguments(assembler, argc, TNode<RawPtrT>(), receiver_mode) {}
+
+ CodeStubArguments(CodeStubAssembler* assembler, TNode<Int32T> argc,
+ ReceiverMode receiver_mode = ReceiverMode::kHasReceiver)
+ : CodeStubArguments(assembler, assembler->ChangeInt32ToIntPtr(argc),
+ TNode<RawPtrT>(), receiver_mode) {}
- // |argc| is either a smi or intptr depending on |param_mode|. The arguments
- // include a receiver iff |receiver_mode| is kHasReceiver.
- CodeStubArguments(CodeStubAssembler* assembler, Node* argc, Node* fp,
- CodeStubAssembler::ParameterMode param_mode,
+ // TODO(v8:9708): Consider removing this variant
+ CodeStubArguments(CodeStubAssembler* assembler, TNode<Smi> argc,
+ ReceiverMode receiver_mode = ReceiverMode::kHasReceiver)
+ : CodeStubArguments(assembler, assembler->ParameterToIntPtr(argc),
+ TNode<RawPtrT>(), receiver_mode) {}
+
+ // |argc| specifies the number of arguments passed to the builtin excluding
+ // the receiver. The arguments will include a receiver iff |receiver_mode|
+ // is kHasReceiver.
+ CodeStubArguments(CodeStubAssembler* assembler, TNode<IntPtrT> argc,
+ TNode<RawPtrT> fp,
ReceiverMode receiver_mode = ReceiverMode::kHasReceiver);
+ CodeStubArguments(CodeStubAssembler* assembler, TNode<Smi> argc,
+ TNode<RawPtrT> fp,
+ ReceiverMode receiver_mode = ReceiverMode::kHasReceiver)
+ : CodeStubArguments(assembler, assembler->ParameterToIntPtr(argc), fp,
+ receiver_mode) {}
+
// Used by Torque to construct arguments based on a Torque-defined
// struct of values.
CodeStubArguments(CodeStubAssembler* assembler,
TorqueStructArguments torque_arguments)
: assembler_(assembler),
- argc_mode_(CodeStubAssembler::INTPTR_PARAMETERS),
receiver_mode_(ReceiverMode::kHasReceiver),
argc_(torque_arguments.length),
base_(torque_arguments.base),
@@ -3769,14 +3840,17 @@ class V8_EXPORT_PRIVATE CodeStubArguments {
void SetReceiver(TNode<Object> object) const;
// Computes address of the index'th argument.
- TNode<WordT> AtIndexPtr(Node* index,
- CodeStubAssembler::ParameterMode mode =
- CodeStubAssembler::INTPTR_PARAMETERS) const;
+ TNode<RawPtrT> AtIndexPtr(TNode<IntPtrT> index) const;
+ TNode<RawPtrT> AtIndexPtr(TNode<Smi> index) const {
+ return AtIndexPtr(assembler_->ParameterToIntPtr(index));
+ }
// |index| is zero-based and does not include the receiver.
- TNode<Object> AtIndex(Node* index,
- CodeStubAssembler::ParameterMode mode =
- CodeStubAssembler::INTPTR_PARAMETERS) const;
+ TNode<Object> AtIndex(TNode<IntPtrT> index) const;
+ // TODO(v8:9708): Consider removing this variant
+ TNode<Object> AtIndex(TNode<Smi> index) const {
+ return AtIndex(assembler_->ParameterToIntPtr(index));
+ }
TNode<Object> AtIndex(int index) const;
@@ -3786,15 +3860,10 @@ class V8_EXPORT_PRIVATE CodeStubArguments {
TNode<Object> GetOptionalArgumentValue(int index,
TNode<Object> default_value);
- Node* GetLength(CodeStubAssembler::ParameterMode mode) const {
- DCHECK_EQ(mode, argc_mode_);
- return argc_;
- }
+ TNode<IntPtrT> GetLength() const { return argc_; }
TorqueStructArguments GetTorqueArguments() const {
- DCHECK_EQ(argc_mode_, CodeStubAssembler::INTPTR_PARAMETERS);
- return TorqueStructArguments{assembler_->UncheckedCast<RawPtrT>(fp_), base_,
- assembler_->UncheckedCast<IntPtrT>(argc_)};
+ return TorqueStructArguments{fp_, base_, argc_};
}
TNode<Object> GetOptionalArgumentValue(TNode<IntPtrT> index) {
@@ -3802,28 +3871,32 @@ class V8_EXPORT_PRIVATE CodeStubArguments {
}
TNode<Object> GetOptionalArgumentValue(TNode<IntPtrT> index,
TNode<Object> default_value);
- TNode<IntPtrT> GetLength() const {
- DCHECK_EQ(argc_mode_, CodeStubAssembler::INTPTR_PARAMETERS);
- return assembler_->UncheckedCast<IntPtrT>(argc_);
- }
- using ForEachBodyFunction = std::function<void(Node* arg)>;
+ using ForEachBodyFunction = std::function<void(TNode<Object> arg)>;
// Iteration doesn't include the receiver. |first| and |last| are zero-based.
- void ForEach(const ForEachBodyFunction& body, Node* first = nullptr,
- Node* last = nullptr,
- CodeStubAssembler::ParameterMode mode =
- CodeStubAssembler::INTPTR_PARAMETERS) {
+ template <typename TIndex>
+ void ForEach(const ForEachBodyFunction& body, TNode<TIndex> first = {},
+ TNode<TIndex> last = {}) const {
CodeStubAssembler::VariableList list(0, assembler_->zone());
ForEach(list, body, first, last);
}
// Iteration doesn't include the receiver. |first| and |last| are zero-based.
void ForEach(const CodeStubAssembler::VariableList& vars,
- const ForEachBodyFunction& body, Node* first = nullptr,
- Node* last = nullptr,
- CodeStubAssembler::ParameterMode mode =
- CodeStubAssembler::INTPTR_PARAMETERS);
+ const ForEachBodyFunction& body, TNode<IntPtrT> first = {},
+ TNode<IntPtrT> last = {}) const;
+
+ void ForEach(const CodeStubAssembler::VariableList& vars,
+ const ForEachBodyFunction& body, TNode<Smi> first,
+ TNode<Smi> last = {}) const {
+ TNode<IntPtrT> first_intptr = assembler_->ParameterToIntPtr(first);
+ TNode<IntPtrT> last_intptr;
+ if (last != nullptr) {
+ last_intptr = assembler_->ParameterToIntPtr(last);
+ }
+ return ForEach(vars, body, first_intptr, last_intptr);
+ }
void PopAndReturn(Node* value);
@@ -3831,11 +3904,10 @@ class V8_EXPORT_PRIVATE CodeStubArguments {
Node* GetArguments();
CodeStubAssembler* assembler_;
- CodeStubAssembler::ParameterMode argc_mode_;
ReceiverMode receiver_mode_;
- Node* argc_;
+ TNode<IntPtrT> argc_;
TNode<RawPtrT> base_;
- Node* fp_;
+ TNode<RawPtrT> fp_;
};
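
A short usage sketch (hypothetical builtin; `argc` and the body are assumed):
with the typed constructors, an Int32T argument count from the call descriptor
can be used directly, again with no ParameterMode plumbing.

  CodeStubArguments args(this, UncheckedCast<Int32T>(argc));
  TNode<Object> receiver = args.GetReceiver();
  args.ForEach<IntPtrT>([&](TNode<Object> arg) {
    // ... visit each argument; the receiver is not included ...
  });
  args.PopAndReturn(UndefinedConstant());
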
class ToDirectStringAssembler : public CodeStubAssembler {
diff --git a/deps/v8/src/codegen/compilation-cache.cc b/deps/v8/src/codegen/compilation-cache.cc
index 6e9613005e..ef3d83a06e 100644
--- a/deps/v8/src/codegen/compilation-cache.cc
+++ b/deps/v8/src/codegen/compilation-cache.cc
@@ -28,7 +28,7 @@ CompilationCache::CompilationCache(Isolate* isolate)
eval_global_(isolate),
eval_contextual_(isolate),
reg_exp_(isolate, kRegExpGenerations),
- enabled_(true) {
+ enabled_script_and_eval_(true) {
CompilationSubCache* subcaches[kSubCacheCount] = {
&script_, &eval_global_, &eval_contextual_, &reg_exp_};
for (int i = 0; i < kSubCacheCount; ++i) {
@@ -254,7 +254,7 @@ void CompilationCacheRegExp::Put(Handle<String> source, JSRegExp::Flags flags,
}
void CompilationCache::Remove(Handle<SharedFunctionInfo> function_info) {
- if (!IsEnabled()) return;
+ if (!IsEnabledScriptAndEval()) return;
eval_global_.Remove(function_info);
eval_contextual_.Remove(function_info);
@@ -265,7 +265,7 @@ MaybeHandle<SharedFunctionInfo> CompilationCache::LookupScript(
Handle<String> source, MaybeHandle<Object> name, int line_offset,
int column_offset, ScriptOriginOptions resource_options,
Handle<Context> native_context, LanguageMode language_mode) {
- if (!IsEnabled()) return MaybeHandle<SharedFunctionInfo>();
+ if (!IsEnabledScriptAndEval()) return MaybeHandle<SharedFunctionInfo>();
return script_.Lookup(source, name, line_offset, column_offset,
resource_options, native_context, language_mode);
@@ -277,7 +277,7 @@ InfoCellPair CompilationCache::LookupEval(Handle<String> source,
LanguageMode language_mode,
int position) {
InfoCellPair result;
- if (!IsEnabled()) return result;
+ if (!IsEnabledScriptAndEval()) return result;
const char* cache_type;
@@ -303,8 +303,6 @@ InfoCellPair CompilationCache::LookupEval(Handle<String> source,
MaybeHandle<FixedArray> CompilationCache::LookupRegExp(Handle<String> source,
JSRegExp::Flags flags) {
- if (!IsEnabled()) return MaybeHandle<FixedArray>();
-
return reg_exp_.Lookup(source, flags);
}
@@ -312,7 +310,7 @@ void CompilationCache::PutScript(Handle<String> source,
Handle<Context> native_context,
LanguageMode language_mode,
Handle<SharedFunctionInfo> function_info) {
- if (!IsEnabled()) return;
+ if (!IsEnabledScriptAndEval()) return;
LOG(isolate(), CompilationCacheEvent("put", "script", *function_info));
script_.Put(source, native_context, language_mode, function_info);
@@ -324,7 +322,7 @@ void CompilationCache::PutEval(Handle<String> source,
Handle<SharedFunctionInfo> function_info,
Handle<FeedbackCell> feedback_cell,
int position) {
- if (!IsEnabled()) return;
+ if (!IsEnabledScriptAndEval()) return;
const char* cache_type;
HandleScope scope(isolate());
@@ -344,8 +342,6 @@ void CompilationCache::PutEval(Handle<String> source,
void CompilationCache::PutRegExp(Handle<String> source, JSRegExp::Flags flags,
Handle<FixedArray> data) {
- if (!IsEnabled()) return;
-
reg_exp_.Put(source, flags, data);
}
@@ -367,10 +363,12 @@ void CompilationCache::MarkCompactPrologue() {
}
}
-void CompilationCache::Enable() { enabled_ = true; }
+void CompilationCache::EnableScriptAndEval() {
+ enabled_script_and_eval_ = true;
+}
-void CompilationCache::Disable() {
- enabled_ = false;
+void CompilationCache::DisableScriptAndEval() {
+ enabled_script_and_eval_ = false;
Clear();
}
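
The net effect, sketched from the debugger's side (hypothetical call site):
script and eval caching can be switched off while regexp results stay cached,
since LookupRegExp/PutRegExp above no longer consult the enabled flag.

  isolate->compilation_cache()->DisableScriptAndEval();
  // ... debugging session: new scripts and eval always recompile ...
  isolate->compilation_cache()->EnableScriptAndEval();
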
diff --git a/deps/v8/src/codegen/compilation-cache.h b/deps/v8/src/codegen/compilation-cache.h
index 35595b1985..04bea44a82 100644
--- a/deps/v8/src/codegen/compilation-cache.h
+++ b/deps/v8/src/codegen/compilation-cache.h
@@ -202,9 +202,14 @@ class V8_EXPORT_PRIVATE CompilationCache {
void MarkCompactPrologue();
// Enable/disable compilation cache. Used by debugger to disable compilation
- // cache during debugging to make sure new scripts are always compiled.
- void Enable();
- void Disable();
+ // cache during debugging so that eval and new scripts are always compiled.
+ // TODO(bmeurer, chromium:992277): The RegExp cache cannot be enabled and/or
+ // disabled, since it doesn't affect debugging. However, ideally the other
+ // caches should also always be on, even in the presence of the debugger,
+ // but at this point there are too many unclear invariants, so I decided
+ // to just fix the pressing performance problem for RegExp individually first.
+ void EnableScriptAndEval();
+ void DisableScriptAndEval();
private:
explicit CompilationCache(Isolate* isolate);
@@ -215,7 +220,9 @@ class V8_EXPORT_PRIVATE CompilationCache {
// The number of sub caches covering the different types to cache.
static const int kSubCacheCount = 4;
- bool IsEnabled() const { return FLAG_compilation_cache && enabled_; }
+ bool IsEnabledScriptAndEval() const {
+ return FLAG_compilation_cache && enabled_script_and_eval_;
+ }
Isolate* isolate() const { return isolate_; }
@@ -227,8 +234,8 @@ class V8_EXPORT_PRIVATE CompilationCache {
CompilationCacheRegExp reg_exp_;
CompilationSubCache* subcaches_[kSubCacheCount];
- // Current enable state of the compilation cache.
- bool enabled_;
+ // Current enable state of the compilation cache for scripts and eval.
+ bool enabled_script_and_eval_;
friend class Isolate;
diff --git a/deps/v8/src/codegen/compiler.cc b/deps/v8/src/codegen/compiler.cc
index fbd181f5c8..d73be13a30 100644
--- a/deps/v8/src/codegen/compiler.cc
+++ b/deps/v8/src/codegen/compiler.cc
@@ -666,21 +666,25 @@ V8_WARN_UNUSED_RESULT MaybeHandle<Code> GetCodeFromOptimizedCodeCache(
function->GetIsolate(),
RuntimeCallCounterId::kCompileGetFromOptimizedCodeMap);
Handle<SharedFunctionInfo> shared(function->shared(), function->GetIsolate());
+ Isolate* isolate = function->GetIsolate();
DisallowHeapAllocation no_gc;
- if (osr_offset.IsNone()) {
- if (function->has_feedback_vector()) {
- FeedbackVector feedback_vector = function->feedback_vector();
- feedback_vector.EvictOptimizedCodeMarkedForDeoptimization(
- function->shared(), "GetCodeFromOptimizedCodeCache");
- Code code = feedback_vector.optimized_code();
-
- if (!code.is_null()) {
- // Caching of optimized code enabled and optimized code found.
- DCHECK(!code.marked_for_deoptimization());
- DCHECK(function->shared().is_compiled());
- return Handle<Code>(code, feedback_vector.GetIsolate());
- }
- }
+ Code code;
+ if (osr_offset.IsNone() && function->has_feedback_vector()) {
+ FeedbackVector feedback_vector = function->feedback_vector();
+ feedback_vector.EvictOptimizedCodeMarkedForDeoptimization(
+ function->shared(), "GetCodeFromOptimizedCodeCache");
+ code = feedback_vector.optimized_code();
+ } else if (!osr_offset.IsNone()) {
+ code = function->context()
+ .native_context()
+ .GetOSROptimizedCodeCache()
+ .GetOptimizedCode(shared, osr_offset, isolate);
+ }
+ if (!code.is_null()) {
+ // Caching of optimized code enabled and optimized code found.
+ DCHECK(!code.marked_for_deoptimization());
+ DCHECK(function->shared().is_compiled());
+ return Handle<Code>(code, isolate);
}
return MaybeHandle<Code>();
}
@@ -711,12 +715,15 @@ void InsertCodeIntoOptimizedCodeCache(
// Cache optimized context-specific code.
Handle<JSFunction> function = compilation_info->closure();
Handle<SharedFunctionInfo> shared(function->shared(), function->GetIsolate());
- Handle<Context> native_context(function->context().native_context(),
- function->GetIsolate());
+ Handle<NativeContext> native_context(function->context().native_context(),
+ function->GetIsolate());
if (compilation_info->osr_offset().IsNone()) {
Handle<FeedbackVector> vector =
handle(function->feedback_vector(), function->GetIsolate());
FeedbackVector::SetOptimizedCode(vector, code);
+ } else {
+ OSROptimizedCodeCache::AddOptimizedCode(native_context, shared, code,
+ compilation_info->osr_offset());
}
}
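
In outline, lookup and insertion are now symmetric across the two caches
(a sketch of the flow, not verbatim code):

  // osr_offset.IsNone():  per-function cache in the FeedbackVector
  //   read  -> feedback_vector.optimized_code()
  //   write -> FeedbackVector::SetOptimizedCode(vector, code)
  // otherwise:            per-native-context OSROptimizedCodeCache, keyed by
  //                       (SharedFunctionInfo, osr_offset)
  //   read  -> GetOptimizedCode(shared, osr_offset, isolate)
  //   write -> OSROptimizedCodeCache::AddOptimizedCode(...)
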
@@ -1904,6 +1911,12 @@ struct ScriptCompileTimerScope {
case CacheBehaviour::kConsumeCodeCache:
return isolate_->counters()->compile_script_with_consume_cache();
+ // Note that this only counts the finalization part of streaming; the
+ // actual streaming compile is counted by BackgroundCompileTask into
+ // "compile_script_on_background".
+ case CacheBehaviour::kNoCacheBecauseStreamingSource:
+ return isolate_->counters()->compile_script_streaming_finalization();
+
case CacheBehaviour::kNoCacheBecauseInlineScript:
return isolate_->counters()
->compile_script_no_cache_because_inline_script();
@@ -1923,9 +1936,6 @@ struct ScriptCompileTimerScope {
// TODO(leszeks): Consider counting separately once modules are more
// common.
case CacheBehaviour::kNoCacheBecauseModule:
- // TODO(leszeks): Count separately or remove entirely once we have
- // background compilation.
- case CacheBehaviour::kNoCacheBecauseStreamingSource:
case CacheBehaviour::kNoCacheBecauseV8Extension:
case CacheBehaviour::kNoCacheBecauseExtensionModule:
case CacheBehaviour::kNoCacheBecausePacScript:
diff --git a/deps/v8/src/codegen/constant-pool.cc b/deps/v8/src/codegen/constant-pool.cc
index 6816c5b7ad..42b2fa6e9a 100644
--- a/deps/v8/src/codegen/constant-pool.cc
+++ b/deps/v8/src/codegen/constant-pool.cc
@@ -49,22 +49,22 @@ ConstantPoolEntry::Access ConstantPoolBuilder::NextAccess(
}
ConstantPoolEntry::Access ConstantPoolBuilder::AddEntry(
- ConstantPoolEntry& entry, ConstantPoolEntry::Type type) {
+ ConstantPoolEntry* entry, ConstantPoolEntry::Type type) {
DCHECK(!emitted_label_.is_bound());
PerTypeEntryInfo& info = info_[type];
const int entry_size = ConstantPoolEntry::size(type);
bool merged = false;
- if (entry.sharing_ok()) {
+ if (entry->sharing_ok()) {
// Try to merge entries
std::vector<ConstantPoolEntry>::iterator it = info.shared_entries.begin();
int end = static_cast<int>(info.shared_entries.size());
for (int i = 0; i < end; i++, it++) {
if ((entry_size == kSystemPointerSize)
- ? entry.value() == it->value()
- : entry.value64() == it->value64()) {
+ ? entry->value() == it->value()
+ : entry->value64() == it->value64()) {
// Merge with found entry.
- entry.set_merged_index(i);
+ entry->set_merged_index(i);
merged = true;
break;
}
@@ -72,16 +72,16 @@ ConstantPoolEntry::Access ConstantPoolBuilder::AddEntry(
}
// By definition, merged entries have regular access.
- DCHECK(!merged || entry.merged_index() < info.regular_count);
+ DCHECK(!merged || entry->merged_index() < info.regular_count);
ConstantPoolEntry::Access access =
(merged ? ConstantPoolEntry::REGULAR : NextAccess(type));
// Enforce an upper bound on search time by limiting the search to
// unique sharable entries which fit in the regular section.
- if (entry.sharing_ok() && !merged && access == ConstantPoolEntry::REGULAR) {
- info.shared_entries.push_back(entry);
+ if (entry->sharing_ok() && !merged && access == ConstantPoolEntry::REGULAR) {
+ info.shared_entries.push_back(*entry);
} else {
- info.entries.push_back(entry);
+ info.entries.push_back(*entry);
}
// We're done if we found a match or have already triggered the
diff --git a/deps/v8/src/codegen/constant-pool.h b/deps/v8/src/codegen/constant-pool.h
index d07452336b..d2ab5641ae 100644
--- a/deps/v8/src/codegen/constant-pool.h
+++ b/deps/v8/src/codegen/constant-pool.h
@@ -102,13 +102,13 @@ class ConstantPoolBuilder {
ConstantPoolEntry::Access AddEntry(int position, intptr_t value,
bool sharing_ok) {
ConstantPoolEntry entry(position, value, sharing_ok);
- return AddEntry(entry, ConstantPoolEntry::INTPTR);
+ return AddEntry(&entry, ConstantPoolEntry::INTPTR);
}
// Add double constant to the embedded constant pool
ConstantPoolEntry::Access AddEntry(int position, Double value) {
ConstantPoolEntry entry(position, value);
- return AddEntry(entry, ConstantPoolEntry::DOUBLE);
+ return AddEntry(&entry, ConstantPoolEntry::DOUBLE);
}
// Add double constant to the embedded constant pool
@@ -138,9 +138,8 @@ class ConstantPoolBuilder {
inline Label* EmittedPosition() { return &emitted_label_; }
private:
- ConstantPoolEntry::Access AddEntry(
- ConstantPoolEntry& entry, // NOLINT(runtime/references)
- ConstantPoolEntry::Type type);
+ ConstantPoolEntry::Access AddEntry(ConstantPoolEntry* entry,
+ ConstantPoolEntry::Type type);
void EmitSharedEntries(Assembler* assm, ConstantPoolEntry::Type type);
void EmitGroup(Assembler* assm, ConstantPoolEntry::Access access,
ConstantPoolEntry::Type type);
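
The reference-to-pointer change follows the style-guide rule that mutated
parameters be passed by pointer, which is what lets the
NOLINT(runtime/references) suppression go away; call sites now make the
mutation explicit, as in the inline wrappers above:

  ConstantPoolEntry entry(position, value, sharing_ok);
  // AddEntry may record a merged index on |entry| before copying it.
  AddEntry(&entry, ConstantPoolEntry::INTPTR);
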
diff --git a/deps/v8/src/codegen/cpu-features.h b/deps/v8/src/codegen/cpu-features.h
index dae9992c57..6b3d3934d0 100644
--- a/deps/v8/src/codegen/cpu-features.h
+++ b/deps/v8/src/codegen/cpu-features.h
@@ -13,7 +13,7 @@ namespace internal {
// CPU feature flags.
enum CpuFeature {
- // x86
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64
SSE4_2,
SSE4_1,
SSSE3,
@@ -26,39 +26,46 @@ enum CpuFeature {
LZCNT,
POPCNT,
ATOM,
- // ARM
+
+#elif V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64
// - Standard configurations. The baseline is ARMv6+VFPv2.
ARMv7, // ARMv7-A + VFPv3-D32 + NEON
ARMv7_SUDIV, // ARMv7-A + VFPv4-D32 + NEON + SUDIV
ARMv8, // ARMv8-A (+ all of the above)
- // MIPS, MIPS64
+
+ // ARM feature aliases (based on the standard configurations above).
+ VFPv3 = ARMv7,
+ NEON = ARMv7,
+ VFP32DREGS = ARMv7,
+ SUDIV = ARMv7_SUDIV,
+
+#elif V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
FPU,
FP64FPU,
MIPSr1,
MIPSr2,
MIPSr6,
MIPS_SIMD, // MSA instructions
- // PPC
+
+#elif V8_TARGET_ARCH_PPC
+ FPU,
FPR_GPR_MOV,
LWSYNC,
ISELECT,
VSX,
MODULO,
- // S390
+
+#elif V8_TARGET_ARCH_S390X
+ FPU,
DISTINCT_OPS,
GENERAL_INSTR_EXT,
FLOATING_POINT_EXT,
VECTOR_FACILITY,
VECTOR_ENHANCE_FACILITY_1,
MISC_INSTR_EXT2,
+#endif
- NUMBER_OF_CPU_FEATURES,
-
- // ARM feature aliases (based on the standard configurations above).
- VFPv3 = ARMv7,
- NEON = ARMv7,
- VFP32DREGS = ARMv7,
- SUDIV = ARMv7_SUDIV
+ NUMBER_OF_CPU_FEATURES
};
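
One consequence (sketch, assuming the usual CpuFeatures::IsSupported API):
each architecture now only sees its own enumerators, so a feature test has to
sit behind the matching target guard, and a cross-architecture typo becomes a
compile error rather than a silently-false runtime check.

  #if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64
    if (CpuFeatures::IsSupported(SSE4_1)) {
      // emit the SSE4.1 fast path
    }
  #endif
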
// CpuFeatures keeps track of which features are supported by the target CPU.
diff --git a/deps/v8/src/codegen/external-reference.cc b/deps/v8/src/codegen/external-reference.cc
index 44503e532d..e1f873cb38 100644
--- a/deps/v8/src/codegen/external-reference.cc
+++ b/deps/v8/src/codegen/external-reference.cc
@@ -217,10 +217,8 @@ struct IsValidExternalReferenceType<Result (Class::*)(Args...)> {
FUNCTION_REFERENCE(incremental_marking_record_write_function,
IncrementalMarking::RecordWriteFromCode)
-ExternalReference ExternalReference::store_buffer_overflow_function() {
- return ExternalReference(
- Redirect(Heap::store_buffer_overflow_function_address()));
-}
+FUNCTION_REFERENCE(insert_remembered_set_function,
+ Heap::InsertIntoRememberedSetFromCode)
FUNCTION_REFERENCE(delete_handle_scope_extensions,
HandleScope::DeleteExtensions)
@@ -342,10 +340,6 @@ ExternalReference ExternalReference::address_of_real_jslimit(Isolate* isolate) {
return ExternalReference(address);
}
-ExternalReference ExternalReference::store_buffer_top(Isolate* isolate) {
- return ExternalReference(isolate->heap()->store_buffer_top_address());
-}
-
ExternalReference ExternalReference::heap_is_marking_flag_address(
Isolate* isolate) {
return ExternalReference(isolate->heap()->IsMarkingFlagAddress());
@@ -529,19 +523,19 @@ ExternalReference ExternalReference::address_of_regexp_stack_memory_top_address(
FUNCTION_REFERENCE_WITH_TYPE(ieee754_acos_function, base::ieee754::acos,
BUILTIN_FP_CALL)
FUNCTION_REFERENCE_WITH_TYPE(ieee754_acosh_function, base::ieee754::acosh,
- BUILTIN_FP_FP_CALL)
+ BUILTIN_FP_CALL)
FUNCTION_REFERENCE_WITH_TYPE(ieee754_asin_function, base::ieee754::asin,
BUILTIN_FP_CALL)
FUNCTION_REFERENCE_WITH_TYPE(ieee754_asinh_function, base::ieee754::asinh,
- BUILTIN_FP_FP_CALL)
+ BUILTIN_FP_CALL)
FUNCTION_REFERENCE_WITH_TYPE(ieee754_atan_function, base::ieee754::atan,
BUILTIN_FP_CALL)
FUNCTION_REFERENCE_WITH_TYPE(ieee754_atanh_function, base::ieee754::atanh,
- BUILTIN_FP_FP_CALL)
+ BUILTIN_FP_CALL)
FUNCTION_REFERENCE_WITH_TYPE(ieee754_atan2_function, base::ieee754::atan2,
BUILTIN_FP_FP_CALL)
FUNCTION_REFERENCE_WITH_TYPE(ieee754_cbrt_function, base::ieee754::cbrt,
- BUILTIN_FP_FP_CALL)
+ BUILTIN_FP_CALL)
FUNCTION_REFERENCE_WITH_TYPE(ieee754_cos_function, base::ieee754::cos,
BUILTIN_FP_CALL)
FUNCTION_REFERENCE_WITH_TYPE(ieee754_cosh_function, base::ieee754::cosh,
@@ -549,7 +543,7 @@ FUNCTION_REFERENCE_WITH_TYPE(ieee754_cosh_function, base::ieee754::cosh,
FUNCTION_REFERENCE_WITH_TYPE(ieee754_exp_function, base::ieee754::exp,
BUILTIN_FP_CALL)
FUNCTION_REFERENCE_WITH_TYPE(ieee754_expm1_function, base::ieee754::expm1,
- BUILTIN_FP_FP_CALL)
+ BUILTIN_FP_CALL)
FUNCTION_REFERENCE_WITH_TYPE(ieee754_log_function, base::ieee754::log,
BUILTIN_FP_CALL)
FUNCTION_REFERENCE_WITH_TYPE(ieee754_log1p_function, base::ieee754::log1p,
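
The BUILTIN_FP_FP_CALL -> BUILTIN_FP_CALL edits above are signature fixes:
acosh, asinh, atanh, cbrt and expm1 are unary. Assuming the conventional
meaning of these ExternalReference types:

  // BUILTIN_FP_CALL     describes  double f(double);
  // BUILTIN_FP_FP_CALL  describes  double f(double, double);
  // so atan2 (two arguments) correctly keeps BUILTIN_FP_FP_CALL, while
  // e.g. base::ieee754::acosh(double) now matches BUILTIN_FP_CALL.
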
diff --git a/deps/v8/src/codegen/external-reference.h b/deps/v8/src/codegen/external-reference.h
index 45c26bdfb0..7cc0241fc4 100644
--- a/deps/v8/src/codegen/external-reference.h
+++ b/deps/v8/src/codegen/external-reference.h
@@ -38,7 +38,6 @@ class StatsCounter;
V(allocation_sites_list_address, "Heap::allocation_sites_list_address()") \
V(address_of_jslimit, "StackGuard::address_of_jslimit()") \
V(address_of_real_jslimit, "StackGuard::address_of_real_jslimit()") \
- V(store_buffer_top, "store_buffer_top") \
V(heap_is_marking_flag_address, "heap_is_marking_flag_address") \
V(new_space_allocation_top_address, "Heap::NewSpaceAllocationTopAddress()") \
V(new_space_allocation_limit_address, \
@@ -143,6 +142,7 @@ class StatsCounter;
V(ieee754_tanh_function, "base::ieee754::tanh") \
V(incremental_marking_record_write_function, \
"IncrementalMarking::RecordWrite") \
+ V(insert_remembered_set_function, "Heap::InsertIntoRememberedSetFromCode") \
V(invalidate_prototype_chains_function, \
"JSObject::InvalidatePrototypeChains()") \
V(invoke_accessor_getter_callback, "InvokeAccessorGetterCallback") \
@@ -170,7 +170,6 @@ class StatsCounter;
V(search_string_raw_two_one, "search_string_raw_two_one") \
V(search_string_raw_two_two, "search_string_raw_two_two") \
V(smi_lexicographic_compare_function, "smi_lexicographic_compare_function") \
- V(store_buffer_overflow_function, "StoreBuffer::StoreBufferOverflow") \
V(try_internalize_string_function, "try_internalize_string_function") \
V(wasm_call_trap_callback_for_testing, \
"wasm::call_trap_callback_for_testing") \
diff --git a/deps/v8/src/codegen/ia32/assembler-ia32-inl.h b/deps/v8/src/codegen/ia32/assembler-ia32-inl.h
index e274b41fa3..174a483868 100644
--- a/deps/v8/src/codegen/ia32/assembler-ia32-inl.h
+++ b/deps/v8/src/codegen/ia32/assembler-ia32-inl.h
@@ -39,6 +39,7 @@
#include "src/codegen/ia32/assembler-ia32.h"
+#include "src/base/memory.h"
#include "src/codegen/assembler.h"
#include "src/debug/debug.h"
#include "src/objects/objects-inl.h"
@@ -58,12 +59,12 @@ void RelocInfo::apply(intptr_t delta) {
RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY)));
if (IsRuntimeEntry(rmode_) || IsCodeTarget(rmode_) ||
IsOffHeapTarget(rmode_)) {
- int32_t* p = reinterpret_cast<int32_t*>(pc_);
- *p -= delta; // Relocate entry.
+ base::WriteUnalignedValue(pc_,
+ base::ReadUnalignedValue<int32_t>(pc_) - delta);
} else if (IsInternalReference(rmode_)) {
- // absolute code pointer inside code object moves with the code object.
- int32_t* p = reinterpret_cast<int32_t*>(pc_);
- *p += delta; // Relocate entry.
+ // Absolute code pointer inside code object moves with the code object.
+ base::WriteUnalignedValue(pc_,
+ base::ReadUnalignedValue<int32_t>(pc_) + delta);
}
}
@@ -103,7 +104,8 @@ void RelocInfo::set_target_object(Heap* heap, HeapObject target,
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
FlushInstructionCache(pc_, sizeof(Address));
}
- if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null()) {
+ if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null() &&
+ !FLAG_disable_write_barriers) {
WriteBarrierForCode(host(), this, target);
}
}
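
The switch to base::ReadUnalignedValue/WriteUnalignedValue above avoids
dereferencing a potentially misaligned int32_t*, which is undefined behaviour.
A minimal sketch of what such a helper typically does (assumed
implementation, for illustration only):

  template <typename V>
  V ReadUnalignedValue(Address p) {
    V r;
    memcpy(&r, reinterpret_cast<void*>(p), sizeof(V));  // alignment-safe copy
    return r;
  }
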
diff --git a/deps/v8/src/codegen/ia32/assembler-ia32.cc b/deps/v8/src/codegen/ia32/assembler-ia32.cc
index aefcab7299..405e4b7c55 100644
--- a/deps/v8/src/codegen/ia32/assembler-ia32.cc
+++ b/deps/v8/src/codegen/ia32/assembler-ia32.cc
@@ -272,8 +272,8 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
Handle<HeapObject> object;
switch (request.kind()) {
case HeapObjectRequest::kHeapNumber:
- object = isolate->factory()->NewHeapNumber(request.heap_number(),
- AllocationType::kOld);
+ object = isolate->factory()->NewHeapNumber<AllocationType::kOld>(
+ request.heap_number());
break;
case HeapObjectRequest::kStringConstant: {
const StringConstantBase* str = request.string();
@@ -2163,70 +2163,6 @@ void Assembler::divsd(XMMRegister dst, Operand src) {
emit_sse_operand(dst, src);
}
-void Assembler::xorpd(XMMRegister dst, Operand src) {
- EnsureSpace ensure_space(this);
- EMIT(0x66);
- EMIT(0x0F);
- EMIT(0x57);
- emit_sse_operand(dst, src);
-}
-
-void Assembler::andps(XMMRegister dst, Operand src) {
- EnsureSpace ensure_space(this);
- EMIT(0x0F);
- EMIT(0x54);
- emit_sse_operand(dst, src);
-}
-
-void Assembler::andnps(XMMRegister dst, Operand src) {
- EnsureSpace ensure_space(this);
- EMIT(0x0F);
- EMIT(0x55);
- emit_sse_operand(dst, src);
-}
-
-void Assembler::orps(XMMRegister dst, Operand src) {
- EnsureSpace ensure_space(this);
- EMIT(0x0F);
- EMIT(0x56);
- emit_sse_operand(dst, src);
-}
-
-void Assembler::xorps(XMMRegister dst, Operand src) {
- EnsureSpace ensure_space(this);
- EMIT(0x0F);
- EMIT(0x57);
- emit_sse_operand(dst, src);
-}
-
-void Assembler::addps(XMMRegister dst, Operand src) {
- EnsureSpace ensure_space(this);
- EMIT(0x0F);
- EMIT(0x58);
- emit_sse_operand(dst, src);
-}
-
-void Assembler::subps(XMMRegister dst, Operand src) {
- EnsureSpace ensure_space(this);
- EMIT(0x0F);
- EMIT(0x5C);
- emit_sse_operand(dst, src);
-}
-
-void Assembler::mulps(XMMRegister dst, Operand src) {
- EnsureSpace ensure_space(this);
- EMIT(0x0F);
- EMIT(0x59);
- emit_sse_operand(dst, src);
-}
-
-void Assembler::divps(XMMRegister dst, Operand src) {
- EnsureSpace ensure_space(this);
- EMIT(0x0F);
- EMIT(0x5E);
- emit_sse_operand(dst, src);
-}
-
void Assembler::rcpps(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0x0F);
@@ -2234,29 +2170,31 @@ void Assembler::rcpps(XMMRegister dst, Operand src) {
emit_sse_operand(dst, src);
}
-void Assembler::rsqrtps(XMMRegister dst, Operand src) {
+void Assembler::sqrtps(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0x0F);
- EMIT(0x52);
+ EMIT(0x51);
emit_sse_operand(dst, src);
}
-void Assembler::minps(XMMRegister dst, Operand src) {
+void Assembler::rsqrtps(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0x0F);
- EMIT(0x5D);
+ EMIT(0x52);
emit_sse_operand(dst, src);
}
-void Assembler::maxps(XMMRegister dst, Operand src) {
+void Assembler::cmpps(XMMRegister dst, Operand src, uint8_t cmp) {
EnsureSpace ensure_space(this);
EMIT(0x0F);
- EMIT(0x5F);
+ EMIT(0xC2);
emit_sse_operand(dst, src);
+ EMIT(cmp);
}
-void Assembler::cmpps(XMMRegister dst, Operand src, uint8_t cmp) {
+void Assembler::cmppd(XMMRegister dst, Operand src, uint8_t cmp) {
EnsureSpace ensure_space(this);
+ EMIT(0x66);
EMIT(0x0F);
EMIT(0xC2);
emit_sse_operand(dst, src);
@@ -2280,22 +2218,6 @@ void Assembler::haddps(XMMRegister dst, Operand src) {
emit_sse_operand(dst, src);
}
-void Assembler::andpd(XMMRegister dst, Operand src) {
- EnsureSpace ensure_space(this);
- EMIT(0x66);
- EMIT(0x0F);
- EMIT(0x54);
- emit_sse_operand(dst, src);
-}
-
-void Assembler::orpd(XMMRegister dst, Operand src) {
- EnsureSpace ensure_space(this);
- EMIT(0x66);
- EMIT(0x0F);
- EMIT(0x56);
- emit_sse_operand(dst, src);
-}
-
void Assembler::ucomisd(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0x66);
@@ -2398,6 +2320,16 @@ void Assembler::shufps(XMMRegister dst, XMMRegister src, byte imm8) {
EMIT(imm8);
}
+void Assembler::shufpd(XMMRegister dst, XMMRegister src, byte imm8) {
+ DCHECK(is_uint8(imm8));
+ EnsureSpace ensure_space(this);
+ EMIT(0x66);
+ EMIT(0x0F);
+ EMIT(0xC6);
+ emit_sse_operand(dst, src);
+ EMIT(imm8);
+}
+
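
A worked encoding example for the new instruction (register-to-register form):

  // shufpd xmm1, xmm2, 1  ->  66 0F C6 CA 01
  //   66     operand-size prefix selecting the packed-double form
  //   0F C6  the shufps/shufpd opcode escape + opcode
  //   CA     ModRM: mod=11, reg=001 (xmm1 = dst), rm=010 (xmm2 = src)
  //   01     imm8 shuffle control
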
void Assembler::movdqa(Operand dst, XMMRegister src) {
EnsureSpace ensure_space(this);
EMIT(0x66);
@@ -2776,6 +2708,23 @@ void Assembler::minss(XMMRegister dst, Operand src) {
emit_sse_operand(dst, src);
}
+// Packed single-precision floating-point SSE instructions.
+void Assembler::ps(byte opcode, XMMRegister dst, Operand src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x0F);
+ EMIT(opcode);
+ emit_sse_operand(dst, src);
+}
+
+// Packed double-precision floating-point SSE instructions.
+void Assembler::pd(byte opcode, XMMRegister dst, Operand src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x66);
+ EMIT(0x0F);
+ EMIT(opcode);
+ emit_sse_operand(dst, src);
+}
+
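
These two helpers factor out the byte pattern shared by the per-mnemonic
methods deleted earlier in this file; a plausible regeneration of those
methods from an opcode list would look like this (illustrative macro only,
not the literal one used in the header):

  #define PACKED_OP(name, opcode)                                           \
    void name##ps(XMMRegister dst, Operand src) { ps(opcode, dst, src); }   \
    void name##pd(XMMRegister dst, Operand src) { pd(opcode, dst, src); }
  PACKED_OP(add, 0x58)  // addps / addpd
  PACKED_OP(sub, 0x5C)  // subps / subpd
  PACKED_OP(and, 0x54)  // andps / andpd
  #undef PACKED_OP
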
// AVX instructions
void Assembler::vfmasd(byte op, XMMRegister dst, XMMRegister src1,
Operand src2) {
@@ -2811,12 +2760,25 @@ void Assembler::vpd(byte op, XMMRegister dst, XMMRegister src1, Operand src2) {
vinstr(op, dst, src1, src2, k66, k0F, kWIG);
}
+void Assembler::vshufpd(XMMRegister dst, XMMRegister src1, Operand src2,
+ byte imm8) {
+ DCHECK(is_uint8(imm8));
+ vpd(0xC6, dst, src1, src2);
+ EMIT(imm8);
+}
+
void Assembler::vcmpps(XMMRegister dst, XMMRegister src1, Operand src2,
uint8_t cmp) {
vps(0xC2, dst, src1, src2);
EMIT(cmp);
}
+void Assembler::vcmppd(XMMRegister dst, XMMRegister src1, Operand src2,
+ uint8_t cmp) {
+ vpd(0xC2, dst, src1, src2);
+ EMIT(cmp);
+}
+
void Assembler::vshufps(XMMRegister dst, XMMRegister src1, Operand src2,
byte imm8) {
DCHECK(is_uint8(imm8));
@@ -2848,6 +2810,12 @@ void Assembler::vpsrld(XMMRegister dst, XMMRegister src, uint8_t imm8) {
EMIT(imm8);
}
+void Assembler::vpsrlq(XMMRegister dst, XMMRegister src, uint8_t imm8) {
+ XMMRegister iop = XMMRegister::from_code(2);
+ vinstr(0x73, iop, dst, Operand(src), k66, k0F, kWIG);
+ EMIT(imm8);
+}
+
void Assembler::vpsraw(XMMRegister dst, XMMRegister src, uint8_t imm8) {
XMMRegister iop = XMMRegister::from_code(4);
vinstr(0x71, iop, dst, Operand(src), k66, k0F, kWIG);
@@ -3158,11 +3126,10 @@ void Assembler::emit_operand(int code, Operand adr) {
DCHECK_GT(length, 0);
// Emit updated ModRM byte containing the given register.
- pc_[0] = (adr.buf_[0] & ~0x38) | (code << 3);
+ EMIT((adr.buf_[0] & ~0x38) | (code << 3));
// Emit the rest of the encoded operand.
- for (unsigned i = 1; i < length; i++) pc_[i] = adr.buf_[i];
- pc_ += length;
+ for (unsigned i = 1; i < length; i++) EMIT(adr.buf_[i]);
// Emit relocation information if necessary.
if (length >= sizeof(int32_t) && !RelocInfo::IsNone(adr.rmode_)) {
diff --git a/deps/v8/src/codegen/ia32/assembler-ia32.h b/deps/v8/src/codegen/ia32/assembler-ia32.h
index 5225621276..8161ff8322 100644
--- a/deps/v8/src/codegen/ia32/assembler-ia32.h
+++ b/deps/v8/src/codegen/ia32/assembler-ia32.h
@@ -38,6 +38,7 @@
#define V8_CODEGEN_IA32_ASSEMBLER_IA32_H_
#include <deque>
+#include <memory>
#include "src/codegen/assembler.h"
#include "src/codegen/ia32/constants-ia32.h"
@@ -292,7 +293,7 @@ class V8_EXPORT_PRIVATE Operand {
// Only valid if len_ > 4.
RelocInfo::Mode rmode_ = RelocInfo::NONE;
- // TODO(clemensh): Get rid of this friendship, or make Operand immutable.
+ // TODO(clemensb): Get rid of this friendship, or make Operand immutable.
friend class Assembler;
};
ASSERT_TRIVIALLY_COPYABLE(Operand);
@@ -371,7 +372,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// own buffer. Otherwise it takes ownership of the provided buffer.
explicit Assembler(const AssemblerOptions&,
std::unique_ptr<AssemblerBuffer> = {});
- virtual ~Assembler() {}
// GetCode emits any pending (non-emitted) code and fills the descriptor desc.
static constexpr int kNoHandlerTable = 0;
@@ -512,6 +512,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void movzx_w(Register dst, Operand src);
void movq(XMMRegister dst, Operand src);
+
// Conditional moves
void cmov(Condition cc, Register dst, Register src) {
cmov(cc, dst, Operand(src));
@@ -849,56 +850,54 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void movups(XMMRegister dst, Operand src);
void movups(Operand dst, XMMRegister src);
void shufps(XMMRegister dst, XMMRegister src, byte imm8);
+ void shufpd(XMMRegister dst, XMMRegister src, byte imm8);
void maxss(XMMRegister dst, XMMRegister src) { maxss(dst, Operand(src)); }
void maxss(XMMRegister dst, Operand src);
void minss(XMMRegister dst, XMMRegister src) { minss(dst, Operand(src)); }
void minss(XMMRegister dst, Operand src);
- void andps(XMMRegister dst, Operand src);
- void andps(XMMRegister dst, XMMRegister src) { andps(dst, Operand(src)); }
- void andnps(XMMRegister dst, Operand src);
- void andnps(XMMRegister dst, XMMRegister src) { andnps(dst, Operand(src)); }
- void xorps(XMMRegister dst, Operand src);
- void xorps(XMMRegister dst, XMMRegister src) { xorps(dst, Operand(src)); }
- void orps(XMMRegister dst, Operand src);
- void orps(XMMRegister dst, XMMRegister src) { orps(dst, Operand(src)); }
-
- void addps(XMMRegister dst, Operand src);
- void addps(XMMRegister dst, XMMRegister src) { addps(dst, Operand(src)); }
- void subps(XMMRegister dst, Operand src);
- void subps(XMMRegister dst, XMMRegister src) { subps(dst, Operand(src)); }
- void mulps(XMMRegister dst, Operand src);
- void mulps(XMMRegister dst, XMMRegister src) { mulps(dst, Operand(src)); }
- void divps(XMMRegister dst, Operand src);
- void divps(XMMRegister dst, XMMRegister src) { divps(dst, Operand(src)); }
void rcpps(XMMRegister dst, Operand src);
void rcpps(XMMRegister dst, XMMRegister src) { rcpps(dst, Operand(src)); }
+ void sqrtps(XMMRegister dst, Operand src);
+ void sqrtps(XMMRegister dst, XMMRegister src) { sqrtps(dst, Operand(src)); }
void rsqrtps(XMMRegister dst, Operand src);
void rsqrtps(XMMRegister dst, XMMRegister src) { rsqrtps(dst, Operand(src)); }
void haddps(XMMRegister dst, Operand src);
void haddps(XMMRegister dst, XMMRegister src) { haddps(dst, Operand(src)); }
-
- void minps(XMMRegister dst, Operand src);
- void minps(XMMRegister dst, XMMRegister src) { minps(dst, Operand(src)); }
- void maxps(XMMRegister dst, Operand src);
- void maxps(XMMRegister dst, XMMRegister src) { maxps(dst, Operand(src)); }
+ void sqrtpd(XMMRegister dst, Operand src) {
+ sse2_instr(dst, src, 0x66, 0x0F, 0x51);
+ }
+ void sqrtpd(XMMRegister dst, XMMRegister src) { sqrtpd(dst, Operand(src)); }
void cmpps(XMMRegister dst, Operand src, uint8_t cmp);
void cmpps(XMMRegister dst, XMMRegister src, uint8_t cmp) {
cmpps(dst, Operand(src), cmp);
}
-#define SSE_CMP_P(instr, imm8) \
- void instr##ps(XMMRegister dst, XMMRegister src) { \
- cmpps(dst, Operand(src), imm8); \
- } \
- void instr##ps(XMMRegister dst, Operand src) { cmpps(dst, src, imm8); }
+ void cmppd(XMMRegister dst, Operand src, uint8_t cmp);
+ void cmppd(XMMRegister dst, XMMRegister src, uint8_t cmp) {
+ cmppd(dst, Operand(src), cmp);
+ }
+
+// Packed floating-point comparison operations.
+#define PACKED_CMP_LIST(V) \
+ V(cmpeq, 0x0) \
+ V(cmplt, 0x1) \
+ V(cmple, 0x2) \
+ V(cmpunord, 0x3) \
+ V(cmpneq, 0x4)
- SSE_CMP_P(cmpeq, 0x0)
- SSE_CMP_P(cmplt, 0x1)
- SSE_CMP_P(cmple, 0x2)
- SSE_CMP_P(cmpneq, 0x4)
+#define SSE_CMP_P(instr, imm8) \
+ void instr##ps(XMMRegister dst, XMMRegister src) { \
+ cmpps(dst, Operand(src), imm8); \
+ } \
+ void instr##ps(XMMRegister dst, Operand src) { cmpps(dst, src, imm8); } \
+ void instr##pd(XMMRegister dst, XMMRegister src) { \
+ cmppd(dst, Operand(src), imm8); \
+ } \
+ void instr##pd(XMMRegister dst, Operand src) { cmppd(dst, src, imm8); }
+ PACKED_CMP_LIST(SSE_CMP_P)
#undef SSE_CMP_P
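For a concrete picture, SSE_CMP_P(cmpeq, 0x0) from PACKED_CMP_LIST expands to four trivial overloads:

    void cmpeqps(XMMRegister dst, XMMRegister src) { cmpps(dst, Operand(src), 0x0); }
    void cmpeqps(XMMRegister dst, Operand src) { cmpps(dst, src, 0x0); }
    void cmpeqpd(XMMRegister dst, XMMRegister src) { cmppd(dst, Operand(src), 0x0); }
    void cmpeqpd(XMMRegister dst, Operand src) { cmppd(dst, src, 0x0); }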
// SSE2 instructions
@@ -941,22 +940,20 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void mulsd(XMMRegister dst, Operand src);
void divsd(XMMRegister dst, XMMRegister src) { divsd(dst, Operand(src)); }
void divsd(XMMRegister dst, Operand src);
- void xorpd(XMMRegister dst, XMMRegister src) { xorpd(dst, Operand(src)); }
- void xorpd(XMMRegister dst, Operand src);
void sqrtsd(XMMRegister dst, XMMRegister src) { sqrtsd(dst, Operand(src)); }
void sqrtsd(XMMRegister dst, Operand src);
- void andpd(XMMRegister dst, XMMRegister src) { andpd(dst, Operand(src)); }
- void andpd(XMMRegister dst, Operand src);
- void orpd(XMMRegister dst, XMMRegister src) { orpd(dst, Operand(src)); }
- void orpd(XMMRegister dst, Operand src);
-
void ucomisd(XMMRegister dst, XMMRegister src) { ucomisd(dst, Operand(src)); }
void ucomisd(XMMRegister dst, Operand src);
void roundss(XMMRegister dst, XMMRegister src, RoundingMode mode);
void roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode);
+ void movapd(XMMRegister dst, XMMRegister src) { movapd(dst, Operand(src)); }
+ void movapd(XMMRegister dst, Operand src) {
+ sse2_instr(dst, src, 0x66, 0x0F, 0x28);
+ }
+
void movmskpd(Register dst, XMMRegister src);
void movmskps(Register dst, XMMRegister src);
@@ -1298,6 +1295,10 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void vrcpps(XMMRegister dst, Operand src) {
vinstr(0x53, dst, xmm0, src, kNone, k0F, kWIG);
}
+ void vsqrtps(XMMRegister dst, XMMRegister src) { vsqrtps(dst, Operand(src)); }
+ void vsqrtps(XMMRegister dst, Operand src) {
+ vinstr(0x51, dst, xmm0, src, kNone, k0F, kWIG);
+ }
void vrsqrtps(XMMRegister dst, XMMRegister src) {
vrsqrtps(dst, Operand(src));
}
@@ -1310,14 +1311,24 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void vhaddps(XMMRegister dst, XMMRegister src1, Operand src2) {
vinstr(0x7C, dst, src1, src2, kF2, k0F, kWIG);
}
+ void vsqrtpd(XMMRegister dst, XMMRegister src) { vsqrtpd(dst, Operand(src)); }
+ void vsqrtpd(XMMRegister dst, Operand src) {
+ vinstr(0x51, dst, xmm0, src, k66, k0F, kWIG);
+ }
void vmovaps(XMMRegister dst, XMMRegister src) { vmovaps(dst, Operand(src)); }
void vmovaps(XMMRegister dst, Operand src) { vps(0x28, dst, xmm0, src); }
+ void vmovapd(XMMRegister dst, XMMRegister src) { vmovapd(dst, Operand(src)); }
+ void vmovapd(XMMRegister dst, Operand src) { vpd(0x28, dst, xmm0, src); }
void vmovups(XMMRegister dst, XMMRegister src) { vmovups(dst, Operand(src)); }
void vmovups(XMMRegister dst, Operand src) { vps(0x10, dst, xmm0, src); }
void vshufps(XMMRegister dst, XMMRegister src1, XMMRegister src2, byte imm8) {
vshufps(dst, src1, Operand(src2), imm8);
}
void vshufps(XMMRegister dst, XMMRegister src1, Operand src2, byte imm8);
+ void vshufpd(XMMRegister dst, XMMRegister src1, XMMRegister src2, byte imm8) {
+ vshufpd(dst, src1, Operand(src2), imm8);
+ }
+ void vshufpd(XMMRegister dst, XMMRegister src1, Operand src2, byte imm8);
void vpsllw(XMMRegister dst, XMMRegister src, uint8_t imm8);
void vpslld(XMMRegister dst, XMMRegister src, uint8_t imm8);
@@ -1325,6 +1336,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void vpsrld(XMMRegister dst, XMMRegister src, uint8_t imm8);
void vpsraw(XMMRegister dst, XMMRegister src, uint8_t imm8);
void vpsrad(XMMRegister dst, XMMRegister src, uint8_t imm8);
+ void vpsrlq(XMMRegister dst, XMMRegister src, uint8_t imm8);
void vpshufhw(XMMRegister dst, XMMRegister src, uint8_t shuffle) {
vpshufhw(dst, Operand(src), shuffle);
@@ -1489,6 +1501,11 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
}
void rorx(Register dst, Operand src, byte imm8);
+ // Implementation of packed single-precision floating-point SSE instructions.
+ void ps(byte op, XMMRegister dst, Operand src);
+ // Implementation of packed double-precision floating-point SSE instructions.
+ void pd(byte op, XMMRegister dst, Operand src);
+
#define PACKED_OP_LIST(V) \
V(and, 0x54) \
V(andn, 0x55) \
@@ -1501,6 +1518,19 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
V(div, 0x5e) \
V(max, 0x5f)
+#define SSE_PACKED_OP_DECLARE(name, opcode) \
+ void name##ps(XMMRegister dst, XMMRegister src) { \
+ ps(opcode, dst, Operand(src)); \
+ } \
+ void name##ps(XMMRegister dst, Operand src) { ps(opcode, dst, src); } \
+ void name##pd(XMMRegister dst, XMMRegister src) { \
+ pd(opcode, dst, Operand(src)); \
+ } \
+ void name##pd(XMMRegister dst, Operand src) { pd(opcode, dst, src); }
+
+ PACKED_OP_LIST(SSE_PACKED_OP_DECLARE)
+#undef SSE_PACKED_OP_DECLARE
+
#define AVX_PACKED_OP_DECLARE(name, opcode) \
void v##name##ps(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
vps(opcode, dst, src1, Operand(src2)); \
@@ -1516,24 +1546,32 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
}
PACKED_OP_LIST(AVX_PACKED_OP_DECLARE)
+#undef AVX_PACKED_OP_DECLARE
+#undef PACKED_OP_LIST
+
void vps(byte op, XMMRegister dst, XMMRegister src1, Operand src2);
void vpd(byte op, XMMRegister dst, XMMRegister src1, Operand src2);
void vcmpps(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t cmp);
-#define AVX_CMP_P(instr, imm8) \
- void instr##ps(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
- vcmpps(dst, src1, Operand(src2), imm8); \
- } \
- void instr##ps(XMMRegister dst, XMMRegister src1, Operand src2) { \
- vcmpps(dst, src1, src2, imm8); \
- }
-
- AVX_CMP_P(vcmpeq, 0x0)
- AVX_CMP_P(vcmplt, 0x1)
- AVX_CMP_P(vcmple, 0x2)
- AVX_CMP_P(vcmpneq, 0x4)
-
+ void vcmppd(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t cmp);
+
+#define AVX_CMP_P(instr, imm8) \
+ void v##instr##ps(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
+ vcmpps(dst, src1, Operand(src2), imm8); \
+ } \
+ void v##instr##ps(XMMRegister dst, XMMRegister src1, Operand src2) { \
+ vcmpps(dst, src1, src2, imm8); \
+ } \
+ void v##instr##pd(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
+ vcmppd(dst, src1, Operand(src2), imm8); \
+ } \
+ void v##instr##pd(XMMRegister dst, XMMRegister src1, Operand src2) { \
+ vcmppd(dst, src1, src2, imm8); \
+ }
+
+ PACKED_CMP_LIST(AVX_CMP_P)
#undef AVX_CMP_P
+#undef PACKED_CMP_LIST
// Other SSE and AVX instructions
#define DECLARE_SSE2_INSTRUCTION(instruction, prefix, escape, opcode) \
diff --git a/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc b/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc
index 070f315977..dd11bc496e 100644
--- a/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc
+++ b/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc
@@ -1168,57 +1168,44 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
}
}
-void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
- const ParameterCount& expected,
- const ParameterCount& actual) {
- Label skip_hook;
-
- ExternalReference debug_hook_active =
- ExternalReference::debug_hook_on_function_call_address(isolate());
- push(eax);
- cmpb(ExternalReferenceAsOperand(debug_hook_active, eax), Immediate(0));
- pop(eax);
- j(equal, &skip_hook);
-
- {
- FrameScope frame(this,
- has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
- if (expected.is_reg()) {
- SmiTag(expected.reg());
- Push(expected.reg());
- }
- if (actual.is_reg()) {
- SmiTag(actual.reg());
- Push(actual.reg());
- SmiUntag(actual.reg());
- }
- if (new_target.is_valid()) {
- Push(new_target);
- }
- Push(fun);
- Push(fun);
- Operand receiver_op =
- actual.is_reg()
- ? Operand(ebp, actual.reg(), times_system_pointer_size,
- kSystemPointerSize * 2)
- : Operand(ebp, actual.immediate() * times_system_pointer_size +
- kSystemPointerSize * 2);
- Push(receiver_op);
- CallRuntime(Runtime::kDebugOnFunctionCall);
- Pop(fun);
- if (new_target.is_valid()) {
- Pop(new_target);
- }
- if (actual.is_reg()) {
- Pop(actual.reg());
- SmiUntag(actual.reg());
- }
- if (expected.is_reg()) {
- Pop(expected.reg());
- SmiUntag(expected.reg());
- }
+void MacroAssembler::CallDebugOnFunctionCall(Register fun, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual) {
+ FrameScope frame(this, has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
+ if (expected.is_reg()) {
+ SmiTag(expected.reg());
+ Push(expected.reg());
+ }
+ if (actual.is_reg()) {
+ SmiTag(actual.reg());
+ Push(actual.reg());
+ SmiUntag(actual.reg());
+ }
+ if (new_target.is_valid()) {
+ Push(new_target);
+ }
+ Push(fun);
+ Push(fun);
+ Operand receiver_op =
+ actual.is_reg()
+ ? Operand(ebp, actual.reg(), times_system_pointer_size,
+ kSystemPointerSize * 2)
+ : Operand(ebp, actual.immediate() * times_system_pointer_size +
+ kSystemPointerSize * 2);
+ Push(receiver_op);
+ CallRuntime(Runtime::kDebugOnFunctionCall);
+ Pop(fun);
+ if (new_target.is_valid()) {
+ Pop(new_target);
+ }
+ if (actual.is_reg()) {
+ Pop(actual.reg());
+ SmiUntag(actual.reg());
+ }
+ if (expected.is_reg()) {
+ Pop(expected.reg());
+ SmiUntag(expected.reg());
}
- bind(&skip_hook);
}
void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
@@ -1233,7 +1220,16 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
DCHECK_IMPLIES(actual.is_reg(), actual.reg() == eax);
// On function call, call into the debugger if necessary.
- CheckDebugHook(function, new_target, expected, actual);
+ Label debug_hook, continue_after_hook;
+ {
+ ExternalReference debug_hook_active =
+ ExternalReference::debug_hook_on_function_call_address(isolate());
+ push(eax);
+ cmpb(ExternalReferenceAsOperand(debug_hook_active, eax), Immediate(0));
+ pop(eax);
+ j(not_equal, &debug_hook, Label::kNear);
+ }
+ bind(&continue_after_hook);
// Clear the new.target register if not given.
if (!new_target.is_valid()) {
@@ -1256,8 +1252,15 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
DCHECK(flag == JUMP_FUNCTION);
JumpCodeObject(ecx);
}
- bind(&done);
}
+ jmp(&done, Label::kNear);
+
+ // Deferred debug hook.
+ bind(&debug_hook);
+ CallDebugOnFunctionCall(function, new_target, expected, actual);
+ jmp(&continue_after_hook, Label::kNear);
+
+ bind(&done);
}
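This rewrite turns the debug hook into a deferred, out-of-line block, so the common case pays only a compare and a not-taken near branch. The resulting control flow, in outline:

    cmpb [debug_hook_active], 0
    jnz debug_hook                ; near, almost never taken
  continue_after_hook:
    ...invoke the function...
    jmp done
  debug_hook:                     ; deferred slow path
    CallDebugOnFunctionCall(...)
    jmp continue_after_hook
  done: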
void MacroAssembler::InvokeFunction(Register fun, Register new_target,
@@ -1479,6 +1482,15 @@ void TurboAssembler::Psrlw(XMMRegister dst, uint8_t shift) {
}
}
+void TurboAssembler::Psrlq(XMMRegister dst, uint8_t shift) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vpsrlq(dst, dst, shift);
+ } else {
+ psrlq(dst, shift);
+ }
+}
+
void TurboAssembler::Psignb(XMMRegister dst, Operand src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
diff --git a/deps/v8/src/codegen/ia32/macro-assembler-ia32.h b/deps/v8/src/codegen/ia32/macro-assembler-ia32.h
index c65871cfad..9e7774c55d 100644
--- a/deps/v8/src/codegen/ia32/macro-assembler-ia32.h
+++ b/deps/v8/src/codegen/ia32/macro-assembler-ia32.h
@@ -237,6 +237,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Pshufd(XMMRegister dst, Operand src, uint8_t shuffle);
void Psraw(XMMRegister dst, uint8_t shift);
void Psrlw(XMMRegister dst, uint8_t shift);
+ void Psrlq(XMMRegister dst, uint8_t shift);
// SSE/SSE2 instructions with AVX version.
#define AVX_OP2_WITH_TYPE(macro_name, name, dst_type, src_type) \
@@ -258,6 +259,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
AVX_OP2_WITH_TYPE(Movd, movd, Register, XMMRegister)
AVX_OP2_WITH_TYPE(Movd, movd, Operand, XMMRegister)
AVX_OP2_WITH_TYPE(Cvtdq2ps, cvtdq2ps, XMMRegister, Operand)
+ AVX_OP2_WITH_TYPE(Sqrtpd, sqrtpd, XMMRegister, const Operand&)
+ AVX_OP2_WITH_TYPE(Movapd, movapd, XMMRegister, XMMRegister)
+ AVX_OP2_WITH_TYPE(Movapd, movapd, XMMRegister, const Operand&)
#undef AVX_OP2_WITH_TYPE
@@ -278,6 +282,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
AVX_OP3_XO(Packsswb, packsswb)
AVX_OP3_XO(Packuswb, packuswb)
+ AVX_OP3_XO(Paddusb, paddusb)
AVX_OP3_XO(Pcmpeqb, pcmpeqb)
AVX_OP3_XO(Pcmpeqw, pcmpeqw)
AVX_OP3_XO(Pcmpeqd, pcmpeqd)
@@ -294,10 +299,41 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
AVX_OP3_XO(Xorpd, xorpd)
AVX_OP3_XO(Sqrtss, sqrtss)
AVX_OP3_XO(Sqrtsd, sqrtsd)
+ AVX_OP3_XO(Orpd, orpd)
+ AVX_OP3_XO(Andnpd, andnpd)
#undef AVX_OP3_XO
#undef AVX_OP3_WITH_TYPE
+// Only use this macro when dst and src1 are the same in the SSE case.
+#define AVX_PACKED_OP3_WITH_TYPE(macro_name, name, dst_type, src_type) \
+ void macro_name(dst_type dst, dst_type src1, src_type src2) { \
+ if (CpuFeatures::IsSupported(AVX)) { \
+ CpuFeatureScope scope(this, AVX); \
+ v##name(dst, src1, src2); \
+ } else { \
+ DCHECK_EQ(dst, src1); \
+ name(dst, src2); \
+ } \
+ }
+#define AVX_PACKED_OP3(macro_name, name) \
+ AVX_PACKED_OP3_WITH_TYPE(macro_name, name, XMMRegister, XMMRegister) \
+ AVX_PACKED_OP3_WITH_TYPE(macro_name, name, XMMRegister, Operand)
+
+ AVX_PACKED_OP3(Addpd, addpd)
+ AVX_PACKED_OP3(Subpd, subpd)
+ AVX_PACKED_OP3(Mulpd, mulpd)
+ AVX_PACKED_OP3(Divpd, divpd)
+ AVX_PACKED_OP3(Cmpeqpd, cmpeqpd)
+ AVX_PACKED_OP3(Cmpneqpd, cmpneqpd)
+ AVX_PACKED_OP3(Cmpltpd, cmpltpd)
+ AVX_PACKED_OP3(Cmplepd, cmplepd)
+ AVX_PACKED_OP3(Minpd, minpd)
+ AVX_PACKED_OP3(Maxpd, maxpd)
+ AVX_PACKED_OP3(Cmpunordpd, cmpunordpd)
+#undef AVX_PACKED_OP3
+#undef AVX_PACKED_OP3_WITH_TYPE
+
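A usage sketch for the generated three-operand wrappers (assuming the usual `__ masm->` shorthand); the SSE fallback is destructive, hence the dst/src1 restriction called out in the comment above:

    __ Addpd(xmm0, xmm0, xmm1);  // AVX: vaddpd xmm0, xmm0, xmm1
                                 // SSE2: addpd xmm0, xmm1
    __ Addpd(xmm0, xmm1, xmm2);  // AVX only; the SSE2 path would trip
                                 // DCHECK_EQ(dst, src1)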
// Non-SSE2 instructions.
#define AVX_OP2_WITH_TYPE_SCOPE(macro_name, name, dst_type, src_type, \
sse_scope) \
@@ -529,11 +565,11 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
const ParameterCount& expected,
const ParameterCount& actual, InvokeFlag flag);
- // On function call, call into the debugger if necessary.
+ // On function call, call into the debugger.
// This may clobber ecx.
- void CheckDebugHook(Register fun, Register new_target,
- const ParameterCount& expected,
- const ParameterCount& actual);
+ void CallDebugOnFunctionCall(Register fun, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual);
// Invoke the JavaScript function in the given register. Changes the
// current context to the context in the function before invoking.
diff --git a/deps/v8/src/codegen/interface-descriptors.cc b/deps/v8/src/codegen/interface-descriptors.cc
index f537ebc899..1525f814cd 100644
--- a/deps/v8/src/codegen/interface-descriptors.cc
+++ b/deps/v8/src/codegen/interface-descriptors.cc
@@ -278,6 +278,11 @@ void AsyncFunctionStackParameterDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(0, nullptr);
}
+void GetIteratorStackParameterDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ data->InitializePlatformSpecific(0, nullptr);
+}
+
void LoadWithVectorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {ReceiverRegister(), NameRegister(), SlotRegister(),
diff --git a/deps/v8/src/codegen/interface-descriptors.h b/deps/v8/src/codegen/interface-descriptors.h
index 544d62fd9f..e305d666a3 100644
--- a/deps/v8/src/codegen/interface-descriptors.h
+++ b/deps/v8/src/codegen/interface-descriptors.h
@@ -9,12 +9,17 @@
#include "src/codegen/machine-type.h"
#include "src/codegen/register-arch.h"
+#include "src/codegen/tnode.h"
#include "src/common/globals.h"
#include "src/execution/isolate.h"
namespace v8 {
namespace internal {
+#define TORQUE_BUILTIN_LIST_TFC(V) \
+ BUILTIN_LIST_FROM_TORQUE(IGNORE_BUILTIN, IGNORE_BUILTIN, V, IGNORE_BUILTIN, \
+ IGNORE_BUILTIN, IGNORE_BUILTIN)
+
#define INTERFACE_DESCRIPTOR_LIST(V) \
V(Abort) \
V(Allocate) \
@@ -52,6 +57,7 @@ namespace internal {
V(FastNewFunctionContext) \
V(FastNewObject) \
V(FrameDropperTrampoline) \
+ V(GetIteratorStackParameter) \
V(GetProperty) \
V(GrowArrayElements) \
V(InterpreterCEntry1) \
@@ -89,7 +95,8 @@ namespace internal {
V(WasmTableGet) \
V(WasmTableSet) \
V(WasmThrow) \
- BUILTIN_LIST_TFS(V)
+ BUILTIN_LIST_TFS(V) \
+ TORQUE_BUILTIN_LIST_TFC(V)
class V8_EXPORT_PRIVATE CallInterfaceDescriptorData {
public:
@@ -486,6 +493,46 @@ class V8_EXPORT_PRIVATE VoidDescriptor : public CallInterfaceDescriptor {
DECLARE_DESCRIPTOR(VoidDescriptor, CallInterfaceDescriptor)
};
+// This class is subclassed by Torque-generated call interface descriptors.
+template <int parameter_count>
+class TorqueInterfaceDescriptor : public CallInterfaceDescriptor {
+ public:
+ static constexpr int kDescriptorFlags = CallInterfaceDescriptorData::kNoFlags;
+ static constexpr int kParameterCount = parameter_count;
+ enum ParameterIndices { kContext = kParameterCount };
+ template <int i>
+ static ParameterIndices ParameterIndex() {
+ STATIC_ASSERT(0 <= i && i < kParameterCount);
+ return static_cast<ParameterIndices>(i);
+ }
+ static constexpr int kReturnCount = 1;
+
+ using CallInterfaceDescriptor::CallInterfaceDescriptor;
+
+ protected:
+ static const int kRegisterParams =
+ kParameterCount > kMaxTFSBuiltinRegisterParams
+ ? kMaxTFSBuiltinRegisterParams
+ : kParameterCount;
+ static const int kStackParams = kParameterCount - kRegisterParams;
+ virtual MachineType ReturnType() = 0;
+ virtual std::array<MachineType, kParameterCount> ParameterTypes() = 0;
+ void InitializePlatformSpecific(CallInterfaceDescriptorData* data) override {
+ DefaultInitializePlatformSpecific(data, kRegisterParams);
+ }
+ void InitializePlatformIndependent(
+ CallInterfaceDescriptorData* data) override {
+ std::vector<MachineType> machine_types = {ReturnType()};
+ auto parameter_types = ParameterTypes();
+ machine_types.insert(machine_types.end(), parameter_types.begin(),
+ parameter_types.end());
+ DCHECK_EQ(kReturnCount + kParameterCount, machine_types.size());
+ data->InitializePlatformIndependent(Flags(kDescriptorFlags), kReturnCount,
+ kParameterCount, machine_types.data(),
+ static_cast<int>(machine_types.size()));
+ }
+};
+
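A hypothetical subclass, to show the shape the Torque generator has to produce (the real ones are emitted into torque-generated/interface-descriptors-tq.inc, included further down in this file):

    class ExampleTorqueDescriptor : public TorqueInterfaceDescriptor<2> {
      MachineType ReturnType() override { return MachineType::AnyTagged(); }
      std::array<MachineType, 2> ParameterTypes() override {
        return {MachineType::AnyTagged(), MachineType::AnyTagged()};
      }
    };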
// Dummy descriptor used to mark builtins that don't yet have their proper
// descriptor associated.
using DummyDescriptor = VoidDescriptor;
@@ -706,7 +753,7 @@ class FastNewFunctionContextDescriptor : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kScopeInfo, kSlots)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kScopeInfo
- MachineType::Int32()) // kSlots
+ MachineType::Uint32()) // kSlots
DECLARE_DESCRIPTOR(FastNewFunctionContextDescriptor, CallInterfaceDescriptor)
static const Register ScopeInfoRegister();
@@ -771,6 +818,16 @@ class AsyncFunctionStackParameterDescriptor final
CallInterfaceDescriptor)
};
+class GetIteratorStackParameterDescriptor final
+ : public CallInterfaceDescriptor {
+ public:
+ DEFINE_PARAMETERS(kReceiver, kCallSlot, kFeedback, kResult)
+ DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), MachineType::AnyTagged(),
+ MachineType::AnyTagged(), MachineType::AnyTagged())
+ DECLARE_DESCRIPTOR(GetIteratorStackParameterDescriptor,
+ CallInterfaceDescriptor)
+};
+
class GetPropertyDescriptor final : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kObject, kKey)
@@ -1298,6 +1355,11 @@ class CloneObjectWithVectorDescriptor final : public CallInterfaceDescriptor {
BUILTIN_LIST_TFS(DEFINE_TFS_BUILTIN_DESCRIPTOR)
#undef DEFINE_TFS_BUILTIN_DESCRIPTOR
+// This file contains interface descriptor class definitions for builtins
+// defined in Torque. It is included here because the class definitions need to
+// precede the definition of name##Descriptor::key() below.
+#include "torque-generated/interface-descriptors-tq.inc"
+
#undef DECLARE_DEFAULT_DESCRIPTOR
#undef DECLARE_DESCRIPTOR_WITH_BASE
#undef DECLARE_DESCRIPTOR
diff --git a/deps/v8/src/codegen/machine-type.h b/deps/v8/src/codegen/machine-type.h
index 15e3df65c5..a0bef4e07d 100644
--- a/deps/v8/src/codegen/machine-type.h
+++ b/deps/v8/src/codegen/machine-type.h
@@ -9,6 +9,7 @@
#include "src/base/bits.h"
#include "src/common/globals.h"
+#include "src/flags/flags.h"
namespace v8 {
namespace internal {
@@ -114,6 +115,10 @@ class MachineType {
constexpr bool IsCompressedPointer() const {
return representation() == MachineRepresentation::kCompressedPointer;
}
+ constexpr static MachineRepresentation TaggedRepresentation() {
+ return (kTaggedSize == 4) ? MachineRepresentation::kWord32
+ : MachineRepresentation::kWord64;
+ }
constexpr static MachineRepresentation PointerRepresentation() {
return (kSystemPointerSize == 4) ? MachineRepresentation::kWord32
: MachineRepresentation::kWord64;
@@ -239,71 +244,79 @@ class MachineType {
// pointer flag is enabled. Otherwise, they return the corresponding tagged
// one.

constexpr static MachineRepresentation RepCompressedTagged() {
-#ifdef V8_COMPRESS_POINTERS
- return MachineRepresentation::kCompressed;
-#else
- return MachineRepresentation::kTagged;
-#endif
+ if (COMPRESS_POINTERS_BOOL && FLAG_turbo_decompression_elimination) {
+ return MachineRepresentation::kCompressed;
+ } else {
+ return MachineRepresentation::kTagged;
+ }
}
constexpr static MachineRepresentation RepCompressedTaggedSigned() {
-#ifdef V8_COMPRESS_POINTERS
- return MachineRepresentation::kCompressedSigned;
-#else
- return MachineRepresentation::kTaggedSigned;
-#endif
+ if (COMPRESS_POINTERS_BOOL && FLAG_turbo_decompression_elimination) {
+ return MachineRepresentation::kCompressedSigned;
+ } else {
+ return MachineRepresentation::kTaggedSigned;
+ }
}
constexpr static MachineRepresentation RepCompressedTaggedPointer() {
-#ifdef V8_COMPRESS_POINTERS
- return MachineRepresentation::kCompressedPointer;
-#else
- return MachineRepresentation::kTaggedPointer;
-#endif
+ if (COMPRESS_POINTERS_BOOL && FLAG_turbo_decompression_elimination) {
+ return MachineRepresentation::kCompressedPointer;
+ } else {
+ return MachineRepresentation::kTaggedPointer;
+ }
+ }
+
+ constexpr static MachineType TypeRawTagged() {
+ if (COMPRESS_POINTERS_BOOL && FLAG_turbo_decompression_elimination) {
+ return MachineType::Int32();
+ } else {
+ return MachineType::Pointer();
+ }
}
constexpr static MachineType TypeCompressedTagged() {
-#ifdef V8_COMPRESS_POINTERS
- return MachineType::AnyCompressed();
-#else
- return MachineType::AnyTagged();
-#endif
+ if (COMPRESS_POINTERS_BOOL && FLAG_turbo_decompression_elimination) {
+ return MachineType::AnyCompressed();
+ } else {
+ return MachineType::AnyTagged();
+ }
}
constexpr static MachineType TypeCompressedTaggedSigned() {
-#ifdef V8_COMPRESS_POINTERS
- return MachineType::CompressedSigned();
-#else
- return MachineType::TaggedSigned();
-#endif
+ if (COMPRESS_POINTERS_BOOL && FLAG_turbo_decompression_elimination) {
+ return MachineType::CompressedSigned();
+ } else {
+ return MachineType::TaggedSigned();
+ }
}
constexpr static MachineType TypeCompressedTaggedPointer() {
-#ifdef V8_COMPRESS_POINTERS
- return MachineType::CompressedPointer();
-#else
- return MachineType::TaggedPointer();
-#endif
+ if (COMPRESS_POINTERS_BOOL && FLAG_turbo_decompression_elimination) {
+ return MachineType::CompressedPointer();
+ } else {
+ return MachineType::TaggedPointer();
+ }
}
constexpr bool IsCompressedTagged() const {
-#ifdef V8_COMPRESS_POINTERS
- return IsCompressed();
-#else
- return IsTagged();
-#endif
+ if (COMPRESS_POINTERS_BOOL && FLAG_turbo_decompression_elimination) {
+ return IsCompressed();
+ } else {
+ return IsTagged();
+ }
}
constexpr bool IsCompressedTaggedSigned() const {
-#ifdef V8_COMPRESS_POINTERS
- return IsCompressedSigned();
-#else
- return IsTaggedSigned();
-#endif
+ if (COMPRESS_POINTERS_BOOL && FLAG_turbo_decompression_elimination) {
+ return IsCompressedSigned();
+ } else {
+ return IsTaggedSigned();
+ }
}
constexpr bool IsCompressedTaggedPointer() const {
-#ifdef V8_COMPRESS_POINTERS
- return IsCompressedPointer();
-#else
- return IsTaggedPointer();
-#endif
+ if (COMPRESS_POINTERS_BOOL && FLAG_turbo_decompression_elimination) {
+ return IsCompressedPointer();
+ } else {
+ return IsTaggedPointer();
+ }
}
static MachineType TypeForRepresentation(const MachineRepresentation& rep,
@@ -405,11 +418,11 @@ inline bool IsAnyCompressed(MachineRepresentation rep) {
}
inline bool IsAnyCompressedTagged(MachineRepresentation rep) {
-#ifdef V8_COMPRESS_POINTERS
- return IsAnyCompressed(rep);
-#else
- return IsAnyTagged(rep);
-#endif
+ if (COMPRESS_POINTERS_BOOL && FLAG_turbo_decompression_elimination) {
+ return IsAnyCompressed(rep);
+ } else {
+ return IsAnyTagged(rep);
+ }
}
// Gets the log2 of the element size in bytes of the machine type.
@@ -431,7 +444,6 @@ V8_EXPORT_PRIVATE inline int ElementSizeLog2Of(MachineRepresentation rep) {
case MachineRepresentation::kTaggedSigned:
case MachineRepresentation::kTaggedPointer:
case MachineRepresentation::kTagged:
- return kSystemPointerSizeLog2;
case MachineRepresentation::kCompressedSigned:
case MachineRepresentation::kCompressedPointer:
case MachineRepresentation::kCompressed:
diff --git a/deps/v8/src/codegen/mips/assembler-mips-inl.h b/deps/v8/src/codegen/mips/assembler-mips-inl.h
index d8181ad8f5..53e6f93411 100644
--- a/deps/v8/src/codegen/mips/assembler-mips-inl.h
+++ b/deps/v8/src/codegen/mips/assembler-mips-inl.h
@@ -133,7 +133,7 @@ void Assembler::set_target_internal_reference_encoded_at(Address pc,
if (Assembler::IsJicOrJialc(instr2)) {
// Encoded internal references are lui/jic load of 32-bit absolute address.
uint32_t lui_offset_u, jic_offset_u;
- Assembler::UnpackTargetAddressUnsigned(imm, lui_offset_u, jic_offset_u);
+ Assembler::UnpackTargetAddressUnsigned(imm, &lui_offset_u, &jic_offset_u);
Assembler::instr_at_put(pc + 0 * kInstrSize, instr1 | lui_offset_u);
Assembler::instr_at_put(pc + 1 * kInstrSize, instr2 | jic_offset_u);
@@ -183,7 +183,8 @@ void RelocInfo::set_target_object(Heap* heap, HeapObject target,
DCHECK(IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_));
Assembler::set_target_address_at(pc_, constant_pool_, target.ptr(),
icache_flush_mode);
- if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null()) {
+ if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null() &&
+ !FLAG_disable_write_barriers) {
WriteBarrierForCode(host(), this, target);
}
}
diff --git a/deps/v8/src/codegen/mips/assembler-mips.cc b/deps/v8/src/codegen/mips/assembler-mips.cc
index 423da2fb65..768b16b86c 100644
--- a/deps/v8/src/codegen/mips/assembler-mips.cc
+++ b/deps/v8/src/codegen/mips/assembler-mips.cc
@@ -231,8 +231,8 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
Handle<HeapObject> object;
switch (request.kind()) {
case HeapObjectRequest::kHeapNumber:
- object = isolate->factory()->NewHeapNumber(request.heap_number(),
- AllocationType::kOld);
+ object = isolate->factory()->NewHeapNumber<AllocationType::kOld>(
+ request.heap_number());
break;
case HeapObjectRequest::kStringConstant:
const StringConstantBase* str = request.string();
@@ -742,27 +742,27 @@ uint32_t Assembler::CreateTargetAddress(Instr instr_lui, Instr instr_jic) {
// before that addition, the difference between the upper part of the target
// address and the upper part of the sign-extended offset (0xFFFF or 0x0000)
// will be placed in the jic register by the lui instruction.
-void Assembler::UnpackTargetAddress(uint32_t address, int16_t& lui_offset,
- int16_t& jic_offset) {
- lui_offset = (address & kHiMask) >> kLuiShift;
- jic_offset = address & kLoMask;
+void Assembler::UnpackTargetAddress(uint32_t address, int16_t* lui_offset,
+ int16_t* jic_offset) {
+ *lui_offset = (address & kHiMask) >> kLuiShift;
+ *jic_offset = address & kLoMask;
- if (jic_offset < 0) {
- lui_offset -= kImm16Mask;
+ if (*jic_offset < 0) {
+ *lui_offset -= kImm16Mask;
}
}
void Assembler::UnpackTargetAddressUnsigned(uint32_t address,
- uint32_t& lui_offset,
- uint32_t& jic_offset) {
+ uint32_t* lui_offset,
+ uint32_t* jic_offset) {
int16_t lui_offset16 = (address & kHiMask) >> kLuiShift;
int16_t jic_offset16 = address & kLoMask;
if (jic_offset16 < 0) {
lui_offset16 -= kImm16Mask;
}
- lui_offset = static_cast<uint32_t>(lui_offset16) & kImm16Mask;
- jic_offset = static_cast<uint32_t>(jic_offset16) & kImm16Mask;
+ *lui_offset = static_cast<uint32_t>(lui_offset16) & kImm16Mask;
+ *jic_offset = static_cast<uint32_t>(jic_offset16) & kImm16Mask;
}
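A worked example of the sign-extension compensation, for an assumed address 0x12348000:

    lui_offset16 = 0x1234, jic_offset16 = 0x8000  (negative as int16_t)
    lui_offset16 -= kImm16Mask                    // same as += 1 mod 2^16 -> 0x1235
    at run time: (0x1235 << 16) + sign_extend(0x8000) = 0x12348000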
void Assembler::PatchLuiOriImmediate(int pc, int32_t imm, Instr instr_lui,
@@ -977,7 +977,7 @@ void Assembler::target_at_put(int32_t pos, int32_t target_pos,
if (IsJicOrJialc(instr2)) {
uint32_t lui_offset_u, jic_offset_u;
- UnpackTargetAddressUnsigned(imm, lui_offset_u, jic_offset_u);
+ UnpackTargetAddressUnsigned(imm, &lui_offset_u, &jic_offset_u);
instr_at_put(pos + 0 * kInstrSize, instr1 | lui_offset_u);
instr_at_put(pos + 1 * kInstrSize, instr2 | jic_offset_u);
} else {
@@ -1928,7 +1928,7 @@ void Assembler::lsa(Register rd, Register rt, Register rs, uint8_t sa) {
// ------------Memory-instructions-------------
-void Assembler::AdjustBaseAndOffset(MemOperand& src,
+void Assembler::AdjustBaseAndOffset(MemOperand* src,
OffsetAccessType access_type,
int second_access_add_to_offset) {
// This method is used to adjust the base register and offset pair
@@ -1941,26 +1941,26 @@ void Assembler::AdjustBaseAndOffset(MemOperand& src,
// pointer register).
// We preserve the "alignment" of 'offset' by adjusting it by a multiple of 8.
- bool doubleword_aligned = (src.offset() & (kDoubleSize - 1)) == 0;
+ bool doubleword_aligned = (src->offset() & (kDoubleSize - 1)) == 0;
bool two_accesses = static_cast<bool>(access_type) || !doubleword_aligned;
DCHECK_LE(second_access_add_to_offset, 7); // Must be <= 7.
// is_int16 must be passed a signed value, hence the static cast below.
- if (is_int16(src.offset()) &&
+ if (is_int16(src->offset()) &&
(!two_accesses || is_int16(static_cast<int32_t>(
- src.offset() + second_access_add_to_offset)))) {
+ src->offset() + second_access_add_to_offset)))) {
// Nothing to do: 'offset' (and, if needed, 'offset + 4', or other specified
// value) fits into int16_t.
return;
}
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- DCHECK(src.rm() != scratch); // Must not overwrite the register 'base'
- // while loading 'offset'.
+ DCHECK(src->rm() != scratch); // Must not overwrite the register 'base'
+ // while loading 'offset'.
#ifdef DEBUG
// Remember the "(mis)alignment" of 'offset', it will be checked at the end.
- uint32_t misalignment = src.offset() & (kDoubleSize - 1);
+ uint32_t misalignment = src->offset() & (kDoubleSize - 1);
#endif
// Do not load the whole 32-bit 'offset' if it can be represented as
@@ -1972,13 +1972,13 @@ void Assembler::AdjustBaseAndOffset(MemOperand& src,
0x7FF8; // Max int16_t that's a multiple of 8.
constexpr int32_t kMaxOffsetForSimpleAdjustment =
2 * kMinOffsetForSimpleAdjustment;
- if (0 <= src.offset() && src.offset() <= kMaxOffsetForSimpleAdjustment) {
- addiu(at, src.rm(), kMinOffsetForSimpleAdjustment);
- src.offset_ -= kMinOffsetForSimpleAdjustment;
- } else if (-kMaxOffsetForSimpleAdjustment <= src.offset() &&
- src.offset() < 0) {
- addiu(at, src.rm(), -kMinOffsetForSimpleAdjustment);
- src.offset_ += kMinOffsetForSimpleAdjustment;
+ if (0 <= src->offset() && src->offset() <= kMaxOffsetForSimpleAdjustment) {
+ addiu(at, src->rm(), kMinOffsetForSimpleAdjustment);
+ src->offset_ -= kMinOffsetForSimpleAdjustment;
+ } else if (-kMaxOffsetForSimpleAdjustment <= src->offset() &&
+ src->offset() < 0) {
+ addiu(at, src->rm(), -kMinOffsetForSimpleAdjustment);
+ src->offset_ += kMinOffsetForSimpleAdjustment;
} else if (IsMipsArchVariant(kMips32r6)) {
// On r6 take advantage of the aui instruction, e.g.:
// aui at, base, offset_high
@@ -1989,12 +1989,12 @@ void Assembler::AdjustBaseAndOffset(MemOperand& src,
// addiu at, at, 8
// lw reg_lo, (offset_low-8)(at)
// lw reg_hi, (offset_low-4)(at)
- int16_t offset_high = static_cast<uint16_t>(src.offset() >> 16);
- int16_t offset_low = static_cast<uint16_t>(src.offset());
+ int16_t offset_high = static_cast<uint16_t>(src->offset() >> 16);
+ int16_t offset_low = static_cast<uint16_t>(src->offset());
offset_high += (offset_low < 0)
? 1
: 0; // Account for offset sign extension in load/store.
- aui(scratch, src.rm(), static_cast<uint16_t>(offset_high));
+ aui(scratch, src->rm(), static_cast<uint16_t>(offset_high));
if (two_accesses && !is_int16(static_cast<int32_t>(
offset_low + second_access_add_to_offset))) {
// Avoid overflow in the 16-bit offset of the load/store instruction when
@@ -2002,7 +2002,7 @@ void Assembler::AdjustBaseAndOffset(MemOperand& src,
addiu(scratch, scratch, kDoubleSize);
offset_low -= kDoubleSize;
}
- src.offset_ = offset_low;
+ src->offset_ = offset_low;
} else {
// Do not load the whole 32-bit 'offset' if it can be represented as
// a sum of three 16-bit signed offsets. This can save an instruction.
@@ -2013,62 +2013,62 @@ void Assembler::AdjustBaseAndOffset(MemOperand& src,
2 * kMinOffsetForSimpleAdjustment;
constexpr int32_t kMaxOffsetForMediumAdjustment =
3 * kMinOffsetForSimpleAdjustment;
- if (0 <= src.offset() && src.offset() <= kMaxOffsetForMediumAdjustment) {
- addiu(scratch, src.rm(), kMinOffsetForMediumAdjustment / 2);
+ if (0 <= src->offset() && src->offset() <= kMaxOffsetForMediumAdjustment) {
+ addiu(scratch, src->rm(), kMinOffsetForMediumAdjustment / 2);
addiu(scratch, scratch, kMinOffsetForMediumAdjustment / 2);
- src.offset_ -= kMinOffsetForMediumAdjustment;
- } else if (-kMaxOffsetForMediumAdjustment <= src.offset() &&
- src.offset() < 0) {
- addiu(scratch, src.rm(), -kMinOffsetForMediumAdjustment / 2);
+ src->offset_ -= kMinOffsetForMediumAdjustment;
+ } else if (-kMaxOffsetForMediumAdjustment <= src->offset() &&
+ src->offset() < 0) {
+ addiu(scratch, src->rm(), -kMinOffsetForMediumAdjustment / 2);
addiu(scratch, scratch, -kMinOffsetForMediumAdjustment / 2);
- src.offset_ += kMinOffsetForMediumAdjustment;
+ src->offset_ += kMinOffsetForMediumAdjustment;
} else {
// Now that all shorter options have been exhausted, load the full 32-bit
// offset.
- int32_t loaded_offset = RoundDown(src.offset(), kDoubleSize);
+ int32_t loaded_offset = RoundDown(src->offset(), kDoubleSize);
lui(scratch, (loaded_offset >> kLuiShift) & kImm16Mask);
ori(scratch, scratch, loaded_offset & kImm16Mask); // Load 32-bit offset.
- addu(scratch, scratch, src.rm());
- src.offset_ -= loaded_offset;
+ addu(scratch, scratch, src->rm());
+ src->offset_ -= loaded_offset;
}
}
- src.rm_ = scratch;
+ src->rm_ = scratch;
- DCHECK(is_int16(src.offset()));
+ DCHECK(is_int16(src->offset()));
if (two_accesses) {
DCHECK(is_int16(
- static_cast<int32_t>(src.offset() + second_access_add_to_offset)));
+ static_cast<int32_t>(src->offset() + second_access_add_to_offset)));
}
- DCHECK(misalignment == (src.offset() & (kDoubleSize - 1)));
+ DCHECK(misalignment == (src->offset() & (kDoubleSize - 1)));
}
void Assembler::lb(Register rd, const MemOperand& rs) {
MemOperand source = rs;
- AdjustBaseAndOffset(source);
+ AdjustBaseAndOffset(&source);
GenInstrImmediate(LB, source.rm(), rd, source.offset());
}
void Assembler::lbu(Register rd, const MemOperand& rs) {
MemOperand source = rs;
- AdjustBaseAndOffset(source);
+ AdjustBaseAndOffset(&source);
GenInstrImmediate(LBU, source.rm(), rd, source.offset());
}
void Assembler::lh(Register rd, const MemOperand& rs) {
MemOperand source = rs;
- AdjustBaseAndOffset(source);
+ AdjustBaseAndOffset(&source);
GenInstrImmediate(LH, source.rm(), rd, source.offset());
}
void Assembler::lhu(Register rd, const MemOperand& rs) {
MemOperand source = rs;
- AdjustBaseAndOffset(source);
+ AdjustBaseAndOffset(&source);
GenInstrImmediate(LHU, source.rm(), rd, source.offset());
}
void Assembler::lw(Register rd, const MemOperand& rs) {
MemOperand source = rs;
- AdjustBaseAndOffset(source);
+ AdjustBaseAndOffset(&source);
GenInstrImmediate(LW, source.rm(), rd, source.offset());
}
@@ -2088,19 +2088,19 @@ void Assembler::lwr(Register rd, const MemOperand& rs) {
void Assembler::sb(Register rd, const MemOperand& rs) {
MemOperand source = rs;
- AdjustBaseAndOffset(source);
+ AdjustBaseAndOffset(&source);
GenInstrImmediate(SB, source.rm(), rd, source.offset());
}
void Assembler::sh(Register rd, const MemOperand& rs) {
MemOperand source = rs;
- AdjustBaseAndOffset(source);
+ AdjustBaseAndOffset(&source);
GenInstrImmediate(SH, source.rm(), rd, source.offset());
}
void Assembler::sw(Register rd, const MemOperand& rs) {
MemOperand source = rs;
- AdjustBaseAndOffset(source);
+ AdjustBaseAndOffset(&source);
GenInstrImmediate(SW, source.rm(), rd, source.offset());
}
@@ -2385,13 +2385,13 @@ void Assembler::seb(Register rd, Register rt) {
// Load, store, move.
void Assembler::lwc1(FPURegister fd, const MemOperand& src) {
MemOperand tmp = src;
- AdjustBaseAndOffset(tmp);
+ AdjustBaseAndOffset(&tmp);
GenInstrImmediate(LWC1, tmp.rm(), fd, tmp.offset());
}
void Assembler::swc1(FPURegister fd, const MemOperand& src) {
MemOperand tmp = src;
- AdjustBaseAndOffset(tmp);
+ AdjustBaseAndOffset(&tmp);
GenInstrImmediate(SWC1, tmp.rm(), fd, tmp.offset());
}
@@ -2969,7 +2969,7 @@ MSA_BRANCH_LIST(MSA_BRANCH)
#define MSA_LD_ST(name, opcode) \
void Assembler::name(MSARegister wd, const MemOperand& rs) { \
MemOperand source = rs; \
- AdjustBaseAndOffset(source); \
+ AdjustBaseAndOffset(&source); \
if (is_int10(source.offset())) { \
GenInstrMsaMI10(opcode, source.offset(), source.rm(), wd); \
} else { \
@@ -3473,7 +3473,8 @@ int Assembler::RelocateInternalReference(RelocInfo::Mode rmode, Address pc,
if (IsJicOrJialc(instr2)) {
uint32_t lui_offset_u, jic_offset_u;
- Assembler::UnpackTargetAddressUnsigned(imm, lui_offset_u, jic_offset_u);
+ Assembler::UnpackTargetAddressUnsigned(imm,
+ &lui_offset_u, &jic_offset_u);
instr_at_put(pc + 0 * kInstrSize, instr1 | lui_offset_u);
instr_at_put(pc + 1 * kInstrSize, instr2 | jic_offset_u);
} else {
@@ -3717,7 +3718,7 @@ void Assembler::set_target_value_at(Address pc, uint32_t target,
if (IsJicOrJialc(instr2)) {
// Must use 2 instructions to ensure patchable code => use lui and jic
uint32_t lui_offset, jic_offset;
- Assembler::UnpackTargetAddressUnsigned(target, lui_offset, jic_offset);
+ Assembler::UnpackTargetAddressUnsigned(target, &lui_offset, &jic_offset);
instr1 &= ~kImm16Mask;
instr2 &= ~kImm16Mask;
diff --git a/deps/v8/src/codegen/mips/assembler-mips.h b/deps/v8/src/codegen/mips/assembler-mips.h
index 0359be2c94..d8cb8ec3f2 100644
--- a/deps/v8/src/codegen/mips/assembler-mips.h
+++ b/deps/v8/src/codegen/mips/assembler-mips.h
@@ -36,6 +36,7 @@
#define V8_CODEGEN_MIPS_ASSEMBLER_MIPS_H_
#include <stdio.h>
+#include <memory>
#include <set>
@@ -1478,13 +1479,11 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
static bool IsAddImmediate(Instr instr);
static Instr SetAddImmediateOffset(Instr instr, int16_t offset);
static uint32_t CreateTargetAddress(Instr instr_lui, Instr instr_jic);
- static void UnpackTargetAddress(
- uint32_t address, int16_t& lui_offset, // NOLINT(runtime/references)
- int16_t& jic_offset); // NOLINT(runtime/references)
- static void UnpackTargetAddressUnsigned(
- uint32_t address,
- uint32_t& lui_offset, // NOLINT(runtime/references)
- uint32_t& jic_offset); // NOLINT(runtime/references)
+ static void UnpackTargetAddress(uint32_t address, int16_t* lui_offset,
+ int16_t* jic_offset);
+ static void UnpackTargetAddressUnsigned(uint32_t address,
+ uint32_t* lui_offset,
+ uint32_t* jic_offset);
static bool IsAndImmediate(Instr instr);
static bool IsEmittedConstant(Instr instr);
@@ -1515,7 +1514,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Helper function for memory load/store using base register and offset.
void AdjustBaseAndOffset(
- MemOperand& src, // NOLINT(runtime/references)
+ MemOperand* src,
OffsetAccessType access_type = OffsetAccessType::SINGLE_ACCESS,
int second_access_add_to_offset = 4);
diff --git a/deps/v8/src/codegen/mips/macro-assembler-mips.cc b/deps/v8/src/codegen/mips/macro-assembler-mips.cc
index 2e4698a9e7..760d33d7c9 100644
--- a/deps/v8/src/codegen/mips/macro-assembler-mips.cc
+++ b/deps/v8/src/codegen/mips/macro-assembler-mips.cc
@@ -1063,7 +1063,7 @@ void TurboAssembler::Ulw(Register rd, const MemOperand& rs) {
DCHECK(kMipsLwrOffset <= 3 && kMipsLwlOffset <= 3);
MemOperand source = rs;
// Adjust offset for two accesses and check if offset + 3 fits into int16_t.
- AdjustBaseAndOffset(source, OffsetAccessType::TWO_ACCESSES, 3);
+ AdjustBaseAndOffset(&source, OffsetAccessType::TWO_ACCESSES, 3);
if (rd != source.rm()) {
lwr(rd, MemOperand(source.rm(), source.offset() + kMipsLwrOffset));
lwl(rd, MemOperand(source.rm(), source.offset() + kMipsLwlOffset));
@@ -1089,7 +1089,7 @@ void TurboAssembler::Usw(Register rd, const MemOperand& rs) {
DCHECK(kMipsSwrOffset <= 3 && kMipsSwlOffset <= 3);
MemOperand source = rs;
// Adjust offset for two accesses and check if offset + 3 fits into int16_t.
- AdjustBaseAndOffset(source, OffsetAccessType::TWO_ACCESSES, 3);
+ AdjustBaseAndOffset(&source, OffsetAccessType::TWO_ACCESSES, 3);
swr(rd, MemOperand(source.rm(), source.offset() + kMipsSwrOffset));
swl(rd, MemOperand(source.rm(), source.offset() + kMipsSwlOffset));
}
@@ -1105,7 +1105,7 @@ void TurboAssembler::Ulh(Register rd, const MemOperand& rs) {
IsMipsArchVariant(kLoongson));
MemOperand source = rs;
// Adjust offset for two accesses and check if offset + 1 fits into int16_t.
- AdjustBaseAndOffset(source, OffsetAccessType::TWO_ACCESSES, 1);
+ AdjustBaseAndOffset(&source, OffsetAccessType::TWO_ACCESSES, 1);
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
if (source.rm() == scratch) {
@@ -1140,7 +1140,7 @@ void TurboAssembler::Ulhu(Register rd, const MemOperand& rs) {
IsMipsArchVariant(kLoongson));
MemOperand source = rs;
// Adjust offset for two accesses and check if offset + 1 fits into int16_t.
- AdjustBaseAndOffset(source, OffsetAccessType::TWO_ACCESSES, 1);
+ AdjustBaseAndOffset(&source, OffsetAccessType::TWO_ACCESSES, 1);
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
if (source.rm() == scratch) {
@@ -1177,7 +1177,7 @@ void TurboAssembler::Ush(Register rd, const MemOperand& rs, Register scratch) {
IsMipsArchVariant(kLoongson));
MemOperand source = rs;
// Adjust offset for two accesses and check if offset + 1 fits into int16_t.
- AdjustBaseAndOffset(source, OffsetAccessType::TWO_ACCESSES, 1);
+ AdjustBaseAndOffset(&source, OffsetAccessType::TWO_ACCESSES, 1);
if (scratch != rd) {
mov(scratch, rd);
@@ -1256,7 +1256,7 @@ void TurboAssembler::Ldc1(FPURegister fd, const MemOperand& src) {
BlockTrampolinePoolScope block_trampoline_pool(this);
DCHECK(Register::kMantissaOffset <= 4 && Register::kExponentOffset <= 4);
MemOperand tmp = src;
- AdjustBaseAndOffset(tmp, OffsetAccessType::TWO_ACCESSES);
+ AdjustBaseAndOffset(&tmp, OffsetAccessType::TWO_ACCESSES);
lwc1(fd, MemOperand(tmp.rm(), tmp.offset() + Register::kMantissaOffset));
if (IsFp32Mode()) { // fp32 mode.
FPURegister nextfpreg = FPURegister::from_code(fd.code() + 1);
@@ -1284,7 +1284,7 @@ void TurboAssembler::Sdc1(FPURegister fd, const MemOperand& src) {
BlockTrampolinePoolScope block_trampoline_pool(this);
DCHECK(Register::kMantissaOffset <= 4 && Register::kExponentOffset <= 4);
MemOperand tmp = src;
- AdjustBaseAndOffset(tmp, OffsetAccessType::TWO_ACCESSES);
+ AdjustBaseAndOffset(&tmp, OffsetAccessType::TWO_ACCESSES);
swc1(fd, MemOperand(tmp.rm(), tmp.offset() + Register::kMantissaOffset));
if (IsFp32Mode()) { // fp32 mode.
FPURegister nextfpreg = FPURegister::from_code(fd.code() + 1);
@@ -1305,13 +1305,13 @@ void TurboAssembler::Sdc1(FPURegister fd, const MemOperand& src) {
void TurboAssembler::Lw(Register rd, const MemOperand& rs) {
MemOperand source = rs;
- AdjustBaseAndOffset(source);
+ AdjustBaseAndOffset(&source);
lw(rd, source);
}
void TurboAssembler::Sw(Register rd, const MemOperand& rs) {
MemOperand dest = rs;
- AdjustBaseAndOffset(dest);
+ AdjustBaseAndOffset(&dest);
sw(rd, dest);
}
@@ -2926,18 +2926,18 @@ Register TurboAssembler::GetRtAsRegisterHelper(const Operand& rt,
return r2;
}
-bool TurboAssembler::CalculateOffset(Label* L, int32_t& offset,
+bool TurboAssembler::CalculateOffset(Label* L, int32_t* offset,
OffsetSize bits) {
if (!is_near(L, bits)) return false;
- offset = GetOffset(offset, L, bits);
+ *offset = GetOffset(*offset, L, bits);
return true;
}
-bool TurboAssembler::CalculateOffset(Label* L, int32_t& offset, OffsetSize bits,
- Register& scratch, const Operand& rt) {
+bool TurboAssembler::CalculateOffset(Label* L, int32_t* offset, OffsetSize bits,
+ Register* scratch, const Operand& rt) {
if (!is_near(L, bits)) return false;
- scratch = GetRtAsRegisterHelper(rt, scratch);
- offset = GetOffset(offset, L, bits);
+ *scratch = GetRtAsRegisterHelper(rt, *scratch);
+ *offset = GetOffset(*offset, L, bits);
return true;
}
@@ -2955,23 +2955,23 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L,
BlockTrampolinePoolScope block_trampoline_pool(this);
switch (cond) {
case cc_always:
- if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false;
bc(offset);
break;
case eq:
if (rt.is_reg() && rs.code() == rt.rm().code()) {
// Pre R6 beq is used here to make the code patchable. Otherwise bc
// should be used which has no condition field so is not patchable.
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
return false;
beq(rs, scratch, offset);
nop();
} else if (IsZero(rt)) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset21)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset21)) return false;
beqzc(rs, offset);
} else {
// We don't want any other register but scratch clobbered.
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
return false;
beqc(rs, scratch, offset);
}
@@ -2980,16 +2980,16 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L,
if (rt.is_reg() && rs.code() == rt.rm().code()) {
// Pre R6 bne is used here to make the code patchable. Otherwise we
// should not generate any instruction.
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
return false;
bne(rs, scratch, offset);
nop();
} else if (IsZero(rt)) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset21)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset21)) return false;
bnezc(rs, offset);
} else {
// We don't want any other register but scratch clobbered.
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
return false;
bnec(rs, scratch, offset);
}
@@ -3001,14 +3001,14 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L,
if (rt.is_reg() && rs.code() == rt.rm().code()) {
break; // No code needs to be emitted.
} else if (rs == zero_reg) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
return false;
bltzc(scratch, offset);
} else if (IsZero(rt)) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false;
bgtzc(rs, offset);
} else {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
return false;
DCHECK(rs != scratch);
bltc(scratch, rs, offset);
@@ -3017,17 +3017,17 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L,
case greater_equal:
// rs >= rt
if (rt.is_reg() && rs.code() == rt.rm().code()) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false;
bc(offset);
} else if (rs == zero_reg) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
return false;
blezc(scratch, offset);
} else if (IsZero(rt)) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false;
bgezc(rs, offset);
} else {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
return false;
DCHECK(rs != scratch);
bgec(rs, scratch, offset);
@@ -3038,14 +3038,14 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L,
if (rt.is_reg() && rs.code() == rt.rm().code()) {
break; // No code needs to be emitted.
} else if (rs == zero_reg) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
return false;
bgtzc(scratch, offset);
} else if (IsZero(rt)) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false;
bltzc(rs, offset);
} else {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
return false;
DCHECK(rs != scratch);
bltc(rs, scratch, offset);
@@ -3054,17 +3054,17 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L,
case less_equal:
// rs <= rt
if (rt.is_reg() && rs.code() == rt.rm().code()) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false;
bc(offset);
} else if (rs == zero_reg) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
return false;
bgezc(scratch, offset);
} else if (IsZero(rt)) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false;
blezc(rs, offset);
} else {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
return false;
DCHECK(rs != scratch);
bgec(scratch, rs, offset);
@@ -3077,14 +3077,14 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L,
if (rt.is_reg() && rs.code() == rt.rm().code()) {
break; // No code needs to be emitted.
} else if (rs == zero_reg) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset21, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset21, &scratch, rt))
return false;
bnezc(scratch, offset);
} else if (IsZero(rt)) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset21)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset21)) return false;
bnezc(rs, offset);
} else {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
return false;
DCHECK(rs != scratch);
bltuc(scratch, rs, offset);
@@ -3093,17 +3093,17 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L,
case Ugreater_equal:
// rs >= rt
if (rt.is_reg() && rs.code() == rt.rm().code()) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false;
bc(offset);
} else if (rs == zero_reg) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset21, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset21, &scratch, rt))
return false;
beqzc(scratch, offset);
} else if (IsZero(rt)) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false;
bc(offset);
} else {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
return false;
DCHECK(rs != scratch);
bgeuc(rs, scratch, offset);
@@ -3114,13 +3114,13 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L,
if (rt.is_reg() && rs.code() == rt.rm().code()) {
break; // No code needs to be emitted.
} else if (rs == zero_reg) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset21, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset21, &scratch, rt))
return false;
bnezc(scratch, offset);
} else if (IsZero(rt)) {
break; // No code needs to be emitted.
} else {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
return false;
DCHECK(rs != scratch);
bltuc(rs, scratch, offset);
@@ -3129,17 +3129,17 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L,
case Uless_equal:
// rs <= rt
if (rt.is_reg() && rs.code() == rt.rm().code()) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false;
bc(offset);
} else if (rs == zero_reg) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset26, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset26, &scratch, rt))
return false;
bc(offset);
} else if (IsZero(rt)) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset21)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset21)) return false;
beqzc(rs, offset);
} else {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
return false;
DCHECK(rs != scratch);
bgeuc(scratch, rs, offset);
@@ -3418,7 +3418,7 @@ bool TurboAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L,
DCHECK((cond == cc_always && is_int26(offset)) || is_int16(offset));
switch (cond) {
case cc_always:
- if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false;
balc(offset);
break;
case eq:
@@ -3440,11 +3440,11 @@ bool TurboAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L,
if (rs.code() == rt.rm().code()) {
break; // No code needs to be emitted.
} else if (rs == zero_reg) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
return false;
bltzalc(scratch, offset);
} else if (IsZero(rt)) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false;
bgtzalc(rs, offset);
} else {
if (!is_near(L, bits)) return false;
@@ -3456,14 +3456,14 @@ bool TurboAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L,
case greater_equal:
// rs >= rt
if (rs.code() == rt.rm().code()) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false;
balc(offset);
} else if (rs == zero_reg) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
return false;
blezalc(scratch, offset);
} else if (IsZero(rt)) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false;
bgezalc(rs, offset);
} else {
if (!is_near(L, bits)) return false;
@@ -3477,11 +3477,11 @@ bool TurboAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L,
if (rs.code() == rt.rm().code()) {
break; // No code needs to be emitted.
} else if (rs == zero_reg) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
return false;
bgtzalc(scratch, offset);
} else if (IsZero(rt)) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false;
bltzalc(rs, offset);
} else {
if (!is_near(L, bits)) return false;
@@ -3493,14 +3493,14 @@ bool TurboAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L,
case less_equal:
// rs <= rt
if (rs.code() == rt.rm().code()) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false;
balc(offset);
} else if (rs == zero_reg) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
return false;
bgezalc(scratch, offset);
} else if (IsZero(rt)) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false;
blezalc(rs, offset);
} else {
if (!is_near(L, bits)) return false;
@@ -3751,8 +3751,8 @@ void TurboAssembler::Jump(Register target, const Operand& offset,
if (IsMipsArchVariant(kMips32r6) && bd == PROTECT &&
!is_int16(offset.immediate())) {
uint32_t aui_offset, jic_offset;
- Assembler::UnpackTargetAddressUnsigned(offset.immediate(), aui_offset,
- jic_offset);
+ Assembler::UnpackTargetAddressUnsigned(offset.immediate(), &aui_offset,
+ &jic_offset);
RecordRelocInfo(RelocInfo::EXTERNAL_REFERENCE, offset.immediate());
aui(target, target, aui_offset);
if (cond == cc_always) {
@@ -3790,7 +3790,7 @@ void TurboAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
// This is not an issue, t9 is expected to be clobbered anyway.
if (IsMipsArchVariant(kMips32r6) && bd == PROTECT) {
uint32_t lui_offset, jic_offset;
- UnpackTargetAddressUnsigned(target, lui_offset, jic_offset);
+ UnpackTargetAddressUnsigned(target, &lui_offset, &jic_offset);
if (MustUseReg(rmode)) {
RecordRelocInfo(rmode, target);
}
@@ -3853,10 +3853,8 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
}
void TurboAssembler::Jump(const ExternalReference& reference) {
- UseScratchRegisterScope temps(this);
- Register scratch = temps.Acquire();
- li(scratch, reference);
- Jump(scratch);
+ li(t9, reference);
+ Jump(t9);
}
void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit,
@@ -3940,7 +3938,7 @@ void TurboAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond,
int32_t target_int = static_cast<int32_t>(target);
if (IsMipsArchVariant(kMips32r6) && bd == PROTECT && cond == cc_always) {
uint32_t lui_offset, jialc_offset;
- UnpackTargetAddressUnsigned(target_int, lui_offset, jialc_offset);
+ UnpackTargetAddressUnsigned(target_int, &lui_offset, &jialc_offset);
if (MustUseReg(rmode)) {
RecordRelocInfo(rmode, target_int);
}
@@ -3990,7 +3988,6 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
}
}
DCHECK(RelocInfo::IsCodeTarget(rmode));
- AllowDeferredHandleDereference embedding_raw_address;
Call(code.address(), rmode, cond, rs, rt, bd);
}
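For reference, the aui/jic pairing that UnpackTargetAddressUnsigned feeds (now
through pointer out-parameters) works because jic sign-extends its 16-bit
offset. A minimal sketch of that split, with an assumed helper name and
masking rather than the V8 implementation itself:

    #include <cstdint>

    // Split a 32-bit target into the immediates of an aui/jic pair. When the
    // low half has its sign bit set, jic effectively subtracts 0x10000 after
    // sign extension, so the high half is incremented by one to compensate.
    void UnpackTargetSketch(uint32_t address, uint32_t* aui_offset,
                            uint32_t* jic_offset) {
      *jic_offset = address & 0xFFFF;
      *aui_offset = (address >> 16) & 0xFFFF;
      if (*jic_offset & 0x8000) *aui_offset += 1;
    }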
diff --git a/deps/v8/src/codegen/mips/macro-assembler-mips.h b/deps/v8/src/codegen/mips/macro-assembler-mips.h
index d9c372f868..e82c88f0b5 100644
--- a/deps/v8/src/codegen/mips/macro-assembler-mips.h
+++ b/deps/v8/src/codegen/mips/macro-assembler-mips.h
@@ -849,12 +849,10 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void BranchShortMSA(MSABranchDF df, Label* target, MSABranchCondition cond,
MSARegister wt, BranchDelaySlot bd = PROTECT);
- bool CalculateOffset(Label* L, int32_t& offset, // NOLINT(runtime/references)
- OffsetSize bits);
- bool CalculateOffset(Label* L, int32_t& offset, // NOLINT(runtime/references)
- OffsetSize bits,
- Register& scratch, // NOLINT(runtime/references)
- const Operand& rt);
+ // TODO(mips) Reorder parameters so out parameters come last.
+ bool CalculateOffset(Label* L, int32_t* offset, OffsetSize bits);
+ bool CalculateOffset(Label* L, int32_t* offset, OffsetSize bits,
+ Register* scratch, const Operand& rt);
void BranchShortHelperR6(int32_t offset, Label* L);
void BranchShortHelper(int16_t offset, Label* L, BranchDelaySlot bdslot);
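The motivation for the pointer signatures above is call-site visibility: the
Google C++ style rule that the dropped NOLINT(runtime/references) markers were
suppressing discourages mutable reference parameters. A minimal illustration
with hypothetical stand-ins for the two styles:

    #include <cstdint>

    bool ByRef(int32_t& offset) { offset += 4; return true; }
    bool ByPtr(int32_t* offset) { *offset += 4; return true; }

    void Caller() {
      int32_t offset = 0;
      ByRef(offset);   // mutation is invisible at the call site
      ByPtr(&offset);  // the &offset makes the mutation explicit
    }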
diff --git a/deps/v8/src/codegen/mips64/assembler-mips64-inl.h b/deps/v8/src/codegen/mips64/assembler-mips64-inl.h
index 7b9946d16e..cacdbd8f8b 100644
--- a/deps/v8/src/codegen/mips64/assembler-mips64-inl.h
+++ b/deps/v8/src/codegen/mips64/assembler-mips64-inl.h
@@ -159,7 +159,8 @@ void RelocInfo::set_target_object(Heap* heap, HeapObject target,
DCHECK(IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_));
Assembler::set_target_address_at(pc_, constant_pool_, target.ptr(),
icache_flush_mode);
- if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null()) {
+ if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null() &&
+ !FLAG_disable_write_barriers) {
WriteBarrierForCode(host(), this, target);
}
}
diff --git a/deps/v8/src/codegen/mips64/assembler-mips64.cc b/deps/v8/src/codegen/mips64/assembler-mips64.cc
index 801faf6306..37a05585c4 100644
--- a/deps/v8/src/codegen/mips64/assembler-mips64.cc
+++ b/deps/v8/src/codegen/mips64/assembler-mips64.cc
@@ -207,8 +207,8 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
Handle<HeapObject> object;
switch (request.kind()) {
case HeapObjectRequest::kHeapNumber:
- object = isolate->factory()->NewHeapNumber(request.heap_number(),
- AllocationType::kOld);
+ object = isolate->factory()->NewHeapNumber<AllocationType::kOld>(
+ request.heap_number());
break;
case HeapObjectRequest::kStringConstant:
const StringConstantBase* str = request.string();
@@ -1996,7 +1996,7 @@ void Assembler::dlsa(Register rd, Register rt, Register rs, uint8_t sa) {
// ------------Memory-instructions-------------
-void Assembler::AdjustBaseAndOffset(MemOperand& src,
+void Assembler::AdjustBaseAndOffset(MemOperand* src,
OffsetAccessType access_type,
int second_access_add_to_offset) {
// This method is used to adjust the base register and offset pair
@@ -2009,25 +2009,25 @@ void Assembler::AdjustBaseAndOffset(MemOperand& src,
// pointer register).
// We preserve the "alignment" of 'offset' by adjusting it by a multiple of 8.
- bool doubleword_aligned = (src.offset() & (kDoubleSize - 1)) == 0;
+ bool doubleword_aligned = (src->offset() & (kDoubleSize - 1)) == 0;
bool two_accesses = static_cast<bool>(access_type) || !doubleword_aligned;
DCHECK_LE(second_access_add_to_offset, 7); // Must be <= 7.
// is_int16 must be passed a signed value, hence the static cast below.
- if (is_int16(src.offset()) &&
+ if (is_int16(src->offset()) &&
(!two_accesses || is_int16(static_cast<int32_t>(
- src.offset() + second_access_add_to_offset)))) {
+ src->offset() + second_access_add_to_offset)))) {
// Nothing to do: 'offset' (and, if needed, 'offset + 4', or other specified
// value) fits into int16_t.
return;
}
- DCHECK(src.rm() !=
+ DCHECK(src->rm() !=
at); // Must not overwrite the register 'base' while loading 'offset'.
#ifdef DEBUG
// Remember the "(mis)alignment" of 'offset', it will be checked at the end.
- uint32_t misalignment = src.offset() & (kDoubleSize - 1);
+ uint32_t misalignment = src->offset() & (kDoubleSize - 1);
#endif
// Do not load the whole 32-bit 'offset' if it can be represented as
@@ -2042,13 +2042,13 @@ void Assembler::AdjustBaseAndOffset(MemOperand& src,
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- if (0 <= src.offset() && src.offset() <= kMaxOffsetForSimpleAdjustment) {
- daddiu(scratch, src.rm(), kMinOffsetForSimpleAdjustment);
- src.offset_ -= kMinOffsetForSimpleAdjustment;
- } else if (-kMaxOffsetForSimpleAdjustment <= src.offset() &&
- src.offset() < 0) {
- daddiu(scratch, src.rm(), -kMinOffsetForSimpleAdjustment);
- src.offset_ += kMinOffsetForSimpleAdjustment;
+ if (0 <= src->offset() && src->offset() <= kMaxOffsetForSimpleAdjustment) {
+ daddiu(scratch, src->rm(), kMinOffsetForSimpleAdjustment);
+ src->offset_ -= kMinOffsetForSimpleAdjustment;
+ } else if (-kMaxOffsetForSimpleAdjustment <= src->offset() &&
+ src->offset() < 0) {
+ daddiu(scratch, src->rm(), -kMinOffsetForSimpleAdjustment);
+ src->offset_ += kMinOffsetForSimpleAdjustment;
} else if (kArchVariant == kMips64r6) {
// On r6 take advantage of the daui instruction, e.g.:
// daui at, base, offset_high
@@ -2060,9 +2060,9 @@ void Assembler::AdjustBaseAndOffset(MemOperand& src,
// daddiu at, at, 8
// lw reg_lo, (offset_low-8)(at)
// lw reg_hi, (offset_low-4)(at)
- int16_t offset_low = static_cast<uint16_t>(src.offset());
+ int16_t offset_low = static_cast<uint16_t>(src->offset());
int32_t offset_low32 = offset_low;
- int16_t offset_high = static_cast<uint16_t>(src.offset() >> 16);
+ int16_t offset_high = static_cast<uint16_t>(src->offset() >> 16);
bool increment_hi16 = offset_low < 0;
bool overflow_hi16 = false;
@@ -2070,7 +2070,7 @@ void Assembler::AdjustBaseAndOffset(MemOperand& src,
offset_high++;
overflow_hi16 = (offset_high == -32768);
}
- daui(scratch, src.rm(), static_cast<uint16_t>(offset_high));
+ daui(scratch, src->rm(), static_cast<uint16_t>(offset_high));
if (overflow_hi16) {
dahi(scratch, 1);
@@ -2084,7 +2084,7 @@ void Assembler::AdjustBaseAndOffset(MemOperand& src,
offset_low32 -= kDoubleSize;
}
- src.offset_ = offset_low32;
+ src->offset_ = offset_low32;
} else {
// Do not load the whole 32-bit 'offset' if it can be represented as
// a sum of three 16-bit signed offsets. This can save an instruction.
@@ -2095,33 +2095,33 @@ void Assembler::AdjustBaseAndOffset(MemOperand& src,
2 * kMinOffsetForSimpleAdjustment;
constexpr int32_t kMaxOffsetForMediumAdjustment =
3 * kMinOffsetForSimpleAdjustment;
- if (0 <= src.offset() && src.offset() <= kMaxOffsetForMediumAdjustment) {
- daddiu(scratch, src.rm(), kMinOffsetForMediumAdjustment / 2);
+ if (0 <= src->offset() && src->offset() <= kMaxOffsetForMediumAdjustment) {
+ daddiu(scratch, src->rm(), kMinOffsetForMediumAdjustment / 2);
daddiu(scratch, scratch, kMinOffsetForMediumAdjustment / 2);
- src.offset_ -= kMinOffsetForMediumAdjustment;
- } else if (-kMaxOffsetForMediumAdjustment <= src.offset() &&
- src.offset() < 0) {
- daddiu(scratch, src.rm(), -kMinOffsetForMediumAdjustment / 2);
+ src->offset_ -= kMinOffsetForMediumAdjustment;
+ } else if (-kMaxOffsetForMediumAdjustment <= src->offset() &&
+ src->offset() < 0) {
+ daddiu(scratch, src->rm(), -kMinOffsetForMediumAdjustment / 2);
daddiu(scratch, scratch, -kMinOffsetForMediumAdjustment / 2);
- src.offset_ += kMinOffsetForMediumAdjustment;
+ src->offset_ += kMinOffsetForMediumAdjustment;
} else {
// Now that all shorter options have been exhausted, load the full 32-bit
// offset.
- int32_t loaded_offset = RoundDown(src.offset(), kDoubleSize);
+ int32_t loaded_offset = RoundDown(src->offset(), kDoubleSize);
lui(scratch, (loaded_offset >> kLuiShift) & kImm16Mask);
ori(scratch, scratch, loaded_offset & kImm16Mask); // Load 32-bit offset.
- daddu(scratch, scratch, src.rm());
- src.offset_ -= loaded_offset;
+ daddu(scratch, scratch, src->rm());
+ src->offset_ -= loaded_offset;
}
}
- src.rm_ = scratch;
+ src->rm_ = scratch;
- DCHECK(is_int16(src.offset()));
+ DCHECK(is_int16(src->offset()));
if (two_accesses) {
DCHECK(is_int16(
- static_cast<int32_t>(src.offset() + second_access_add_to_offset)));
+ static_cast<int32_t>(src->offset() + second_access_add_to_offset)));
}
- DCHECK(misalignment == (src.offset() & (kDoubleSize - 1)));
+ DCHECK(misalignment == (src->offset() & (kDoubleSize - 1)));
}
void Assembler::lb(Register rd, const MemOperand& rs) {
@@ -3169,7 +3169,7 @@ MSA_BRANCH_LIST(MSA_BRANCH)
#define MSA_LD_ST(name, opcode) \
void Assembler::name(MSARegister wd, const MemOperand& rs) { \
MemOperand source = rs; \
- AdjustBaseAndOffset(source); \
+ AdjustBaseAndOffset(&source); \
if (is_int10(source.offset())) { \
GenInstrMsaMI10(opcode, source.offset(), source.rm(), wd); \
} else { \
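The pattern above always rewrites the operand through the pointer: part of an
out-of-range offset is folded into a scratch base register so the remainder
fits the 16-bit immediate of a MIPS load/store. A minimal sketch of the
simple-adjustment case only, with stand-in types and constant values assumed
from their names:

    #include <cstdint>

    struct MemOperandSketch {
      int rm;          // base register number, standing in for src->rm()
      int32_t offset;
    };

    constexpr int32_t kMinSimple = 0x7FF8;              // max int16, 8-aligned
    constexpr int32_t kMaxSimple = 0x7FFF + kMinSimple;

    bool AdjustForInt16(MemOperandSketch* src, int scratch) {
      if (src->offset >= INT16_MIN && src->offset <= INT16_MAX) return true;
      if (src->offset >= 0 && src->offset <= kMaxSimple) {
        // The real code emits daddiu(scratch, src->rm(), kMinSimple) here.
        src->rm = scratch;
        src->offset -= kMinSimple;
        return true;  // remainder is now in [8, 0x7FFF]
      }
      return false;  // the real code falls through to 32-bit strategies
    }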
diff --git a/deps/v8/src/codegen/mips64/assembler-mips64.h b/deps/v8/src/codegen/mips64/assembler-mips64.h
index 9695aa6524..48733eebea 100644
--- a/deps/v8/src/codegen/mips64/assembler-mips64.h
+++ b/deps/v8/src/codegen/mips64/assembler-mips64.h
@@ -36,7 +36,7 @@
#define V8_CODEGEN_MIPS64_ASSEMBLER_MIPS64_H_
#include <stdio.h>
-
+#include <memory>
#include <set>
#include "src/codegen/assembler.h"
@@ -1560,7 +1560,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Helper function for memory load/store using base register and offset.
void AdjustBaseAndOffset(
- MemOperand& src, // NOLINT(runtime/references)
+ MemOperand* src,
OffsetAccessType access_type = OffsetAccessType::SINGLE_ACCESS,
int second_access_add_to_offset = 4);
diff --git a/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc b/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc
index b353786064..2ea770d224 100644
--- a/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc
+++ b/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc
@@ -1166,7 +1166,7 @@ void TurboAssembler::Ulw(Register rd, const MemOperand& rs) {
DCHECK(kMipsLwrOffset <= 3 && kMipsLwlOffset <= 3);
MemOperand source = rs;
// Adjust offset for two accesses and check if offset + 3 fits into int16_t.
- AdjustBaseAndOffset(source, OffsetAccessType::TWO_ACCESSES, 3);
+ AdjustBaseAndOffset(&source, OffsetAccessType::TWO_ACCESSES, 3);
if (rd != source.rm()) {
lwr(rd, MemOperand(source.rm(), source.offset() + kMipsLwrOffset));
lwl(rd, MemOperand(source.rm(), source.offset() + kMipsLwlOffset));
@@ -1201,7 +1201,7 @@ void TurboAssembler::Usw(Register rd, const MemOperand& rs) {
DCHECK(kMipsSwrOffset <= 3 && kMipsSwlOffset <= 3);
MemOperand source = rs;
// Adjust offset for two accesses and check if offset + 3 fits into int16_t.
- AdjustBaseAndOffset(source, OffsetAccessType::TWO_ACCESSES, 3);
+ AdjustBaseAndOffset(&source, OffsetAccessType::TWO_ACCESSES, 3);
swr(rd, MemOperand(source.rm(), source.offset() + kMipsSwrOffset));
swl(rd, MemOperand(source.rm(), source.offset() + kMipsSwlOffset));
}
@@ -1216,7 +1216,7 @@ void TurboAssembler::Ulh(Register rd, const MemOperand& rs) {
DCHECK_EQ(kArchVariant, kMips64r2);
MemOperand source = rs;
// Adjust offset for two accesses and check if offset + 1 fits into int16_t.
- AdjustBaseAndOffset(source, OffsetAccessType::TWO_ACCESSES, 1);
+ AdjustBaseAndOffset(&source, OffsetAccessType::TWO_ACCESSES, 1);
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
if (source.rm() == scratch) {
@@ -1250,7 +1250,7 @@ void TurboAssembler::Ulhu(Register rd, const MemOperand& rs) {
DCHECK_EQ(kArchVariant, kMips64r2);
MemOperand source = rs;
// Adjust offset for two accesses and check if offset + 1 fits into int16_t.
- AdjustBaseAndOffset(source, OffsetAccessType::TWO_ACCESSES, 1);
+ AdjustBaseAndOffset(&source, OffsetAccessType::TWO_ACCESSES, 1);
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
if (source.rm() == scratch) {
@@ -1286,7 +1286,7 @@ void TurboAssembler::Ush(Register rd, const MemOperand& rs, Register scratch) {
DCHECK_EQ(kArchVariant, kMips64r2);
MemOperand source = rs;
// Adjust offset for two accesses and check if offset + 1 fits into int16_t.
- AdjustBaseAndOffset(source, OffsetAccessType::TWO_ACCESSES, 1);
+ AdjustBaseAndOffset(&source, OffsetAccessType::TWO_ACCESSES, 1);
if (scratch != rd) {
mov(scratch, rd);
@@ -1314,7 +1314,7 @@ void TurboAssembler::Uld(Register rd, const MemOperand& rs) {
DCHECK(kMipsLdrOffset <= 7 && kMipsLdlOffset <= 7);
MemOperand source = rs;
// Adjust offset for two accesses and check if offset + 7 fits into int16_t.
- AdjustBaseAndOffset(source, OffsetAccessType::TWO_ACCESSES, 7);
+ AdjustBaseAndOffset(&source, OffsetAccessType::TWO_ACCESSES, 7);
if (rd != source.rm()) {
ldr(rd, MemOperand(source.rm(), source.offset() + kMipsLdrOffset));
ldl(rd, MemOperand(source.rm(), source.offset() + kMipsLdlOffset));
@@ -1349,7 +1349,7 @@ void TurboAssembler::Usd(Register rd, const MemOperand& rs) {
DCHECK(kMipsSdrOffset <= 7 && kMipsSdlOffset <= 7);
MemOperand source = rs;
// Adjust offset for two accesses and check if offset + 7 fits into int16_t.
- AdjustBaseAndOffset(source, OffsetAccessType::TWO_ACCESSES, 7);
+ AdjustBaseAndOffset(&source, OffsetAccessType::TWO_ACCESSES, 7);
sdr(rd, MemOperand(source.rm(), source.offset() + kMipsSdrOffset));
sdl(rd, MemOperand(source.rm(), source.offset() + kMipsSdlOffset));
}
@@ -1411,91 +1411,91 @@ void TurboAssembler::Usdc1(FPURegister fd, const MemOperand& rs,
void TurboAssembler::Lb(Register rd, const MemOperand& rs) {
MemOperand source = rs;
- AdjustBaseAndOffset(source);
+ AdjustBaseAndOffset(&source);
lb(rd, source);
}
void TurboAssembler::Lbu(Register rd, const MemOperand& rs) {
MemOperand source = rs;
- AdjustBaseAndOffset(source);
+ AdjustBaseAndOffset(&source);
lbu(rd, source);
}
void TurboAssembler::Sb(Register rd, const MemOperand& rs) {
MemOperand source = rs;
- AdjustBaseAndOffset(source);
+ AdjustBaseAndOffset(&source);
sb(rd, source);
}
void TurboAssembler::Lh(Register rd, const MemOperand& rs) {
MemOperand source = rs;
- AdjustBaseAndOffset(source);
+ AdjustBaseAndOffset(&source);
lh(rd, source);
}
void TurboAssembler::Lhu(Register rd, const MemOperand& rs) {
MemOperand source = rs;
- AdjustBaseAndOffset(source);
+ AdjustBaseAndOffset(&source);
lhu(rd, source);
}
void TurboAssembler::Sh(Register rd, const MemOperand& rs) {
MemOperand source = rs;
- AdjustBaseAndOffset(source);
+ AdjustBaseAndOffset(&source);
sh(rd, source);
}
void TurboAssembler::Lw(Register rd, const MemOperand& rs) {
MemOperand source = rs;
- AdjustBaseAndOffset(source);
+ AdjustBaseAndOffset(&source);
lw(rd, source);
}
void TurboAssembler::Lwu(Register rd, const MemOperand& rs) {
MemOperand source = rs;
- AdjustBaseAndOffset(source);
+ AdjustBaseAndOffset(&source);
lwu(rd, source);
}
void TurboAssembler::Sw(Register rd, const MemOperand& rs) {
MemOperand source = rs;
- AdjustBaseAndOffset(source);
+ AdjustBaseAndOffset(&source);
sw(rd, source);
}
void TurboAssembler::Ld(Register rd, const MemOperand& rs) {
MemOperand source = rs;
- AdjustBaseAndOffset(source);
+ AdjustBaseAndOffset(&source);
ld(rd, source);
}
void TurboAssembler::Sd(Register rd, const MemOperand& rs) {
MemOperand source = rs;
- AdjustBaseAndOffset(source);
+ AdjustBaseAndOffset(&source);
sd(rd, source);
}
void TurboAssembler::Lwc1(FPURegister fd, const MemOperand& src) {
MemOperand tmp = src;
- AdjustBaseAndOffset(tmp);
+ AdjustBaseAndOffset(&tmp);
lwc1(fd, tmp);
}
void TurboAssembler::Swc1(FPURegister fs, const MemOperand& src) {
MemOperand tmp = src;
- AdjustBaseAndOffset(tmp);
+ AdjustBaseAndOffset(&tmp);
swc1(fs, tmp);
}
void TurboAssembler::Ldc1(FPURegister fd, const MemOperand& src) {
MemOperand tmp = src;
- AdjustBaseAndOffset(tmp);
+ AdjustBaseAndOffset(&tmp);
ldc1(fd, tmp);
}
void TurboAssembler::Sdc1(FPURegister fs, const MemOperand& src) {
MemOperand tmp = src;
- AdjustBaseAndOffset(tmp);
+ AdjustBaseAndOffset(&tmp);
sdc1(fs, tmp);
}
@@ -3362,18 +3362,18 @@ Register TurboAssembler::GetRtAsRegisterHelper(const Operand& rt,
return r2;
}
-bool TurboAssembler::CalculateOffset(Label* L, int32_t& offset,
+bool TurboAssembler::CalculateOffset(Label* L, int32_t* offset,
OffsetSize bits) {
if (!is_near(L, bits)) return false;
- offset = GetOffset(offset, L, bits);
+ *offset = GetOffset(*offset, L, bits);
return true;
}
-bool TurboAssembler::CalculateOffset(Label* L, int32_t& offset, OffsetSize bits,
- Register& scratch, const Operand& rt) {
+bool TurboAssembler::CalculateOffset(Label* L, int32_t* offset, OffsetSize bits,
+ Register* scratch, const Operand& rt) {
if (!is_near(L, bits)) return false;
- scratch = GetRtAsRegisterHelper(rt, scratch);
- offset = GetOffset(offset, L, bits);
+ *scratch = GetRtAsRegisterHelper(rt, *scratch);
+ *offset = GetOffset(*offset, L, bits);
return true;
}
@@ -3392,23 +3392,23 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L,
BlockTrampolinePoolScope block_trampoline_pool(this);
switch (cond) {
case cc_always:
- if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false;
bc(offset);
break;
case eq:
if (rt.is_reg() && rs.code() == rt.rm().code()) {
// Pre R6 beq is used here to make the code patchable. Otherwise bc
// should be used which has no condition field so is not patchable.
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
return false;
beq(rs, scratch, offset);
nop();
} else if (IsZero(rt)) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset21)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset21)) return false;
beqzc(rs, offset);
} else {
// We don't want any other register but scratch clobbered.
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
return false;
beqc(rs, scratch, offset);
}
@@ -3417,16 +3417,16 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L,
if (rt.is_reg() && rs.code() == rt.rm().code()) {
// Pre R6 bne is used here to make the code patchable. Otherwise we
// should not generate any instruction.
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
return false;
bne(rs, scratch, offset);
nop();
} else if (IsZero(rt)) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset21)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset21)) return false;
bnezc(rs, offset);
} else {
// We don't want any other register but scratch clobbered.
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
return false;
bnec(rs, scratch, offset);
}
@@ -3438,14 +3438,14 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L,
if (rt.is_reg() && rs.code() == rt.rm().code()) {
break; // No code needs to be emitted.
} else if (rs == zero_reg) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
return false;
bltzc(scratch, offset);
} else if (IsZero(rt)) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false;
bgtzc(rs, offset);
} else {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
return false;
DCHECK(rs != scratch);
bltc(scratch, rs, offset);
@@ -3454,17 +3454,17 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L,
case greater_equal:
// rs >= rt
if (rt.is_reg() && rs.code() == rt.rm().code()) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false;
bc(offset);
} else if (rs == zero_reg) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
return false;
blezc(scratch, offset);
} else if (IsZero(rt)) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false;
bgezc(rs, offset);
} else {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
return false;
DCHECK(rs != scratch);
bgec(rs, scratch, offset);
@@ -3475,14 +3475,14 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L,
if (rt.is_reg() && rs.code() == rt.rm().code()) {
break; // No code needs to be emitted.
} else if (rs == zero_reg) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
return false;
bgtzc(scratch, offset);
} else if (IsZero(rt)) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false;
bltzc(rs, offset);
} else {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
return false;
DCHECK(rs != scratch);
bltc(rs, scratch, offset);
@@ -3491,17 +3491,17 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L,
case less_equal:
// rs <= rt
if (rt.is_reg() && rs.code() == rt.rm().code()) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false;
bc(offset);
} else if (rs == zero_reg) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
return false;
bgezc(scratch, offset);
} else if (IsZero(rt)) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false;
blezc(rs, offset);
} else {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
return false;
DCHECK(rs != scratch);
bgec(scratch, rs, offset);
@@ -3514,14 +3514,14 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L,
if (rt.is_reg() && rs.code() == rt.rm().code()) {
break; // No code needs to be emitted.
} else if (rs == zero_reg) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset21, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset21, &scratch, rt))
return false;
bnezc(scratch, offset);
} else if (IsZero(rt)) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset21)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset21)) return false;
bnezc(rs, offset);
} else {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
return false;
DCHECK(rs != scratch);
bltuc(scratch, rs, offset);
@@ -3530,17 +3530,17 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L,
case Ugreater_equal:
// rs >= rt
if (rt.is_reg() && rs.code() == rt.rm().code()) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false;
bc(offset);
} else if (rs == zero_reg) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset21, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset21, &scratch, rt))
return false;
beqzc(scratch, offset);
} else if (IsZero(rt)) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false;
bc(offset);
} else {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
return false;
DCHECK(rs != scratch);
bgeuc(rs, scratch, offset);
@@ -3551,13 +3551,13 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L,
if (rt.is_reg() && rs.code() == rt.rm().code()) {
break; // No code needs to be emitted.
} else if (rs == zero_reg) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset21, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset21, &scratch, rt))
return false;
bnezc(scratch, offset);
} else if (IsZero(rt)) {
break; // No code needs to be emitted.
} else {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
return false;
DCHECK(rs != scratch);
bltuc(rs, scratch, offset);
@@ -3566,17 +3566,17 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L,
case Uless_equal:
// rs <= rt
if (rt.is_reg() && rs.code() == rt.rm().code()) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false;
bc(offset);
} else if (rs == zero_reg) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset26, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset26, &scratch, rt))
return false;
bc(offset);
} else if (IsZero(rt)) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset21)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset21)) return false;
beqzc(rs, offset);
} else {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
return false;
DCHECK(rs != scratch);
bgeuc(scratch, rs, offset);
@@ -3858,7 +3858,7 @@ bool TurboAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L,
DCHECK((cond == cc_always && is_int26(offset)) || is_int16(offset));
switch (cond) {
case cc_always:
- if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false;
balc(offset);
break;
case eq:
@@ -3880,11 +3880,11 @@ bool TurboAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L,
if (rs.code() == rt.rm().code()) {
break; // No code needs to be emitted.
} else if (rs == zero_reg) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
return false;
bltzalc(scratch, offset);
} else if (IsZero(rt)) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false;
bgtzalc(rs, offset);
} else {
if (!is_near(L, bits)) return false;
@@ -3896,14 +3896,14 @@ bool TurboAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L,
case greater_equal:
// rs >= rt
if (rs.code() == rt.rm().code()) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false;
balc(offset);
} else if (rs == zero_reg) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
return false;
blezalc(scratch, offset);
} else if (IsZero(rt)) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false;
bgezalc(rs, offset);
} else {
if (!is_near(L, bits)) return false;
@@ -3917,11 +3917,11 @@ bool TurboAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L,
if (rs.code() == rt.rm().code()) {
break; // No code needs to be emitted.
} else if (rs == zero_reg) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
return false;
bgtzalc(scratch, offset);
} else if (IsZero(rt)) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false;
bltzalc(rs, offset);
} else {
if (!is_near(L, bits)) return false;
@@ -3933,14 +3933,14 @@ bool TurboAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L,

case less_equal:
// rs <= rt
if (rs.code() == rt.rm().code()) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false;
balc(offset);
} else if (rs == zero_reg) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
return false;
bgezalc(scratch, offset);
} else if (IsZero(rt)) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false;
blezalc(rs, offset);
} else {
if (!is_near(L, bits)) return false;
@@ -4202,10 +4202,8 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
}
void TurboAssembler::Jump(const ExternalReference& reference) {
- UseScratchRegisterScope temps(this);
- Register scratch = temps.Acquire();
- li(scratch, reference);
- Jump(scratch);
+ li(t9, reference);
+ Jump(t9);
}
// Note: To call gcc-compiled C code on mips, you must call through t9.
@@ -4284,7 +4282,6 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) {
STATIC_ASSERT(kSystemPointerSize == 8);
- STATIC_ASSERT(kSmiShiftSize == 31);
STATIC_ASSERT(kSmiTagSize == 1);
STATIC_ASSERT(kSmiTag == 0);
diff --git a/deps/v8/src/codegen/mips64/macro-assembler-mips64.h b/deps/v8/src/codegen/mips64/macro-assembler-mips64.h
index c2b701a5af..886d64e494 100644
--- a/deps/v8/src/codegen/mips64/macro-assembler-mips64.h
+++ b/deps/v8/src/codegen/mips64/macro-assembler-mips64.h
@@ -850,12 +850,10 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void CallCFunctionHelper(Register function, int num_reg_arguments,
int num_double_arguments);
- bool CalculateOffset(Label* L, int32_t& offset, // NOLINT(runtime/references)
- OffsetSize bits);
- bool CalculateOffset(Label* L, int32_t& offset, // NOLINT(runtime/references)
- OffsetSize bits,
- Register& scratch, // NOLINT(runtime/references)
- const Operand& rt);
+ // TODO(mips) Reorder parameters so out parameters come last.
+ bool CalculateOffset(Label* L, int32_t* offset, OffsetSize bits);
+ bool CalculateOffset(Label* L, int32_t* offset, OffsetSize bits,
+ Register* scratch, const Operand& rt);
void BranchShortHelperR6(int32_t offset, Label* L);
void BranchShortHelper(int16_t offset, Label* L, BranchDelaySlot bdslot);
diff --git a/deps/v8/src/codegen/optimized-compilation-info.cc b/deps/v8/src/codegen/optimized-compilation-info.cc
index 7dc94f39cd..de89371adb 100644
--- a/deps/v8/src/codegen/optimized-compilation-info.cc
+++ b/deps/v8/src/codegen/optimized-compilation-info.cc
@@ -111,15 +111,9 @@ OptimizedCompilationInfo::~OptimizedCompilationInfo() {
}
void OptimizedCompilationInfo::set_deferred_handles(
- std::shared_ptr<DeferredHandles> deferred_handles) {
+ std::unique_ptr<DeferredHandles> deferred_handles) {
DCHECK_NULL(deferred_handles_);
- deferred_handles_.swap(deferred_handles);
-}
-
-void OptimizedCompilationInfo::set_deferred_handles(
- DeferredHandles* deferred_handles) {
- DCHECK_NULL(deferred_handles_);
- deferred_handles_.reset(deferred_handles);
+ deferred_handles_ = std::move(deferred_handles);
}
void OptimizedCompilationInfo::ReopenHandlesInNewHandleScope(Isolate* isolate) {
@@ -132,6 +126,7 @@ void OptimizedCompilationInfo::ReopenHandlesInNewHandleScope(Isolate* isolate) {
if (!closure_.is_null()) {
closure_ = Handle<JSFunction>(*closure_, isolate);
}
+ DCHECK(code_.is_null());
}
void OptimizedCompilationInfo::AbortOptimization(BailoutReason reason) {
diff --git a/deps/v8/src/codegen/optimized-compilation-info.h b/deps/v8/src/codegen/optimized-compilation-info.h
index 624517283e..2f3afafc68 100644
--- a/deps/v8/src/codegen/optimized-compilation-info.h
+++ b/deps/v8/src/codegen/optimized-compilation-info.h
@@ -231,11 +231,7 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
osr_frame_ = osr_frame;
}
- void set_deferred_handles(std::shared_ptr<DeferredHandles> deferred_handles);
- void set_deferred_handles(DeferredHandles* deferred_handles);
- std::shared_ptr<DeferredHandles> deferred_handles() {
- return deferred_handles_;
- }
+ void set_deferred_handles(std::unique_ptr<DeferredHandles> deferred_handles);
void ReopenHandlesInNewHandleScope(Isolate* isolate);
@@ -330,7 +326,7 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
// OptimizedCompilationInfo allocates.
Zone* zone_;
- std::shared_ptr<DeferredHandles> deferred_handles_;
+ std::unique_ptr<DeferredHandles> deferred_handles_;
BailoutReason bailout_reason_ = BailoutReason::kNoReason;
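The unique_ptr switch above pins down single ownership: the one consumer takes
the handles by value and moves them into the field, so the shared_ptr overload
pair and the accessor that exposed shared ownership can both go. A minimal
sketch with stand-in types:

    #include <memory>
    #include <utility>

    struct DeferredHandlesSketch {};

    class InfoSketch {
     public:
      void set_deferred_handles(std::unique_ptr<DeferredHandlesSketch> h) {
        deferred_handles_ = std::move(h);  // ownership transfers in, once
      }

     private:
      std::unique_ptr<DeferredHandlesSketch> deferred_handles_;
    };

    // Usage: the caller relinquishes ownership explicitly.
    //   auto h = std::make_unique<DeferredHandlesSketch>();
    //   info.set_deferred_handles(std::move(h));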
diff --git a/deps/v8/src/codegen/pending-optimization-table.cc b/deps/v8/src/codegen/pending-optimization-table.cc
index b7be9c7775..84e36fc843 100644
--- a/deps/v8/src/codegen/pending-optimization-table.cc
+++ b/deps/v8/src/codegen/pending-optimization-table.cc
@@ -83,7 +83,7 @@ void PendingOptimizationTable::MarkedForOptimization(
function->ShortPrint();
PrintF(
" should be prepared for optimization with "
- "%%PrepareFunctionForOptimize before "
+ "%%PrepareFunctionForOptimization before "
"%%OptimizeFunctionOnNextCall / %%OptimizeOSR ");
UNREACHABLE();
}
diff --git a/deps/v8/src/codegen/ppc/assembler-ppc-inl.h b/deps/v8/src/codegen/ppc/assembler-ppc-inl.h
index 166b9d4423..c55a5a9c0b 100644
--- a/deps/v8/src/codegen/ppc/assembler-ppc-inl.h
+++ b/deps/v8/src/codegen/ppc/assembler-ppc-inl.h
@@ -144,7 +144,8 @@ void RelocInfo::set_target_object(Heap* heap, HeapObject target,
DCHECK(IsCodeTarget(rmode_) || rmode_ == FULL_EMBEDDED_OBJECT);
Assembler::set_target_address_at(pc_, constant_pool_, target.ptr(),
icache_flush_mode);
- if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null()) {
+ if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null() &&
+ !FLAG_disable_write_barriers) {
WriteBarrierForCode(host(), this, target);
}
}
diff --git a/deps/v8/src/codegen/ppc/assembler-ppc.cc b/deps/v8/src/codegen/ppc/assembler-ppc.cc
index 2a638af070..17a3aba1b2 100644
--- a/deps/v8/src/codegen/ppc/assembler-ppc.cc
+++ b/deps/v8/src/codegen/ppc/assembler-ppc.cc
@@ -200,8 +200,8 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
Handle<HeapObject> object;
switch (request.kind()) {
case HeapObjectRequest::kHeapNumber: {
- object = isolate->factory()->NewHeapNumber(request.heap_number(),
- AllocationType::kOld);
+ object = isolate->factory()->NewHeapNumber<AllocationType::kOld>(
+ request.heap_number());
break;
}
case HeapObjectRequest::kStringConstant: {
diff --git a/deps/v8/src/codegen/ppc/assembler-ppc.h b/deps/v8/src/codegen/ppc/assembler-ppc.h
index dee264a75c..42eda72d4d 100644
--- a/deps/v8/src/codegen/ppc/assembler-ppc.h
+++ b/deps/v8/src/codegen/ppc/assembler-ppc.h
@@ -41,6 +41,7 @@
#define V8_CODEGEN_PPC_ASSEMBLER_PPC_H_
#include <stdio.h>
+#include <memory>
#include <vector>
#include "src/codegen/assembler.h"
diff --git a/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc b/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc
index 4116206333..9e41dec2a8 100644
--- a/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc
+++ b/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc
@@ -1287,12 +1287,11 @@ void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
{
// Load receiver to pass it later to DebugOnFunctionCall hook.
if (actual.is_reg()) {
- mr(r7, actual.reg());
+ ShiftLeftImm(r7, actual.reg(), Operand(kPointerSizeLog2));
+ LoadPX(r7, MemOperand(sp, r7));
} else {
- mov(r7, Operand(actual.immediate()));
+ LoadP(r7, MemOperand(sp, actual.immediate() << kPointerSizeLog2), r0);
}
- ShiftLeftImm(r7, r7, Operand(kPointerSizeLog2));
- LoadPX(r7, MemOperand(sp, r7));
FrameScope frame(this,
has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
if (expected.is_reg()) {
@@ -2409,51 +2408,51 @@ void MacroAssembler::Xor(Register ra, Register rs, const Operand& rb,
void MacroAssembler::CmpSmiLiteral(Register src1, Smi smi, Register scratch,
CRegister cr) {
-#if V8_TARGET_ARCH_PPC64
+#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
+ Cmpi(src1, Operand(smi), scratch, cr);
+#else
LoadSmiLiteral(scratch, smi);
cmp(src1, scratch, cr);
-#else
- Cmpi(src1, Operand(smi), scratch, cr);
#endif
}
void MacroAssembler::CmplSmiLiteral(Register src1, Smi smi, Register scratch,
CRegister cr) {
-#if V8_TARGET_ARCH_PPC64
+#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
+ Cmpli(src1, Operand(smi), scratch, cr);
+#else
LoadSmiLiteral(scratch, smi);
cmpl(src1, scratch, cr);
-#else
- Cmpli(src1, Operand(smi), scratch, cr);
#endif
}
void MacroAssembler::AddSmiLiteral(Register dst, Register src, Smi smi,
Register scratch) {
-#if V8_TARGET_ARCH_PPC64
+#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
+ Add(dst, src, static_cast<intptr_t>(smi.ptr()), scratch);
+#else
LoadSmiLiteral(scratch, smi);
add(dst, src, scratch);
-#else
- Add(dst, src, reinterpret_cast<intptr_t>(smi), scratch);
#endif
}
void MacroAssembler::SubSmiLiteral(Register dst, Register src, Smi smi,
Register scratch) {
-#if V8_TARGET_ARCH_PPC64
+#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
+ Add(dst, src, -(static_cast<intptr_t>(smi.ptr())), scratch);
+#else
LoadSmiLiteral(scratch, smi);
sub(dst, src, scratch);
-#else
- Add(dst, src, -(reinterpret_cast<intptr_t>(smi)), scratch);
#endif
}
void MacroAssembler::AndSmiLiteral(Register dst, Register src, Smi smi,
Register scratch, RCBit rc) {
-#if V8_TARGET_ARCH_PPC64
+#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
+ And(dst, src, Operand(smi), rc);
+#else
LoadSmiLiteral(scratch, smi);
and_(dst, src, scratch, rc);
-#else
- And(dst, src, Operand(smi), rc);
#endif
}
@@ -2941,14 +2940,18 @@ void TurboAssembler::JumpIfLessThan(Register x, int32_t y, Label* dest) {
void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) {
STATIC_ASSERT(kSystemPointerSize == 8);
- STATIC_ASSERT(kSmiShiftSize == 31);
STATIC_ASSERT(kSmiTagSize == 1);
STATIC_ASSERT(kSmiTag == 0);
// The builtin_index register contains the builtin index as a Smi.
// Untagging is folded into the indexing operand below.
+#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
+ ShiftLeftImm(builtin_index, builtin_index,
+ Operand(kSystemPointerSizeLog2 - kSmiShift));
+#else
ShiftRightArithImm(builtin_index, builtin_index,
kSmiShift - kSystemPointerSizeLog2);
+#endif
addi(builtin_index, builtin_index,
Operand(IsolateData::builtin_entry_table_offset()));
LoadPX(builtin_index, MemOperand(kRootRegister, builtin_index));
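The shift arithmetic above folds Smi untagging into the table indexing. With
31-bit smis (pointer compression) the tag occupies the low bit, so converting
a tagged index into a byte offset is one left shift; with full 64-bit smis the
payload sits in the upper word, so it is one arithmetic right shift. A worked
sketch, assuming kSystemPointerSizeLog2 == 3 and kSmiShift == 1 or 32:

    #include <cstdint>

    int64_t BuiltinByteOffset(int64_t tagged, bool thirty_one_bit_smis) {
      if (thirty_one_bit_smis) {
        // value = tagged >> 1; offset = value << 3  =>  tagged << (3 - 1)
        return tagged << 2;
      }
      // value = tagged >> 32; offset = value << 3  =>  tagged >> (32 - 3)
      return tagged >> 29;  // arithmetic shift, as ShiftRightArithImm does
    }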
diff --git a/deps/v8/src/codegen/ppc/macro-assembler-ppc.h b/deps/v8/src/codegen/ppc/macro-assembler-ppc.h
index fd4cb6014b..7ff5a6bb4b 100644
--- a/deps/v8/src/codegen/ppc/macro-assembler-ppc.h
+++ b/deps/v8/src/codegen/ppc/macro-assembler-ppc.h
@@ -876,12 +876,12 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
}
void SmiToPtrArrayOffset(Register dst, Register src) {
-#if V8_TARGET_ARCH_PPC64
- STATIC_ASSERT(kSmiTag == 0 && kSmiShift > kPointerSizeLog2);
- ShiftRightArithImm(dst, src, kSmiShift - kPointerSizeLog2);
-#else
+#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
STATIC_ASSERT(kSmiTag == 0 && kSmiShift < kPointerSizeLog2);
ShiftLeftImm(dst, src, Operand(kPointerSizeLog2 - kSmiShift));
+#else
+ STATIC_ASSERT(kSmiTag == 0 && kSmiShift > kPointerSizeLog2);
+ ShiftRightArithImm(dst, src, kSmiShift - kPointerSizeLog2);
#endif
}
@@ -895,7 +895,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
void AssertNotSmi(Register object);
void AssertSmi(Register object);
-#if V8_TARGET_ARCH_PPC64
+#if !defined(V8_COMPRESS_POINTERS) && !defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
// Ensure it is permissible to read/write int value directly from
// upper half of the smi.
STATIC_ASSERT(kSmiTag == 0);
diff --git a/deps/v8/src/codegen/reglist.h b/deps/v8/src/codegen/reglist.h
index 609e6b8845..4f1d35267d 100644
--- a/deps/v8/src/codegen/reglist.h
+++ b/deps/v8/src/codegen/reglist.h
@@ -25,20 +25,18 @@ constexpr int NumRegs(RegList list) {
return base::bits::CountPopulation(list);
}
+namespace detail {
// Combine two RegLists by building the union of the contained registers.
-// Implemented as a Functor to pass it to base::fold even on gcc < 5 (see
-// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=52892).
-// TODO(clemensh): Remove this once we require gcc >= 5.0.
-struct CombineRegListsFunctor {
- constexpr RegList operator()(RegList list1, RegList list2) const {
- return list1 | list2;
- }
-};
+// TODO(clemensb): Replace by constexpr lambda once we have C++17.
+constexpr RegList CombineRegListsHelper(RegList list1, RegList list2) {
+ return list1 | list2;
+}
+} // namespace detail
// Combine several RegLists by building the union of the contained registers.
template <typename... RegLists>
constexpr RegList CombineRegLists(RegLists... lists) {
- return base::fold(CombineRegListsFunctor{}, 0, lists...);
+ return base::fold(detail::CombineRegListsHelper, 0, lists...);
}
} // namespace internal
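The replacement above works because a plain constexpr function is a valid fold
argument on every supported compiler, where the functor was only needed for
gcc < 5. A minimal sketch of the same union-fold, using a C++17 fold
expression as a stand-in for base::fold:

    #include <cstdint>

    using RegListSketch = uint64_t;  // stand-in for the real RegList

    constexpr RegListSketch Combine(RegListSketch a, RegListSketch b) {
      return a | b;
    }

    template <typename... Lists>
    constexpr RegListSketch CombineAll(Lists... lists) {
      RegListSketch result = 0;
      ((result = Combine(result, lists)), ...);  // folds left to right
      return result;
    }

    static_assert(CombineAll(RegListSketch{1}, RegListSketch{2},
                             RegListSketch{4}) == 7,
                  "bitwise union of three lists");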
diff --git a/deps/v8/src/codegen/reloc-info.cc b/deps/v8/src/codegen/reloc-info.cc
index a889a8b9c7..039a6746b1 100644
--- a/deps/v8/src/codegen/reloc-info.cc
+++ b/deps/v8/src/codegen/reloc-info.cc
@@ -366,7 +366,7 @@ void RelocInfo::set_target_address(Address target,
Assembler::set_target_address_at(pc_, constant_pool_, target,
icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null() &&
- IsCodeTargetMode(rmode_)) {
+ IsCodeTargetMode(rmode_) && !FLAG_disable_write_barriers) {
Code target_code = Code::GetCodeFromTargetAddress(target);
MarkingBarrierForCode(host(), this, target_code);
}
diff --git a/deps/v8/src/codegen/s390/assembler-s390-inl.h b/deps/v8/src/codegen/s390/assembler-s390-inl.h
index 5e7b193c8a..f911bdabf6 100644
--- a/deps/v8/src/codegen/s390/assembler-s390-inl.h
+++ b/deps/v8/src/codegen/s390/assembler-s390-inl.h
@@ -150,7 +150,8 @@ void RelocInfo::set_target_object(Heap* heap, HeapObject target,
DCHECK(IsCodeTarget(rmode_) || rmode_ == FULL_EMBEDDED_OBJECT);
Assembler::set_target_address_at(pc_, constant_pool_, target.ptr(),
icache_flush_mode);
- if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null()) {
+ if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null() &&
+ !FLAG_disable_write_barriers) {
WriteBarrierForCode(host(), this, target);
}
}
diff --git a/deps/v8/src/codegen/s390/assembler-s390.cc b/deps/v8/src/codegen/s390/assembler-s390.cc
index 873c0a2ad0..9de95ed508 100644
--- a/deps/v8/src/codegen/s390/assembler-s390.cc
+++ b/deps/v8/src/codegen/s390/assembler-s390.cc
@@ -329,8 +329,8 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
Address pc = reinterpret_cast<Address>(buffer_start_) + request.offset();
switch (request.kind()) {
case HeapObjectRequest::kHeapNumber: {
- object = isolate->factory()->NewHeapNumber(request.heap_number(),
- AllocationType::kOld);
+ object = isolate->factory()->NewHeapNumber<AllocationType::kOld>(
+ request.heap_number());
set_target_address_at(pc, kNullAddress, object.address(),
SKIP_ICACHE_FLUSH);
break;
diff --git a/deps/v8/src/codegen/s390/assembler-s390.h b/deps/v8/src/codegen/s390/assembler-s390.h
index 0653e79b67..f1a418d1af 100644
--- a/deps/v8/src/codegen/s390/assembler-s390.h
+++ b/deps/v8/src/codegen/s390/assembler-s390.h
@@ -40,6 +40,7 @@
#ifndef V8_CODEGEN_S390_ASSEMBLER_S390_H_
#define V8_CODEGEN_S390_ASSEMBLER_S390_H_
#include <stdio.h>
+#include <memory>
#if V8_HOST_ARCH_S390
// elf.h include is required for auxv check for STFLE facility used
// for hardware detection, which is sensible only on s390 hosts.
diff --git a/deps/v8/src/codegen/s390/macro-assembler-s390.cc b/deps/v8/src/codegen/s390/macro-assembler-s390.cc
index 355d536379..4cab44d9e1 100644
--- a/deps/v8/src/codegen/s390/macro-assembler-s390.cc
+++ b/deps/v8/src/codegen/s390/macro-assembler-s390.cc
@@ -51,7 +51,7 @@ int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
}
RegList list = kJSCallerSaved & ~exclusions;
- bytes += NumRegs(list) * kPointerSize;
+ bytes += NumRegs(list) * kSystemPointerSize;
if (fp_mode == kSaveFPRegs) {
bytes += NumRegs(kCallerSavedDoubles) * kDoubleSize;
@@ -76,7 +76,7 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
RegList list = kJSCallerSaved & ~exclusions;
MultiPush(list);
- bytes += NumRegs(list) * kPointerSize;
+ bytes += NumRegs(list) * kSystemPointerSize;
if (fp_mode == kSaveFPRegs) {
MultiPushDoubles(kCallerSavedDoubles);
@@ -107,7 +107,7 @@ int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
RegList list = kJSCallerSaved & ~exclusions;
MultiPop(list);
- bytes += NumRegs(list) * kPointerSize;
+ bytes += NumRegs(list) * kSystemPointerSize;
return bytes;
}
@@ -116,8 +116,8 @@ void TurboAssembler::LoadFromConstantsTable(Register destination,
int constant_index) {
DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable));
- const uint32_t offset =
- FixedArray::kHeaderSize + constant_index * kPointerSize - kHeapObjectTag;
+ const uint32_t offset = FixedArray::kHeaderSize +
+ constant_index * kSystemPointerSize - kHeapObjectTag;
CHECK(is_uint19(offset));
DCHECK_NE(destination, r0);
@@ -258,7 +258,7 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
void TurboAssembler::Drop(int count) {
if (count > 0) {
- int total = count * kPointerSize;
+ int total = count * kSystemPointerSize;
if (is_uint12(total)) {
la(sp, MemOperand(sp, total));
} else if (is_int20(total)) {
@@ -270,7 +270,7 @@ void TurboAssembler::Drop(int count) {
}
void TurboAssembler::Drop(Register count, Register scratch) {
- ShiftLeftP(scratch, count, Operand(kPointerSizeLog2));
+ ShiftLeftP(scratch, count, Operand(kSystemPointerSizeLog2));
AddP(sp, sp, scratch);
}
@@ -367,12 +367,12 @@ void TurboAssembler::BranchRelativeOnIdxHighP(Register dst, Register inc,
void TurboAssembler::MultiPush(RegList regs, Register location) {
int16_t num_to_push = base::bits::CountPopulation(regs);
- int16_t stack_offset = num_to_push * kPointerSize;
+ int16_t stack_offset = num_to_push * kSystemPointerSize;
SubP(location, location, Operand(stack_offset));
for (int16_t i = Register::kNumRegisters - 1; i >= 0; i--) {
if ((regs & (1 << i)) != 0) {
- stack_offset -= kPointerSize;
+ stack_offset -= kSystemPointerSize;
StoreP(ToRegister(i), MemOperand(location, stack_offset));
}
}
@@ -384,7 +384,7 @@ void TurboAssembler::MultiPop(RegList regs, Register location) {
for (int16_t i = 0; i < Register::kNumRegisters; i++) {
if ((regs & (1 << i)) != 0) {
LoadP(ToRegister(i), MemOperand(location, stack_offset));
- stack_offset += kPointerSize;
+ stack_offset += kSystemPointerSize;
}
}
AddP(location, location, Operand(stack_offset));
@@ -439,13 +439,13 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
}
// Although the object register is tagged, the offset is relative to the start
- // of the object, so so offset must be a multiple of kPointerSize.
- DCHECK(IsAligned(offset, kPointerSize));
+ // of the object, so so offset must be a multiple of kSystemPointerSize.
+ DCHECK(IsAligned(offset, kSystemPointerSize));
lay(dst, MemOperand(object, offset - kHeapObjectTag));
if (emit_debug_code()) {
Label ok;
- AndP(r0, dst, Operand(kPointerSize - 1));
+ AndP(r0, dst, Operand(kSystemPointerSize - 1));
beq(&ok, Label::kNear);
stop();
bind(&ok);
@@ -632,7 +632,7 @@ void TurboAssembler::PushCommonFrame(Register marker_reg) {
Push(r14, fp);
fp_delta = 0;
}
- la(fp, MemOperand(sp, fp_delta * kPointerSize));
+ la(fp, MemOperand(sp, fp_delta * kSystemPointerSize));
}
void TurboAssembler::PopCommonFrame(Register marker_reg) {
@@ -653,7 +653,7 @@ void TurboAssembler::PushStandardFrame(Register function_reg) {
Push(r14, fp, cp);
fp_delta = 1;
}
- la(fp, MemOperand(sp, fp_delta * kPointerSize));
+ la(fp, MemOperand(sp, fp_delta * kSystemPointerSize));
}
void TurboAssembler::RestoreFrameStateForTailCall() {
@@ -1082,9 +1082,9 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
DCHECK(frame_type == StackFrame::EXIT ||
frame_type == StackFrame::BUILTIN_EXIT);
// Set up the frame structure on the stack.
- DCHECK_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
- DCHECK_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
- DCHECK_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
+ DCHECK_EQ(2 * kSystemPointerSize, ExitFrameConstants::kCallerSPDisplacement);
+ DCHECK_EQ(1 * kSystemPointerSize, ExitFrameConstants::kCallerPCOffset);
+ DCHECK_EQ(0 * kSystemPointerSize, ExitFrameConstants::kCallerFPOffset);
DCHECK_GT(stack_space, 0);
// This is an opportunity to build a frame to wrap
@@ -1117,7 +1117,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
// since the sp slot and code slot were pushed after the fp.
}
- lay(sp, MemOperand(sp, -stack_space * kPointerSize));
+ lay(sp, MemOperand(sp, -stack_space * kSystemPointerSize));
// Allocate and align the frame preparing for calling the runtime
// function.
@@ -1127,11 +1127,11 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
ClearRightImm(sp, sp, Operand(3)); // equivalent to &= -8
}
- lay(sp, MemOperand(sp, -kNumRequiredStackFrameSlots * kPointerSize));
+ lay(sp, MemOperand(sp, -kNumRequiredStackFrameSlots * kSystemPointerSize));
StoreP(MemOperand(sp), Operand::Zero(), r0);
// Set the exit frame sp value to point just before the return address
// location.
- lay(r1, MemOperand(sp, kStackFrameSPSlot * kPointerSize));
+ lay(r1, MemOperand(sp, kStackFrameSPSlot * kSystemPointerSize));
StoreP(r1, MemOperand(fp, ExitFrameConstants::kSPOffset));
}
@@ -1184,7 +1184,8 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
if (argument_count.is_valid()) {
if (!argument_count_is_length) {
- ShiftLeftP(argument_count, argument_count, Operand(kPointerSizeLog2));
+ ShiftLeftP(argument_count, argument_count,
+ Operand(kSystemPointerSizeLog2));
}
la(sp, MemOperand(sp, argument_count));
}
@@ -1211,22 +1212,24 @@ void TurboAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
#endif
// Calculate the end of destination area where we will put the arguments
- // after we drop current frame. We AddP kPointerSize to count the receiver
- // argument which is not included into formal parameters count.
+ // after we drop current frame. We AddP kSystemPointerSize to count the
+ // receiver argument which is not included into formal parameters count.
Register dst_reg = scratch0;
- ShiftLeftP(dst_reg, caller_args_count_reg, Operand(kPointerSizeLog2));
+ ShiftLeftP(dst_reg, caller_args_count_reg, Operand(kSystemPointerSizeLog2));
AddP(dst_reg, fp, dst_reg);
AddP(dst_reg, dst_reg,
- Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize));
+ Operand(StandardFrameConstants::kCallerSPOffset + kSystemPointerSize));
Register src_reg = caller_args_count_reg;
- // Calculate the end of source area. +kPointerSize is for the receiver.
+ // Calculate the end of source area. +kSystemPointerSize is for the receiver.
if (callee_args_count.is_reg()) {
- ShiftLeftP(src_reg, callee_args_count.reg(), Operand(kPointerSizeLog2));
+ ShiftLeftP(src_reg, callee_args_count.reg(),
+ Operand(kSystemPointerSizeLog2));
AddP(src_reg, sp, src_reg);
- AddP(src_reg, src_reg, Operand(kPointerSize));
+ AddP(src_reg, src_reg, Operand(kSystemPointerSize));
} else {
- mov(src_reg, Operand((callee_args_count.immediate() + 1) * kPointerSize));
+ mov(src_reg,
+ Operand((callee_args_count.immediate() + 1) * kSystemPointerSize));
AddP(src_reg, src_reg, sp);
}
@@ -1253,10 +1256,10 @@ void TurboAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
}
LoadRR(r1, tmp_reg);
bind(&loop);
- LoadP(tmp_reg, MemOperand(src_reg, -kPointerSize));
- StoreP(tmp_reg, MemOperand(dst_reg, -kPointerSize));
- lay(src_reg, MemOperand(src_reg, -kPointerSize));
- lay(dst_reg, MemOperand(dst_reg, -kPointerSize));
+ LoadP(tmp_reg, MemOperand(src_reg, -kSystemPointerSize));
+ StoreP(tmp_reg, MemOperand(dst_reg, -kSystemPointerSize));
+ lay(src_reg, MemOperand(src_reg, -kSystemPointerSize));
+ lay(dst_reg, MemOperand(dst_reg, -kSystemPointerSize));
BranchOnCount(r1, &loop);
// Leave current frame.
@@ -1342,12 +1345,12 @@ void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
{
// Load receiver to pass it later to DebugOnFunctionCall hook.
if (actual.is_reg()) {
- LoadRR(r6, actual.reg());
+ ShiftLeftP(r6, actual.reg(), Operand(kSystemPointerSizeLog2));
+ LoadP(r6, MemOperand(sp, r6));
} else {
- mov(r6, Operand(actual.immediate()));
+ LoadP(r6, MemOperand(sp, actual.immediate() << kSystemPointerSizeLog2),
+ ip);
}
- ShiftLeftP(r6, r6, Operand(kPointerSizeLog2));
- LoadP(r6, MemOperand(sp, r6));
FrameScope frame(this,
has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
if (expected.is_reg()) {
@@ -1470,8 +1473,8 @@ void MacroAssembler::MaybeDropFrames() {
void MacroAssembler::PushStackHandler() {
// Adjust this code if not the case.
- STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kSystemPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kSystemPointerSize);
// Link the current handler as the next handler.
Move(r7,
@@ -1486,13 +1489,13 @@ void MacroAssembler::PushStackHandler() {
// Copy the old handler into the next handler slot.
MoveChar(MemOperand(sp, StackHandlerConstants::kNextOffset), MemOperand(r7),
- Operand(kPointerSize));
+ Operand(kSystemPointerSize));
// Set this new handler as the current one.
StoreP(sp, MemOperand(r7));
}
void MacroAssembler::PopStackHandler() {
- STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kSystemPointerSize);
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
// Pop the Next Handler into r3 and store it into Handler Address reference.
@@ -1839,18 +1842,19 @@ void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
int stack_passed_arguments =
CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
int stack_space = kNumRequiredStackFrameSlots;
- if (frame_alignment > kPointerSize) {
+ if (frame_alignment > kSystemPointerSize) {
// Make stack end at alignment and make room for stack arguments
// -- preserving original value of sp.
LoadRR(scratch, sp);
- lay(sp, MemOperand(sp, -(stack_passed_arguments + 1) * kPointerSize));
+ lay(sp, MemOperand(sp, -(stack_passed_arguments + 1) * kSystemPointerSize));
DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
ClearRightImm(sp, sp, Operand(WhichPowerOf2(frame_alignment)));
- StoreP(scratch, MemOperand(sp, (stack_passed_arguments)*kPointerSize));
+ StoreP(scratch,
+ MemOperand(sp, (stack_passed_arguments)*kSystemPointerSize));
} else {
stack_space += stack_passed_arguments;
}
- lay(sp, MemOperand(sp, (-stack_space) * kPointerSize));
+ lay(sp, MemOperand(sp, (-stack_space) * kSystemPointerSize));
}
void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
@@ -1940,11 +1944,11 @@ void TurboAssembler::CallCFunctionHelper(Register function,
int stack_passed_arguments =
CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
int stack_space = kNumRequiredStackFrameSlots + stack_passed_arguments;
- if (ActivationFrameAlignment() > kPointerSize) {
+ if (ActivationFrameAlignment() > kSystemPointerSize) {
// Load the original stack pointer (pre-alignment) from the stack
- LoadP(sp, MemOperand(sp, stack_space * kPointerSize));
+ LoadP(sp, MemOperand(sp, stack_space * kSystemPointerSize));
} else {
- la(sp, MemOperand(sp, stack_space * kPointerSize));
+ la(sp, MemOperand(sp, stack_space * kSystemPointerSize));
}
}
@@ -1962,20 +1966,20 @@ void TurboAssembler::CheckPageFlag(
uint32_t shifted_mask = mask;
// Determine the byte offset to be tested
if (mask <= 0x80) {
- byte_offset = kPointerSize - 1;
+ byte_offset = kSystemPointerSize - 1;
} else if (mask < 0x8000) {
- byte_offset = kPointerSize - 2;
+ byte_offset = kSystemPointerSize - 2;
shifted_mask = mask >> 8;
} else if (mask < 0x800000) {
- byte_offset = kPointerSize - 3;
+ byte_offset = kSystemPointerSize - 3;
shifted_mask = mask >> 16;
} else {
- byte_offset = kPointerSize - 4;
+ byte_offset = kSystemPointerSize - 4;
shifted_mask = mask >> 24;
}
#if V8_TARGET_LITTLE_ENDIAN
// Reverse the byte_offset if emulating on a little-endian platform
- byte_offset = kPointerSize - byte_offset - 1;
+ byte_offset = kSystemPointerSize - byte_offset - 1;
#endif
tm(MemOperand(scratch, MemoryChunk::kFlagsOffset + byte_offset),
Operand(shifted_mask));
@@ -3415,12 +3419,12 @@ void TurboAssembler::LoadIntLiteral(Register dst, int value) {
void TurboAssembler::LoadSmiLiteral(Register dst, Smi smi) {
intptr_t value = static_cast<intptr_t>(smi.ptr());
-#if V8_TARGET_ARCH_S390X
+#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
+ llilf(dst, Operand(value));
+#else
DCHECK_EQ(value & 0xFFFFFFFF, 0);
// The smi value is loaded in upper 32-bits. Lower 32-bit are zeros.
llihf(dst, Operand(value >> 32));
-#else
- llilf(dst, Operand(value));
#endif
}
@@ -3456,16 +3460,16 @@ void TurboAssembler::LoadFloat32Literal(DoubleRegister result, float value,
}
void TurboAssembler::CmpSmiLiteral(Register src1, Smi smi, Register scratch) {
-#if V8_TARGET_ARCH_S390X
+#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
+ // CFI takes 32-bit immediate.
+ cfi(src1, Operand(smi));
+#else
if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
cih(src1, Operand(static_cast<intptr_t>(smi.ptr()) >> 32));
} else {
LoadSmiLiteral(scratch, smi);
cgr(src1, scratch);
}
-#else
- // CFI takes 32-bit immediate.
- cfi(src1, Operand(smi));
#endif
}
@@ -4154,7 +4158,7 @@ void TurboAssembler::ShiftRightArith(Register dst, Register src, Register val) {
// Clear right most # of bits
void TurboAssembler::ClearRightImm(Register dst, Register src,
const Operand& val) {
- int numBitsToClear = val.immediate() % (kPointerSize * 8);
+ int numBitsToClear = val.immediate() % (kSystemPointerSize * 8);
// Try to use RISBG if possible
if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
@@ -4342,14 +4346,19 @@ void TurboAssembler::JumpIfLessThan(Register x, int32_t y, Label* dest) {
void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) {
STATIC_ASSERT(kSystemPointerSize == 8);
- STATIC_ASSERT(kSmiShiftSize == 31);
STATIC_ASSERT(kSmiTagSize == 1);
STATIC_ASSERT(kSmiTag == 0);
// The builtin_index register contains the builtin index as a Smi.
// Untagging is folded into the indexing operand below.
+#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
+ STATIC_ASSERT(kSmiShiftSize == 0);
+ ShiftLeftP(builtin_index, builtin_index,
+ Operand(kSystemPointerSizeLog2 - kSmiShift));
+#else
ShiftRightArithP(builtin_index, builtin_index,
Operand(kSmiShift - kSystemPointerSizeLog2));
+#endif
AddP(builtin_index, builtin_index,
Operand(IsolateData::builtin_entry_table_offset()));
LoadP(builtin_index, MemOperand(kRootRegister, builtin_index));
@@ -4427,7 +4436,7 @@ void TurboAssembler::StoreReturnAddressAndCall(Register target) {
Label return_label;
larl(r14, &return_label); // Generate the return addr of call later.
- StoreP(r14, MemOperand(sp, kStackFrameRASlot * kPointerSize));
+ StoreP(r14, MemOperand(sp, kStackFrameRASlot * kSystemPointerSize));
// zLinux ABI requires caller's frame to have sufficient space for callee
// preserved register save area.
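
Note on the CheckPageFlag hunk above: it narrows a pointer-sized flag load down to a single byte so that s390's `tm` (test under mask) can do the test with a one-byte immediate. A minimal host-side sketch of that offset/mask arithmetic, assuming a 64-bit big-endian word as the baseline; the helper name and hard-coded size are illustrative, not V8 API:

#include <cassert>
#include <cstdint>
#include <utility>

// Returns {byte offset within the flags word, one-byte mask} for a 32-bit
// page-flag mask that occupies a single byte lane.
std::pair<int, uint8_t> ByteToTest(uint32_t mask, bool little_endian) {
  const int kSystemPointerSize = 8;  // assumption: 64-bit target
  int byte_offset;
  uint32_t shifted_mask = mask;
  if (mask <= 0x80) {
    byte_offset = kSystemPointerSize - 1;  // lowest byte, big-endian layout
  } else if (mask < 0x8000) {
    byte_offset = kSystemPointerSize - 2;
    shifted_mask = mask >> 8;
  } else if (mask < 0x800000) {
    byte_offset = kSystemPointerSize - 3;
    shifted_mask = mask >> 16;
  } else {
    byte_offset = kSystemPointerSize - 4;
    shifted_mask = mask >> 24;
  }
  if (little_endian) byte_offset = kSystemPointerSize - byte_offset - 1;
  assert(shifted_mask <= 0xFF);  // fits tm's one-byte immediate
  return {byte_offset, static_cast<uint8_t>(shifted_mask)};
}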
diff --git a/deps/v8/src/codegen/s390/macro-assembler-s390.h b/deps/v8/src/codegen/s390/macro-assembler-s390.h
index 856e4b592e..06c26cb305 100644
--- a/deps/v8/src/codegen/s390/macro-assembler-s390.h
+++ b/deps/v8/src/codegen/s390/macro-assembler-s390.h
@@ -515,26 +515,26 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
}
void push(DoubleRegister src) {
- lay(sp, MemOperand(sp, -kPointerSize));
+ lay(sp, MemOperand(sp, -kSystemPointerSize));
StoreDouble(src, MemOperand(sp));
}
void push(Register src) {
- lay(sp, MemOperand(sp, -kPointerSize));
+ lay(sp, MemOperand(sp, -kSystemPointerSize));
StoreP(src, MemOperand(sp));
}
void pop(DoubleRegister dst) {
LoadDouble(dst, MemOperand(sp));
- la(sp, MemOperand(sp, kPointerSize));
+ la(sp, MemOperand(sp, kSystemPointerSize));
}
void pop(Register dst) {
LoadP(dst, MemOperand(sp));
- la(sp, MemOperand(sp, kPointerSize));
+ la(sp, MemOperand(sp, kSystemPointerSize));
}
- void pop() { la(sp, MemOperand(sp, kPointerSize)); }
+ void pop() { la(sp, MemOperand(sp, kSystemPointerSize)); }
void Push(Register src) { push(src); }
@@ -544,25 +544,25 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Push two registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2) {
- lay(sp, MemOperand(sp, -kPointerSize * 2));
- StoreP(src1, MemOperand(sp, kPointerSize));
+ lay(sp, MemOperand(sp, -kSystemPointerSize * 2));
+ StoreP(src1, MemOperand(sp, kSystemPointerSize));
StoreP(src2, MemOperand(sp, 0));
}
// Push three registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2, Register src3) {
- lay(sp, MemOperand(sp, -kPointerSize * 3));
- StoreP(src1, MemOperand(sp, kPointerSize * 2));
- StoreP(src2, MemOperand(sp, kPointerSize));
+ lay(sp, MemOperand(sp, -kSystemPointerSize * 3));
+ StoreP(src1, MemOperand(sp, kSystemPointerSize * 2));
+ StoreP(src2, MemOperand(sp, kSystemPointerSize));
StoreP(src3, MemOperand(sp, 0));
}
// Push four registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2, Register src3, Register src4) {
- lay(sp, MemOperand(sp, -kPointerSize * 4));
- StoreP(src1, MemOperand(sp, kPointerSize * 3));
- StoreP(src2, MemOperand(sp, kPointerSize * 2));
- StoreP(src3, MemOperand(sp, kPointerSize));
+ lay(sp, MemOperand(sp, -kSystemPointerSize * 4));
+ StoreP(src1, MemOperand(sp, kSystemPointerSize * 3));
+ StoreP(src2, MemOperand(sp, kSystemPointerSize * 2));
+ StoreP(src3, MemOperand(sp, kSystemPointerSize));
StoreP(src4, MemOperand(sp, 0));
}
@@ -580,11 +580,11 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
DCHECK(src3 != src5);
DCHECK(src4 != src5);
- lay(sp, MemOperand(sp, -kPointerSize * 5));
- StoreP(src1, MemOperand(sp, kPointerSize * 4));
- StoreP(src2, MemOperand(sp, kPointerSize * 3));
- StoreP(src3, MemOperand(sp, kPointerSize * 2));
- StoreP(src4, MemOperand(sp, kPointerSize));
+ lay(sp, MemOperand(sp, -kSystemPointerSize * 5));
+ StoreP(src1, MemOperand(sp, kSystemPointerSize * 4));
+ StoreP(src2, MemOperand(sp, kSystemPointerSize * 3));
+ StoreP(src3, MemOperand(sp, kSystemPointerSize * 2));
+ StoreP(src4, MemOperand(sp, kSystemPointerSize));
StoreP(src5, MemOperand(sp, 0));
}
@@ -593,36 +593,36 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Pop two registers. Pops rightmost register first (from lower address).
void Pop(Register src1, Register src2) {
LoadP(src2, MemOperand(sp, 0));
- LoadP(src1, MemOperand(sp, kPointerSize));
- la(sp, MemOperand(sp, 2 * kPointerSize));
+ LoadP(src1, MemOperand(sp, kSystemPointerSize));
+ la(sp, MemOperand(sp, 2 * kSystemPointerSize));
}
// Pop three registers. Pops rightmost register first (from lower address).
void Pop(Register src1, Register src2, Register src3) {
LoadP(src3, MemOperand(sp, 0));
- LoadP(src2, MemOperand(sp, kPointerSize));
- LoadP(src1, MemOperand(sp, 2 * kPointerSize));
- la(sp, MemOperand(sp, 3 * kPointerSize));
+ LoadP(src2, MemOperand(sp, kSystemPointerSize));
+ LoadP(src1, MemOperand(sp, 2 * kSystemPointerSize));
+ la(sp, MemOperand(sp, 3 * kSystemPointerSize));
}
// Pop four registers. Pops rightmost register first (from lower address).
void Pop(Register src1, Register src2, Register src3, Register src4) {
LoadP(src4, MemOperand(sp, 0));
- LoadP(src3, MemOperand(sp, kPointerSize));
- LoadP(src2, MemOperand(sp, 2 * kPointerSize));
- LoadP(src1, MemOperand(sp, 3 * kPointerSize));
- la(sp, MemOperand(sp, 4 * kPointerSize));
+ LoadP(src3, MemOperand(sp, kSystemPointerSize));
+ LoadP(src2, MemOperand(sp, 2 * kSystemPointerSize));
+ LoadP(src1, MemOperand(sp, 3 * kSystemPointerSize));
+ la(sp, MemOperand(sp, 4 * kSystemPointerSize));
}
// Pop five registers. Pops rightmost register first (from lower address).
void Pop(Register src1, Register src2, Register src3, Register src4,
Register src5) {
LoadP(src5, MemOperand(sp, 0));
- LoadP(src4, MemOperand(sp, kPointerSize));
- LoadP(src3, MemOperand(sp, 2 * kPointerSize));
- LoadP(src2, MemOperand(sp, 3 * kPointerSize));
- LoadP(src1, MemOperand(sp, 4 * kPointerSize));
- la(sp, MemOperand(sp, 5 * kPointerSize));
+ LoadP(src4, MemOperand(sp, kSystemPointerSize));
+ LoadP(src3, MemOperand(sp, 2 * kSystemPointerSize));
+ LoadP(src2, MemOperand(sp, 3 * kSystemPointerSize));
+ LoadP(src1, MemOperand(sp, 4 * kSystemPointerSize));
+ la(sp, MemOperand(sp, 5 * kSystemPointerSize));
}
// Push a fixed frame, consisting of lr, fp, constant pool.
@@ -1182,12 +1182,12 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
}
void SmiToPtrArrayOffset(Register dst, Register src) {
-#if V8_TARGET_ARCH_S390X
- STATIC_ASSERT(kSmiTag == 0 && kSmiShift > kPointerSizeLog2);
- ShiftRightArithP(dst, src, Operand(kSmiShift - kPointerSizeLog2));
+#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
+ STATIC_ASSERT(kSmiTag == 0 && kSmiShift < kSystemPointerSizeLog2);
+ ShiftLeftP(dst, src, Operand(kSystemPointerSizeLog2 - kSmiShift));
#else
- STATIC_ASSERT(kSmiTag == 0 && kSmiShift < kPointerSizeLog2);
- ShiftLeftP(dst, src, Operand(kPointerSizeLog2 - kSmiShift));
+ STATIC_ASSERT(kSmiTag == 0 && kSmiShift > kSystemPointerSizeLog2);
+ ShiftRightArithP(dst, src, Operand(kSmiShift - kSystemPointerSizeLog2));
#endif
}
@@ -1201,14 +1201,14 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
void AssertNotSmi(Register object);
void AssertSmi(Register object);
-#if V8_TARGET_ARCH_S390X
+#if !defined(V8_COMPRESS_POINTERS) && !defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
// Ensure it is permissible to read/write int value directly from
// upper half of the smi.
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
#endif
#if V8_TARGET_LITTLE_ENDIAN
-#define SmiWordOffset(offset) (offset + kPointerSize / 2)
+#define SmiWordOffset(offset) (offset + kSystemPointerSize / 2)
#else
#define SmiWordOffset(offset) offset
#endif
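
SmiWordOffset above compensates for where the 32-bit smi payload lives: without pointer compression it occupies the upper half of the 64-bit word (per the STATIC_ASSERT block), and the byte address of that upper half depends on endianness. A small sketch, assuming a little-endian host; plain C++, not V8 code:

#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  int64_t boxed = int64_t{42} << 32;  // smi: payload in the upper 32 bits
  int32_t halves[2];
  std::memcpy(halves, &boxed, sizeof(boxed));
  // On little-endian the upper half sits at the higher address, i.e. at
  // byte offset +4, which is the kSystemPointerSize / 2 added by
  // SmiWordOffset.
  assert(halves[1] == 42);
  return 0;
}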
diff --git a/deps/v8/src/codegen/source-position-table.cc b/deps/v8/src/codegen/source-position-table.cc
index 870241eac6..ba8e5981f0 100644
--- a/deps/v8/src/codegen/source-position-table.cc
+++ b/deps/v8/src/codegen/source-position-table.cc
@@ -31,24 +31,23 @@ using MoreBit = BitField8<bool, 7, 1>;
using ValueBits = BitField8<unsigned, 0, 7>;
// Helper: Add the offsets from 'other' to 'value'. Also set is_statement.
-void AddAndSetEntry(PositionTableEntry& value, // NOLINT(runtime/references)
+void AddAndSetEntry(PositionTableEntry* value,
const PositionTableEntry& other) {
- value.code_offset += other.code_offset;
- value.source_position += other.source_position;
- value.is_statement = other.is_statement;
+ value->code_offset += other.code_offset;
+ value->source_position += other.source_position;
+ value->is_statement = other.is_statement;
}
// Helper: Subtract the offsets from 'other' from 'value'.
-void SubtractFromEntry(PositionTableEntry& value, // NOLINT(runtime/references)
+void SubtractFromEntry(PositionTableEntry* value,
const PositionTableEntry& other) {
- value.code_offset -= other.code_offset;
- value.source_position -= other.source_position;
+ value->code_offset -= other.code_offset;
+ value->source_position -= other.source_position;
}
// Helper: Encode an integer.
template <typename T>
-void EncodeInt(std::vector<byte>& bytes, // NOLINT(runtime/references)
- T value) {
+void EncodeInt(std::vector<byte>* bytes, T value) {
using unsigned_type = typename std::make_unsigned<T>::type;
// Zig-zag encoding.
static const int kShift = sizeof(T) * kBitsPerByte - 1;
@@ -60,14 +59,13 @@ void EncodeInt(std::vector<byte>& bytes, // NOLINT(runtime/references)
more = encoded > ValueBits::kMax;
byte current =
MoreBit::encode(more) | ValueBits::encode(encoded & ValueBits::kMask);
- bytes.push_back(current);
+ bytes->push_back(current);
encoded >>= ValueBits::kSize;
} while (more);
}
// Encode a PositionTableEntry.
-void EncodeEntry(std::vector<byte>& bytes, // NOLINT(runtime/references)
- const PositionTableEntry& entry) {
+void EncodeEntry(std::vector<byte>* bytes, const PositionTableEntry& entry) {
// We only accept ascending code offsets.
DCHECK_GE(entry.code_offset, 0);
// Since code_offset is not negative, we use sign to encode is_statement.
@@ -115,17 +113,16 @@ Vector<const byte> VectorFromByteArray(ByteArray byte_array) {
}
#ifdef ENABLE_SLOW_DCHECKS
-void CheckTableEquals(
- std::vector<PositionTableEntry>& raw_entries, // NOLINT(runtime/references)
- SourcePositionTableIterator& encoded) { // NOLINT(runtime/references)
+void CheckTableEquals(const std::vector<PositionTableEntry>& raw_entries,
+ SourcePositionTableIterator* encoded) {
// Brute force testing: Record all positions and decode
// the entire table to verify they are identical.
auto raw = raw_entries.begin();
- for (; !encoded.done(); encoded.Advance(), raw++) {
+ for (; !encoded->done(); encoded->Advance(), raw++) {
DCHECK(raw != raw_entries.end());
- DCHECK_EQ(encoded.code_offset(), raw->code_offset);
- DCHECK_EQ(encoded.source_position().raw(), raw->source_position);
- DCHECK_EQ(encoded.is_statement(), raw->is_statement);
+ DCHECK_EQ(encoded->code_offset(), raw->code_offset);
+ DCHECK_EQ(encoded->source_position().raw(), raw->source_position);
+ DCHECK_EQ(encoded->is_statement(), raw->is_statement);
}
DCHECK(raw == raw_entries.end());
}
@@ -148,8 +145,8 @@ void SourcePositionTableBuilder::AddPosition(size_t code_offset,
void SourcePositionTableBuilder::AddEntry(const PositionTableEntry& entry) {
PositionTableEntry tmp(entry);
- SubtractFromEntry(tmp, previous_);
- EncodeEntry(bytes_, tmp);
+ SubtractFromEntry(&tmp, previous_);
+ EncodeEntry(&bytes_, tmp);
previous_ = entry;
#ifdef ENABLE_SLOW_DCHECKS
raw_entries_.push_back(entry);
@@ -169,7 +166,7 @@ Handle<ByteArray> SourcePositionTableBuilder::ToSourcePositionTable(
// Brute force testing: Record all positions and decode
// the entire table to verify they are identical.
SourcePositionTableIterator it(*table, SourcePositionTableIterator::kAll);
- CheckTableEquals(raw_entries_, it);
+ CheckTableEquals(raw_entries_, &it);
// No additional source positions after creating the table.
mode_ = OMIT_SOURCE_POSITIONS;
#endif
@@ -187,7 +184,7 @@ OwnedVector<byte> SourcePositionTableBuilder::ToSourcePositionTableVector() {
// the entire table to verify they are identical.
SourcePositionTableIterator it(table.as_vector(),
SourcePositionTableIterator::kAll);
- CheckTableEquals(raw_entries_, it);
+ CheckTableEquals(raw_entries_, &it);
// No additional source positions after creating the table.
mode_ = OMIT_SOURCE_POSITIONS;
#endif
@@ -232,7 +229,7 @@ void SourcePositionTableIterator::Advance() {
} else {
PositionTableEntry tmp;
DecodeEntry(bytes, &index_, &tmp);
- AddAndSetEntry(current_, tmp);
+ AddAndSetEntry(&current_, tmp);
SourcePosition p = source_position();
filter_satisfied = (filter_ == kAll) ||
(filter_ == kJavaScriptOnly && p.IsJavaScript()) ||
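
For reference, the scheme the rewritten helpers above implement: each delta is zig-zag mapped so that small negative values stay small, then written out as 7-bit groups with bit 7 as a continuation flag (the MoreBit/ValueBits fields). A standalone sketch that mirrors, rather than reuses, EncodeInt:

#include <cstdint>
#include <vector>

void EncodeIntSketch(std::vector<uint8_t>* bytes, int32_t value) {
  // Zig-zag: 0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, ...
  uint32_t encoded =
      (static_cast<uint32_t>(value) << 1) ^ static_cast<uint32_t>(value >> 31);
  bool more;
  do {
    more = encoded > 0x7F;  // ValueBits::kMax
    bytes->push_back(
        static_cast<uint8_t>((more ? 0x80 : 0x00) | (encoded & 0x7F)));
    encoded >>= 7;  // ValueBits::kSize
  } while (more);
}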
diff --git a/deps/v8/src/codegen/tnode.h b/deps/v8/src/codegen/tnode.h
new file mode 100644
index 0000000000..1f6c627929
--- /dev/null
+++ b/deps/v8/src/codegen/tnode.h
@@ -0,0 +1,374 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CODEGEN_TNODE_H_
+#define V8_CODEGEN_TNODE_H_
+
+#include "src/codegen/machine-type.h"
+
+namespace v8 {
+namespace internal {
+
+class HeapNumber;
+class BigInt;
+class Object;
+
+namespace compiler {
+
+class Node;
+
+}  // namespace compiler
+
+struct UntaggedT {};
+
+struct IntegralT : UntaggedT {};
+
+struct WordT : IntegralT {
+ static const MachineRepresentation kMachineRepresentation =
+ (kSystemPointerSize == 4) ? MachineRepresentation::kWord32
+ : MachineRepresentation::kWord64;
+};
+
+struct RawPtrT : WordT {
+ static constexpr MachineType kMachineType = MachineType::Pointer();
+};
+
+template <class To>
+struct RawPtr : RawPtrT {};
+
+struct Word32T : IntegralT {
+ static const MachineRepresentation kMachineRepresentation =
+ MachineRepresentation::kWord32;
+};
+struct Int32T : Word32T {
+ static constexpr MachineType kMachineType = MachineType::Int32();
+};
+struct Uint32T : Word32T {
+ static constexpr MachineType kMachineType = MachineType::Uint32();
+};
+struct Int16T : Int32T {
+ static constexpr MachineType kMachineType = MachineType::Int16();
+};
+struct Uint16T : Uint32T, Int32T {
+ static constexpr MachineType kMachineType = MachineType::Uint16();
+};
+struct Int8T : Int16T {
+ static constexpr MachineType kMachineType = MachineType::Int8();
+};
+struct Uint8T : Uint16T, Int16T {
+ static constexpr MachineType kMachineType = MachineType::Uint8();
+};
+
+struct Word64T : IntegralT {
+ static const MachineRepresentation kMachineRepresentation =
+ MachineRepresentation::kWord64;
+};
+struct Int64T : Word64T {
+ static constexpr MachineType kMachineType = MachineType::Int64();
+};
+struct Uint64T : Word64T {
+ static constexpr MachineType kMachineType = MachineType::Uint64();
+};
+
+struct IntPtrT : WordT {
+ static constexpr MachineType kMachineType = MachineType::IntPtr();
+};
+struct UintPtrT : WordT {
+ static constexpr MachineType kMachineType = MachineType::UintPtr();
+};
+
+struct Float32T : UntaggedT {
+ static const MachineRepresentation kMachineRepresentation =
+ MachineRepresentation::kFloat32;
+ static constexpr MachineType kMachineType = MachineType::Float32();
+};
+
+struct Float64T : UntaggedT {
+ static const MachineRepresentation kMachineRepresentation =
+ MachineRepresentation::kFloat64;
+ static constexpr MachineType kMachineType = MachineType::Float64();
+};
+
+#ifdef V8_COMPRESS_POINTERS
+using TaggedT = Int32T;
+#else
+using TaggedT = IntPtrT;
+#endif
+
+// Result of a comparison operation.
+struct BoolT : Word32T {};
+
+// Value type of a Turbofan node with two results.
+template <class T1, class T2>
+struct PairT {};
+
+inline constexpr MachineType CommonMachineType(MachineType type1,
+ MachineType type2) {
+ return (type1 == type2) ? type1
+ : ((type1.IsTagged() && type2.IsTagged())
+ ? MachineType::AnyTagged()
+ : MachineType::None());
+}
+
+template <class Type, class Enable = void>
+struct MachineTypeOf {
+ static constexpr MachineType value = Type::kMachineType;
+};
+
+template <class Type, class Enable>
+constexpr MachineType MachineTypeOf<Type, Enable>::value;
+
+template <>
+struct MachineTypeOf<Object> {
+ static constexpr MachineType value = MachineType::AnyTagged();
+};
+template <>
+struct MachineTypeOf<MaybeObject> {
+ static constexpr MachineType value = MachineType::AnyTagged();
+};
+template <>
+struct MachineTypeOf<Smi> {
+ static constexpr MachineType value = MachineType::TaggedSigned();
+};
+template <class HeapObjectSubtype>
+struct MachineTypeOf<HeapObjectSubtype,
+ typename std::enable_if<std::is_base_of<
+ HeapObject, HeapObjectSubtype>::value>::type> {
+ static constexpr MachineType value = MachineType::TaggedPointer();
+};
+
+template <class HeapObjectSubtype>
+constexpr MachineType MachineTypeOf<
+ HeapObjectSubtype, typename std::enable_if<std::is_base_of<
+ HeapObject, HeapObjectSubtype>::value>::type>::value;
+
+template <class Type, class Enable = void>
+struct MachineRepresentationOf {
+ static const MachineRepresentation value = Type::kMachineRepresentation;
+};
+template <class T>
+struct MachineRepresentationOf<
+ T, typename std::enable_if<std::is_base_of<Object, T>::value>::type> {
+ static const MachineRepresentation value =
+ MachineTypeOf<T>::value.representation();
+};
+template <class T>
+struct MachineRepresentationOf<
+ T, typename std::enable_if<std::is_base_of<MaybeObject, T>::value>::type> {
+ static const MachineRepresentation value =
+ MachineTypeOf<T>::value.representation();
+};
+template <>
+struct MachineRepresentationOf<ExternalReference> {
+ static const MachineRepresentation value = RawPtrT::kMachineRepresentation;
+};
+
+template <class T>
+struct is_valid_type_tag {
+ static const bool value = std::is_base_of<Object, T>::value ||
+ std::is_base_of<UntaggedT, T>::value ||
+ std::is_base_of<MaybeObject, T>::value ||
+ std::is_same<ExternalReference, T>::value;
+ static const bool is_tagged = std::is_base_of<Object, T>::value ||
+ std::is_base_of<MaybeObject, T>::value;
+};
+
+template <class T1, class T2>
+struct is_valid_type_tag<PairT<T1, T2>> {
+ static const bool value =
+ is_valid_type_tag<T1>::value && is_valid_type_tag<T2>::value;
+ static const bool is_tagged = false;
+};
+
+template <class T1, class T2>
+struct UnionT;
+
+template <class T1, class T2>
+struct is_valid_type_tag<UnionT<T1, T2>> {
+ static const bool is_tagged =
+ is_valid_type_tag<T1>::is_tagged && is_valid_type_tag<T2>::is_tagged;
+ static const bool value = is_tagged;
+};
+
+template <class T1, class T2>
+struct UnionT {
+ static constexpr MachineType kMachineType =
+ CommonMachineType(MachineTypeOf<T1>::value, MachineTypeOf<T2>::value);
+ static const MachineRepresentation kMachineRepresentation =
+ kMachineType.representation();
+ static_assert(kMachineRepresentation != MachineRepresentation::kNone,
+ "no common representation");
+ static_assert(is_valid_type_tag<T1>::is_tagged &&
+ is_valid_type_tag<T2>::is_tagged,
+ "union types are only possible for tagged values");
+};
+
+using AnyTaggedT = UnionT<Object, MaybeObject>;
+using Number = UnionT<Smi, HeapNumber>;
+using Numeric = UnionT<Number, BigInt>;
+
+// A pointer to a builtin function, used by Torque's function pointers.
+using BuiltinPtr = Smi;
+
+class int31_t {
+ public:
+ int31_t() : value_(0) {}
+ int31_t(int value) : value_(value) { // NOLINT(runtime/explicit)
+ DCHECK_EQ((value & 0x80000000) != 0, (value & 0x40000000) != 0);
+ }
+ int31_t& operator=(int value) {
+ DCHECK_EQ((value & 0x80000000) != 0, (value & 0x40000000) != 0);
+ value_ = value;
+ return *this;
+ }
+ int32_t value() const { return value_; }
+ operator int32_t() const { return value_; }
+
+ private:
+ int32_t value_;
+};
+
+template <class T, class U>
+struct is_subtype {
+ static const bool value = std::is_base_of<U, T>::value;
+};
+template <class T1, class T2, class U>
+struct is_subtype<UnionT<T1, T2>, U> {
+ static const bool value =
+ is_subtype<T1, U>::value && is_subtype<T2, U>::value;
+};
+template <class T, class U1, class U2>
+struct is_subtype<T, UnionT<U1, U2>> {
+ static const bool value =
+ is_subtype<T, U1>::value || is_subtype<T, U2>::value;
+};
+template <class T1, class T2, class U1, class U2>
+struct is_subtype<UnionT<T1, T2>, UnionT<U1, U2>> {
+ static const bool value =
+ (is_subtype<T1, U1>::value || is_subtype<T1, U2>::value) &&
+ (is_subtype<T2, U1>::value || is_subtype<T2, U2>::value);
+};
+
+template <class T, class U>
+struct types_have_common_values {
+ static const bool value = is_subtype<T, U>::value || is_subtype<U, T>::value;
+};
+template <class U>
+struct types_have_common_values<BoolT, U> {
+ static const bool value = types_have_common_values<Word32T, U>::value;
+};
+template <class U>
+struct types_have_common_values<Uint32T, U> {
+ static const bool value = types_have_common_values<Word32T, U>::value;
+};
+template <class U>
+struct types_have_common_values<Int32T, U> {
+ static const bool value = types_have_common_values<Word32T, U>::value;
+};
+template <class U>
+struct types_have_common_values<Uint64T, U> {
+ static const bool value = types_have_common_values<Word64T, U>::value;
+};
+template <class U>
+struct types_have_common_values<Int64T, U> {
+ static const bool value = types_have_common_values<Word64T, U>::value;
+};
+template <class U>
+struct types_have_common_values<IntPtrT, U> {
+ static const bool value = types_have_common_values<WordT, U>::value;
+};
+template <class U>
+struct types_have_common_values<UintPtrT, U> {
+ static const bool value = types_have_common_values<WordT, U>::value;
+};
+template <class T1, class T2, class U>
+struct types_have_common_values<UnionT<T1, T2>, U> {
+ static const bool value = types_have_common_values<T1, U>::value ||
+ types_have_common_values<T2, U>::value;
+};
+
+template <class T, class U1, class U2>
+struct types_have_common_values<T, UnionT<U1, U2>> {
+ static const bool value = types_have_common_values<T, U1>::value ||
+ types_have_common_values<T, U2>::value;
+};
+template <class T1, class T2, class U1, class U2>
+struct types_have_common_values<UnionT<T1, T2>, UnionT<U1, U2>> {
+ static const bool value = types_have_common_values<T1, U1>::value ||
+ types_have_common_values<T1, U2>::value ||
+ types_have_common_values<T2, U1>::value ||
+ types_have_common_values<T2, U2>::value;
+};
+
+template <class T>
+struct types_have_common_values<T, MaybeObject> {
+ static const bool value = types_have_common_values<T, Object>::value;
+};
+
+template <class T>
+struct types_have_common_values<MaybeObject, T> {
+ static const bool value = types_have_common_values<Object, T>::value;
+};
+
+// TNode<T> is an SSA value with the static type tag T, which is one of the
+// following:
+// - a subclass of internal::Object represents a tagged type
+// - a subclass of internal::UntaggedT represents an untagged type
+// - ExternalReference
+// - PairT<T1, T2> for an operation returning two values, with types T1
+// and T2
+// - UnionT<T1, T2> represents either a value of type T1 or of type T2.
+template <class T>
+class TNode {
+ public:
+ template <class U,
+ typename std::enable_if<is_subtype<U, T>::value, int>::type = 0>
+ TNode(const TNode<U>& other) : node_(other) {
+ LazyTemplateChecks();
+ }
+ TNode() : TNode(nullptr) {}
+
+ TNode operator=(TNode other) {
+ DCHECK_NOT_NULL(other.node_);
+ node_ = other.node_;
+ return *this;
+ }
+
+ bool is_null() { return node_ == nullptr; }
+
+ operator compiler::Node*() const { return node_; }
+
+ static TNode UncheckedCast(compiler::Node* node) { return TNode(node); }
+
+ protected:
+ explicit TNode(compiler::Node* node) : node_(node) { LazyTemplateChecks(); }
+
+ private:
+ // These checks shouldn't run until TNode is actually used.
+ void LazyTemplateChecks() {
+ static_assert(is_valid_type_tag<T>::value, "invalid type tag");
+ }
+
+ compiler::Node* node_;
+};
+
+// SloppyTNode<T> is a variant of TNode<T> and allows implicit casts from
+// Node*. It is intended for function arguments as long as some call sites
+// still use untyped Node* arguments.
+// TODO(tebbi): Delete this class once transition is finished.
+template <class T>
+class SloppyTNode : public TNode<T> {
+ public:
+ SloppyTNode(compiler::Node* node) // NOLINT(runtime/explicit)
+ : TNode<T>(node) {}
+ template <class U, typename std::enable_if<is_subtype<U, T>::value,
+ int>::type = 0>
+ SloppyTNode(const TNode<U>& other) // NOLINT(runtime/explicit)
+ : TNode<T>(other) {}
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_CODEGEN_TNODE_H_
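
The type algebra in the new tnode.h is what makes TNode conversions safe: is_subtype drives the implicit TNode<U> to TNode<T> constructor, and UnionT distributes over it on both sides. A few illustrative checks, assuming a translation unit where the real object headers are included (Smi, HeapNumber and BigInt are only forward-declared above):

static_assert(is_subtype<Smi, Object>::value, "smis are tagged values");
static_assert(is_subtype<Smi, Number>::value,
              "Number = UnionT<Smi, HeapNumber> accepts either member");
static_assert(is_subtype<Number, Numeric>::value,
              "unions nest: Numeric = UnionT<Number, BigInt>");
// Hence TNode<Smi> converts implicitly to TNode<Number> or TNode<Object>;
// the reverse direction needs an explicit cast such as
// TNode<Smi>::UncheckedCast.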
diff --git a/deps/v8/src/codegen/turbo-assembler.h b/deps/v8/src/codegen/turbo-assembler.h
index 3a3e65a41e..c0f833b652 100644
--- a/deps/v8/src/codegen/turbo-assembler.h
+++ b/deps/v8/src/codegen/turbo-assembler.h
@@ -5,6 +5,8 @@
#ifndef V8_CODEGEN_TURBO_ASSEMBLER_H_
#define V8_CODEGEN_TURBO_ASSEMBLER_H_
+#include <memory>
+
#include "src/base/template-utils.h"
#include "src/builtins/builtins.h"
#include "src/codegen/assembler-arch.h"
@@ -100,7 +102,7 @@ class V8_EXPORT_PRIVATE TurboAssemblerBase : public Assembler {
static bool IsAddressableThroughRootRegister(
Isolate* isolate, const ExternalReference& reference);
-#if V8_OS_WIN
+#if V8_TARGET_OS_WIN
// Minimum page size. We must touch memory once per page when expanding the
// stack, to avoid access violations.
static constexpr int kStackPageSize = 4 * KB;
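
kStackPageSize above encodes the Windows guard-page contract: the stack may only grow one page at a time, and each new page must be touched before the stack pointer moves past it (the x64 AllocateStackSpace change further down follows the same pattern). A sketch of the emitted sequence, with pseudo-assembly in comments; an illustration, not the V8 implementation:

void AllocateStackSpaceSketch(int bytes) {
  constexpr int kStackPageSize = 4 * 1024;
  while (bytes > kStackPageSize) {
    // subq rsp, kStackPageSize   ; move down one page
    // movb [rsp], 0              ; touch it so the guard page is committed
    bytes -= kStackPageSize;
  }
  // subq rsp, bytes              ; remainder; later stores touch this page
}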
diff --git a/deps/v8/src/codegen/x64/assembler-x64-inl.h b/deps/v8/src/codegen/x64/assembler-x64-inl.h
index f5d0c0ffcf..d8457d9d3e 100644
--- a/deps/v8/src/codegen/x64/assembler-x64-inl.h
+++ b/deps/v8/src/codegen/x64/assembler-x64-inl.h
@@ -218,6 +218,7 @@ Address Assembler::target_address_at(Address pc, Address constant_pool) {
void Assembler::set_target_address_at(Address pc, Address constant_pool,
Address target,
ICacheFlushMode icache_flush_mode) {
+ DCHECK(is_int32(target - pc - 4));
WriteUnalignedValue(pc, static_cast<int32_t>(target - pc - 4));
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
FlushInstructionCache(pc, sizeof(int32_t));
@@ -363,7 +364,8 @@ void RelocInfo::set_target_object(Heap* heap, HeapObject target,
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
FlushInstructionCache(pc_, sizeof(Address));
}
- if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null()) {
+ if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null() &&
+ !FLAG_disable_write_barriers) {
WriteBarrierForCode(host(), this, target);
}
}
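
The new DCHECK in set_target_address_at above guards the rel32 encoding: the stored displacement is relative to the end of the 4-byte field, so the target must lie within a 32-bit offset of pc + 4. The arithmetic as a worked, standalone check (plain C++, not V8 API):

#include <cassert>
#include <cstdint>

int main() {
  uint64_t pc = 0x1000;      // address of the 4-byte rel32 field
  uint64_t target = 0x2000;  // desired call/jump target
  int64_t delta = static_cast<int64_t>(target - pc - 4);
  assert(delta >= INT32_MIN && delta <= INT32_MAX);  // what the DCHECK asserts
  int32_t rel32 = static_cast<int32_t>(delta);
  assert(pc + 4 + rel32 == target);  // decoding inverts the encoding
  return 0;
}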
diff --git a/deps/v8/src/codegen/x64/assembler-x64.cc b/deps/v8/src/codegen/x64/assembler-x64.cc
index 1783da700b..16791a6453 100644
--- a/deps/v8/src/codegen/x64/assembler-x64.cc
+++ b/deps/v8/src/codegen/x64/assembler-x64.cc
@@ -327,8 +327,9 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
Address pc = reinterpret_cast<Address>(buffer_start_) + request.offset();
switch (request.kind()) {
case HeapObjectRequest::kHeapNumber: {
- Handle<HeapNumber> object = isolate->factory()->NewHeapNumber(
- request.heap_number(), AllocationType::kOld);
+ Handle<HeapNumber> object =
+ isolate->factory()->NewHeapNumber<AllocationType::kOld>(
+ request.heap_number());
WriteUnalignedValue(pc, object);
break;
}
@@ -1777,6 +1778,13 @@ void Assembler::emit_mov(Register dst, Immediate64 value, int size) {
}
}
+void Assembler::movq_imm64(Register dst, int64_t value) {
+ EnsureSpace ensure_space(this);
+ emit_rex(dst, kInt64Size);
+ emit(0xB8 | dst.low_bits());
+ emitq(static_cast<uint64_t>(value));
+}
+
void Assembler::movq_heap_number(Register dst, double value) {
EnsureSpace ensure_space(this);
emit_rex(dst, kInt64Size);
@@ -1963,6 +1971,13 @@ void Assembler::emit_repmovs(int size) {
emit(0xA5);
}
+void Assembler::repstosq() {
+ EnsureSpace ensure_space(this);
+ emit(0xF3);
+ emit_rex_64();
+ emit(0xAB);
+}
+
void Assembler::mull(Register src) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(src);
@@ -4099,6 +4114,42 @@ void Assembler::vfmass(byte op, XMMRegister dst, XMMRegister src1,
emit_sse_operand(dst, src2);
}
+void Assembler::vfmaps(byte op, XMMRegister dst, XMMRegister src1,
+ XMMRegister src2) {
+ DCHECK(IsEnabled(FMA3));
+ EnsureSpace ensure_space(this);
+ emit_vex_prefix(dst, src1, src2, kL128, k66, k0F38, kW0);
+ emit(op);
+ emit_sse_operand(dst, src2);
+}
+
+void Assembler::vfmaps(byte op, XMMRegister dst, XMMRegister src1,
+ Operand src2) {
+ DCHECK(IsEnabled(FMA3));
+ EnsureSpace ensure_space(this);
+ emit_vex_prefix(dst, src1, src2, kL128, k66, k0F38, kW0);
+ emit(op);
+ emit_sse_operand(dst, src2);
+}
+
+void Assembler::vfmapd(byte op, XMMRegister dst, XMMRegister src1,
+ XMMRegister src2) {
+ DCHECK(IsEnabled(FMA3));
+ EnsureSpace ensure_space(this);
+ emit_vex_prefix(dst, src1, src2, kL128, k66, k0F38, kW1);
+ emit(op);
+ emit_sse_operand(dst, src2);
+}
+
+void Assembler::vfmapd(byte op, XMMRegister dst, XMMRegister src1,
+ Operand src2) {
+ DCHECK(IsEnabled(FMA3));
+ EnsureSpace ensure_space(this);
+ emit_vex_prefix(dst, src1, src2, kL128, k66, k0F38, kW1);
+ emit(op);
+ emit_sse_operand(dst, src2);
+}
+
void Assembler::vmovd(XMMRegister dst, Register src) {
DCHECK(IsEnabled(AVX));
EnsureSpace ensure_space(this);
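
The vfmaps/vfmapd helpers added above emit FMA3 forms. Under the 231 operand ordering, vfmadd231ps computes dst = src1 * src2 + dst per lane with a single rounding, and the vfnmadd231 variants negate the product. A scalar sketch of the semantics, with std::fma standing in for the packed instruction:

#include <cmath>

// What one lane of vfmadd231ps computes: dst = src1 * src2 + dst.
float Fmadd231(float dst, float src1, float src2) {
  return std::fma(src1, src2, dst);
}

// What one lane of vfnmadd231ps computes: dst = -(src1 * src2) + dst.
float Fnmadd231(float dst, float src1, float src2) {
  return std::fma(-src1, src2, dst);
}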
diff --git a/deps/v8/src/codegen/x64/assembler-x64.h b/deps/v8/src/codegen/x64/assembler-x64.h
index 7c69b4c473..74cfd0ab85 100644
--- a/deps/v8/src/codegen/x64/assembler-x64.h
+++ b/deps/v8/src/codegen/x64/assembler-x64.h
@@ -39,6 +39,7 @@
#include <deque>
#include <map>
+#include <memory>
#include <vector>
#include "src/codegen/assembler.h"
@@ -155,7 +156,9 @@ enum ScaleFactor : int8_t {
times_4 = 2,
times_8 = 3,
times_int_size = times_4,
- times_system_pointer_size = (kSystemPointerSize == 8) ? times_8 : times_4,
+
+ times_half_system_pointer_size = times_4,
+ times_system_pointer_size = times_8,
times_tagged_size = (kTaggedSize == 8) ? times_8 : times_4,
};
@@ -513,12 +516,16 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void movq_string(Register dst, const StringConstantBase* str);
- // Loads a 64-bit immediate into a register.
+ // Loads a 64-bit immediate into a register, potentially using the constant
+ // pool.
void movq(Register dst, int64_t value) { movq(dst, Immediate64(value)); }
void movq(Register dst, uint64_t value) {
movq(dst, Immediate64(static_cast<int64_t>(value)));
}
+ // Loads a 64-bit immediate into a register without using the constant pool.
+ void movq_imm64(Register dst, int64_t value);
+
void movsxbl(Register dst, Register src);
void movsxbl(Register dst, Operand src);
void movsxbq(Register dst, Register src);
@@ -531,12 +538,14 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void movsxlq(Register dst, Operand src);
// Repeated moves.
-
void repmovsb();
void repmovsw();
void repmovsl() { emit_repmovs(kInt32Size); }
void repmovsq() { emit_repmovs(kInt64Size); }
+ // Repeated store of quadwords (fill RCX quadwords at [RDI] with RAX).
+ void repstosq();
+
// Instruction to load from an immediate 64-bit pointer into RAX.
void load_rax(Address value, RelocInfo::Mode rmode);
void load_rax(ExternalReference ext);
@@ -1295,6 +1304,36 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void vfmass(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2);
void vfmass(byte op, XMMRegister dst, XMMRegister src1, Operand src2);
+ void vfmadd231ps(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+ vfmaps(0xb8, dst, src1, src2);
+ }
+ void vfmadd231ps(XMMRegister dst, XMMRegister src1, Operand src2) {
+ vfmaps(0xb8, dst, src1, src2);
+ }
+ void vfnmadd231ps(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+ vfmaps(0xbc, dst, src1, src2);
+ }
+ void vfnmadd231ps(XMMRegister dst, XMMRegister src1, Operand src2) {
+ vfmaps(0xbc, dst, src1, src2);
+ }
+ void vfmaps(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2);
+ void vfmaps(byte op, XMMRegister dst, XMMRegister src1, Operand src2);
+
+ void vfmadd231pd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+ vfmapd(0xb8, dst, src1, src2);
+ }
+ void vfmadd231pd(XMMRegister dst, XMMRegister src1, Operand src2) {
+ vfmapd(0xb8, dst, src1, src2);
+ }
+ void vfnmadd231pd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+ vfmapd(0xbc, dst, src1, src2);
+ }
+ void vfnmadd231pd(XMMRegister dst, XMMRegister src1, Operand src2) {
+ vfmapd(0xbc, dst, src1, src2);
+ }
+ void vfmapd(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2);
+ void vfmapd(byte op, XMMRegister dst, XMMRegister src1, Operand src2);
+
void vmovd(XMMRegister dst, Register src);
void vmovd(XMMRegister dst, Operand src);
void vmovd(Register dst, XMMRegister src);
@@ -1330,7 +1369,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
impl(opcode, dst, src1, src2); \
}
- AVX_SP_3(vsqrt, 0x51)
+ // vsqrtpd is defined by sqrtpd in SSE2_INSTRUCTION_LIST
+ AVX_S_3(vsqrt, 0x51)
+ AVX_3(vsqrtps, 0x51, vps)
AVX_S_3(vadd, 0x58)
AVX_S_3(vsub, 0x5c)
AVX_S_3(vmul, 0x59)
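
The ScaleFactor split above (times_half_system_pointer_size is always times_4 now) feeds the EntryFromBuiltinIndexAsOperand rewrite later in this diff: a 31-bit smi already carries one left shift, so scaling the still-tagged value by half a pointer size yields index * kSystemPointerSize without untagging first. The arithmetic as a standalone check (not V8 API):

#include <cassert>
#include <cstdint>

int main() {
  int64_t index = 13;        // builtin index
  int64_t smi = index << 1;  // 31-bit smi tagging: kSmiShift == 1, tag == 0
  const int64_t kSystemPointerSize = 8;
  assert(smi * (kSystemPointerSize / 2) == index * kSystemPointerSize);
  return 0;
}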
diff --git a/deps/v8/src/codegen/x64/macro-assembler-x64.cc b/deps/v8/src/codegen/x64/macro-assembler-x64.cc
index 4deeb1bc02..d02b95b38e 100644
--- a/deps/v8/src/codegen/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/codegen/x64/macro-assembler-x64.cc
@@ -218,45 +218,45 @@ void TurboAssembler::CompareRoot(Operand with, RootIndex index) {
void TurboAssembler::LoadTaggedPointerField(Register destination,
Operand field_operand) {
-#ifdef V8_COMPRESS_POINTERS
- DecompressTaggedPointer(destination, field_operand);
-#else
- mov_tagged(destination, field_operand);
-#endif
+ if (COMPRESS_POINTERS_BOOL) {
+ DecompressTaggedPointer(destination, field_operand);
+ } else {
+ mov_tagged(destination, field_operand);
+ }
}
void TurboAssembler::LoadAnyTaggedField(Register destination,
Operand field_operand,
Register scratch) {
-#ifdef V8_COMPRESS_POINTERS
- DecompressAnyTagged(destination, field_operand, scratch);
-#else
- mov_tagged(destination, field_operand);
-#endif
+ if (COMPRESS_POINTERS_BOOL) {
+ DecompressAnyTagged(destination, field_operand, scratch);
+ } else {
+ mov_tagged(destination, field_operand);
+ }
}
void TurboAssembler::PushTaggedPointerField(Operand field_operand,
Register scratch) {
-#ifdef V8_COMPRESS_POINTERS
- DCHECK(!field_operand.AddressUsesRegister(scratch));
- DecompressTaggedPointer(scratch, field_operand);
- Push(scratch);
-#else
- Push(field_operand);
-#endif
+ if (COMPRESS_POINTERS_BOOL) {
+ DCHECK(!field_operand.AddressUsesRegister(scratch));
+ DecompressTaggedPointer(scratch, field_operand);
+ Push(scratch);
+ } else {
+ Push(field_operand);
+ }
}
void TurboAssembler::PushTaggedAnyField(Operand field_operand,
Register scratch1, Register scratch2) {
-#ifdef V8_COMPRESS_POINTERS
- DCHECK(!AreAliased(scratch1, scratch2));
- DCHECK(!field_operand.AddressUsesRegister(scratch1));
- DCHECK(!field_operand.AddressUsesRegister(scratch2));
- DecompressAnyTagged(scratch1, field_operand, scratch2);
- Push(scratch1);
-#else
- Push(field_operand);
-#endif
+ if (COMPRESS_POINTERS_BOOL) {
+ DCHECK(!AreAliased(scratch1, scratch2));
+ DCHECK(!field_operand.AddressUsesRegister(scratch1));
+ DCHECK(!field_operand.AddressUsesRegister(scratch2));
+ DecompressAnyTagged(scratch1, field_operand, scratch2);
+ Push(scratch1);
+ } else {
+ Push(field_operand);
+ }
}
void TurboAssembler::SmiUntagField(Register dst, Operand src) {
@@ -265,44 +265,40 @@ void TurboAssembler::SmiUntagField(Register dst, Operand src) {
void TurboAssembler::StoreTaggedField(Operand dst_field_operand,
Immediate value) {
-#ifdef V8_COMPRESS_POINTERS
- RecordComment("[ StoreTagged");
- movl(dst_field_operand, value);
- RecordComment("]");
-#else
- movq(dst_field_operand, value);
-#endif
+ if (COMPRESS_POINTERS_BOOL) {
+ movl(dst_field_operand, value);
+ } else {
+ movq(dst_field_operand, value);
+ }
}
void TurboAssembler::StoreTaggedField(Operand dst_field_operand,
Register value) {
-#ifdef V8_COMPRESS_POINTERS
- RecordComment("[ StoreTagged");
- movl(dst_field_operand, value);
- RecordComment("]");
-#else
- movq(dst_field_operand, value);
-#endif
+ if (COMPRESS_POINTERS_BOOL) {
+ movl(dst_field_operand, value);
+ } else {
+ movq(dst_field_operand, value);
+ }
}
void TurboAssembler::DecompressTaggedSigned(Register destination,
Operand field_operand) {
RecordComment("[ DecompressTaggedSigned");
- movsxlq(destination, field_operand);
+ movl(destination, field_operand);
RecordComment("]");
}
void TurboAssembler::DecompressTaggedSigned(Register destination,
Register source) {
RecordComment("[ DecompressTaggedSigned");
- movsxlq(destination, source);
+ movl(destination, source);
RecordComment("]");
}
void TurboAssembler::DecompressTaggedPointer(Register destination,
Operand field_operand) {
RecordComment("[ DecompressTaggedPointer");
- movsxlq(destination, field_operand);
+ movl(destination, field_operand);
addq(destination, kRootRegister);
RecordComment("]");
}
@@ -310,30 +306,14 @@ void TurboAssembler::DecompressTaggedPointer(Register destination,
void TurboAssembler::DecompressTaggedPointer(Register destination,
Register source) {
RecordComment("[ DecompressTaggedPointer");
- movsxlq(destination, source);
+ movl(destination, source);
addq(destination, kRootRegister);
RecordComment("]");
}
void TurboAssembler::DecompressRegisterAnyTagged(Register destination,
Register scratch) {
- if (kUseBranchlessPtrDecompressionInGeneratedCode) {
- // Branchlessly compute |masked_root|:
- // masked_root = HAS_SMI_TAG(destination) ? 0 : kRootRegister;
- STATIC_ASSERT((kSmiTagSize == 1) && (kSmiTag < 32));
- Register masked_root = scratch;
- xorq(masked_root, masked_root);
- Condition smi = CheckSmi(destination);
- cmovq(NegateCondition(smi), masked_root, kRootRegister);
- // Now this add operation will either leave the value unchanged if it is
- // a smi or add the isolate root if it is a heap object.
- addq(destination, masked_root);
- } else {
- Label done;
- JumpIfSmi(destination, &done);
- addq(destination, kRootRegister);
- bind(&done);
- }
+ addq(destination, kRootRegister);
}
void TurboAssembler::DecompressAnyTagged(Register destination,
@@ -341,7 +321,7 @@ void TurboAssembler::DecompressAnyTagged(Register destination,
Register scratch) {
DCHECK(!AreAliased(destination, scratch));
RecordComment("[ DecompressAnyTagged");
- movsxlq(destination, field_operand);
+ movl(destination, field_operand);
DecompressRegisterAnyTagged(destination, scratch);
RecordComment("]");
}
@@ -350,7 +330,7 @@ void TurboAssembler::DecompressAnyTagged(Register destination, Register source,
Register scratch) {
DCHECK(!AreAliased(destination, scratch));
RecordComment("[ DecompressAnyTagged");
- movsxlq(destination, source);
+ movl(destination, source);
DecompressRegisterAnyTagged(destination, scratch);
RecordComment("]");
}
@@ -1109,7 +1089,11 @@ Register TurboAssembler::GetSmiConstant(Smi source) {
xorl(kScratchRegister, kScratchRegister);
return kScratchRegister;
}
- Move(kScratchRegister, source);
+ if (SmiValuesAre32Bits()) {
+ Move(kScratchRegister, source);
+ } else {
+ movl(kScratchRegister, Immediate(source));
+ }
return kScratchRegister;
}
@@ -1133,20 +1117,47 @@ void TurboAssembler::Move(Register dst, ExternalReference ext) {
movq(dst, Immediate64(ext.address(), RelocInfo::EXTERNAL_REFERENCE));
}
-void MacroAssembler::SmiTag(Register dst, Register src) {
+void MacroAssembler::SmiTag(Register reg) {
STATIC_ASSERT(kSmiTag == 0);
- if (dst != src) {
+ DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits());
+ if (COMPRESS_POINTERS_BOOL) {
+ shll(reg, Immediate(kSmiShift));
+ } else {
+ shlq(reg, Immediate(kSmiShift));
+ }
+}
+
+void MacroAssembler::SmiTag(Register dst, Register src) {
+ DCHECK(dst != src);
+ if (COMPRESS_POINTERS_BOOL) {
+ movl(dst, src);
+ } else {
movq(dst, src);
}
+ SmiTag(dst);
+}
+
+void TurboAssembler::SmiUntag(Register reg) {
+ STATIC_ASSERT(kSmiTag == 0);
DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits());
- shlq(dst, Immediate(kSmiShift));
+ // TODO(v8:7703): Is there a way to avoid this sign extension when pointer
+ // compression is enabled?
+ if (COMPRESS_POINTERS_BOOL) {
+ movsxlq(reg, reg);
+ }
+ sarq(reg, Immediate(kSmiShift));
}
void TurboAssembler::SmiUntag(Register dst, Register src) {
- STATIC_ASSERT(kSmiTag == 0);
- if (dst != src) {
+ DCHECK(dst != src);
+ if (COMPRESS_POINTERS_BOOL) {
+ movsxlq(dst, src);
+ } else {
movq(dst, src);
}
+ // TODO(v8:7703): Call SmiUntag(reg) if we can find a way to avoid the extra
+ // mov when pointer compression is enabled.
+ STATIC_ASSERT(kSmiTag == 0);
DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits());
sarq(dst, Immediate(kSmiShift));
}
@@ -1158,12 +1169,13 @@ void TurboAssembler::SmiUntag(Register dst, Operand src) {
movsxlq(dst, dst);
} else {
DCHECK(SmiValuesAre31Bits());
-#ifdef V8_COMPRESS_POINTERS
- movsxlq(dst, src);
-#else
- movq(dst, src);
-#endif
- sarq(dst, Immediate(kSmiShift));
+ if (COMPRESS_POINTERS_BOOL) {
+ movsxlq(dst, src);
+ sarq(dst, Immediate(kSmiShift));
+ } else {
+ movq(dst, src);
+ sarq(dst, Immediate(kSmiShift));
+ }
}
}
@@ -1283,12 +1295,9 @@ SmiIndex MacroAssembler::SmiToIndex(Register dst, Register src, int shift) {
return SmiIndex(dst, times_1);
} else {
DCHECK(SmiValuesAre31Bits());
- if (dst != src) {
- mov_tagged(dst, src);
- }
// We have to sign extend the index register to 64-bit as the SMI might
// be negative.
- movsxlq(dst, dst);
+ movsxlq(dst, src);
if (shift < kSmiShift) {
sarq(dst, Immediate(kSmiShift - shift));
} else if (shift != kSmiShift) {
@@ -1423,7 +1432,6 @@ void MacroAssembler::Negpd(XMMRegister dst) {
}
void MacroAssembler::Cmp(Register dst, Handle<Object> source) {
- AllowDeferredHandleDereference smi_check;
if (source->IsSmi()) {
Cmp(dst, Smi::cast(*source));
} else {
@@ -1433,7 +1441,6 @@ void MacroAssembler::Cmp(Register dst, Handle<Object> source) {
}
void MacroAssembler::Cmp(Operand dst, Handle<Object> source) {
- AllowDeferredHandleDereference smi_check;
if (source->IsSmi()) {
Cmp(dst, Smi::cast(*source));
} else {
@@ -1463,6 +1470,8 @@ void TurboAssembler::Move(Register result, Handle<HeapObject> object,
RelocInfo::Mode rmode) {
if (FLAG_embedded_builtins) {
if (root_array_available_ && options().isolate_independent_code) {
+ // TODO(v8:9706): Fix-it! This load will always uncompress the value
+ // even when we are loading a compressed embedded object.
IndirectLoadConstant(result, object);
return;
}
@@ -1605,26 +1614,20 @@ void TurboAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
}
Operand TurboAssembler::EntryFromBuiltinIndexAsOperand(Register builtin_index) {
-#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
- STATIC_ASSERT(kSmiShiftSize == 0);
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kSmiTag == 0);
-
- // The builtin_index register contains the builtin index as a Smi.
- // Untagging is folded into the indexing operand below (we use times_4 instead
- // of times_8 since smis are already shifted by one).
- return Operand(kRootRegister, builtin_index, times_4,
- IsolateData::builtin_entry_table_offset());
-#else // defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
- STATIC_ASSERT(kSmiShiftSize == 31);
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kSmiTag == 0);
+ if (SmiValuesAre32Bits()) {
+ // The builtin_index register contains the builtin index as a Smi.
+ SmiUntag(builtin_index);
+ return Operand(kRootRegister, builtin_index, times_system_pointer_size,
+ IsolateData::builtin_entry_table_offset());
+ } else {
+ DCHECK(SmiValuesAre31Bits());
- // The builtin_index register contains the builtin index as a Smi.
- SmiUntag(builtin_index, builtin_index);
- return Operand(kRootRegister, builtin_index, times_8,
- IsolateData::builtin_entry_table_offset());
-#endif // defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
+ // The builtin_index register contains the builtin index as a Smi.
+ // Untagging is folded into the indexing operand below (we use
+ // times_half_system_pointer_size since smis are already shifted by one).
+ return Operand(kRootRegister, builtin_index, times_half_system_pointer_size,
+ IsolateData::builtin_entry_table_offset());
+ }
}
void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
@@ -1739,7 +1742,11 @@ void TurboAssembler::Pextrd(Register dst, XMMRegister src, int8_t imm8) {
Movd(dst, src);
return;
}
- if (CpuFeatures::IsSupported(SSE4_1)) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vpextrd(dst, src, imm8);
+ return;
+ } else if (CpuFeatures::IsSupported(SSE4_1)) {
CpuFeatureScope sse_scope(this, SSE4_1);
pextrd(dst, src, imm8);
return;
@@ -1749,8 +1756,38 @@ void TurboAssembler::Pextrd(Register dst, XMMRegister src, int8_t imm8) {
shrq(dst, Immediate(32));
}
+void TurboAssembler::Pextrw(Register dst, XMMRegister src, int8_t imm8) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vpextrw(dst, src, imm8);
+ return;
+ } else {
+ DCHECK(CpuFeatures::IsSupported(SSE4_1));
+ CpuFeatureScope sse_scope(this, SSE4_1);
+ pextrw(dst, src, imm8);
+ return;
+ }
+}
+
+void TurboAssembler::Pextrb(Register dst, XMMRegister src, int8_t imm8) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vpextrb(dst, src, imm8);
+ return;
+ } else {
+ DCHECK(CpuFeatures::IsSupported(SSE4_1));
+ CpuFeatureScope sse_scope(this, SSE4_1);
+ pextrb(dst, src, imm8);
+ return;
+ }
+}
+
void TurboAssembler::Pinsrd(XMMRegister dst, Register src, int8_t imm8) {
- if (CpuFeatures::IsSupported(SSE4_1)) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vpinsrd(dst, dst, src, imm8);
+ return;
+ } else if (CpuFeatures::IsSupported(SSE4_1)) {
CpuFeatureScope sse_scope(this, SSE4_1);
pinsrd(dst, src, imm8);
return;
@@ -1765,7 +1802,11 @@ void TurboAssembler::Pinsrd(XMMRegister dst, Register src, int8_t imm8) {
}
void TurboAssembler::Pinsrd(XMMRegister dst, Operand src, int8_t imm8) {
- if (CpuFeatures::IsSupported(SSE4_1)) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vpinsrd(dst, dst, src, imm8);
+ return;
+ } else if (CpuFeatures::IsSupported(SSE4_1)) {
CpuFeatureScope sse_scope(this, SSE4_1);
pinsrd(dst, src, imm8);
return;
@@ -1779,6 +1820,56 @@ void TurboAssembler::Pinsrd(XMMRegister dst, Operand src, int8_t imm8) {
}
}
+void TurboAssembler::Pinsrw(XMMRegister dst, Register src, int8_t imm8) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vpinsrw(dst, dst, src, imm8);
+ return;
+ } else {
+ DCHECK(CpuFeatures::IsSupported(SSE4_1));
+ CpuFeatureScope sse_scope(this, SSE4_1);
+ pinsrw(dst, src, imm8);
+ return;
+ }
+}
+
+void TurboAssembler::Pinsrw(XMMRegister dst, Operand src, int8_t imm8) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vpinsrw(dst, dst, src, imm8);
+ return;
+ } else {
+ CpuFeatureScope sse_scope(this, SSE4_1);
+ pinsrw(dst, src, imm8);
+ return;
+ }
+}
+
+void TurboAssembler::Pinsrb(XMMRegister dst, Register src, int8_t imm8) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vpinsrb(dst, dst, src, imm8);
+ return;
+ } else {
+ DCHECK(CpuFeatures::IsSupported(SSE4_1));
+ CpuFeatureScope sse_scope(this, SSE4_1);
+ pinsrb(dst, src, imm8);
+ return;
+ }
+}
+
+void TurboAssembler::Pinsrb(XMMRegister dst, Operand src, int8_t imm8) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vpinsrb(dst, dst, src, imm8);
+ return;
+ } else {
+ CpuFeatureScope sse_scope(this, SSE4_1);
+ pinsrb(dst, src, imm8);
+ return;
+ }
+}
+
void TurboAssembler::Psllq(XMMRegister dst, byte imm8) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
@@ -1819,6 +1910,16 @@ void TurboAssembler::Psrld(XMMRegister dst, byte imm8) {
}
}
+void TurboAssembler::Pshufd(XMMRegister dst, XMMRegister src, uint8_t shuffle) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vpshufd(dst, src, shuffle);
+ } else {
+ DCHECK(!IsEnabled(AVX));
+ pshufd(dst, src, shuffle);
+ }
+}
+
void TurboAssembler::Lzcntl(Register dst, Register src) {
if (CpuFeatures::IsSupported(LZCNT)) {
CpuFeatureScope scope(this, LZCNT);
@@ -2278,7 +2379,16 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
DCHECK_IMPLIES(new_target.is_valid(), new_target == rdx);
// On function call, call into the debugger if necessary.
- CheckDebugHook(function, new_target, expected, actual);
+ Label debug_hook, continue_after_hook;
+ {
+ ExternalReference debug_hook_active =
+ ExternalReference::debug_hook_on_function_call_address(isolate());
+ Operand debug_hook_active_operand =
+ ExternalReferenceAsOperand(debug_hook_active);
+ cmpb(debug_hook_active_operand, Immediate(0));
+ j(not_equal, &debug_hook, Label::kNear);
+ }
+ bind(&continue_after_hook);
// Clear the new.target register if not given.
if (!new_target.is_valid()) {
@@ -2302,8 +2412,15 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
DCHECK(flag == JUMP_FUNCTION);
JumpCodeObject(rcx);
}
- bind(&done);
}
+ jmp(&done, Label::kNear);
+
+ // Deferred debug hook.
+ bind(&debug_hook);
+ CallDebugOnFunctionCall(function, new_target, expected, actual);
+ jmp(&continue_after_hook, Label::kNear);
+
+ bind(&done);
}
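
The effect of the rewrite above is to take the debug hook off the hot path: the inline sequence is now one byte compare plus a normally-not-taken branch, and the hook invocation is emitted out of line after the main body. The emitted shape is roughly (a sketch, not actual disassembly):

```cpp
//   cmpb  [debug_hook_active], 0
//   jne   debug_hook            ; rarely taken
// continue_after_hook:
//   ...clear new.target if absent, shuffle arguments, call/jump rcx...
//   jmp   done
// debug_hook:                   ; deferred slow path
//   CallDebugOnFunctionCall(function, new_target, expected, actual)
//   jmp   continue_after_hook
// done:
```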
void MacroAssembler::InvokePrologue(const ParameterCount& expected,
@@ -2368,50 +2485,38 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
}
}
-void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
- const ParameterCount& expected,
- const ParameterCount& actual) {
- Label skip_hook;
- ExternalReference debug_hook_active =
- ExternalReference::debug_hook_on_function_call_address(isolate());
- Operand debug_hook_active_operand =
- ExternalReferenceAsOperand(debug_hook_active);
- cmpb(debug_hook_active_operand, Immediate(0));
- j(equal, &skip_hook);
-
- {
- FrameScope frame(this,
- has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
- if (expected.is_reg()) {
- SmiTag(expected.reg(), expected.reg());
- Push(expected.reg());
- }
- if (actual.is_reg()) {
- SmiTag(actual.reg(), actual.reg());
- Push(actual.reg());
- SmiUntag(actual.reg(), actual.reg());
- }
- if (new_target.is_valid()) {
- Push(new_target);
- }
- Push(fun);
- Push(fun);
- Push(StackArgumentsAccessor(rbp, actual).GetReceiverOperand());
- CallRuntime(Runtime::kDebugOnFunctionCall);
- Pop(fun);
- if (new_target.is_valid()) {
- Pop(new_target);
- }
- if (actual.is_reg()) {
- Pop(actual.reg());
- SmiUntag(actual.reg(), actual.reg());
- }
- if (expected.is_reg()) {
- Pop(expected.reg());
- SmiUntag(expected.reg(), expected.reg());
- }
+void MacroAssembler::CallDebugOnFunctionCall(Register fun, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual) {
+ FrameScope frame(this, has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
+ if (expected.is_reg()) {
+ SmiTag(expected.reg());
+ Push(expected.reg());
+ }
+ if (actual.is_reg()) {
+ SmiTag(actual.reg());
+ Push(actual.reg());
+ SmiUntag(actual.reg());
+ }
+ if (new_target.is_valid()) {
+ Push(new_target);
+ }
+ Push(fun);
+ Push(fun);
+ Push(StackArgumentsAccessor(rbp, actual).GetReceiverOperand());
+ CallRuntime(Runtime::kDebugOnFunctionCall);
+ Pop(fun);
+ if (new_target.is_valid()) {
+ Pop(new_target);
+ }
+ if (actual.is_reg()) {
+ Pop(actual.reg());
+ SmiUntag(actual.reg());
+ }
+ if (expected.is_reg()) {
+ Pop(expected.reg());
+ SmiUntag(expected.reg());
}
- bind(&skip_hook);
}
void TurboAssembler::StubPrologue(StackFrame::Type type) {
@@ -2443,7 +2548,7 @@ void TurboAssembler::LeaveFrame(StackFrame::Type type) {
popq(rbp);
}
-#ifdef V8_OS_WIN
+#ifdef V8_TARGET_OS_WIN
void TurboAssembler::AllocateStackSpace(Register bytes_scratch) {
 // On Windows, we cannot increment the stack size by more than one page
// (minimum page size is 4KB) without accessing at least one byte on the
@@ -2511,7 +2616,7 @@ void MacroAssembler::EnterExitFramePrologue(bool save_rax,
void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space,
bool save_doubles) {
-#ifdef _WIN64
+#ifdef V8_TARGET_OS_WIN
const int kShadowSpace = 4;
arg_stack_space += kShadowSpace;
#endif
@@ -2615,7 +2720,7 @@ void MacroAssembler::LeaveExitFrameEpilogue() {
movq(c_entry_fp_operand, Immediate(0));
}
-#ifdef _WIN64
+#ifdef V8_TARGET_OS_WIN
static const int kRegisterPassedArguments = 4;
#else
static const int kRegisterPassedArguments = 6;
@@ -2634,7 +2739,7 @@ int TurboAssembler::ArgumentStackSlotsForCFunctionCall(int num_arguments) {
// On AMD64 ABI (Linux/Mac) the first six arguments are passed in registers
// and the caller does not reserve stack slots for them.
DCHECK_GE(num_arguments, 0);
-#ifdef _WIN64
+#ifdef V8_TARGET_OS_WIN
const int kMinimumStackSlots = kRegisterPassedArguments;
if (num_arguments < kMinimumStackSlots) return kMinimumStackSlots;
return num_arguments;
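
The non-Windows branch of this function falls outside the hunk, but the two halves pin down simple arithmetic. A scalar sketch, assuming the System V side subtracts the six register-passed arguments and floors at zero (my reading, not quoted from the source):

```cpp
#include <algorithm>

// Sketch of ArgumentStackSlotsForCFunctionCall's arithmetic.
int ArgumentStackSlots(int num_arguments) {
#ifdef V8_TARGET_OS_WIN
  // Windows x64 always reserves "home" (shadow) slots for the four
  // register-passed arguments, so the result is floored at 4.
  return std::max(num_arguments, 4);
#else
  // System V AMD64: the first six integer arguments travel in registers
  // and get no stack slots.
  return std::max(num_arguments - 6, 0);
#endif
}
```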
diff --git a/deps/v8/src/codegen/x64/macro-assembler-x64.h b/deps/v8/src/codegen/x64/macro-assembler-x64.h
index 8e7766c7e1..f38da45788 100644
--- a/deps/v8/src/codegen/x64/macro-assembler-x64.h
+++ b/deps/v8/src/codegen/x64/macro-assembler-x64.h
@@ -152,8 +152,26 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
AVX_OP(Roundsd, roundsd)
AVX_OP(Sqrtss, sqrtss)
AVX_OP(Sqrtsd, sqrtsd)
+ AVX_OP(Sqrtpd, sqrtpd)
AVX_OP(Ucomiss, ucomiss)
AVX_OP(Ucomisd, ucomisd)
+ AVX_OP(Pshufb, pshufb)
+ AVX_OP(Paddusb, paddusb)
+ AVX_OP(Psignd, psignd)
+ AVX_OP(Pand, pand)
+ AVX_OP(Por, por)
+ AVX_OP(Pxor, pxor)
+ AVX_OP(Psubd, psubd)
+ AVX_OP(Pslld, pslld)
+ AVX_OP(Psrad, psrad)
+ AVX_OP(Psrld, psrld)
+ AVX_OP(Paddd, paddd)
+ AVX_OP(Pmulld, pmulld)
+ AVX_OP(Pminsd, pminsd)
+ AVX_OP(Pminud, pminud)
+ AVX_OP(Pmaxsd, pmaxsd)
+ AVX_OP(Pmaxud, pmaxud)
+ AVX_OP(Pcmpgtd, pcmpgtd)
#undef AVX_OP
@@ -314,6 +332,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
RelocInfo::Mode rmode = RelocInfo::FULL_EMBEDDED_OBJECT);
// Convert smi to word-size sign-extended value.
+ void SmiUntag(Register reg);
+ // Requires dst != src
void SmiUntag(Register dst, Register src);
void SmiUntag(Register dst, Operand src);
@@ -365,14 +385,22 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Non-SSE2 instructions.
void Pextrd(Register dst, XMMRegister src, int8_t imm8);
+ void Pextrw(Register dst, XMMRegister src, int8_t imm8);
+ void Pextrb(Register dst, XMMRegister src, int8_t imm8);
void Pinsrd(XMMRegister dst, Register src, int8_t imm8);
void Pinsrd(XMMRegister dst, Operand src, int8_t imm8);
+ void Pinsrw(XMMRegister dst, Register src, int8_t imm8);
+ void Pinsrw(XMMRegister dst, Operand src, int8_t imm8);
+ void Pinsrb(XMMRegister dst, Register src, int8_t imm8);
+ void Pinsrb(XMMRegister dst, Operand src, int8_t imm8);
void Psllq(XMMRegister dst, byte imm8);
void Psrlq(XMMRegister dst, byte imm8);
void Pslld(XMMRegister dst, byte imm8);
void Psrld(XMMRegister dst, byte imm8);
+ void Pshufd(XMMRegister dst, XMMRegister src, uint8_t shuffle);
+
void CompareRoot(Register with, RootIndex index);
void CompareRoot(Operand with, RootIndex index);
@@ -414,7 +442,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// stack check, do it before calling this function because this function may
// write into the newly allocated space. It may also overwrite the given
// register's value, in the version that takes a register.
-#ifdef V8_OS_WIN
+#ifdef V8_TARGET_OS_WIN
void AllocateStackSpace(Register bytes_scratch);
void AllocateStackSpace(int bytes);
#else
@@ -647,10 +675,10 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
const ParameterCount& expected,
const ParameterCount& actual, InvokeFlag flag);
- // On function call, call into the debugger if necessary.
- void CheckDebugHook(Register fun, Register new_target,
- const ParameterCount& expected,
- const ParameterCount& actual);
+ // On function call, call into the debugger.
+ void CallDebugOnFunctionCall(Register fun, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual);
// Invoke the JavaScript function in the given register. Changes the
// current context to the context in the function before invoking.
@@ -665,6 +693,8 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Conversions between tagged smi values and non-tagged integer values.
 // Tag a word-size value. The result must be known to be a valid smi value.
+ void SmiTag(Register reg);
+ // Requires dst != src
void SmiTag(Register dst, Register src);
// Simple comparison of smis. Both sides must be known smis to use these,
@@ -917,7 +947,7 @@ inline Operand NativeContextOperand() {
// Provides access to exit frame stack space (not GCed).
inline Operand StackSpaceOperand(int index) {
-#ifdef _WIN64
+#ifdef V8_TARGET_OS_WIN
const int kShaddowSpace = 4;
return Operand(rsp, (index + kShaddowSpace) * kSystemPointerSize);
#else
diff --git a/deps/v8/src/codegen/x64/register-x64.h b/deps/v8/src/codegen/x64/register-x64.h
index 199571f088..181da9d9f3 100644
--- a/deps/v8/src/codegen/x64/register-x64.h
+++ b/deps/v8/src/codegen/x64/register-x64.h
@@ -88,7 +88,7 @@ constexpr int kNumJSCallerSaved = 5;
// Number of registers for which space is reserved in safepoints.
constexpr int kNumSafepointRegisters = 16;
-#ifdef _WIN64
+#ifdef V8_TARGET_OS_WIN
// Windows calling convention
constexpr Register arg_reg_1 = rcx;
constexpr Register arg_reg_2 = rdx;
@@ -100,7 +100,7 @@ constexpr Register arg_reg_1 = rdi;
constexpr Register arg_reg_2 = rsi;
constexpr Register arg_reg_3 = rdx;
constexpr Register arg_reg_4 = rcx;
-#endif // _WIN64
+#endif // V8_TARGET_OS_WIN
#define DOUBLE_REGISTERS(V) \
V(xmm0) \
diff --git a/deps/v8/src/codegen/x64/sse-instr.h b/deps/v8/src/codegen/x64/sse-instr.h
index 8ba54e85b4..8af06ae92c 100644
--- a/deps/v8/src/codegen/x64/sse-instr.h
+++ b/deps/v8/src/codegen/x64/sse-instr.h
@@ -6,6 +6,7 @@
#define V8_CODEGEN_X64_SSE_INSTR_H_
#define SSE2_INSTRUCTION_LIST(V) \
+ V(sqrtpd, 66, 0F, 51) \
V(andnpd, 66, 0F, 55) \
V(addpd, 66, 0F, 58) \
V(mulpd, 66, 0F, 59) \
diff --git a/deps/v8/src/common/assert-scope.cc b/deps/v8/src/common/assert-scope.cc
index f1fe717cc0..5138ce7122 100644
--- a/deps/v8/src/common/assert-scope.cc
+++ b/deps/v8/src/common/assert-scope.cc
@@ -126,8 +126,6 @@ template class PerThreadAssertScope<HANDLE_ALLOCATION_ASSERT, false>;
template class PerThreadAssertScope<HANDLE_ALLOCATION_ASSERT, true>;
template class PerThreadAssertScope<HANDLE_DEREFERENCE_ASSERT, false>;
template class PerThreadAssertScope<HANDLE_DEREFERENCE_ASSERT, true>;
-template class PerThreadAssertScope<DEFERRED_HANDLE_DEREFERENCE_ASSERT, false>;
-template class PerThreadAssertScope<DEFERRED_HANDLE_DEREFERENCE_ASSERT, true>;
template class PerThreadAssertScope<CODE_DEPENDENCY_CHANGE_ASSERT, false>;
template class PerThreadAssertScope<CODE_DEPENDENCY_CHANGE_ASSERT, true>;
diff --git a/deps/v8/src/common/assert-scope.h b/deps/v8/src/common/assert-scope.h
index 73729400ac..27f4112144 100644
--- a/deps/v8/src/common/assert-scope.h
+++ b/deps/v8/src/common/assert-scope.h
@@ -28,7 +28,6 @@ enum PerThreadAssertType {
HEAP_ALLOCATION_ASSERT,
HANDLE_ALLOCATION_ASSERT,
HANDLE_DEREFERENCE_ASSERT,
- DEFERRED_HANDLE_DEREFERENCE_ASSERT,
CODE_DEPENDENCY_CHANGE_ASSERT,
LAST_PER_THREAD_ASSERT_TYPE
};
@@ -145,19 +144,11 @@ using DisallowHandleDereference =
using AllowHandleDereference =
PerThreadAssertScopeDebugOnly<HANDLE_DEREFERENCE_ASSERT, true>;
-// Scope to document where we do not expect deferred handles to be dereferenced.
-using DisallowDeferredHandleDereference =
- PerThreadAssertScopeDebugOnly<DEFERRED_HANDLE_DEREFERENCE_ASSERT, false>;
-
-// Scope to introduce an exception to DisallowDeferredHandleDereference.
-using AllowDeferredHandleDereference =
- PerThreadAssertScopeDebugOnly<DEFERRED_HANDLE_DEREFERENCE_ASSERT, true>;
-
-// Scope to document where we do not expect deferred handles to be dereferenced.
+// Scope to document where we do not expect code dependencies to change.
using DisallowCodeDependencyChange =
PerThreadAssertScopeDebugOnly<CODE_DEPENDENCY_CHANGE_ASSERT, false>;
-// Scope to introduce an exception to DisallowDeferredHandleDereference.
+// Scope to introduce an exception to DisallowCodeDependencyChange.
using AllowCodeDependencyChange =
PerThreadAssertScopeDebugOnly<CODE_DEPENDENCY_CHANGE_ASSERT, true>;
@@ -243,10 +234,6 @@ extern template class PerThreadAssertScope<HANDLE_ALLOCATION_ASSERT, false>;
extern template class PerThreadAssertScope<HANDLE_ALLOCATION_ASSERT, true>;
extern template class PerThreadAssertScope<HANDLE_DEREFERENCE_ASSERT, false>;
extern template class PerThreadAssertScope<HANDLE_DEREFERENCE_ASSERT, true>;
-extern template class PerThreadAssertScope<DEFERRED_HANDLE_DEREFERENCE_ASSERT,
- false>;
-extern template class PerThreadAssertScope<DEFERRED_HANDLE_DEREFERENCE_ASSERT,
- true>;
extern template class PerThreadAssertScope<CODE_DEPENDENCY_CHANGE_ASSERT,
false>;
extern template class PerThreadAssertScope<CODE_DEPENDENCY_CHANGE_ASSERT, true>;
diff --git a/deps/v8/src/common/globals.h b/deps/v8/src/common/globals.h
index a0584b95c4..20faebfe3a 100644
--- a/deps/v8/src/common/globals.h
+++ b/deps/v8/src/common/globals.h
@@ -166,13 +166,14 @@ constexpr int kElidedFrameSlots = 0;
#endif
constexpr int kDoubleSizeLog2 = 3;
+constexpr size_t kMaxWasmCodeMB = 1024;
+constexpr size_t kMaxWasmCodeMemory = kMaxWasmCodeMB * MB;
#if V8_TARGET_ARCH_ARM64
// ARM64 only supports direct calls within a 128 MB range.
-constexpr size_t kMaxWasmCodeMB = 128;
+constexpr size_t kMaxWasmCodeSpaceSize = 128 * MB;
#else
-constexpr size_t kMaxWasmCodeMB = 1024;
+constexpr size_t kMaxWasmCodeSpaceSize = kMaxWasmCodeMemory;
#endif
-constexpr size_t kMaxWasmCodeMemory = kMaxWasmCodeMB * MB;
#if V8_HOST_ARCH_64_BIT
constexpr int kSystemPointerSizeLog2 = 3;
@@ -230,7 +231,7 @@ constexpr int kTaggedSizeLog2 = 2;
// These types define raw and atomic storage types for tagged values stored
// on V8 heap.
-using Tagged_t = int32_t;
+using Tagged_t = uint32_t;
using AtomicTagged_t = base::Atomic32;
#else
@@ -245,11 +246,6 @@ using AtomicTagged_t = base::AtomicWord;
#endif // V8_COMPRESS_POINTERS
-// Defines whether the branchless or branchful implementation of pointer
-// decompression should be used.
-constexpr bool kUseBranchlessPtrDecompressionInRuntime = false;
-constexpr bool kUseBranchlessPtrDecompressionInGeneratedCode = false;
-
STATIC_ASSERT(kTaggedSize == (1 << kTaggedSizeLog2));
STATIC_ASSERT((kTaggedSize == 8) == TAGGED_SIZE_8_BYTES);
@@ -795,8 +791,6 @@ enum InlineCacheState {
NO_FEEDBACK,
// Has never been executed.
UNINITIALIZED,
- // Has been executed but monomorphic state has been delayed.
- PREMONOMORPHIC,
// Has been executed and only one receiver type has been seen.
MONOMORPHIC,
// Check failed due to prototype (or map deprecation).
@@ -816,8 +810,6 @@ inline const char* InlineCacheState2String(InlineCacheState state) {
return "NOFEEDBACK";
case UNINITIALIZED:
return "UNINITIALIZED";
- case PREMONOMORPHIC:
- return "PREMONOMORPHIC";
case MONOMORPHIC:
return "MONOMORPHIC";
case RECOMPUTE_HANDLER:
@@ -1216,6 +1208,10 @@ enum VariableLocation : uint8_t {
// immediately initialized upon creation (kCreatedInitialized).
enum InitializationFlag : uint8_t { kNeedsInitialization, kCreatedInitialized };
+// Static variables can only be used with the class in the closest
+// class scope as receivers.
+enum class IsStaticFlag : uint8_t { kNotStatic, kStatic };
+
enum MaybeAssignedFlag : uint8_t { kNotAssigned, kMaybeAssigned };
enum class InterpreterPushArgsMode : unsigned {
diff --git a/deps/v8/src/common/message-template.h b/deps/v8/src/common/message-template.h
index e3307a525c..41af7b8f18 100644
--- a/deps/v8/src/common/message-template.h
+++ b/deps/v8/src/common/message-template.h
@@ -10,7 +10,6 @@
namespace v8 {
namespace internal {
-// TODO(913887): fix the use of 'neuter' in these error messages.
#define MESSAGE_TEMPLATES(T) \
/* Error */ \
T(None, "") \
@@ -34,7 +33,6 @@ namespace internal {
"Derived ArrayBuffer constructor created a buffer which was too small") \
T(ArrayBufferSpeciesThis, \
"ArrayBuffer subclass returned this from species constructor") \
- T(ArrayItemNotType, "array %[%] is not type %") \
T(AwaitNotInAsyncFunction, "await is only valid in async function") \
T(AtomicsWaitNotAllowed, "Atomics.wait cannot be called in this context") \
T(BadSortComparisonFunction, \
@@ -78,7 +76,7 @@ namespace internal {
T(DebuggerType, "Debugger: Parameters have wrong types.") \
T(DeclarationMissingInitializer, "Missing initializer in % declaration") \
T(DefineDisallowed, "Cannot define property %, object is not extensible") \
- T(DetachedOperation, "Cannot perform % on a neutered ArrayBuffer") \
+ T(DetachedOperation, "Cannot perform % on a detached ArrayBuffer") \
T(DuplicateTemplateProperty, "Object template has duplicate property '%'") \
T(ExtendsValueNotConstructor, \
"Class extends value % is not a constructor or null") \
@@ -101,6 +99,7 @@ namespace internal {
T(InvalidRegExpExecResult, \
"RegExp exec method returned something other than an Object or null") \
T(InvalidUnit, "Invalid unit argument for %() '%'") \
+ T(IterableYieldedNonString, "Iterable yielded % which is not a string") \
T(IteratorResultNotAnObject, "Iterator result % is not an object") \
T(IteratorSymbolNonCallable, "Found non-callable @@iterator") \
T(IteratorValueNotAnObject, "Iterator value % is not an entry object") \
@@ -540,6 +539,7 @@ namespace internal {
T(WasmTrapFloatUnrepresentable, "float unrepresentable in integer range") \
T(WasmTrapFuncInvalid, "invalid index into function table") \
T(WasmTrapFuncSigMismatch, "function signature mismatch") \
+ T(WasmTrapMultiReturnLengthMismatch, "multi-return length mismatch") \
T(WasmTrapTypeError, "wasm function signature contains illegal type") \
T(WasmTrapDataSegmentDropped, "data segment has been dropped") \
T(WasmTrapElemSegmentDropped, "element segment has been dropped") \
@@ -554,7 +554,7 @@ namespace internal {
T(DataCloneError, "% could not be cloned.") \
T(DataCloneErrorOutOfMemory, "Data cannot be cloned, out of memory.") \
T(DataCloneErrorDetachedArrayBuffer, \
- "An ArrayBuffer is neutered and could not be cloned.") \
+ "An ArrayBuffer is detached and could not be cloned.") \
T(DataCloneErrorSharedArrayBufferTransferred, \
"A SharedArrayBuffer could not be cloned. SharedArrayBuffer must not be " \
"transferred.") \
diff --git a/deps/v8/src/common/ptr-compr-inl.h b/deps/v8/src/common/ptr-compr-inl.h
index a8fd7f245c..17239d15c2 100644
--- a/deps/v8/src/common/ptr-compr-inl.h
+++ b/deps/v8/src/common/ptr-compr-inl.h
@@ -29,8 +29,7 @@ V8_INLINE Address GetIsolateRoot<Address>(Address on_heap_addr) {
// signed constant instead of 64-bit constant (the problem is that 2Gb looks
// like a negative 32-bit value). It's correct because we will never use
// leftmost address of V8 heap as |on_heap_addr|.
- return RoundDown<kPtrComprIsolateRootAlignment>(on_heap_addr +
- kPtrComprIsolateRootBias - 1);
+ return RoundDown<kPtrComprIsolateRootAlignment>(on_heap_addr);
}
template <>
@@ -54,37 +53,20 @@ V8_INLINE Address DecompressTaggedSigned(Tagged_t raw_value) {
template <typename TOnHeapAddress>
V8_INLINE Address DecompressTaggedPointer(TOnHeapAddress on_heap_addr,
Tagged_t raw_value) {
- // Current compression scheme requires |raw_value| to be sign-extended
- // from int32_t to intptr_t.
- intptr_t value = static_cast<intptr_t>(static_cast<int32_t>(raw_value));
- Address root = GetIsolateRoot(on_heap_addr);
- return root + static_cast<Address>(value);
+ return GetIsolateRoot(on_heap_addr) + static_cast<Address>(raw_value);
}
// Decompresses any tagged value, preserving both weak- and smi- tags.
template <typename TOnHeapAddress>
V8_INLINE Address DecompressTaggedAny(TOnHeapAddress on_heap_addr,
Tagged_t raw_value) {
- if (kUseBranchlessPtrDecompressionInRuntime) {
- // Current compression scheme requires |raw_value| to be sign-extended
- // from int32_t to intptr_t.
- intptr_t value = static_cast<intptr_t>(static_cast<int32_t>(raw_value));
- // |root_mask| is 0 if the |value| was a smi or -1 otherwise.
- Address root_mask = static_cast<Address>(-(value & kSmiTagMask));
- Address root_or_zero = root_mask & GetIsolateRoot(on_heap_addr);
- return root_or_zero + static_cast<Address>(value);
- } else {
- return HAS_SMI_TAG(raw_value)
- ? DecompressTaggedSigned(raw_value)
- : DecompressTaggedPointer(on_heap_addr, raw_value);
- }
+ return DecompressTaggedPointer(on_heap_addr, raw_value);
}
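
Together with the `Tagged_t` switch from `int32_t` to `uint32_t` in globals.h above, this hunk replaces sign-extend-plus-bias decompression with a plain zero-extend-and-add against the 4 GB-aligned isolate root, and drops the smi/pointer branch: as I read it, smi untagging only inspects the low 32 bits, so the root bits left in the upper half are harmless. A worked example with invented addresses:

```cpp
#include <cstdint>

// Illustration of the simplified decompression; the values are made up.
uint64_t Decompress(uint64_t isolate_root, uint32_t raw_value) {
  // isolate_root: 4 GB aligned, e.g. 0x00007f3a00000000
  // raw_value:    low 32 bits of the tagged value, e.g. 0x12345671
  // result:       0x00007f3a12345671 -- no sign extension, no bias
  return isolate_root + static_cast<uint64_t>(raw_value);
}
```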
#ifdef V8_COMPRESS_POINTERS
STATIC_ASSERT(kPtrComprHeapReservationSize ==
Internals::kPtrComprHeapReservationSize);
-STATIC_ASSERT(kPtrComprIsolateRootBias == Internals::kPtrComprIsolateRootBias);
STATIC_ASSERT(kPtrComprIsolateRootAlignment ==
Internals::kPtrComprIsolateRootAlignment);
diff --git a/deps/v8/src/common/ptr-compr.h b/deps/v8/src/common/ptr-compr.h
index 5b4a74e7e3..105d5f1a4f 100644
--- a/deps/v8/src/common/ptr-compr.h
+++ b/deps/v8/src/common/ptr-compr.h
@@ -14,7 +14,6 @@ namespace internal {
// See v8:7703 for details about how pointer compression works.
constexpr size_t kPtrComprHeapReservationSize = size_t{4} * GB;
-constexpr size_t kPtrComprIsolateRootBias = kPtrComprHeapReservationSize / 2;
constexpr size_t kPtrComprIsolateRootAlignment = size_t{4} * GB;
} // namespace internal
diff --git a/deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc b/deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc
index e1d47d30a6..42d64b6614 100644
--- a/deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc
+++ b/deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc
@@ -6,7 +6,6 @@
#include "src/ast/ast.h"
#include "src/base/platform/time.h"
-#include "src/base/template-utils.h"
#include "src/codegen/compiler.h"
#include "src/flags/flags.h"
#include "src/handles/global-handles.h"
@@ -66,7 +65,7 @@ base::Optional<CompilerDispatcher::JobId> CompilerDispatcher::Enqueue(
if (!IsEnabled()) return base::nullopt;
- std::unique_ptr<Job> job = base::make_unique<Job>(new BackgroundCompileTask(
+ std::unique_ptr<Job> job = std::make_unique<Job>(new BackgroundCompileTask(
allocator_, outer_parse_info, function_name, function_literal,
worker_thread_runtime_call_stats_, background_compile_timer_,
static_cast<int>(max_stack_size_)));
diff --git a/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc b/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc
index fbaeaa73f8..3d2342e9a2 100644
--- a/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc
+++ b/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc
@@ -5,7 +5,6 @@
#include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"
#include "src/base/atomicops.h"
-#include "src/base/template-utils.h"
#include "src/codegen/compiler.h"
#include "src/codegen/optimized-compilation-info.h"
#include "src/execution/isolate.h"
@@ -244,14 +243,14 @@ void OptimizingCompileDispatcher::QueueForOptimization(
blocked_jobs_++;
} else {
V8::GetCurrentPlatform()->CallOnWorkerThread(
- base::make_unique<CompileTask>(isolate_, this));
+ std::make_unique<CompileTask>(isolate_, this));
}
}
void OptimizingCompileDispatcher::Unblock() {
while (blocked_jobs_ > 0) {
V8::GetCurrentPlatform()->CallOnWorkerThread(
- base::make_unique<CompileTask>(isolate_, this));
+ std::make_unique<CompileTask>(isolate_, this));
blocked_jobs_--;
}
}
diff --git a/deps/v8/src/compiler/OWNERS b/deps/v8/src/compiler/OWNERS
index 50e2af7129..204c0ba115 100644
--- a/deps/v8/src/compiler/OWNERS
+++ b/deps/v8/src/compiler/OWNERS
@@ -8,11 +8,12 @@ tebbi@chromium.org
neis@chromium.org
mvstanton@chromium.org
mslekova@chromium.org
+jgruber@chromium.org
per-file wasm-*=ahaas@chromium.org
per-file wasm-*=bbudge@chromium.org
per-file wasm-*=binji@chromium.org
-per-file wasm-*=clemensh@chromium.org
+per-file wasm-*=clemensb@chromium.org
per-file wasm-*=gdeepti@chromium.org
per-file int64-lowering.*=ahaas@chromium.org
diff --git a/deps/v8/src/compiler/access-builder.cc b/deps/v8/src/compiler/access-builder.cc
index 7a72be8028..e6c5568af0 100644
--- a/deps/v8/src/compiler/access-builder.cc
+++ b/deps/v8/src/compiler/access-builder.cc
@@ -23,10 +23,9 @@ namespace internal {
namespace compiler {
// static
-FieldAccess AccessBuilder::ForExternalTaggedValue() {
- FieldAccess access = {kUntaggedBase, 0,
- MaybeHandle<Name>(), MaybeHandle<Map>(),
- Type::Any(), MachineType::AnyTagged(),
+FieldAccess AccessBuilder::ForExternalIntPtr() {
+ FieldAccess access = {kUntaggedBase, 0, MaybeHandle<Name>(),
+ MaybeHandle<Map>(), Type::Any(), MachineType::IntPtr(),
kNoWriteBarrier};
return access;
}
@@ -109,7 +108,6 @@ FieldAccess AccessBuilder::ForJSObjectElements() {
return access;
}
-
// static
FieldAccess AccessBuilder::ForJSObjectInObjectProperty(const MapRef& map,
int index) {
@@ -185,7 +183,6 @@ FieldAccess AccessBuilder::ForJSFunctionContext() {
return access;
}
-
// static
FieldAccess AccessBuilder::ForJSFunctionSharedFunctionInfo() {
FieldAccess access = {
@@ -296,7 +293,6 @@ FieldAccess AccessBuilder::ForJSGeneratorObjectInputOrDebugPos() {
return access;
}
-
// static
FieldAccess AccessBuilder::ForJSGeneratorObjectParametersAndRegisters() {
FieldAccess access = {
@@ -478,7 +474,6 @@ FieldAccess AccessBuilder::ForJSDateField(JSDate::FieldIndex index) {
return access;
}
-
// static
FieldAccess AccessBuilder::ForJSIteratorResultDone() {
FieldAccess access = {
@@ -489,7 +484,6 @@ FieldAccess AccessBuilder::ForJSIteratorResultDone() {
return access;
}
-
// static
FieldAccess AccessBuilder::ForJSIteratorResultValue() {
FieldAccess access = {
@@ -540,7 +534,6 @@ FieldAccess AccessBuilder::ForJSRegExpSource() {
return access;
}
-
// static
FieldAccess AccessBuilder::ForFixedArrayLength() {
FieldAccess access = {kTaggedBase,
@@ -600,7 +593,6 @@ FieldAccess AccessBuilder::ForMapBitField3() {
return access;
}
-
// static
FieldAccess AccessBuilder::ForMapDescriptors() {
FieldAccess access = {
@@ -611,7 +603,6 @@ FieldAccess AccessBuilder::ForMapDescriptors() {
return access;
}
-
// static
FieldAccess AccessBuilder::ForMapInstanceType() {
FieldAccess access = {
@@ -621,7 +612,6 @@ FieldAccess AccessBuilder::ForMapInstanceType() {
return access;
}
-
// static
FieldAccess AccessBuilder::ForMapPrototype() {
FieldAccess access = {
@@ -810,7 +800,7 @@ FieldAccess AccessBuilder::ForJSStringIteratorString() {
// static
FieldAccess AccessBuilder::ForJSStringIteratorIndex() {
FieldAccess access = {kTaggedBase,
- JSStringIterator::kNextIndexOffset,
+ JSStringIterator::kIndexOffset,
Handle<Name>(),
MaybeHandle<Map>(),
TypeCache::Get()->kStringLengthType,
@@ -829,7 +819,6 @@ FieldAccess AccessBuilder::ForArgumentsLength() {
return access;
}
-
// static
FieldAccess AccessBuilder::ForArgumentsCallee() {
FieldAccess access = {
@@ -840,7 +829,6 @@ FieldAccess AccessBuilder::ForArgumentsCallee() {
return access;
}
-
// static
FieldAccess AccessBuilder::ForFixedArraySlot(
size_t index, WriteBarrierKind write_barrier_kind) {
@@ -852,7 +840,6 @@ FieldAccess AccessBuilder::ForFixedArraySlot(
return access;
}
-
// static
FieldAccess AccessBuilder::ForCellValue() {
FieldAccess access = {kTaggedBase, Cell::kValueOffset,
@@ -937,7 +924,7 @@ ElementAccess AccessBuilder::ForStackArgument() {
ElementAccess access = {
kUntaggedBase,
CommonFrameConstants::kFixedFrameSizeAboveFp - kSystemPointerSize,
- Type::NonInternal(), MachineType::AnyTagged(),
+ Type::NonInternal(), MachineType::Pointer(),
WriteBarrierKind::kNoWriteBarrier};
return access;
}
diff --git a/deps/v8/src/compiler/access-builder.h b/deps/v8/src/compiler/access-builder.h
index 231e75f819..4aa69e3726 100644
--- a/deps/v8/src/compiler/access-builder.h
+++ b/deps/v8/src/compiler/access-builder.h
@@ -24,11 +24,8 @@ class V8_EXPORT_PRIVATE AccessBuilder final
// ===========================================================================
// Access to external values (based on external references).
- // Provides access to a tagged field identified by an external reference.
- static FieldAccess ForExternalTaggedValue();
-
- // Provides access to an uint8 field identified by an external reference.
- static FieldAccess ForExternalUint8Value();
+ // Provides access to an IntPtr field identified by an external reference.
+ static FieldAccess ForExternalIntPtr();
// ===========================================================================
// Access to heap object fields and elements (based on tagged pointer).
diff --git a/deps/v8/src/compiler/access-info.cc b/deps/v8/src/compiler/access-info.cc
index 269ef90375..dcdd1de831 100644
--- a/deps/v8/src/compiler/access-info.cc
+++ b/deps/v8/src/compiler/access-info.cc
@@ -31,9 +31,9 @@ bool CanInlinePropertyAccess(Handle<Map> map) {
// We can inline property access to prototypes of all primitives, except
// the special Oddball ones that have no wrapper counterparts (i.e. Null,
// Undefined and TheHole).
- STATIC_ASSERT(ODDBALL_TYPE == LAST_PRIMITIVE_TYPE);
+ STATIC_ASSERT(ODDBALL_TYPE == LAST_PRIMITIVE_HEAP_OBJECT_TYPE);
if (map->IsBooleanMap()) return true;
- if (map->instance_type() < LAST_PRIMITIVE_TYPE) return true;
+ if (map->instance_type() < LAST_PRIMITIVE_HEAP_OBJECT_TYPE) return true;
return map->IsJSObjectMap() && !map->is_dictionary_map() &&
!map->has_named_interceptor() &&
// TODO(verwaest): Whitelist contexts to which we have access.
@@ -323,8 +323,8 @@ bool AccessInfoFactory::ComputeElementAccessInfos(
PropertyAccessInfo AccessInfoFactory::ComputeDataFieldAccessInfo(
Handle<Map> receiver_map, Handle<Map> map, MaybeHandle<JSObject> holder,
- int descriptor, AccessMode access_mode) const {
- DCHECK_NE(descriptor, DescriptorArray::kNotFound);
+ InternalIndex descriptor, AccessMode access_mode) const {
+ DCHECK(descriptor.is_found());
Handle<DescriptorArray> descriptors(map->instance_descriptors(), isolate());
PropertyDetails const details = descriptors->GetDetails(descriptor);
int index = descriptors->GetFieldIndex(descriptor);
@@ -351,6 +351,11 @@ PropertyAccessInfo AccessInfoFactory::ComputeDataFieldAccessInfo(
descriptor));
} else if (details_representation.IsDouble()) {
field_type = type_cache_->kFloat64;
+ if (!FLAG_unbox_double_fields) {
+ unrecorded_dependencies.push_back(
+ dependencies()->FieldRepresentationDependencyOffTheRecord(
+ map_ref, descriptor));
+ }
} else if (details_representation.IsHeapObject()) {
// Extract the field type from the property details (make sure its
// representation is TaggedPointer to reflect the heap object case).
@@ -408,9 +413,9 @@ PropertyAccessInfo AccessInfoFactory::ComputeDataFieldAccessInfo(
PropertyAccessInfo AccessInfoFactory::ComputeAccessorDescriptorAccessInfo(
Handle<Map> receiver_map, Handle<Name> name, Handle<Map> map,
- MaybeHandle<JSObject> holder, int descriptor,
+ MaybeHandle<JSObject> holder, InternalIndex descriptor,
AccessMode access_mode) const {
- DCHECK_NE(descriptor, DescriptorArray::kNotFound);
+ DCHECK(descriptor.is_found());
Handle<DescriptorArray> descriptors(map->instance_descriptors(), isolate());
SLOW_DCHECK(descriptor == descriptors->Search(*name, *map));
if (map->instance_type() == JS_MODULE_NAMESPACE_TYPE) {
@@ -497,8 +502,8 @@ PropertyAccessInfo AccessInfoFactory::ComputePropertyAccessInfo(
while (true) {
// Lookup the named property on the {map}.
Handle<DescriptorArray> descriptors(map->instance_descriptors(), isolate());
- int const number = descriptors->Search(*name, *map);
- if (number != DescriptorArray::kNotFound) {
+ InternalIndex const number = descriptors->Search(*name, *map);
+ if (number.is_found()) {
PropertyDetails const details = descriptors->GetDetails(number);
if (access_mode == AccessMode::kStore ||
access_mode == AccessMode::kStoreInLiteral) {
@@ -762,7 +767,7 @@ PropertyAccessInfo AccessInfoFactory::LookupTransition(
}
Handle<Map> transition_map(transition, isolate());
- int const number = transition_map->LastAdded();
+ InternalIndex const number = transition_map->LastAdded();
PropertyDetails const details =
transition_map->instance_descriptors().GetDetails(number);
// Don't bother optimizing stores to read-only properties.
@@ -789,6 +794,12 @@ PropertyAccessInfo AccessInfoFactory::LookupTransition(
transition_map_ref, number));
} else if (details_representation.IsDouble()) {
field_type = type_cache_->kFloat64;
+ if (!FLAG_unbox_double_fields) {
+ transition_map_ref.SerializeOwnDescriptor(number);
+ unrecorded_dependencies.push_back(
+ dependencies()->FieldRepresentationDependencyOffTheRecord(
+ transition_map_ref, number));
+ }
} else if (details_representation.IsHeapObject()) {
// Extract the field type from the property details (make sure its
// representation is TaggedPointer to reflect the heap object case).
diff --git a/deps/v8/src/compiler/access-info.h b/deps/v8/src/compiler/access-info.h
index e2f6e6d453..59101e2cc9 100644
--- a/deps/v8/src/compiler/access-info.h
+++ b/deps/v8/src/compiler/access-info.h
@@ -204,11 +204,11 @@ class AccessInfoFactory final {
PropertyAccessInfo ComputeDataFieldAccessInfo(Handle<Map> receiver_map,
Handle<Map> map,
MaybeHandle<JSObject> holder,
- int descriptor,
+ InternalIndex descriptor,
AccessMode access_mode) const;
PropertyAccessInfo ComputeAccessorDescriptorAccessInfo(
Handle<Map> receiver_map, Handle<Name> name, Handle<Map> map,
- MaybeHandle<JSObject> holder, int descriptor,
+ MaybeHandle<JSObject> holder, InternalIndex descriptor,
AccessMode access_mode) const;
void MergePropertyAccessInfos(ZoneVector<PropertyAccessInfo> infos,
diff --git a/deps/v8/src/compiler/backend/arm/code-generator-arm.cc b/deps/v8/src/compiler/backend/arm/code-generator-arm.cc
index 65a569d755..3fe5361083 100644
--- a/deps/v8/src/compiler/backend/arm/code-generator-arm.cc
+++ b/deps/v8/src/compiler/backend/arm/code-generator-arm.cc
@@ -44,7 +44,7 @@ class ArmOperandConverter final : public InstructionOperandConverter {
UNREACHABLE();
}
- Operand InputImmediate(size_t index) {
+ Operand InputImmediate(size_t index) const {
return ToImmediate(instr_->InputAt(index));
}
@@ -111,7 +111,7 @@ class ArmOperandConverter final : public InstructionOperandConverter {
return InputOffset(&first_index);
}
- Operand ToImmediate(InstructionOperand* operand) {
+ Operand ToImmediate(InstructionOperand* operand) const {
Constant constant = ToConstant(operand);
switch (constant.type()) {
case Constant::kInt32:
@@ -153,9 +153,6 @@ class ArmOperandConverter final : public InstructionOperandConverter {
NeonMemOperand NeonInputOperand(size_t first_index) {
const size_t index = first_index;
switch (AddressingModeField::decode(instr_->opcode())) {
- case kMode_Offset_RR:
- return NeonMemOperand(InputRegister(index + 0),
- InputRegister(index + 1));
case kMode_Operand2_R:
return NeonMemOperand(InputRegister(index + 0));
default:
@@ -309,9 +306,9 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
UNREACHABLE();
}
-void EmitWordLoadPoisoningIfNeeded(
- CodeGenerator* codegen, InstructionCode opcode,
- ArmOperandConverter& i) { // NOLINT(runtime/references)
+void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
+ InstructionCode opcode,
+ ArmOperandConverter const& i) {
const MemoryAccessMode access_mode =
static_cast<MemoryAccessMode>(MiscField::decode(opcode));
if (access_mode == kMemoryAccessPoisoned) {
@@ -320,10 +317,10 @@ void EmitWordLoadPoisoningIfNeeded(
}
}
-void ComputePoisonedAddressForLoad(
- CodeGenerator* codegen, InstructionCode opcode,
- ArmOperandConverter& i, // NOLINT(runtime/references)
- Register address) {
+void ComputePoisonedAddressForLoad(CodeGenerator* codegen,
+ InstructionCode opcode,
+ ArmOperandConverter const& i,
+ Register address) {
DCHECK_EQ(kMemoryAccessPoisoned,
static_cast<MemoryAccessMode>(MiscField::decode(opcode)));
switch (AddressingModeField::decode(opcode)) {
@@ -1798,6 +1795,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vneg(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
+ case kArmF32x4Sqrt: {
+ QwNeonRegister dst = i.OutputSimd128Register();
+ QwNeonRegister src1 = i.InputSimd128Register(0);
+ DCHECK_EQ(dst, q0);
+ DCHECK_EQ(src1, q0);
+#define S_FROM_Q(reg, lane) SwVfpRegister::from_code(reg.code() * 4 + lane)
+ __ vsqrt(S_FROM_Q(dst, 0), S_FROM_Q(src1, 0));
+ __ vsqrt(S_FROM_Q(dst, 1), S_FROM_Q(src1, 1));
+ __ vsqrt(S_FROM_Q(dst, 2), S_FROM_Q(src1, 2));
+ __ vsqrt(S_FROM_Q(dst, 3), S_FROM_Q(src1, 3));
+#undef S_FROM_Q
+ break;
+ }
case kArmF32x4RecipApprox: {
__ vrecpe(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
@@ -1919,14 +1929,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArmI32x4Shl: {
QwNeonRegister tmp = i.TempSimd128Register(0);
- __ vdup(Neon32, tmp, i.InputRegister(1));
+ Register shift = i.TempRegister(1);
+ // Take shift value modulo 32.
+ __ and_(shift, i.InputRegister(1), Operand(31));
+ __ vdup(Neon32, tmp, shift);
__ vshl(NeonS32, i.OutputSimd128Register(), i.InputSimd128Register(0),
tmp);
break;
}
case kArmI32x4ShrS: {
QwNeonRegister tmp = i.TempSimd128Register(0);
- __ vdup(Neon32, tmp, i.InputRegister(1));
+ Register shift = i.TempRegister(1);
+ // Take shift value modulo 32.
+ __ and_(shift, i.InputRegister(1), Operand(31));
+ __ vdup(Neon32, tmp, shift);
__ vneg(Neon32, tmp, tmp);
__ vshl(NeonS32, i.OutputSimd128Register(), i.InputSimd128Register(0),
tmp);
@@ -1998,7 +2014,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArmI32x4ShrU: {
QwNeonRegister tmp = i.TempSimd128Register(0);
- __ vdup(Neon32, tmp, i.InputRegister(1));
+ Register shift = i.TempRegister(1);
+ // Take shift value modulo 32.
+ __ and_(shift, i.InputRegister(1), Operand(31));
+ __ vdup(Neon32, tmp, shift);
__ vneg(Neon32, tmp, tmp);
__ vshl(NeonU32, i.OutputSimd128Register(), i.InputSimd128Register(0),
tmp);
@@ -2029,7 +2048,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArmI16x8ExtractLane: {
- __ ExtractLane(i.OutputRegister(), i.InputSimd128Register(0), NeonS16,
+ __ ExtractLane(i.OutputRegister(), i.InputSimd128Register(0), NeonU16,
i.InputInt8(1));
break;
}
@@ -2054,14 +2073,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArmI16x8Shl: {
QwNeonRegister tmp = i.TempSimd128Register(0);
- __ vdup(Neon16, tmp, i.InputRegister(1));
+ Register shift = i.TempRegister(1);
+ // Take shift value modulo 16.
+ __ and_(shift, i.InputRegister(1), Operand(15));
+ __ vdup(Neon16, tmp, shift);
__ vshl(NeonS16, i.OutputSimd128Register(), i.InputSimd128Register(0),
tmp);
break;
}
case kArmI16x8ShrS: {
QwNeonRegister tmp = i.TempSimd128Register(0);
- __ vdup(Neon16, tmp, i.InputRegister(1));
+ Register shift = i.TempRegister(1);
+ // Take shift value modulo 16.
+ __ and_(shift, i.InputRegister(1), Operand(15));
+ __ vdup(Neon16, tmp, shift);
__ vneg(Neon16, tmp, tmp);
__ vshl(NeonS16, i.OutputSimd128Register(), i.InputSimd128Register(0),
tmp);
@@ -2142,7 +2167,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArmI16x8ShrU: {
QwNeonRegister tmp = i.TempSimd128Register(0);
- __ vdup(Neon16, tmp, i.InputRegister(1));
+ Register shift = i.TempRegister(1);
+ // Take shift value modulo 16.
+ __ and_(shift, i.InputRegister(1), Operand(15));
+ __ vdup(Neon16, tmp, shift);
__ vneg(Neon16, tmp, tmp);
__ vshl(NeonU16, i.OutputSimd128Register(), i.InputSimd128Register(0),
tmp);
@@ -2186,7 +2214,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArmI8x16ExtractLane: {
- __ ExtractLane(i.OutputRegister(), i.InputSimd128Register(0), NeonS8,
+ __ ExtractLane(i.OutputRegister(), i.InputSimd128Register(0), NeonU8,
i.InputInt8(1));
break;
}
@@ -2201,6 +2229,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArmI8x16Shl: {
QwNeonRegister tmp = i.TempSimd128Register(0);
+ Register shift = i.TempRegister(1);
+ // Take shift value modulo 8.
+ __ and_(shift, i.InputRegister(1), Operand(7));
-      __ vdup(Neon8, tmp, i.InputRegister(1));
+      __ vdup(Neon8, tmp, shift);
__ vshl(NeonS8, i.OutputSimd128Register(), i.InputSimd128Register(0),
tmp);
@@ -2208,7 +2239,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArmI8x16ShrS: {
QwNeonRegister tmp = i.TempSimd128Register(0);
- __ vdup(Neon8, tmp, i.InputRegister(1));
+ Register shift = i.TempRegister(1);
+ // Take shift value modulo 8.
+ __ and_(shift, i.InputRegister(1), Operand(7));
+ __ vdup(Neon8, tmp, shift);
__ vneg(Neon8, tmp, tmp);
__ vshl(NeonS8, i.OutputSimd128Register(), i.InputSimd128Register(0),
tmp);
@@ -2275,7 +2309,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArmI8x16ShrU: {
QwNeonRegister tmp = i.TempSimd128Register(0);
- __ vdup(Neon8, tmp, i.InputRegister(1));
+ Register shift = i.TempRegister(1);
+ // Take shift value modulo 8.
+ __ and_(shift, i.InputRegister(1), Operand(7));
+ __ vdup(Neon8, tmp, shift);
__ vneg(Neon8, tmp, tmp);
__ vshl(NeonU8, i.OutputSimd128Register(), i.InputSimd128Register(0),
tmp);
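
Every shift hunk in this file (and the matching arm64 hunks below) implements the same wasm rule: the scalar shift count is masked to the lane width before being broadcast, and for right shifts the count is then negated, because NEON only has a shift-left-by-register (`vshl`) which shifts right when handed a negative count. A scalar model of the lane semantics, assuming wasm's count masking:

```cpp
#include <cstdint>

// Scalar model of one lane of the masked SIMD shifts.
uint32_t I32x4ShrULane(uint32_t lane, uint32_t shift) {
  return lane >> (shift & 31);  // count taken modulo the 32-bit lane width
}

uint8_t I8x16ShlLane(uint8_t lane, uint32_t shift) {
  return static_cast<uint8_t>(lane << (shift & 7));  // modulo 8 for i8 lanes
}
```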
diff --git a/deps/v8/src/compiler/backend/arm/instruction-codes-arm.h b/deps/v8/src/compiler/backend/arm/instruction-codes-arm.h
index 3551e26aea..d398ec0ed6 100644
--- a/deps/v8/src/compiler/backend/arm/instruction-codes-arm.h
+++ b/deps/v8/src/compiler/backend/arm/instruction-codes-arm.h
@@ -135,6 +135,7 @@ namespace compiler {
V(ArmF32x4UConvertI32x4) \
V(ArmF32x4Abs) \
V(ArmF32x4Neg) \
+ V(ArmF32x4Sqrt) \
V(ArmF32x4RecipApprox) \
V(ArmF32x4RecipSqrtApprox) \
V(ArmF32x4Add) \
diff --git a/deps/v8/src/compiler/backend/arm/instruction-scheduler-arm.cc b/deps/v8/src/compiler/backend/arm/instruction-scheduler-arm.cc
index 1d7cf61dfe..92be55dcc3 100644
--- a/deps/v8/src/compiler/backend/arm/instruction-scheduler-arm.cc
+++ b/deps/v8/src/compiler/backend/arm/instruction-scheduler-arm.cc
@@ -115,6 +115,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArmF32x4UConvertI32x4:
case kArmF32x4Abs:
case kArmF32x4Neg:
+ case kArmF32x4Sqrt:
case kArmF32x4RecipApprox:
case kArmF32x4RecipSqrtApprox:
case kArmF32x4Add:
diff --git a/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc b/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc
index ce74faa4a6..303648051f 100644
--- a/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc
+++ b/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc
@@ -2,9 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/base/adapters.h"
#include "src/base/bits.h"
#include "src/base/enum-set.h"
+#include "src/base/iterator.h"
#include "src/compiler/backend/instruction-selector-impl.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
@@ -94,7 +94,7 @@ void VisitRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
void VisitSimdShiftRRR(InstructionSelector* selector, ArchOpcode opcode,
Node* node) {
ArmOperandGenerator g(selector);
- InstructionOperand temps[] = {g.TempSimd128Register()};
+ InstructionOperand temps[] = {g.TempSimd128Register(), g.TempRegister()};
selector->Emit(opcode, g.DefineAsRegister(node),
g.UseRegister(node->InputAt(0)),
g.UseRegister(node->InputAt(1)), arraysize(temps), temps);
@@ -352,6 +352,26 @@ void VisitMod(InstructionSelector* selector, Node* node, ArchOpcode div_opcode,
}
}
+// Adds the base and offset into a register, then changes the addressing
+// mode of opcode_return to use this register. Certain instructions, e.g.
+// vld1 and vst1, when given two registers, post-increment the offset, i.e.
+// they perform the operation at base and then add offset to base. What we
+// intend is to access (base + offset).
+void EmitAddBeforeS128LoadStore(InstructionSelector* selector,
+ InstructionCode* opcode_return,
+ size_t* input_count_return,
+ InstructionOperand* inputs) {
+ DCHECK(*opcode_return == kArmVld1S128 || *opcode_return == kArmVst1S128);
+ ArmOperandGenerator g(selector);
+ InstructionOperand addr = g.TempRegister();
+ InstructionCode op = kArmAdd;
+ op |= AddressingModeField::encode(kMode_Operand2_R);
+ selector->Emit(op, 1, &addr, 2, inputs);
+ *opcode_return |= AddressingModeField::encode(kMode_Operand2_R);
+ *input_count_return -= 1;
+ inputs[0] = addr;
+}
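
For context, the NEON addressing forms involved (as I understand the encodings):

```cpp
// vld1/vst1 have no [base, index] form; with two registers the second one
// post-increments:
//   vld1.8 {q0}, [r0]        ; load 128 bits from the address in r0
//   vld1.8 {q0}, [r0], r1    ; load from r0, THEN r0 += r1
// Accessing (base + offset) therefore needs the explicit ADD emitted above:
//   add    ip, r0, r1
//   vld1.8 {q0}, [ip]
```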
+
void EmitLoad(InstructionSelector* selector, InstructionCode opcode,
InstructionOperand* output, Node* base, Node* index) {
ArmOperandGenerator g(selector);
@@ -368,7 +388,11 @@ void EmitLoad(InstructionSelector* selector, InstructionCode opcode,
input_count = 3;
} else {
inputs[1] = g.UseRegister(index);
- opcode |= AddressingModeField::encode(kMode_Offset_RR);
+ if (opcode == kArmVld1S128) {
+ EmitAddBeforeS128LoadStore(selector, &opcode, &input_count, &inputs[0]);
+ } else {
+ opcode |= AddressingModeField::encode(kMode_Offset_RR);
+ }
}
selector->Emit(opcode, 1, output, input_count, inputs);
}
@@ -386,7 +410,12 @@ void EmitStore(InstructionSelector* selector, InstructionCode opcode,
input_count = 4;
} else {
inputs[input_count++] = g.UseRegister(index);
- opcode |= AddressingModeField::encode(kMode_Offset_RR);
+ if (opcode == kArmVst1S128) {
+ // Inputs are value, base, index, only care about base and index.
+ EmitAddBeforeS128LoadStore(selector, &opcode, &input_count, &inputs[1]);
+ } else {
+ opcode |= AddressingModeField::encode(kMode_Offset_RR);
+ }
}
selector->Emit(opcode, 0, nullptr, input_count, inputs);
}
@@ -596,8 +625,7 @@ void InstructionSelector::VisitUnalignedLoad(Node* node) {
Emit(kArmVmovF32U32, g.DefineAsRegister(node), temp);
return;
}
- case MachineRepresentation::kFloat64:
- case MachineRepresentation::kSimd128: {
+ case MachineRepresentation::kFloat64: {
// Compute the address of the least-significant byte of the FP value.
// We assume that the base node is unlikely to be an encodable immediate
// or the result of a shift operation, so only consider the addressing
@@ -623,13 +651,10 @@ void InstructionSelector::VisitUnalignedLoad(Node* node) {
if (CpuFeatures::IsSupported(NEON)) {
// With NEON we can load directly from the calculated address.
- InstructionCode op = load_rep == MachineRepresentation::kFloat64
- ? kArmVld1F64
- : kArmVld1S128;
+ InstructionCode op = kArmVld1F64;
op |= AddressingModeField::encode(kMode_Operand2_R);
Emit(op, g.DefineAsRegister(node), addr);
} else {
- DCHECK_NE(MachineRepresentation::kSimd128, load_rep);
// Load both halves and move to an FP register.
InstructionOperand fp_lo = g.TempRegister();
InstructionOperand fp_hi = g.TempRegister();
@@ -670,8 +695,7 @@ void InstructionSelector::VisitUnalignedStore(Node* node) {
EmitStore(this, kArmStr, input_count, inputs, index);
return;
}
- case MachineRepresentation::kFloat64:
- case MachineRepresentation::kSimd128: {
+ case MachineRepresentation::kFloat64: {
if (CpuFeatures::IsSupported(NEON)) {
InstructionOperand address = g.TempRegister();
{
@@ -697,13 +721,10 @@ void InstructionSelector::VisitUnalignedStore(Node* node) {
inputs[input_count++] = g.UseRegister(value);
inputs[input_count++] = address;
- InstructionCode op = store_rep == MachineRepresentation::kFloat64
- ? kArmVst1F64
- : kArmVst1S128;
+ InstructionCode op = kArmVst1F64;
op |= AddressingModeField::encode(kMode_Operand2_R);
Emit(op, 0, nullptr, input_count, inputs);
} else {
- DCHECK_NE(MachineRepresentation::kSimd128, store_rep);
// Store a 64-bit floating point value using two 32-bit integer stores.
// Computing the store address here would require three live temporary
// registers (fp<63:32>, fp<31:0>, address), so compute base + 4 after
@@ -942,7 +963,8 @@ void InstructionSelector::VisitWord32Shr(Node* node) {
uint32_t lsb = m.right().Value();
Int32BinopMatcher mleft(m.left().node());
if (mleft.right().HasValue()) {
- uint32_t value = (mleft.right().Value() >> lsb) << lsb;
+ uint32_t value = static_cast<uint32_t>(mleft.right().Value() >> lsb)
+ << lsb;
uint32_t width = base::bits::CountPopulation(value);
uint32_t msb = base::bits::CountLeadingZeros32(value);
if ((width != 0) && (msb + width + lsb == 32)) {
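
The condition accepts only masks whose set bits form one contiguous run: `value` has `lsb` cleared bits below, `width` ones, and `msb` leading zeros, and `msb + width + lsb == 32` holds exactly when the ones are contiguous. That is what lets the shift-and-mask collapse into a single bitfield extract (`ubfx`, if I am reading the selector right). A standalone restatement of the test:

```cpp
#include <cstdint>

// Contiguity test mirrored from the matcher above (illustrative only).
bool IsContiguousField(uint32_t value, uint32_t lsb) {
  uint32_t width = __builtin_popcount(value);
  uint32_t msb = value == 0 ? 32 : __builtin_clz(value);
  return width != 0 && msb + width + lsb == 32;
}
// e.g. value = 0x0FF0, lsb = 4: popcount = 8, clz = 20, and 20 + 8 + 4 == 32,
// so ((x & 0x0FF0) >> 4) is a single 8-bit field extract at bit 4.
```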
@@ -1119,6 +1141,10 @@ void InstructionSelector::VisitWord32ReverseBytes(Node* node) {
VisitRR(this, kArmRev, node);
}
+void InstructionSelector::VisitSimd128ReverseBytes(Node* node) {
+ UNREACHABLE();
+}
+
void InstructionSelector::VisitWord32Popcnt(Node* node) { UNREACHABLE(); }
void InstructionSelector::VisitInt32Add(Node* node) {
@@ -2513,6 +2539,14 @@ SIMD_BINOP_LIST(SIMD_VISIT_BINOP)
#undef SIMD_VISIT_BINOP
#undef SIMD_BINOP_LIST
+void InstructionSelector::VisitF32x4Sqrt(Node* node) {
+ ArmOperandGenerator g(this);
+ // Use fixed registers in the lower 8 Q-registers so we can directly access
+ // mapped registers S0-S31.
+ Emit(kArmF32x4Sqrt, g.DefineAsFixed(node, q0),
+ g.UseFixed(node->InputAt(0), q0));
+}
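
The fixed-register constraint comes from VFP/NEON aliasing: each quad register Qn overlaps S(4n)..S(4n+3), and only Q0-Q7 lie within the directly addressable S0-S31 range that the per-lane `vsqrt` in the code generator needs. A compile-time restatement of the mapping (illustrative, not V8 code):

```cpp
// Qn aliases the four single-precision registers S(4n)..S(4n+3).
constexpr int FirstSRegOfQ(int q_code) { return q_code * 4; }
static_assert(FirstSRegOfQ(0) == 0, "Q0 = {S0, S1, S2, S3}");
static_assert(FirstSRegOfQ(7) + 3 == 31, "Q7 ends at S31; Q8+ has no S view");
```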
+
void InstructionSelector::VisitF32x4Div(Node* node) {
ArmOperandGenerator g(this);
// Use fixed registers in the lower 8 Q-registers so we can directly access
diff --git a/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc b/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc
index 66ca7f6cf0..6f65c905dd 100644
--- a/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc
+++ b/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc
@@ -376,9 +376,9 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
UNREACHABLE();
}
-void EmitWordLoadPoisoningIfNeeded(
- CodeGenerator* codegen, InstructionCode opcode, Instruction* instr,
- Arm64OperandConverter& i) { // NOLINT(runtime/references)
+void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
+ InstructionCode opcode, Instruction* instr,
+ Arm64OperandConverter const& i) {
const MemoryAccessMode access_mode =
static_cast<MemoryAccessMode>(MiscField::decode(opcode));
if (access_mode == kMemoryAccessPoisoned) {
@@ -389,6 +389,36 @@ void EmitWordLoadPoisoningIfNeeded(
}
}
+void EmitMaybePoisonedFPLoad(CodeGenerator* codegen, InstructionCode opcode,
+ Arm64OperandConverter* i, VRegister output_reg) {
+ const MemoryAccessMode access_mode =
+ static_cast<MemoryAccessMode>(MiscField::decode(opcode));
+ AddressingMode address_mode = AddressingModeField::decode(opcode);
+ if (access_mode == kMemoryAccessPoisoned && address_mode != kMode_Root) {
+ UseScratchRegisterScope temps(codegen->tasm());
+ Register address = temps.AcquireX();
+ switch (address_mode) {
+ case kMode_MRI: // Fall through.
+ case kMode_MRR:
+ codegen->tasm()->Add(address, i->InputRegister(0), i->InputOperand(1));
+ break;
+ case kMode_Operand2_R_LSL_I:
+ codegen->tasm()->Add(address, i->InputRegister(0),
+ i->InputOperand2_64(1));
+ break;
+ default:
+ // Note: we don't need poisoning for kMode_Root loads as those loads
+ // target a fixed offset from root register which is set once when
+ // initializing the vm.
+ UNREACHABLE();
+ }
+ codegen->tasm()->And(address, address, Operand(kSpeculationPoisonRegister));
+ codegen->tasm()->Ldr(output_reg, MemOperand(address));
+ } else {
+ codegen->tasm()->Ldr(output_reg, i->MemoryOperand());
+ }
+}
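
The pattern is the backend's standard Spectre load mitigation: compute the full address, AND it with the speculation poison register, then load. On the architecturally correct path the poison is all ones and the mask is a no-op; on a misspeculated path it is zero, so the speculative load cannot touch attacker-chosen memory. A scalar model:

```cpp
#include <cstdint>

// Conceptual model of the masking in EmitMaybePoisonedFPLoad.
uint64_t PoisonedAddress(uint64_t address, uint64_t poison) {
  // poison == ~0ull on the correct path (mask is a no-op);
  // poison == 0 under misspeculation (load is redirected to address 0).
  return address & poison;
}
```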
+
} // namespace
#define ASSEMBLE_SHIFT(asm_instr, width) \
@@ -1198,6 +1228,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArm64Sxtw:
__ Sxtw(i.OutputRegister(), i.InputRegister32(0));
break;
+ case kArm64Sbfx:
+ __ Sbfx(i.OutputRegister(), i.InputRegister(0), i.InputInt6(1),
+ i.InputInt6(2));
+ break;
case kArm64Sbfx32:
__ Sbfx(i.OutputRegister32(), i.InputRegister32(0), i.InputInt5(1),
i.InputInt5(2));
@@ -1586,6 +1620,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArm64Str:
__ Str(i.InputOrZeroRegister64(0), i.MemoryOperand(1));
break;
+ case kArm64StrCompressTagged:
+ __ StoreTaggedField(i.InputOrZeroRegister64(0), i.MemoryOperand(1));
+ break;
case kArm64DecompressSigned: {
__ DecompressTaggedSigned(i.OutputRegister(), i.InputRegister(0));
break;
@@ -1599,13 +1636,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArm64LdrS:
- __ Ldr(i.OutputDoubleRegister().S(), i.MemoryOperand());
+ EmitMaybePoisonedFPLoad(this, opcode, &i, i.OutputDoubleRegister().S());
break;
case kArm64StrS:
__ Str(i.InputFloat32OrZeroRegister(0), i.MemoryOperand(1));
break;
case kArm64LdrD:
- __ Ldr(i.OutputDoubleRegister(), i.MemoryOperand());
+ EmitMaybePoisonedFPLoad(this, opcode, &i, i.OutputDoubleRegister());
break;
case kArm64StrD:
__ Str(i.InputFloat64OrZeroRegister(0), i.MemoryOperand(1));
@@ -1616,9 +1653,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArm64StrQ:
__ Str(i.InputSimd128Register(0), i.MemoryOperand(1));
break;
- case kArm64StrCompressTagged:
- __ StoreTaggedField(i.InputOrZeroRegister64(0), i.MemoryOperand(1));
- break;
case kArm64DmbIsh:
__ Dmb(InnerShareable, BarrierAll);
break;
@@ -1794,6 +1828,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
SIMD_UNOP_CASE(kArm64F64x2Abs, Fabs, 2D);
SIMD_UNOP_CASE(kArm64F64x2Neg, Fneg, 2D);
+ SIMD_UNOP_CASE(kArm64F64x2Sqrt, Fsqrt, 2D);
SIMD_BINOP_CASE(kArm64F64x2Add, Fadd, 2D);
SIMD_BINOP_CASE(kArm64F64x2Sub, Fsub, 2D);
SIMD_BINOP_CASE(kArm64F64x2Mul, Fmul, 2D);
@@ -1818,6 +1853,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(0).V2D());
break;
}
+ case kArm64F64x2Qfma: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ Fmla(i.OutputSimd128Register().V2D(), i.InputSimd128Register(1).V2D(),
+ i.InputSimd128Register(2).V2D());
+ break;
+ }
+ case kArm64F64x2Qfms: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ Fmls(i.OutputSimd128Register().V2D(), i.InputSimd128Register(1).V2D(),
+ i.InputSimd128Register(2).V2D());
+ break;
+ }
case kArm64F32x4Splat: {
__ Dup(i.OutputSimd128Register().V4S(), i.InputSimd128Register(0).S(), 0);
break;
@@ -1840,6 +1887,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
SIMD_UNOP_CASE(kArm64F32x4UConvertI32x4, Ucvtf, 4S);
SIMD_UNOP_CASE(kArm64F32x4Abs, Fabs, 4S);
SIMD_UNOP_CASE(kArm64F32x4Neg, Fneg, 4S);
+ SIMD_UNOP_CASE(kArm64F32x4Sqrt, Fsqrt, 4S);
SIMD_UNOP_CASE(kArm64F32x4RecipApprox, Frecpe, 4S);
SIMD_UNOP_CASE(kArm64F32x4RecipSqrtApprox, Frsqrte, 4S);
SIMD_BINOP_CASE(kArm64F32x4Add, Fadd, 4S);
@@ -1867,6 +1915,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(0).V4S());
break;
}
+ case kArm64F32x4Qfma: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ Fmla(i.OutputSimd128Register().V4S(), i.InputSimd128Register(1).V4S(),
+ i.InputSimd128Register(2).V4S());
+ break;
+ }
+ case kArm64F32x4Qfms: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ Fmls(i.OutputSimd128Register().V4S(), i.InputSimd128Register(1).V4S(),
+ i.InputSimd128Register(2).V4S());
+ break;
+ }
case kArm64I64x2Splat: {
__ Dup(i.OutputSimd128Register().V2D(), i.InputRegister64(0));
break;
@@ -1888,14 +1948,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
SIMD_UNOP_CASE(kArm64I64x2Neg, Neg, 2D);
case kArm64I64x2Shl: {
VRegister tmp = i.TempSimd128Register(0);
- __ Dup(tmp.V2D(), i.InputRegister64(1));
+ Register shift = i.TempRegister(1);
+ // Take shift value modulo 64.
+ __ And(shift, i.InputRegister64(1), 63);
+ __ Dup(tmp.V2D(), shift);
__ Sshl(i.OutputSimd128Register().V2D(), i.InputSimd128Register(0).V2D(),
tmp.V2D());
break;
}
case kArm64I64x2ShrS: {
VRegister tmp = i.TempSimd128Register(0);
- __ Dup(tmp.V2D(), i.InputRegister64(1));
+ Register shift = i.TempRegister(1);
+ // Take shift value modulo 64.
+ __ And(shift, i.InputRegister64(1), 63);
+ __ Dup(tmp.V2D(), shift);
__ Neg(tmp.V2D(), tmp.V2D());
__ Sshl(i.OutputSimd128Register().V2D(), i.InputSimd128Register(0).V2D(),
tmp.V2D());
@@ -1903,6 +1969,65 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
SIMD_BINOP_CASE(kArm64I64x2Add, Add, 2D);
SIMD_BINOP_CASE(kArm64I64x2Sub, Sub, 2D);
+ case kArm64I64x2Mul: {
+ UseScratchRegisterScope scope(tasm());
+ VRegister dst = i.OutputSimd128Register();
+ VRegister src1 = i.InputSimd128Register(0);
+ VRegister src2 = i.InputSimd128Register(1);
+ VRegister tmp1 = scope.AcquireSameSizeAs(dst);
+ VRegister tmp2 = scope.AcquireSameSizeAs(dst);
+ VRegister tmp3 = i.ToSimd128Register(instr->TempAt(0));
+
+ // This 2x64-bit multiplication is performed with several 32-bit
+ // multiplications.
+
+    // 64-bit numbers x and y can be represented as:
+    // x = a + 2^32(b)
+    // y = c + 2^32(d)
+
+    // A 64-bit multiplication is:
+    // x * y = ac + 2^32(ad + bc) + 2^64(bd)
+    // note: `2^64(bd)` can be ignored as the value is too large to fit in
+    // 64 bits.
+
+    // This sequence implements a 2x64-bit multiply, where the registers
+ // `src1` and `src2` are split up into 32-bit components:
+ // src1 = |d|c|b|a|
+ // src2 = |h|g|f|e|
+ //
+ // src1 * src2 = |cg + 2^32(ch + dg)|ae + 2^32(af + be)|
+
+ // Reverse the 32-bit elements in the 64-bit words.
+ // tmp2 = |g|h|e|f|
+ __ Rev64(tmp2.V4S(), src2.V4S());
+
+ // Calculate the high half components.
+ // tmp2 = |dg|ch|be|af|
+ __ Mul(tmp2.V4S(), tmp2.V4S(), src1.V4S());
+
+ // Extract the low half components of src1.
+ // tmp1 = |c|a|
+ __ Xtn(tmp1.V2S(), src1.V2D());
+
+ // Sum the respective high half components.
+ // tmp2 = |dg+ch|be+af||dg+ch|be+af|
+ __ Addp(tmp2.V4S(), tmp2.V4S(), tmp2.V4S());
+
+ // Extract the low half components of src2.
+ // tmp3 = |g|e|
+ __ Xtn(tmp3.V2S(), src2.V2D());
+
+ // Shift the high half components, into the high half.
+ // dst = |dg+ch << 32|be+af << 32|
+ __ Shll(dst.V2D(), tmp2.V2S(), 32);
+
+ // Multiply the low components together, and accumulate with the high
+ // half.
+ // dst = |dst[1] + cg|dst[0] + ae|
+ __ Umlal(dst.V2D(), tmp3.V2S(), tmp1.V2S());
+
+ break;
+ }
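
The decomposition in the comment is easy to sanity-check in scalar code; discarding the `2^64(bd)` term is exactly the wrap-around of unsigned 64-bit arithmetic. A minimal check, reusing the comment's variable names:

```cpp
#include <cstdint>

// Scalar restatement of the multiply sequence above.
uint64_t Mul64Via32(uint64_t x, uint64_t y) {
  uint64_t a = x & 0xffffffffu, b = x >> 32;  // x = a + 2^32 * b
  uint64_t c = y & 0xffffffffu, d = y >> 32;  // y = c + 2^32 * d
  // x * y = ac + 2^32(ad + bc) + 2^64(bd); the last term wraps to zero.
  return a * c + ((a * d + b * c) << 32);
}
// Mul64Via32(x, y) == x * y for all x, y (arithmetic is modulo 2^64).
```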
SIMD_BINOP_CASE(kArm64I64x2Eq, Cmeq, 2D);
case kArm64I64x2Ne: {
VRegister dst = i.OutputSimd128Register().V2D();
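The comment block above fully determines the sequence; a scalar model of one lane, with illustrative names, shows the same decomposition into 32-bit partial products:

```cpp
#include <cstdint>

// One 64x64->64 product built from 32-bit halves, as in the Rev64/Mul/Xtn/
// Addp/Shll/Umlal sequence above. The 2^64 term (b*d) is dropped: it cannot
// fit in the 64-bit lane.
uint64_t Mul64Via32(uint64_t x, uint64_t y) {
  uint64_t a = x & 0xFFFFFFFFu, b = x >> 32;  // x = a + 2^32 * b
  uint64_t c = y & 0xFFFFFFFFu, d = y >> 32;  // y = c + 2^32 * d
  return a * c + ((a * d + b * c) << 32);     // == x * y mod 2^64
}
```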
@@ -1915,7 +2040,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
SIMD_BINOP_CASE(kArm64I64x2GeS, Cmge, 2D);
case kArm64I64x2ShrU: {
VRegister tmp = i.TempSimd128Register(0);
- __ Dup(tmp.V2D(), i.InputRegister64(1));
+ Register shift = i.TempRegister(1);
+ // Take shift value modulo 64.
+ __ And(shift, i.InputRegister64(1), 63);
+ __ Dup(tmp.V2D(), shift);
__ Neg(tmp.V2D(), tmp.V2D());
__ Ushl(i.OutputSimd128Register().V2D(), i.InputSimd128Register(0).V2D(),
tmp.V2D());
@@ -1947,14 +2075,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
SIMD_UNOP_CASE(kArm64I32x4Neg, Neg, 4S);
case kArm64I32x4Shl: {
VRegister tmp = i.TempSimd128Register(0);
- __ Dup(tmp.V4S(), i.InputRegister32(1));
+ Register shift = i.TempRegister32(1);
+ // Take shift value modulo 32.
+ __ And(shift, i.InputRegister32(1), 31);
+ __ Dup(tmp.V4S(), shift);
__ Sshl(i.OutputSimd128Register().V4S(), i.InputSimd128Register(0).V4S(),
tmp.V4S());
break;
}
case kArm64I32x4ShrS: {
VRegister tmp = i.TempSimd128Register(0);
- __ Dup(tmp.V4S(), i.InputRegister32(1));
+ Register shift = i.TempRegister32(1);
+ // Take shift value modulo 32.
+ __ And(shift, i.InputRegister32(1), 31);
+ __ Dup(tmp.V4S(), shift);
__ Neg(tmp.V4S(), tmp.V4S());
__ Sshl(i.OutputSimd128Register().V4S(), i.InputSimd128Register(0).V4S(),
tmp.V4S());
@@ -1981,7 +2115,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
SIMD_WIDENING_UNOP_CASE(kArm64I32x4UConvertI16x8High, Uxtl2, 4S, 8H);
case kArm64I32x4ShrU: {
VRegister tmp = i.TempSimd128Register(0);
- __ Dup(tmp.V4S(), i.InputRegister32(1));
+ Register shift = i.TempRegister32(1);
+ // Take shift value modulo 32.
+ __ And(shift, i.InputRegister32(1), 31);
+ __ Dup(tmp.V4S(), shift);
__ Neg(tmp.V4S(), tmp.V4S());
__ Ushl(i.OutputSimd128Register().V4S(), i.InputSimd128Register(0).V4S(),
tmp.V4S());
@@ -1996,7 +2133,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArm64I16x8ExtractLane: {
- __ Smov(i.OutputRegister32(), i.InputSimd128Register(0).V8H(),
+ __ Umov(i.OutputRegister32(), i.InputSimd128Register(0).V8H(),
i.InputInt8(1));
break;
}
@@ -2014,14 +2151,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
SIMD_UNOP_CASE(kArm64I16x8Neg, Neg, 8H);
case kArm64I16x8Shl: {
VRegister tmp = i.TempSimd128Register(0);
- __ Dup(tmp.V8H(), i.InputRegister32(1));
+ Register shift = i.TempRegister32(1);
+ // Take shift value modulo 16.
+ __ And(shift, i.InputRegister32(1), 15);
+ __ Dup(tmp.V8H(), shift);
__ Sshl(i.OutputSimd128Register().V8H(), i.InputSimd128Register(0).V8H(),
tmp.V8H());
break;
}
case kArm64I16x8ShrS: {
VRegister tmp = i.TempSimd128Register(0);
- __ Dup(tmp.V8H(), i.InputRegister32(1));
+ Register shift = i.TempRegister32(1);
+ // Take shift value modulo 16.
+ __ And(shift, i.InputRegister32(1), 15);
+ __ Dup(tmp.V8H(), shift);
__ Neg(tmp.V8H(), tmp.V8H());
__ Sshl(i.OutputSimd128Register().V8H(), i.InputSimd128Register(0).V8H(),
tmp.V8H());
@@ -2070,7 +2213,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArm64I16x8ShrU: {
VRegister tmp = i.TempSimd128Register(0);
- __ Dup(tmp.V8H(), i.InputRegister32(1));
+ Register shift = i.TempRegister32(1);
+ // Take shift value modulo 16.
+ __ And(shift, i.InputRegister32(1), 15);
+ __ Dup(tmp.V8H(), shift);
__ Neg(tmp.V8H(), tmp.V8H());
__ Ushl(i.OutputSimd128Register().V8H(), i.InputSimd128Register(0).V8H(),
tmp.V8H());
@@ -2101,7 +2247,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArm64I8x16ExtractLane: {
- __ Smov(i.OutputRegister32(), i.InputSimd128Register(0).V16B(),
+ __ Umov(i.OutputRegister32(), i.InputSimd128Register(0).V16B(),
i.InputInt8(1));
break;
}
@@ -2117,14 +2263,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
SIMD_UNOP_CASE(kArm64I8x16Neg, Neg, 16B);
case kArm64I8x16Shl: {
VRegister tmp = i.TempSimd128Register(0);
- __ Dup(tmp.V16B(), i.InputRegister32(1));
+ Register shift = i.TempRegister32(1);
+ // Take shift value modulo 8.
+ __ And(shift, i.InputRegister32(1), 7);
+ __ Dup(tmp.V16B(), shift);
__ Sshl(i.OutputSimd128Register().V16B(),
i.InputSimd128Register(0).V16B(), tmp.V16B());
break;
}
case kArm64I8x16ShrS: {
VRegister tmp = i.TempSimd128Register(0);
- __ Dup(tmp.V16B(), i.InputRegister32(1));
+ Register shift = i.TempRegister32(1);
+ // Take shift value modulo 8.
+ __ And(shift, i.InputRegister32(1), 7);
+ __ Dup(tmp.V16B(), shift);
__ Neg(tmp.V16B(), tmp.V16B());
__ Sshl(i.OutputSimd128Register().V16B(),
i.InputSimd128Register(0).V16B(), tmp.V16B());
@@ -2163,7 +2315,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
SIMD_BINOP_CASE(kArm64I8x16GeS, Cmge, 16B);
case kArm64I8x16ShrU: {
VRegister tmp = i.TempSimd128Register(0);
- __ Dup(tmp.V16B(), i.InputRegister32(1));
+ Register shift = i.TempRegister32(1);
+ // Take shift value modulo 8.
+ __ And(shift, i.InputRegister32(1), 7);
+ __ Dup(tmp.V16B(), shift);
__ Neg(tmp.V16B(), tmp.V16B());
__ Ushl(i.OutputSimd128Register().V16B(),
i.InputSimd128Register(0).V16B(), tmp.V16B());
@@ -2277,6 +2432,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1).V16B(), i.InputInt4(2));
break;
}
+ case kArm64S8x16Swizzle: {
+ __ Tbl(i.OutputSimd128Register().V16B(), i.InputSimd128Register(0).V16B(),
+ i.InputSimd128Register(1).V16B());
+ break;
+ }
case kArm64S8x16Shuffle: {
Simd128Register dst = i.OutputSimd128Register().V16B(),
src0 = i.InputSimd128Register(0).V16B(),
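Tbl is a byte-table lookup that returns zero for out-of-range indices, which is exactly the swizzle semantics, so the new kArm64S8x16Swizzle case needs no fixup. A scalar model (names illustrative):

```cpp
#include <cstdint>

// s8x16.swizzle, one byte at a time: indices 0..15 select from the table,
// anything larger yields 0 — the behavior Tbl gives for free.
void Swizzle16(const uint8_t table[16], const uint8_t idx[16],
               uint8_t out[16]) {
  for (int i = 0; i < 16; ++i) out[i] = (idx[i] < 16) ? table[idx[i]] : 0;
}
```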
diff --git a/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h b/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h
index 4b56e402c1..880a3fbf9e 100644
--- a/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h
+++ b/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h
@@ -70,6 +70,7 @@ namespace compiler {
V(Arm64Sxtb) \
V(Arm64Sxth) \
V(Arm64Sxtw) \
+ V(Arm64Sbfx) \
V(Arm64Sbfx32) \
V(Arm64Ubfx) \
V(Arm64Ubfx32) \
@@ -175,6 +176,7 @@ namespace compiler {
V(Arm64F64x2ReplaceLane) \
V(Arm64F64x2Abs) \
V(Arm64F64x2Neg) \
+ V(Arm64F64x2Sqrt) \
V(Arm64F64x2Add) \
V(Arm64F64x2Sub) \
V(Arm64F64x2Mul) \
@@ -185,6 +187,8 @@ namespace compiler {
V(Arm64F64x2Ne) \
V(Arm64F64x2Lt) \
V(Arm64F64x2Le) \
+ V(Arm64F64x2Qfma) \
+ V(Arm64F64x2Qfms) \
V(Arm64F32x4Splat) \
V(Arm64F32x4ExtractLane) \
V(Arm64F32x4ReplaceLane) \
@@ -192,6 +196,7 @@ namespace compiler {
V(Arm64F32x4UConvertI32x4) \
V(Arm64F32x4Abs) \
V(Arm64F32x4Neg) \
+ V(Arm64F32x4Sqrt) \
V(Arm64F32x4RecipApprox) \
V(Arm64F32x4RecipSqrtApprox) \
V(Arm64F32x4Add) \
@@ -205,6 +210,8 @@ namespace compiler {
V(Arm64F32x4Ne) \
V(Arm64F32x4Lt) \
V(Arm64F32x4Le) \
+ V(Arm64F32x4Qfma) \
+ V(Arm64F32x4Qfms) \
V(Arm64I64x2Splat) \
V(Arm64I64x2ExtractLane) \
V(Arm64I64x2ReplaceLane) \
@@ -213,6 +220,7 @@ namespace compiler {
V(Arm64I64x2ShrS) \
V(Arm64I64x2Add) \
V(Arm64I64x2Sub) \
+ V(Arm64I64x2Mul) \
V(Arm64I64x2Eq) \
V(Arm64I64x2Ne) \
V(Arm64I64x2GtS) \
@@ -331,6 +339,7 @@ namespace compiler {
V(Arm64S8x16TransposeLeft) \
V(Arm64S8x16TransposeRight) \
V(Arm64S8x16Concat) \
+ V(Arm64S8x16Swizzle) \
V(Arm64S8x16Shuffle) \
V(Arm64S32x2Reverse) \
V(Arm64S16x4Reverse) \
diff --git a/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc b/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc
index 7cba2d50ea..b0f9202968 100644
--- a/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc
+++ b/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc
@@ -71,6 +71,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64Sxth:
case kArm64Sxth32:
case kArm64Sxtw:
+ case kArm64Sbfx:
case kArm64Sbfx32:
case kArm64Ubfx:
case kArm64Ubfx32:
@@ -142,6 +143,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64F64x2ReplaceLane:
case kArm64F64x2Abs:
case kArm64F64x2Neg:
+ case kArm64F64x2Sqrt:
case kArm64F64x2Add:
case kArm64F64x2Sub:
case kArm64F64x2Mul:
@@ -152,6 +154,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64F64x2Ne:
case kArm64F64x2Lt:
case kArm64F64x2Le:
+ case kArm64F64x2Qfma:
+ case kArm64F64x2Qfms:
case kArm64F32x4Splat:
case kArm64F32x4ExtractLane:
case kArm64F32x4ReplaceLane:
@@ -159,6 +163,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64F32x4UConvertI32x4:
case kArm64F32x4Abs:
case kArm64F32x4Neg:
+ case kArm64F32x4Sqrt:
case kArm64F32x4RecipApprox:
case kArm64F32x4RecipSqrtApprox:
case kArm64F32x4Add:
@@ -172,6 +177,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64F32x4Ne:
case kArm64F32x4Lt:
case kArm64F32x4Le:
+ case kArm64F32x4Qfma:
+ case kArm64F32x4Qfms:
case kArm64I64x2Splat:
case kArm64I64x2ExtractLane:
case kArm64I64x2ReplaceLane:
@@ -180,6 +187,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64I64x2ShrS:
case kArm64I64x2Add:
case kArm64I64x2Sub:
+ case kArm64I64x2Mul:
case kArm64I64x2Eq:
case kArm64I64x2Ne:
case kArm64I64x2GtS:
@@ -298,6 +306,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64S8x16TransposeLeft:
case kArm64S8x16TransposeRight:
case kArm64S8x16Concat:
+ case kArm64S8x16Swizzle:
case kArm64S8x16Shuffle:
case kArm64S32x2Reverse:
case kArm64S16x4Reverse:
@@ -439,6 +448,7 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
case kArm64Clz:
case kArm64Clz32:
+ case kArm64Sbfx:
case kArm64Sbfx32:
case kArm64Sxtb32:
case kArm64Sxth32:
diff --git a/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc b/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc
index 4abbd68c49..53a289fe6a 100644
--- a/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc
+++ b/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc
@@ -153,7 +153,7 @@ void VisitRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
void VisitSimdShiftRRR(InstructionSelector* selector, ArchOpcode opcode,
Node* node) {
Arm64OperandGenerator g(selector);
- InstructionOperand temps[] = {g.TempSimd128Register()};
+ InstructionOperand temps[] = {g.TempSimd128Register(), g.TempRegister()};
selector->Emit(opcode, g.DefineAsRegister(node),
g.UseRegister(node->InputAt(0)),
g.UseRegister(node->InputAt(1)), arraysize(temps), temps);
@@ -499,6 +499,7 @@ void VisitAddSub(InstructionSelector* selector, Node* node, ArchOpcode opcode,
Arm64OperandGenerator g(selector);
Matcher m(node);
if (m.right().HasValue() && (m.right().Value() < 0) &&
+ (m.right().Value() > std::numeric_limits<int>::min()) &&
g.CanBeImmediate(-m.right().Value(), kArithmeticImm)) {
selector->Emit(negate_opcode, g.DefineAsRegister(node),
g.UseRegister(m.left().node()),
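The new bound exists because Add(x, imm) with a negative imm is rewritten as Sub(x, -imm), and the most negative value cannot be negated; a minimal illustration (a sketch, names mine):

```cpp
#include <cstdint>
#include <limits>

// Rewriting Add(x, imm) as Sub(x, -imm) is only sound when -imm is
// representable; INT32_MIN must be excluded before negating.
bool CanRewriteAddAsSub(int32_t imm) {
  return imm < 0 && imm > std::numeric_limits<int32_t>::min();
}
```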
@@ -627,9 +628,24 @@ void InstructionSelector::VisitLoad(Node* node) {
#else
UNREACHABLE();
#endif
+#ifdef V8_COMPRESS_POINTERS
+ case MachineRepresentation::kTaggedSigned:
+ opcode = kArm64LdrDecompressTaggedSigned;
+ immediate_mode = kLoadStoreImm32;
+ break;
+ case MachineRepresentation::kTaggedPointer:
+ opcode = kArm64LdrDecompressTaggedPointer;
+ immediate_mode = kLoadStoreImm32;
+ break;
+ case MachineRepresentation::kTagged:
+ opcode = kArm64LdrDecompressAnyTagged;
+ immediate_mode = kLoadStoreImm32;
+ break;
+#else
case MachineRepresentation::kTaggedSigned: // Fall through.
case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged: // Fall through.
+#endif
case MachineRepresentation::kWord64:
opcode = kArm64Ldr;
immediate_mode = kLoadStoreImm64;
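A rough model of what the decompressing load opcodes expand to, assuming V8's base-plus-offset pointer compression scheme (which is also why the immediate mode drops to kLoadStoreImm32); names are illustrative:

```cpp
#include <cstdint>

// Only 32 bits are loaded from the field; the full tagged pointer is
// reconstructed by rebasing the compressed value against the isolate root.
uint64_t DecompressTagged(uint64_t isolate_root, uint32_t compressed) {
  return isolate_root + compressed;
}
```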
@@ -723,7 +739,7 @@ void InstructionSelector::VisitStore(Node* node) {
case MachineRepresentation::kCompressedPointer: // Fall through.
case MachineRepresentation::kCompressed:
#ifdef V8_COMPRESS_POINTERS
- opcode = kArm64StrW;
+ opcode = kArm64StrCompressTagged;
immediate_mode = kLoadStoreImm32;
break;
#else
@@ -731,7 +747,11 @@ void InstructionSelector::VisitStore(Node* node) {
#endif
case MachineRepresentation::kTaggedSigned: // Fall through.
case MachineRepresentation::kTaggedPointer: // Fall through.
- case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kTagged:
+ opcode = kArm64StrCompressTagged;
+ immediate_mode =
+ COMPRESS_POINTERS_BOOL ? kLoadStoreImm32 : kLoadStoreImm64;
+ break;
case MachineRepresentation::kWord64:
opcode = kArm64Str;
immediate_mode = kLoadStoreImm64;
@@ -770,6 +790,10 @@ void InstructionSelector::VisitProtectedStore(Node* node) {
UNIMPLEMENTED();
}
+void InstructionSelector::VisitSimd128ReverseBytes(Node* node) {
+ UNREACHABLE();
+}
+
// Architecture supports unaligned access, therefore VisitLoad is used instead
void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); }
@@ -1048,7 +1072,8 @@ void InstructionSelector::VisitWord32Shr(Node* node) {
if (mleft.right().HasValue() && mleft.right().Value() != 0) {
// Select Ubfx for Shr(And(x, mask), imm) where the result of the mask is
// shifted into the least-significant bits.
- uint32_t mask = (mleft.right().Value() >> lsb) << lsb;
+ uint32_t mask = static_cast<uint32_t>(mleft.right().Value() >> lsb)
+ << lsb;
unsigned mask_width = base::bits::CountPopulation(mask);
unsigned mask_msb = base::bits::CountLeadingZeros32(mask);
if ((mask_msb + mask_width + lsb) == 32) {
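A scalar model of the pattern this match recognizes: when the mask is a contiguous field of `width` bits starting at `lsb` (the popcount/leading-zeros check above), the whole Shr(And(x, mask), lsb) collapses to one unsigned bitfield extract:

```cpp
#include <cstdint>

// ubfx(x, lsb, width): shift the field down, keep its low `width` bits.
// Assumes 1 <= width and lsb + width <= 32, as the matcher guarantees.
uint32_t UbfxModel(uint32_t x, unsigned lsb, unsigned width) {
  uint32_t field = x >> lsb;
  return (lsb + width == 32) ? field : (field & ((1u << width) - 1));
}
```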
@@ -1091,7 +1116,8 @@ void InstructionSelector::VisitWord64Shr(Node* node) {
if (mleft.right().HasValue() && mleft.right().Value() != 0) {
// Select Ubfx for Shr(And(x, mask), imm) where the result of the mask is
// shifted into the least-significant bits.
- uint64_t mask = (mleft.right().Value() >> lsb) << lsb;
+ uint64_t mask = static_cast<uint64_t>(mleft.right().Value() >> lsb)
+ << lsb;
unsigned mask_width = base::bits::CountPopulation(mask);
unsigned mask_msb = base::bits::CountLeadingZeros64(mask);
if ((mask_msb + mask_width + lsb) == 64) {
@@ -1240,7 +1266,8 @@ void InstructionSelector::VisitWord64Ror(Node* node) {
V(Float32Max, kArm64Float32Max) \
V(Float64Max, kArm64Float64Max) \
V(Float32Min, kArm64Float32Min) \
- V(Float64Min, kArm64Float64Min)
+ V(Float64Min, kArm64Float64Min) \
+ V(S8x16Swizzle, kArm64S8x16Swizzle)
#define RR_VISITOR(Name, opcode) \
void InstructionSelector::Visit##Name(Node* node) { \
@@ -1572,9 +1599,22 @@ void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
return;
}
EmitLoad(this, value, opcode, immediate_mode, rep, node);
- } else {
- VisitRR(this, kArm64Sxtw, node);
+ return;
+ }
+
+ if (value->opcode() == IrOpcode::kWord32Sar && CanCover(node, value)) {
+ Int32BinopMatcher m(value);
+ if (m.right().HasValue()) {
+ Arm64OperandGenerator g(this);
+ // Mask the shift amount, to keep the same semantics as Word32Sar.
+ int right = m.right().Value() & 0x1F;
+ Emit(kArm64Sbfx, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.TempImmediate(right), g.TempImmediate(32 - right));
+ return;
+ }
}
+
+ VisitRR(this, kArm64Sxtw, node);
}
void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
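A scalar model of the new match above: sign-extending Word32Sar(x, k) to 64 bits is a single signed bitfield extract of the top 32 - k bits, and the 0x1F mask keeps Word32Sar's mod-32 count semantics:

```cpp
#include <cstdint>

// == sbfx(x, k & 31, 32 - (k & 31)): extract from bit k, sign-extend to 64.
int64_t SarThenExtend(int32_t x, int k) {
  return static_cast<int64_t>(x >> (k & 0x1F));
}
```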
@@ -1830,31 +1870,6 @@ void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
selector->EmitWithContinuation(opcode, left, right, cont);
}
-// Shared routine for multiple word compare operations.
-void VisitWordCompare(InstructionSelector* selector, Node* node,
- InstructionCode opcode, FlagsContinuation* cont,
- ImmediateMode immediate_mode) {
- Arm64OperandGenerator g(selector);
-
- Node* left = node->InputAt(0);
- Node* right = node->InputAt(1);
-
- // If one of the two inputs is an immediate, make sure it's on the right.
- if (!g.CanBeImmediate(right, immediate_mode) &&
- g.CanBeImmediate(left, immediate_mode)) {
- cont->Commute();
- std::swap(left, right);
- }
-
- if (g.CanBeImmediate(right, immediate_mode)) {
- VisitCompare(selector, opcode, g.UseRegister(left), g.UseImmediate(right),
- cont);
- } else {
- VisitCompare(selector, opcode, g.UseRegister(left), g.UseRegister(right),
- cont);
- }
-}
-
// This function checks whether we can convert:
// ((a <op> b) cmp 0), b.<cond>
// to:
@@ -1986,9 +2001,35 @@ void EmitBranchOrDeoptimize(InstructionSelector* selector,
selector->EmitWithContinuation(opcode, value, cont);
}
+template <int N>
+struct CbzOrTbzMatchTrait {};
+
+template <>
+struct CbzOrTbzMatchTrait<32> {
+ using IntegralType = uint32_t;
+ using BinopMatcher = Int32BinopMatcher;
+ static constexpr IrOpcode::Value kAndOpcode = IrOpcode::kWord32And;
+ static constexpr ArchOpcode kTestAndBranchOpcode = kArm64TestAndBranch32;
+ static constexpr ArchOpcode kCompareAndBranchOpcode =
+ kArm64CompareAndBranch32;
+ static constexpr unsigned kSignBit = kWSignBit;
+};
+
+template <>
+struct CbzOrTbzMatchTrait<64> {
+ using IntegralType = uint64_t;
+ using BinopMatcher = Int64BinopMatcher;
+ static constexpr IrOpcode::Value kAndOpcode = IrOpcode::kWord64And;
+ static constexpr ArchOpcode kTestAndBranchOpcode = kArm64TestAndBranch;
+ static constexpr ArchOpcode kCompareAndBranchOpcode = kArm64CompareAndBranch;
+ static constexpr unsigned kSignBit = kXSignBit;
+};
+
// Try to emit TBZ, TBNZ, CBZ or CBNZ for certain comparisons of {node}
// against {value}, depending on the condition.
-bool TryEmitCbzOrTbz(InstructionSelector* selector, Node* node, uint32_t value,
+template <int N>
+bool TryEmitCbzOrTbz(InstructionSelector* selector, Node* node,
+ typename CbzOrTbzMatchTrait<N>::IntegralType value,
Node* user, FlagsCondition cond, FlagsContinuation* cont) {
// Branch poisoning requires flags to be set, so when it's enabled for
// a particular branch, we shouldn't be applying the cbz/tbz optimization.
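The trait only parameterizes widths and opcodes; the bit-level facts the TBZ/CBZ matching itself relies on are scalar, sketched here with illustrative names:

```cpp
#include <cstdint>

// A signed comparison against zero inspects only the sign bit, and testing
// x against a single-bit mask inspects only that bit — so both reduce to one
// test-bit-and-branch (tbz/tbnz) or compare-and-branch (cbz/cbnz).
bool SignedLessThanZero64(int64_t x) {
  return (static_cast<uint64_t>(x) >> 63) != 0;  // tbnz x, #63
}
bool HasBitSet(uint64_t x, unsigned n) {
  return ((x >> n) & 1) != 0;                    // tbnz x, #n
}
```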
@@ -2007,28 +2048,33 @@ bool TryEmitCbzOrTbz(InstructionSelector* selector, Node* node, uint32_t value,
if (cont->IsDeoptimize()) return false;
Arm64OperandGenerator g(selector);
cont->Overwrite(MapForTbz(cond));
- Int32Matcher m(node);
- if (m.IsFloat64ExtractHighWord32() && selector->CanCover(user, node)) {
- // SignedLessThan(Float64ExtractHighWord32(x), 0) and
- // SignedGreaterThanOrEqual(Float64ExtractHighWord32(x), 0) essentially
- // check the sign bit of a 64-bit floating point value.
- InstructionOperand temp = g.TempRegister();
- selector->Emit(kArm64U64MoveFloat64, temp,
- g.UseRegister(node->InputAt(0)));
- selector->EmitWithContinuation(kArm64TestAndBranch, temp,
- g.TempImmediate(63), cont);
- return true;
+
+ if (N == 32) {
+ Int32Matcher m(node);
+ if (m.IsFloat64ExtractHighWord32() && selector->CanCover(user, node)) {
+ // SignedLessThan(Float64ExtractHighWord32(x), 0) and
+ // SignedGreaterThanOrEqual(Float64ExtractHighWord32(x), 0)
+ // essentially check the sign bit of a 64-bit floating point value.
+ InstructionOperand temp = g.TempRegister();
+ selector->Emit(kArm64U64MoveFloat64, temp,
+ g.UseRegister(node->InputAt(0)));
+ selector->EmitWithContinuation(kArm64TestAndBranch, temp,
+ g.TempImmediate(kDSignBit), cont);
+ return true;
+ }
}
- selector->EmitWithContinuation(kArm64TestAndBranch32, g.UseRegister(node),
- g.TempImmediate(31), cont);
+
+ selector->EmitWithContinuation(
+ CbzOrTbzMatchTrait<N>::kTestAndBranchOpcode, g.UseRegister(node),
+ g.TempImmediate(CbzOrTbzMatchTrait<N>::kSignBit), cont);
return true;
}
case kEqual:
case kNotEqual: {
- if (node->opcode() == IrOpcode::kWord32And) {
+ if (node->opcode() == CbzOrTbzMatchTrait<N>::kAndOpcode) {
// Emit a tbz/tbnz if we are comparing with a single-bit mask:
- // Branch(Word32Equal(Word32And(x, 1 << N), 1 << N), true, false)
- Int32BinopMatcher m_and(node);
+ // Branch(WordEqual(WordAnd(x, 1 << N), 1 << N), true, false)
+ typename CbzOrTbzMatchTrait<N>::BinopMatcher m_and(node);
if (cont->IsBranch() && base::bits::IsPowerOfTwo(value) &&
m_and.right().Is(value) && selector->CanCover(user, node)) {
Arm64OperandGenerator g(selector);
@@ -2036,7 +2082,8 @@ bool TryEmitCbzOrTbz(InstructionSelector* selector, Node* node, uint32_t value,
// the opposite here so negate the condition.
cont->Negate();
selector->EmitWithContinuation(
- kArm64TestAndBranch32, g.UseRegister(m_and.left().node()),
+ CbzOrTbzMatchTrait<N>::kTestAndBranchOpcode,
+ g.UseRegister(m_and.left().node()),
g.TempImmediate(base::bits::CountTrailingZeros(value)), cont);
return true;
}
@@ -2048,7 +2095,8 @@ bool TryEmitCbzOrTbz(InstructionSelector* selector, Node* node, uint32_t value,
if (value != 0) return false;
Arm64OperandGenerator g(selector);
cont->Overwrite(MapForCbz(cond));
- EmitBranchOrDeoptimize(selector, kArm64CompareAndBranch32,
+ EmitBranchOrDeoptimize(selector,
+ CbzOrTbzMatchTrait<N>::kCompareAndBranchOpcode,
g.UseRegister(node), cont);
return true;
}
@@ -2057,20 +2105,50 @@ bool TryEmitCbzOrTbz(InstructionSelector* selector, Node* node, uint32_t value,
}
}
+// Shared routine for multiple word compare operations.
+void VisitWordCompare(InstructionSelector* selector, Node* node,
+ InstructionCode opcode, FlagsContinuation* cont,
+ ImmediateMode immediate_mode) {
+ Arm64OperandGenerator g(selector);
+
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+
+ // If one of the two inputs is an immediate, make sure it's on the right.
+ if (!g.CanBeImmediate(right, immediate_mode) &&
+ g.CanBeImmediate(left, immediate_mode)) {
+ cont->Commute();
+ std::swap(left, right);
+ }
+
+ if (opcode == kArm64Cmp && !cont->IsPoisoned()) {
+ Int64Matcher m(right);
+ if (m.HasValue()) {
+ if (TryEmitCbzOrTbz<64>(selector, left, m.Value(), node,
+ cont->condition(), cont)) {
+ return;
+ }
+ }
+ }
+
+ VisitCompare(selector, opcode, g.UseRegister(left),
+ g.UseOperand(right, immediate_mode), cont);
+}
+
void VisitWord32Compare(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
Int32BinopMatcher m(node);
FlagsCondition cond = cont->condition();
if (!cont->IsPoisoned()) {
if (m.right().HasValue()) {
- if (TryEmitCbzOrTbz(selector, m.left().node(), m.right().Value(), node,
- cond, cont)) {
+ if (TryEmitCbzOrTbz<32>(selector, m.left().node(), m.right().Value(),
+ node, cond, cont)) {
return;
}
} else if (m.left().HasValue()) {
FlagsCondition commuted_cond = CommuteFlagsCondition(cond);
- if (TryEmitCbzOrTbz(selector, m.right().node(), m.left().Value(), node,
- commuted_cond, cont)) {
+ if (TryEmitCbzOrTbz<32>(selector, m.right().node(), m.left().Value(),
+ node, commuted_cond, cont)) {
return;
}
}
@@ -2378,13 +2456,6 @@ void InstructionSelector::VisitWordCompareZero(Node* user, Node* value,
if (CanCover(value, left) && left->opcode() == IrOpcode::kWord64And) {
return VisitWordCompare(this, left, kArm64Tst, cont, kLogical64Imm);
}
- // Merge the Word64Equal(x, 0) comparison into a cbz instruction.
- if ((cont->IsBranch() || cont->IsDeoptimize()) &&
- !cont->IsPoisoned()) {
- EmitBranchOrDeoptimize(this, kArm64CompareAndBranch,
- g.UseRegister(left), cont);
- return;
- }
}
return VisitWordCompare(this, value, kArm64Cmp, cont, kArithmeticImm);
}
@@ -3054,10 +3125,12 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
#define SIMD_UNOP_LIST(V) \
V(F64x2Abs, kArm64F64x2Abs) \
V(F64x2Neg, kArm64F64x2Neg) \
+ V(F64x2Sqrt, kArm64F64x2Sqrt) \
V(F32x4SConvertI32x4, kArm64F32x4SConvertI32x4) \
V(F32x4UConvertI32x4, kArm64F32x4UConvertI32x4) \
V(F32x4Abs, kArm64F32x4Abs) \
V(F32x4Neg, kArm64F32x4Neg) \
+ V(F32x4Sqrt, kArm64F32x4Sqrt) \
V(F32x4RecipApprox, kArm64F32x4RecipApprox) \
V(F32x4RecipSqrtApprox, kArm64F32x4RecipSqrtApprox) \
V(I64x2Neg, kArm64I64x2Neg) \
@@ -3236,6 +3309,14 @@ SIMD_BINOP_LIST(SIMD_VISIT_BINOP)
#undef SIMD_VISIT_BINOP
#undef SIMD_BINOP_LIST
+void InstructionSelector::VisitI64x2Mul(Node* node) {
+ Arm64OperandGenerator g(this);
+ InstructionOperand temps[] = {g.TempSimd128Register()};
+ Emit(kArm64I64x2Mul, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)),
+ arraysize(temps), temps);
+}
+
void InstructionSelector::VisitS128Select(Node* node) {
Arm64OperandGenerator g(this);
Emit(kArm64S128Select, g.DefineSameAsFirst(node),
@@ -3243,6 +3324,19 @@ void InstructionSelector::VisitS128Select(Node* node) {
g.UseRegister(node->InputAt(2)));
}
+#define VISIT_SIMD_QFMOP(op) \
+ void InstructionSelector::Visit##op(Node* node) { \
+ Arm64OperandGenerator g(this); \
+ Emit(kArm64##op, g.DefineSameAsFirst(node), \
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)), \
+ g.UseRegister(node->InputAt(2))); \
+ }
+VISIT_SIMD_QFMOP(F64x2Qfma)
+VISIT_SIMD_QFMOP(F64x2Qfms)
+VISIT_SIMD_QFMOP(F32x4Qfma)
+VISIT_SIMD_QFMOP(F32x4Qfms)
+#undef VISIT_SIMD_QFMOP
+
namespace {
struct ShuffleEntry {
diff --git a/deps/v8/src/compiler/backend/code-generator-impl.h b/deps/v8/src/compiler/backend/code-generator-impl.h
index 2bfb009980..530dc0a813 100644
--- a/deps/v8/src/compiler/backend/code-generator-impl.h
+++ b/deps/v8/src/compiler/backend/code-generator-impl.h
@@ -26,7 +26,7 @@ class InstructionOperandConverter {
// -- Instruction operand accesses with conversions --------------------------
- Register InputRegister(size_t index) {
+ Register InputRegister(size_t index) const {
return ToRegister(instr_->InputAt(index));
}
@@ -96,7 +96,7 @@ class InstructionOperandConverter {
return ToRpoNumber(instr_->InputAt(index));
}
- Register OutputRegister(size_t index = 0) {
+ Register OutputRegister(size_t index = 0) const {
return ToRegister(instr_->OutputAt(index));
}
@@ -130,7 +130,7 @@ class InstructionOperandConverter {
return ToConstant(op).ToRpoNumber();
}
- Register ToRegister(InstructionOperand* op) {
+ Register ToRegister(InstructionOperand* op) const {
return LocationOperand::cast(op)->GetRegister();
}
@@ -146,7 +146,7 @@ class InstructionOperandConverter {
return LocationOperand::cast(op)->GetSimd128Register();
}
- Constant ToConstant(InstructionOperand* op) {
+ Constant ToConstant(InstructionOperand* op) const {
if (op->IsImmediate()) {
return gen_->instructions()->GetImmediate(ImmediateOperand::cast(op));
}
diff --git a/deps/v8/src/compiler/backend/code-generator.cc b/deps/v8/src/compiler/backend/code-generator.cc
index e7702bcdf6..43eb4a1f15 100644
--- a/deps/v8/src/compiler/backend/code-generator.cc
+++ b/deps/v8/src/compiler/backend/code-generator.cc
@@ -4,7 +4,7 @@
#include "src/compiler/backend/code-generator.h"
-#include "src/base/adapters.h"
+#include "src/base/iterator.h"
#include "src/codegen/assembler-inl.h"
#include "src/codegen/macro-assembler-inl.h"
#include "src/codegen/optimized-compilation-info.h"
diff --git a/deps/v8/src/compiler/backend/code-generator.h b/deps/v8/src/compiler/backend/code-generator.h
index e9ebf67590..d56b1edae0 100644
--- a/deps/v8/src/compiler/backend/code-generator.h
+++ b/deps/v8/src/compiler/backend/code-generator.h
@@ -5,6 +5,8 @@
#ifndef V8_COMPILER_BACKEND_CODE_GENERATOR_H_
#define V8_COMPILER_BACKEND_CODE_GENERATOR_H_
+#include <memory>
+
#include "src/base/optional.h"
#include "src/codegen/macro-assembler.h"
#include "src/codegen/safepoint-table.h"
diff --git a/deps/v8/src/compiler/backend/frame-elider.cc b/deps/v8/src/compiler/backend/frame-elider.cc
index 064501b097..293fc9352c 100644
--- a/deps/v8/src/compiler/backend/frame-elider.cc
+++ b/deps/v8/src/compiler/backend/frame-elider.cc
@@ -4,7 +4,7 @@
#include "src/compiler/backend/frame-elider.h"
-#include "src/base/adapters.h"
+#include "src/base/iterator.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc b/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc
index 4542da643b..068268a3da 100644
--- a/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc
+++ b/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc
@@ -479,17 +479,18 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
__ opcode(i.OutputSimd128Register(), i.InputOperand(1), imm); \
}
-#define ASSEMBLE_SIMD_ALL_TRUE(opcode) \
- do { \
- Register dst = i.OutputRegister(); \
- Operand src = i.InputOperand(0); \
- Register tmp = i.TempRegister(0); \
- __ mov(tmp, Immediate(1)); \
- __ xor_(dst, dst); \
- __ Pxor(kScratchDoubleReg, kScratchDoubleReg); \
- __ opcode(kScratchDoubleReg, src); \
- __ Ptest(kScratchDoubleReg, kScratchDoubleReg); \
- __ cmov(zero, dst, tmp); \
+#define ASSEMBLE_SIMD_ALL_TRUE(opcode) \
+ do { \
+ Register dst = i.OutputRegister(); \
+ Operand src = i.InputOperand(0); \
+ Register tmp = i.TempRegister(0); \
+ XMMRegister tmp_simd = i.TempSimd128Register(1); \
+ __ mov(tmp, Immediate(1)); \
+ __ xor_(dst, dst); \
+ __ Pxor(tmp_simd, tmp_simd); \
+ __ opcode(tmp_simd, src); \
+ __ Ptest(tmp_simd, tmp_simd); \
+ __ cmov(zero, dst, tmp); \
} while (false)
void CodeGenerator::AssembleDeconstructFrame() {
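The macro now draws its SIMD temporary from the instruction instead of clobbering kScratchDoubleReg; the reduction it encodes, in scalar form (a sketch):

```cpp
#include <cstdint>

// `opcode` is a lane-wise compare-equal against a zeroed register, so the
// temp is non-zero exactly when some lane was zero; Ptest + cmov then
// materialize the all-true boolean without a branch.
bool AllTrue4x32(const uint32_t lanes[4]) {
  for (int i = 0; i < 4; ++i) {
    if (lanes[i] == 0) return false;  // a zero lane survives the pcmpeq
  }
  return true;
}
```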
@@ -1266,16 +1267,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kSSEFloat32Abs: {
// TODO(bmeurer): Use 128-bit constants.
- __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ psrlq(kScratchDoubleReg, 33);
- __ andps(i.OutputDoubleRegister(), kScratchDoubleReg);
+ XMMRegister tmp = i.TempSimd128Register(0);
+ __ pcmpeqd(tmp, tmp);
+ __ psrlq(tmp, 33);
+ __ andps(i.OutputDoubleRegister(), tmp);
break;
}
case kSSEFloat32Neg: {
// TODO(bmeurer): Use 128-bit constants.
- __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ psllq(kScratchDoubleReg, 31);
- __ xorps(i.OutputDoubleRegister(), kScratchDoubleReg);
+ XMMRegister tmp = i.TempSimd128Register(0);
+ __ pcmpeqd(tmp, tmp);
+ __ psllq(tmp, 31);
+ __ xorps(i.OutputDoubleRegister(), tmp);
break;
}
case kSSEFloat32Round: {
@@ -1444,16 +1447,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kSSEFloat64Abs: {
// TODO(bmeurer): Use 128-bit constants.
- __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ psrlq(kScratchDoubleReg, 1);
- __ andpd(i.OutputDoubleRegister(), kScratchDoubleReg);
+ XMMRegister tmp = i.TempSimd128Register(0);
+ __ pcmpeqd(tmp, tmp);
+ __ psrlq(tmp, 1);
+ __ andpd(i.OutputDoubleRegister(), tmp);
break;
}
case kSSEFloat64Neg: {
// TODO(bmeurer): Use 128-bit constants.
- __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ psllq(kScratchDoubleReg, 63);
- __ xorpd(i.OutputDoubleRegister(), kScratchDoubleReg);
+ XMMRegister tmp = i.TempSimd128Register(0);
+ __ pcmpeqd(tmp, tmp);
+ __ psllq(tmp, 63);
+ __ xorpd(i.OutputDoubleRegister(), tmp);
break;
}
case kSSEFloat64Sqrt:
@@ -1476,13 +1481,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ cvttss2si(i.OutputRegister(), i.InputOperand(0));
break;
case kSSEFloat32ToUint32:
- __ Cvttss2ui(i.OutputRegister(), i.InputOperand(0), kScratchDoubleReg);
+ __ Cvttss2ui(i.OutputRegister(), i.InputOperand(0),
+ i.TempSimd128Register(0));
break;
case kSSEFloat64ToInt32:
__ cvttsd2si(i.OutputRegister(), i.InputOperand(0));
break;
case kSSEFloat64ToUint32:
- __ Cvttsd2ui(i.OutputRegister(), i.InputOperand(0), kScratchDoubleReg);
+ __ Cvttsd2ui(i.OutputRegister(), i.InputOperand(0),
+ i.TempSimd128Register(0));
break;
case kSSEInt32ToFloat32:
__ cvtsi2ss(i.OutputDoubleRegister(), i.InputOperand(0));
@@ -1577,34 +1584,38 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kAVXFloat32Abs: {
// TODO(bmeurer): Use RIP relative 128-bit constants.
- __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ psrlq(kScratchDoubleReg, 33);
+ XMMRegister tmp = i.TempSimd128Register(0);
+ __ pcmpeqd(tmp, tmp);
+ __ psrlq(tmp, 33);
CpuFeatureScope avx_scope(tasm(), AVX);
- __ vandps(i.OutputDoubleRegister(), kScratchDoubleReg, i.InputOperand(0));
+ __ vandps(i.OutputDoubleRegister(), tmp, i.InputOperand(0));
break;
}
case kAVXFloat32Neg: {
// TODO(bmeurer): Use RIP relative 128-bit constants.
- __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ psllq(kScratchDoubleReg, 31);
+ XMMRegister tmp = i.TempSimd128Register(0);
+ __ pcmpeqd(tmp, tmp);
+ __ psllq(tmp, 31);
CpuFeatureScope avx_scope(tasm(), AVX);
- __ vxorps(i.OutputDoubleRegister(), kScratchDoubleReg, i.InputOperand(0));
+ __ vxorps(i.OutputDoubleRegister(), tmp, i.InputOperand(0));
break;
}
case kAVXFloat64Abs: {
// TODO(bmeurer): Use RIP relative 128-bit constants.
- __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ psrlq(kScratchDoubleReg, 1);
+ XMMRegister tmp = i.TempSimd128Register(0);
+ __ pcmpeqd(tmp, tmp);
+ __ psrlq(tmp, 1);
CpuFeatureScope avx_scope(tasm(), AVX);
- __ vandpd(i.OutputDoubleRegister(), kScratchDoubleReg, i.InputOperand(0));
+ __ vandpd(i.OutputDoubleRegister(), tmp, i.InputOperand(0));
break;
}
case kAVXFloat64Neg: {
// TODO(bmeurer): Use RIP relative 128-bit constants.
- __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ psllq(kScratchDoubleReg, 63);
+ XMMRegister tmp = i.TempSimd128Register(0);
+ __ pcmpeqd(tmp, tmp);
+ __ psllq(tmp, 63);
CpuFeatureScope avx_scope(tasm(), AVX);
- __ vxorpd(i.OutputDoubleRegister(), kScratchDoubleReg, i.InputOperand(0));
+ __ vxorpd(i.OutputDoubleRegister(), tmp, i.InputOperand(0));
break;
}
case kSSEFloat64SilenceNaN:
@@ -1825,6 +1836,164 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
}
+ case kSSEF64x2Splat: {
+ DCHECK_EQ(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ XMMRegister dst = i.OutputSimd128Register();
+ __ shufpd(dst, dst, 0x0);
+ break;
+ }
+ case kAVXF64x2Splat: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ XMMRegister src = i.InputDoubleRegister(0);
+ __ vshufpd(i.OutputSimd128Register(), src, src, 0x0);
+ break;
+ }
+ case kSSEF64x2ExtractLane: {
+ DCHECK_EQ(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ XMMRegister dst = i.OutputDoubleRegister();
+ int8_t lane = i.InputInt8(1);
+ if (lane != 0) {
+ DCHECK_LT(lane, 2);
+ __ shufpd(dst, dst, lane);
+ }
+ break;
+ }
+ case kAVXF64x2ExtractLane: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ XMMRegister dst = i.OutputDoubleRegister();
+ XMMRegister src = i.InputSimd128Register(0);
+ int8_t lane = i.InputInt8(1);
+ if (lane == 0) {
+ if (dst != src) __ vmovapd(dst, src);
+ } else {
+ DCHECK_LT(lane, 2);
+ __ vshufpd(dst, src, src, lane);
+ }
+ break;
+ }
+ case kSSEF64x2ReplaceLane: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ XMMRegister dst = i.OutputSimd128Register();
+ int8_t lane = i.InputInt8(1);
+ DoubleRegister rep = i.InputDoubleRegister(2);
+
+ // insertps takes a mask which contains (high to low):
+ // - 2 bits specifying the source float element to copy
+ // - 2 bits specifying the destination float element to write to
+ // - 4 bits specifying which elements of the destination to zero
+ DCHECK_LT(lane, 2);
+ if (lane == 0) {
+ __ insertps(dst, rep, 0b00000000);
+ __ insertps(dst, rep, 0b01010000);
+ } else {
+ __ insertps(dst, rep, 0b00100000);
+ __ insertps(dst, rep, 0b01110000);
+ }
+ break;
+ }
+ case kAVXF64x2ReplaceLane: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ XMMRegister dst = i.OutputSimd128Register();
+ XMMRegister src = i.InputSimd128Register(0);
+ int8_t lane = i.InputInt8(1);
+ DoubleRegister rep = i.InputDoubleRegister(2);
+
+ DCHECK_LT(lane, 2);
+ if (lane == 0) {
+ __ vinsertps(dst, src, rep, 0b00000000);
+ __ vinsertps(dst, src, rep, 0b01010000);
+ } else {
+ __ vinsertps(dst, src, rep, 0b10100000);
+ __ vinsertps(dst, src, rep, 0b11110000);
+ }
+ break;
+ }
+ case kIA32F64x2Sqrt: {
+ __ Sqrtpd(i.OutputSimd128Register(), i.InputOperand(0));
+ break;
+ }
+ case kIA32F64x2Add: {
+ __ Addpd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputOperand(1));
+ break;
+ }
+ case kIA32F64x2Sub: {
+ __ Subpd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputOperand(1));
+ break;
+ }
+ case kIA32F64x2Mul: {
+ __ Mulpd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputOperand(1));
+ break;
+ }
+ case kIA32F64x2Div: {
+ __ Divpd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputOperand(1));
+ break;
+ }
+ case kIA32F64x2Min: {
+ Operand src1 = i.InputOperand(1);
+ XMMRegister dst = i.OutputSimd128Register(),
+ src = i.InputSimd128Register(0),
+ tmp = i.TempSimd128Register(0);
+ // The minpd instruction doesn't propagate NaNs and +0's in its first
+ // operand. Perform minpd in both orders, merge the results, and adjust.
+ __ Movapd(tmp, src1);
+ __ Minpd(tmp, tmp, src);
+ __ Minpd(dst, src, src1);
+ // Propagate -0's and NaNs, which may be non-canonical.
+ __ Orpd(tmp, dst);
+ // Canonicalize NaNs by quieting and clearing the payload.
+ __ Cmpunordpd(dst, dst, tmp);
+ __ Orpd(tmp, dst);
+ __ Psrlq(dst, 13);
+ __ Andnpd(dst, tmp);
+ break;
+ }
+ case kIA32F64x2Max: {
+ Operand src1 = i.InputOperand(1);
+ XMMRegister dst = i.OutputSimd128Register(),
+ src = i.InputSimd128Register(0),
+ tmp = i.TempSimd128Register(0);
+ // The maxpd instruction doesn't propagate NaNs and +0's in its first
+ // operand. Perform maxpd in both orders, merge the results, and adjust.
+ __ Movapd(tmp, src1);
+ __ Maxpd(tmp, tmp, src);
+ __ Maxpd(dst, src, src1);
+ // Find discrepancies.
+ __ Xorpd(dst, tmp);
+ // Propagate NaNs, which may be non-canonical.
+ __ Orpd(tmp, dst);
+ // Propagate sign discrepancy and (subtle) quiet NaNs.
+ __ Subpd(tmp, tmp, dst);
+ // Canonicalize NaNs by clearing the payload. Sign is non-deterministic.
+ __ Cmpunordpd(dst, dst, tmp);
+ __ Psrlq(dst, 13);
+ __ Andnpd(dst, tmp);
+ break;
+ }
+ case kIA32F64x2Eq: {
+ __ Cmpeqpd(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
+ break;
+ }
+ case kIA32F64x2Ne: {
+ __ Cmpneqpd(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
+ break;
+ }
+ case kIA32F64x2Lt: {
+ __ Cmpltpd(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
+ break;
+ }
+ case kIA32F64x2Le: {
+ __ Cmplepd(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
+ break;
+ }
case kSSEF32x4Splat: {
DCHECK_EQ(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
XMMRegister dst = i.OutputSimd128Register();
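The min/max sequences above enforce wasm's lane semantics rather than raw minpd/maxpd behavior; a scalar statement of what one f64x2.min lane must return (a sketch, not the emitted code):

```cpp
#include <cmath>
#include <limits>

// NaN in either input yields a canonical NaN, and -0 orders below +0 —
// the two cases a single minpd gets wrong, and the reason the sequence runs
// it in both operand orders and merges.
double WasmF64MinLane(double a, double b) {
  if (std::isnan(a) || std::isnan(b))
    return std::numeric_limits<double>::quiet_NaN();
  if (a == 0.0 && b == 0.0) return std::signbit(a) ? a : b;
  return a < b ? a : b;
}
```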
@@ -1951,6 +2120,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputOperand(0));
break;
}
+ case kSSEF32x4Sqrt: {
+ __ sqrtps(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kAVXF32x4Sqrt: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vsqrtps(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
case kIA32F32x4RecipApprox: {
__ Rcpps(i.OutputSimd128Register(), i.InputOperand(0));
break;
@@ -2212,28 +2390,40 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kSSEI32x4Shl: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
XMMRegister tmp = i.TempSimd128Register(0);
- __ movd(tmp, i.InputRegister(1));
+ Register shift = i.InputRegister(1);
+ // Take shift value modulo 32.
+ __ and_(shift, 31);
+ __ movd(tmp, shift);
__ pslld(i.OutputSimd128Register(), tmp);
break;
}
case kAVXI32x4Shl: {
CpuFeatureScope avx_scope(tasm(), AVX);
XMMRegister tmp = i.TempSimd128Register(0);
- __ movd(tmp, i.InputRegister(1));
+ Register shift = i.InputRegister(1);
+ // Take shift value modulo 32.
+ __ and_(shift, 31);
+ __ movd(tmp, shift);
__ vpslld(i.OutputSimd128Register(), i.InputSimd128Register(0), tmp);
break;
}
case kSSEI32x4ShrS: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
XMMRegister tmp = i.TempSimd128Register(0);
- __ movd(tmp, i.InputRegister(1));
+ Register shift = i.InputRegister(1);
+ // Take shift value modulo 32.
+ __ and_(shift, 31);
+ __ movd(tmp, shift);
__ psrad(i.OutputSimd128Register(), tmp);
break;
}
case kAVXI32x4ShrS: {
CpuFeatureScope avx_scope(tasm(), AVX);
XMMRegister tmp = i.TempSimd128Register(0);
- __ movd(tmp, i.InputRegister(1));
+ Register shift = i.InputRegister(1);
+ // Take shift value modulo 32.
+ __ and_(shift, 31);
+ __ movd(tmp, shift);
__ vpsrad(i.OutputSimd128Register(), i.InputSimd128Register(0), tmp);
break;
}
@@ -2430,14 +2620,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kSSEI32x4ShrU: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
XMMRegister tmp = i.TempSimd128Register(0);
- __ movd(tmp, i.InputRegister(1));
+ Register shift = i.InputRegister(1);
+ // Take shift value modulo 32.
+ __ and_(shift, 31);
+ __ movd(tmp, shift);
__ psrld(i.OutputSimd128Register(), tmp);
break;
}
case kAVXI32x4ShrU: {
CpuFeatureScope avx_scope(tasm(), AVX);
XMMRegister tmp = i.TempSimd128Register(0);
- __ movd(tmp, i.InputRegister(1));
+ Register shift = i.InputRegister(1);
+ // Take shift value modulo 32.
+ __ and_(shift, 31);
+ __ movd(tmp, shift);
__ vpsrld(i.OutputSimd128Register(), i.InputSimd128Register(0), tmp);
break;
}
@@ -2514,7 +2710,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kIA32I16x8ExtractLane: {
Register dst = i.OutputRegister();
__ Pextrw(dst, i.InputSimd128Register(0), i.InputInt8(1));
- __ movsx_w(dst, dst);
break;
}
case kSSEI16x8ReplaceLane: {
@@ -2553,28 +2748,40 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kSSEI16x8Shl: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
XMMRegister tmp = i.TempSimd128Register(0);
- __ movd(tmp, i.InputRegister(1));
+ Register shift = i.InputRegister(1);
+ // Take shift value modulo 16.
+ __ and_(shift, 15);
+ __ movd(tmp, shift);
__ psllw(i.OutputSimd128Register(), tmp);
break;
}
case kAVXI16x8Shl: {
CpuFeatureScope avx_scope(tasm(), AVX);
XMMRegister tmp = i.TempSimd128Register(0);
- __ movd(tmp, i.InputRegister(1));
+ Register shift = i.InputRegister(1);
+ // Take shift value modulo 16.
+ __ and_(shift, 15);
+ __ movd(tmp, shift);
__ vpsllw(i.OutputSimd128Register(), i.InputSimd128Register(0), tmp);
break;
}
case kSSEI16x8ShrS: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
XMMRegister tmp = i.TempSimd128Register(0);
- __ movd(tmp, i.InputRegister(1));
+ Register shift = i.InputRegister(1);
+ // Take shift value modulo 16.
+ __ and_(shift, 15);
+ __ movd(tmp, shift);
__ psraw(i.OutputSimd128Register(), tmp);
break;
}
case kAVXI16x8ShrS: {
CpuFeatureScope avx_scope(tasm(), AVX);
XMMRegister tmp = i.TempSimd128Register(0);
- __ movd(tmp, i.InputRegister(1));
+ Register shift = i.InputRegister(1);
+ // Take shift value modulo 16.
+ __ and_(shift, 15);
+ __ movd(tmp, shift);
__ vpsraw(i.OutputSimd128Register(), i.InputSimd128Register(0), tmp);
break;
}
@@ -2745,14 +2952,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kSSEI16x8ShrU: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
XMMRegister tmp = i.TempSimd128Register(0);
- __ movd(tmp, i.InputRegister(1));
+ Register shift = i.InputRegister(1);
+ // Take shift value modulo 16.
+ __ and_(shift, 15);
+ __ movd(tmp, shift);
__ psrlw(i.OutputSimd128Register(), tmp);
break;
}
case kAVXI16x8ShrU: {
CpuFeatureScope avx_scope(tasm(), AVX);
XMMRegister tmp = i.TempSimd128Register(0);
- __ movd(tmp, i.InputRegister(1));
+ Register shift = i.InputRegister(1);
+ // Take shift value modulo 16.
+ __ and_(shift, 15);
+ __ movd(tmp, shift);
__ vpsrlw(i.OutputSimd128Register(), i.InputSimd128Register(0), tmp);
break;
}
@@ -2875,7 +3088,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kIA32I8x16ExtractLane: {
Register dst = i.OutputRegister();
__ Pextrb(dst, i.InputSimd128Register(0), i.InputInt8(1));
- __ movsx_b(dst, dst);
break;
}
case kSSEI8x16ReplaceLane: {
@@ -2919,6 +3131,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Register shift = i.InputRegister(1);
Register tmp = i.ToRegister(instr->TempAt(0));
XMMRegister tmp_simd = i.TempSimd128Register(1);
+ // Take shift value modulo 8.
+ __ and_(shift, 7);
// Mask off the unwanted bits before word-shifting.
__ pcmpeqw(kScratchDoubleReg, kScratchDoubleReg);
__ mov(tmp, shift);
@@ -2938,6 +3152,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Register shift = i.InputRegister(1);
Register tmp = i.ToRegister(instr->TempAt(0));
XMMRegister tmp_simd = i.TempSimd128Register(1);
+ // Take shift value modulo 8.
+ __ and_(shift, 7);
// Mask off the unwanted bits before word-shifting.
__ vpcmpeqw(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
__ mov(tmp, shift);
@@ -2959,6 +3175,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ punpckhbw(kScratchDoubleReg, dst);
__ punpcklbw(dst, dst);
__ mov(tmp, i.InputRegister(1));
+ // Take shift value modulo 8.
+ __ and_(tmp, 7);
__ add(tmp, Immediate(8));
__ movd(tmp_simd, tmp);
__ psraw(kScratchDoubleReg, tmp_simd);
@@ -3223,6 +3441,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ punpckhbw(kScratchDoubleReg, dst);
__ punpcklbw(dst, dst);
__ mov(tmp, i.InputRegister(1));
+ // Take shift value modulo 8.
+ __ and_(tmp, 7);
__ add(tmp, Immediate(8));
__ movd(tmp_simd, tmp);
__ psrlw(kScratchDoubleReg, tmp_simd);
@@ -3365,6 +3585,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vxorps(dst, kScratchDoubleReg, i.InputSimd128Register(2));
break;
}
+ case kIA32S8x16Swizzle: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ XMMRegister dst = i.OutputSimd128Register();
+ XMMRegister mask = i.TempSimd128Register(0);
+
+ // Out-of-range indices should return 0. Add 112 with unsigned saturation,
+ // so that any index > 15 ends up with the top bit set (>= 128), and pshufb
+ // will zero that lane.
+ __ Move(mask, (uint32_t)0x70707070);
+ __ Pshufd(mask, mask, 0x0);
+ __ Paddusb(mask, i.InputSimd128Register(1));
+ __ Pshufb(dst, mask);
+ break;
+ }
case kIA32S8x16Shuffle: {
XMMRegister dst = i.OutputSimd128Register();
Operand src0 = i.InputOperand(0);
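A scalar model of the paddusb/pshufb trick used by kIA32S8x16Swizzle above (names illustrative):

```cpp
#include <cstdint>

// Adding 0x70 with unsigned saturation sends every index > 15 to a value
// with bit 7 set, which pshufb maps to zero; valid indices keep their low
// four bits (0x70's low nibble is zero, and no carry occurs for idx < 16).
uint8_t SwizzleLane(const uint8_t src[16], uint8_t idx) {
  unsigned sum = idx + 0x70u;
  uint8_t sel = sum > 0xFFu ? 0xFF : static_cast<uint8_t>(sum);  // paddusb
  return (sel & 0x80) ? 0 : src[sel & 0x0F];                     // pshufb
}
```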
diff --git a/deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h b/deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h
index 7530c716b8..a77fb8cd37 100644
--- a/deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h
+++ b/deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h
@@ -116,6 +116,23 @@ namespace compiler {
V(IA32PushSimd128) \
V(IA32Poke) \
V(IA32Peek) \
+ V(SSEF64x2Splat) \
+ V(AVXF64x2Splat) \
+ V(SSEF64x2ExtractLane) \
+ V(AVXF64x2ExtractLane) \
+ V(SSEF64x2ReplaceLane) \
+ V(AVXF64x2ReplaceLane) \
+ V(IA32F64x2Sqrt) \
+ V(IA32F64x2Add) \
+ V(IA32F64x2Sub) \
+ V(IA32F64x2Mul) \
+ V(IA32F64x2Div) \
+ V(IA32F64x2Min) \
+ V(IA32F64x2Max) \
+ V(IA32F64x2Eq) \
+ V(IA32F64x2Ne) \
+ V(IA32F64x2Lt) \
+ V(IA32F64x2Le) \
V(SSEF32x4Splat) \
V(AVXF32x4Splat) \
V(SSEF32x4ExtractLane) \
@@ -129,6 +146,8 @@ namespace compiler {
V(AVXF32x4Abs) \
V(SSEF32x4Neg) \
V(AVXF32x4Neg) \
+ V(SSEF32x4Sqrt) \
+ V(AVXF32x4Sqrt) \
V(IA32F32x4RecipApprox) \
V(IA32F32x4RecipSqrtApprox) \
V(SSEF32x4Add) \
@@ -313,6 +332,7 @@ namespace compiler {
V(AVXS128Xor) \
V(SSES128Select) \
V(AVXS128Select) \
+ V(IA32S8x16Swizzle) \
V(IA32S8x16Shuffle) \
V(IA32S32x4Swizzle) \
V(IA32S32x4Shuffle) \
diff --git a/deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc b/deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc
index c2097a6691..287eb49a48 100644
--- a/deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc
+++ b/deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc
@@ -97,6 +97,23 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kAVXFloat32Neg:
case kIA32BitcastFI:
case kIA32BitcastIF:
+ case kSSEF64x2Splat:
+ case kAVXF64x2Splat:
+ case kSSEF64x2ExtractLane:
+ case kAVXF64x2ExtractLane:
+ case kSSEF64x2ReplaceLane:
+ case kAVXF64x2ReplaceLane:
+ case kIA32F64x2Sqrt:
+ case kIA32F64x2Add:
+ case kIA32F64x2Sub:
+ case kIA32F64x2Mul:
+ case kIA32F64x2Div:
+ case kIA32F64x2Min:
+ case kIA32F64x2Max:
+ case kIA32F64x2Eq:
+ case kIA32F64x2Ne:
+ case kIA32F64x2Lt:
+ case kIA32F64x2Le:
case kSSEF32x4Splat:
case kAVXF32x4Splat:
case kSSEF32x4ExtractLane:
@@ -110,6 +127,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kAVXF32x4Abs:
case kSSEF32x4Neg:
case kAVXF32x4Neg:
+ case kSSEF32x4Sqrt:
+ case kAVXF32x4Sqrt:
case kIA32F32x4RecipApprox:
case kIA32F32x4RecipSqrtApprox:
case kSSEF32x4Add:
@@ -294,6 +313,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kAVXS128Xor:
case kSSES128Select:
case kAVXS128Select:
+ case kIA32S8x16Swizzle:
case kIA32S8x16Shuffle:
case kIA32S32x4Swizzle:
case kIA32S32x4Shuffle:
diff --git a/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc b/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc
index ebef39a93a..a24727aba2 100644
--- a/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc
+++ b/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/base/adapters.h"
+#include "src/base/iterator.h"
#include "src/compiler/backend/instruction-selector-impl.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
@@ -200,12 +200,27 @@ namespace {
void VisitRO(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
IA32OperandGenerator g(selector);
- InstructionOperand temps[] = {g.TempRegister()};
Node* input = node->InputAt(0);
// We have to use a byte register as input to movsxb.
InstructionOperand input_op =
opcode == kIA32Movsxbl ? g.UseFixed(input, eax) : g.Use(input);
- selector->Emit(opcode, g.DefineAsRegister(node), input_op, arraysize(temps),
+ selector->Emit(opcode, g.DefineAsRegister(node), input_op);
+}
+
+void VisitROWithTemp(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode) {
+ IA32OperandGenerator g(selector);
+ InstructionOperand temps[] = {g.TempRegister()};
+ selector->Emit(opcode, g.DefineAsRegister(node), g.Use(node->InputAt(0)),
+ arraysize(temps), temps);
+}
+
+void VisitROWithTempSimd(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode) {
+ IA32OperandGenerator g(selector);
+ InstructionOperand temps[] = {g.TempSimd128Register()};
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseUniqueRegister(node->InputAt(0)), arraysize(temps),
temps);
}
@@ -231,10 +246,13 @@ void VisitRROFloat(InstructionSelector* selector, Node* node,
void VisitFloatUnop(InstructionSelector* selector, Node* node, Node* input,
ArchOpcode avx_opcode, ArchOpcode sse_opcode) {
IA32OperandGenerator g(selector);
+ InstructionOperand temps[] = {g.TempSimd128Register()};
if (selector->IsSupported(AVX)) {
- selector->Emit(avx_opcode, g.DefineAsRegister(node), g.Use(input));
+ selector->Emit(avx_opcode, g.DefineAsRegister(node), g.UseUnique(input),
+ arraysize(temps), temps);
} else {
- selector->Emit(sse_opcode, g.DefineSameAsFirst(node), g.UseRegister(input));
+ selector->Emit(sse_opcode, g.DefineSameAsFirst(node),
+ g.UseUniqueRegister(input), arraysize(temps), temps);
}
}
@@ -804,12 +822,8 @@ void InstructionSelector::VisitWord32Ror(Node* node) {
V(ChangeFloat32ToFloat64, kSSEFloat32ToFloat64) \
V(RoundInt32ToFloat32, kSSEInt32ToFloat32) \
V(ChangeInt32ToFloat64, kSSEInt32ToFloat64) \
- V(ChangeUint32ToFloat64, kSSEUint32ToFloat64) \
V(TruncateFloat32ToInt32, kSSEFloat32ToInt32) \
- V(TruncateFloat32ToUint32, kSSEFloat32ToUint32) \
V(ChangeFloat64ToInt32, kSSEFloat64ToInt32) \
- V(ChangeFloat64ToUint32, kSSEFloat64ToUint32) \
- V(TruncateFloat64ToUint32, kSSEFloat64ToUint32) \
V(TruncateFloat64ToFloat32, kSSEFloat64ToFloat32) \
V(RoundFloat64ToInt32, kSSEFloat64ToInt32) \
V(BitcastFloat32ToInt32, kIA32BitcastFI) \
@@ -819,7 +833,15 @@ void InstructionSelector::VisitWord32Ror(Node* node) {
V(Float64ExtractLowWord32, kSSEFloat64ExtractLowWord32) \
V(Float64ExtractHighWord32, kSSEFloat64ExtractHighWord32) \
V(SignExtendWord8ToInt32, kIA32Movsxbl) \
- V(SignExtendWord16ToInt32, kIA32Movsxwl)
+ V(SignExtendWord16ToInt32, kIA32Movsxwl) \
+ V(F64x2Sqrt, kIA32F64x2Sqrt)
+
+#define RO_WITH_TEMP_OP_LIST(V) V(ChangeUint32ToFloat64, kSSEUint32ToFloat64)
+
+#define RO_WITH_TEMP_SIMD_OP_LIST(V) \
+ V(TruncateFloat32ToUint32, kSSEFloat32ToUint32) \
+ V(ChangeFloat64ToUint32, kSSEFloat64ToUint32) \
+ V(TruncateFloat64ToUint32, kSSEFloat64ToUint32)
#define RR_OP_LIST(V) \
V(TruncateFloat64ToWord32, kArchTruncateDoubleToI) \
@@ -841,13 +863,23 @@ void InstructionSelector::VisitWord32Ror(Node* node) {
V(Float32Mul, kAVXFloat32Mul, kSSEFloat32Mul) \
V(Float64Mul, kAVXFloat64Mul, kSSEFloat64Mul) \
V(Float32Div, kAVXFloat32Div, kSSEFloat32Div) \
- V(Float64Div, kAVXFloat64Div, kSSEFloat64Div)
+ V(Float64Div, kAVXFloat64Div, kSSEFloat64Div) \
+ V(F64x2Add, kIA32F64x2Add, kIA32F64x2Add) \
+ V(F64x2Sub, kIA32F64x2Sub, kIA32F64x2Sub) \
+ V(F64x2Mul, kIA32F64x2Mul, kIA32F64x2Mul) \
+ V(F64x2Div, kIA32F64x2Div, kIA32F64x2Div) \
+ V(F64x2Eq, kIA32F64x2Eq, kIA32F64x2Eq) \
+ V(F64x2Ne, kIA32F64x2Ne, kIA32F64x2Ne) \
+ V(F64x2Lt, kIA32F64x2Lt, kIA32F64x2Lt) \
+ V(F64x2Le, kIA32F64x2Le, kIA32F64x2Le)
#define FLOAT_UNOP_LIST(V) \
V(Float32Abs, kAVXFloat32Abs, kSSEFloat32Abs) \
V(Float64Abs, kAVXFloat64Abs, kSSEFloat64Abs) \
V(Float32Neg, kAVXFloat32Neg, kSSEFloat32Neg) \
- V(Float64Neg, kAVXFloat64Neg, kSSEFloat64Neg)
+ V(Float64Neg, kAVXFloat64Neg, kSSEFloat64Neg) \
+ V(F64x2Abs, kAVXFloat64Abs, kSSEFloat64Abs) \
+ V(F64x2Neg, kAVXFloat64Neg, kSSEFloat64Neg)
#define RO_VISITOR(Name, opcode) \
void InstructionSelector::Visit##Name(Node* node) { \
@@ -857,6 +889,22 @@ RO_OP_LIST(RO_VISITOR)
#undef RO_VISITOR
#undef RO_OP_LIST
+#define RO_WITH_TEMP_VISITOR(Name, opcode) \
+ void InstructionSelector::Visit##Name(Node* node) { \
+ VisitROWithTemp(this, node, opcode); \
+ }
+RO_WITH_TEMP_OP_LIST(RO_WITH_TEMP_VISITOR)
+#undef RO_WITH_TEMP_VISITOR
+#undef RO_WITH_TEMP_OP_LIST
+
+#define RO_WITH_TEMP_SIMD_VISITOR(Name, opcode) \
+ void InstructionSelector::Visit##Name(Node* node) { \
+ VisitROWithTempSimd(this, node, opcode); \
+ }
+RO_WITH_TEMP_SIMD_OP_LIST(RO_WITH_TEMP_SIMD_VISITOR)
+#undef RO_WITH_TEMP_SIMD_VISITOR
+#undef RO_WITH_TEMP_SIMD_OP_LIST
+
#define RR_VISITOR(Name, opcode) \
void InstructionSelector::Visit##Name(Node* node) { \
VisitRR(this, node, opcode); \
@@ -890,6 +938,10 @@ void InstructionSelector::VisitWord32ReverseBytes(Node* node) {
Emit(kIA32Bswap, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)));
}
+void InstructionSelector::VisitSimd128ReverseBytes(Node* node) {
+ UNREACHABLE();
+}
+
void InstructionSelector::VisitInt32Add(Node* node) {
IA32OperandGenerator g(this);
@@ -1971,6 +2023,7 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
#define SIMD_UNOP_PREFIX_LIST(V) \
V(F32x4Abs) \
V(F32x4Neg) \
+ V(F32x4Sqrt) \
V(S128Not)
#define SIMD_ANYTRUE_LIST(V) \
@@ -1995,6 +2048,43 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
V(I8x16ShrS) \
V(I8x16ShrU)
+void InstructionSelector::VisitF64x2Min(Node* node) {
+ IA32OperandGenerator g(this);
+ InstructionOperand temps[] = {g.TempSimd128Register()};
+ InstructionOperand operand0 = g.UseUniqueRegister(node->InputAt(0));
+ InstructionOperand operand1 = g.UseUnique(node->InputAt(1));
+
+ if (IsSupported(AVX)) {
+ Emit(kIA32F64x2Min, g.DefineAsRegister(node), operand0, operand1,
+ arraysize(temps), temps);
+ } else {
+ Emit(kIA32F64x2Min, g.DefineSameAsFirst(node), operand0, operand1,
+ arraysize(temps), temps);
+ }
+}
+
+void InstructionSelector::VisitF64x2Max(Node* node) {
+ IA32OperandGenerator g(this);
+ InstructionOperand temps[] = {g.TempSimd128Register()};
+ InstructionOperand operand0 = g.UseUniqueRegister(node->InputAt(0));
+ InstructionOperand operand1 = g.UseUnique(node->InputAt(1));
+ if (IsSupported(AVX)) {
+ Emit(kIA32F64x2Max, g.DefineAsRegister(node), operand0, operand1,
+ arraysize(temps), temps);
+ } else {
+ Emit(kIA32F64x2Max, g.DefineSameAsFirst(node), operand0, operand1,
+ arraysize(temps), temps);
+ }
+}
+
+void InstructionSelector::VisitF64x2Splat(Node* node) {
+ VisitRRSimd(this, node, kAVXF64x2Splat, kSSEF64x2Splat);
+}
+
+void InstructionSelector::VisitF64x2ExtractLane(Node* node) {
+ VisitRRISimd(this, node, kAVXF64x2ExtractLane, kSSEF64x2ExtractLane);
+}
+
void InstructionSelector::VisitF32x4Splat(Node* node) {
VisitRRSimd(this, node, kAVXF32x4Splat, kSSEF32x4Splat);
}
@@ -2086,6 +2176,28 @@ VISIT_SIMD_REPLACE_LANE(F32x4)
#undef VISIT_SIMD_REPLACE_LANE
#undef SIMD_INT_TYPES
+// The difference between this and VISIT_SIMD_REPLACE_LANE is that this forces
+// operand2 to be UseRegister, because the codegen relies on insertps using
+// registers.
+// TODO(v8:9764) Remove this UseRegister requirement
+#define VISIT_SIMD_REPLACE_LANE_USE_REG(Type) \
+ void InstructionSelector::Visit##Type##ReplaceLane(Node* node) { \
+ IA32OperandGenerator g(this); \
+ InstructionOperand operand0 = g.UseRegister(node->InputAt(0)); \
+ InstructionOperand operand1 = \
+ g.UseImmediate(OpParameter<int32_t>(node->op())); \
+ InstructionOperand operand2 = g.UseRegister(node->InputAt(1)); \
+ if (IsSupported(AVX)) { \
+ Emit(kAVX##Type##ReplaceLane, g.DefineAsRegister(node), operand0, \
+ operand1, operand2); \
+ } else { \
+ Emit(kSSE##Type##ReplaceLane, g.DefineSameAsFirst(node), operand0, \
+ operand1, operand2); \
+ } \
+ }
+VISIT_SIMD_REPLACE_LANE_USE_REG(F64x2)
+#undef VISIT_SIMD_REPLACE_LANE_USE_REG
+
#define VISIT_SIMD_SHIFT(Opcode) \
void InstructionSelector::Visit##Opcode(Node* node) { \
VisitRROSimdShift(this, node, kAVX##Opcode, kSSE##Opcode); \
@@ -2132,12 +2244,12 @@ SIMD_ANYTRUE_LIST(VISIT_SIMD_ANYTRUE)
#undef VISIT_SIMD_ANYTRUE
#undef SIMD_ANYTRUE_LIST
-#define VISIT_SIMD_ALLTRUE(Opcode) \
- void InstructionSelector::Visit##Opcode(Node* node) { \
- IA32OperandGenerator g(this); \
- InstructionOperand temps[] = {g.TempRegister()}; \
- Emit(kIA32##Opcode, g.DefineAsRegister(node), g.Use(node->InputAt(0)), \
- arraysize(temps), temps); \
+#define VISIT_SIMD_ALLTRUE(Opcode) \
+ void InstructionSelector::Visit##Opcode(Node* node) { \
+ IA32OperandGenerator g(this); \
+ InstructionOperand temps[] = {g.TempRegister(), g.TempSimd128Register()}; \
+ Emit(kIA32##Opcode, g.DefineAsRegister(node), \
+ g.UseUnique(node->InputAt(0)), arraysize(temps), temps); \
}
SIMD_ALLTRUE_LIST(VISIT_SIMD_ALLTRUE)
#undef VISIT_SIMD_ALLTRUE
@@ -2489,6 +2601,14 @@ void InstructionSelector::VisitS8x16Shuffle(Node* node) {
Emit(opcode, 1, &dst, input_count, inputs, temp_count, temps);
}
+void InstructionSelector::VisitS8x16Swizzle(Node* node) {
+ IA32OperandGenerator g(this);
+ InstructionOperand temps[] = {g.TempSimd128Register()};
+ Emit(kIA32S8x16Swizzle, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)),
+ arraysize(temps), temps);
+}
+
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
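
A note on the pattern the new F64x2 visitors above share: IA-32 AVX encodings are three-operand, so the destination may be any register (DefineAsRegister), while the legacy SSE encodings are two-operand and clobber their first source, which forces DefineSameAsFirst. A minimal sketch of that shape, with a hypothetical kIA32F64x2Foo opcode standing in for Min/Max:

    void InstructionSelector::VisitF64x2Foo(Node* node) {
      IA32OperandGenerator g(this);
      // A unique temp keeps the scratch register distinct from both inputs.
      InstructionOperand temps[] = {g.TempSimd128Register()};
      InstructionOperand in0 = g.UseUniqueRegister(node->InputAt(0));
      InstructionOperand in1 = g.UseUniqueRegister(node->InputAt(1));
      if (IsSupported(AVX)) {
        // AVX: vminpd dst, src1, src2 leaves the sources intact.
        Emit(kIA32F64x2Foo, g.DefineAsRegister(node), in0, in1,
             arraysize(temps), temps);
      } else {
        // SSE: minpd dst, src overwrites dst, so dst must alias input 0.
        Emit(kIA32F64x2Foo, g.DefineSameAsFirst(node), in0, in1,
             arraysize(temps), temps);
      }
    }
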
diff --git a/deps/v8/src/compiler/backend/instruction-scheduler.cc b/deps/v8/src/compiler/backend/instruction-scheduler.cc
index dc66813740..d4920cd575 100644
--- a/deps/v8/src/compiler/backend/instruction-scheduler.cc
+++ b/deps/v8/src/compiler/backend/instruction-scheduler.cc
@@ -4,7 +4,7 @@
#include "src/compiler/backend/instruction-scheduler.h"
-#include "src/base/adapters.h"
+#include "src/base/iterator.h"
#include "src/base/utils/random-number-generator.h"
#include "src/execution/isolate.h"
diff --git a/deps/v8/src/compiler/backend/instruction-selector-impl.h b/deps/v8/src/compiler/backend/instruction-selector-impl.h
index a3f62e7ba4..13ea049eba 100644
--- a/deps/v8/src/compiler/backend/instruction-selector-impl.h
+++ b/deps/v8/src/compiler/backend/instruction-selector-impl.h
@@ -29,8 +29,8 @@ inline bool operator<(const CaseInfo& l, const CaseInfo& r) {
// Helper struct containing data about a table or lookup switch.
class SwitchInfo {
public:
- SwitchInfo(ZoneVector<CaseInfo>& cases, // NOLINT(runtime/references)
- int32_t min_value, int32_t max_value, BasicBlock* default_branch)
+ SwitchInfo(ZoneVector<CaseInfo> const& cases, int32_t min_value,
+ int32_t max_value, BasicBlock* default_branch)
: cases_(cases),
min_value_(min_value),
max_value_(max_value),
@@ -193,17 +193,6 @@ class OperandGenerator {
reg.code(), GetVReg(node)));
}
- InstructionOperand UseExplicit(LinkageLocation location) {
- MachineRepresentation rep = InstructionSequence::DefaultRepresentation();
- if (location.IsRegister()) {
- return ExplicitOperand(LocationOperand::REGISTER, rep,
- location.AsRegister());
- } else {
- return ExplicitOperand(LocationOperand::STACK_SLOT, rep,
- location.GetLocation());
- }
- }
-
InstructionOperand UseImmediate(int immediate) {
return sequence()->AddImmediate(Constant(immediate));
}
@@ -275,6 +264,16 @@ class OperandGenerator {
InstructionOperand::kInvalidVirtualRegister);
}
+ template <typename FPRegType>
+ InstructionOperand TempFpRegister(FPRegType reg) {
+ UnallocatedOperand op =
+ UnallocatedOperand(UnallocatedOperand::FIXED_FP_REGISTER, reg.code(),
+ sequence()->NextVirtualRegister());
+ sequence()->MarkAsRepresentation(MachineRepresentation::kSimd128,
+ op.virtual_register());
+ return op;
+ }
+
InstructionOperand TempImmediate(int32_t imm) {
return sequence()->AddImmediate(Constant(imm));
}
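
The new TempFpRegister above builds an UnallocatedOperand with the FIXED_FP_REGISTER policy and a fresh virtual register, so a backend can pin a temp to one specific SIMD register while the allocator still sees an ordinary vreg. A hedged usage sketch (the opcode and xmm0 are placeholders, not taken from this patch):

    // Inside some x86 Visit* method:
    IA32OperandGenerator g(this);
    // Claim xmm0 itself as scratch for this instruction; unlike the removed
    // ExplicitOperand, this stays visible to the register allocator.
    InstructionOperand fixed_temp = g.TempFpRegister(xmm0);
    Emit(kIA32SomeOp, g.DefineAsRegister(node),
         g.UseRegister(node->InputAt(0)), 1, &fixed_temp);
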
diff --git a/deps/v8/src/compiler/backend/instruction-selector.cc b/deps/v8/src/compiler/backend/instruction-selector.cc
index 43193ec2b1..22d81c0c55 100644
--- a/deps/v8/src/compiler/backend/instruction-selector.cc
+++ b/deps/v8/src/compiler/backend/instruction-selector.cc
@@ -6,7 +6,7 @@
#include <limits>
-#include "src/base/adapters.h"
+#include "src/base/iterator.h"
#include "src/codegen/assembler-inl.h"
#include "src/codegen/tick-counter.h"
#include "src/compiler/backend/instruction-selector-impl.h"
@@ -1439,6 +1439,8 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsWord64(node), VisitWord64ReverseBits(node);
case IrOpcode::kWord64ReverseBytes:
return MarkAsWord64(node), VisitWord64ReverseBytes(node);
+ case IrOpcode::kSimd128ReverseBytes:
+ return MarkAsSimd128(node), VisitSimd128ReverseBytes(node);
case IrOpcode::kInt64AbsWithOverflow:
return MarkAsWord64(node), VisitInt64AbsWithOverflow(node);
case IrOpcode::kWord64Equal:
@@ -1502,7 +1504,7 @@ void InstructionSelector::VisitNode(Node* node) {
case IrOpcode::kUint64Mod:
return MarkAsWord64(node), VisitUint64Mod(node);
case IrOpcode::kBitcastTaggedToWord:
- case IrOpcode::kBitcastTaggedSignedToWord:
+ case IrOpcode::kBitcastTaggedToWordForTagAndSmiBits:
return MarkAsRepresentation(MachineType::PointerRepresentation(), node),
VisitBitcastTaggedToWord(node);
case IrOpcode::kBitcastWordToTagged:
@@ -1857,6 +1859,8 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsSimd128(node), VisitF64x2Abs(node);
case IrOpcode::kF64x2Neg:
return MarkAsSimd128(node), VisitF64x2Neg(node);
+ case IrOpcode::kF64x2Sqrt:
+ return MarkAsSimd128(node), VisitF64x2Sqrt(node);
case IrOpcode::kF64x2Add:
return MarkAsSimd128(node), VisitF64x2Add(node);
case IrOpcode::kF64x2Sub:
@@ -1877,6 +1881,10 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsSimd128(node), VisitF64x2Lt(node);
case IrOpcode::kF64x2Le:
return MarkAsSimd128(node), VisitF64x2Le(node);
+ case IrOpcode::kF64x2Qfma:
+ return MarkAsSimd128(node), VisitF64x2Qfma(node);
+ case IrOpcode::kF64x2Qfms:
+ return MarkAsSimd128(node), VisitF64x2Qfms(node);
case IrOpcode::kF32x4Splat:
return MarkAsSimd128(node), VisitF32x4Splat(node);
case IrOpcode::kF32x4ExtractLane:
@@ -1891,6 +1899,8 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsSimd128(node), VisitF32x4Abs(node);
case IrOpcode::kF32x4Neg:
return MarkAsSimd128(node), VisitF32x4Neg(node);
+ case IrOpcode::kF32x4Sqrt:
+ return MarkAsSimd128(node), VisitF32x4Sqrt(node);
case IrOpcode::kF32x4RecipApprox:
return MarkAsSimd128(node), VisitF32x4RecipApprox(node);
case IrOpcode::kF32x4RecipSqrtApprox:
@@ -1917,6 +1927,10 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsSimd128(node), VisitF32x4Lt(node);
case IrOpcode::kF32x4Le:
return MarkAsSimd128(node), VisitF32x4Le(node);
+ case IrOpcode::kF32x4Qfma:
+ return MarkAsSimd128(node), VisitF32x4Qfma(node);
+ case IrOpcode::kF32x4Qfms:
+ return MarkAsSimd128(node), VisitF32x4Qfms(node);
case IrOpcode::kI64x2Splat:
return MarkAsSimd128(node), VisitI64x2Splat(node);
case IrOpcode::kI64x2ExtractLane:
@@ -2137,6 +2151,8 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsSimd128(node), VisitS128Not(node);
case IrOpcode::kS128Select:
return MarkAsSimd128(node), VisitS128Select(node);
+ case IrOpcode::kS8x16Swizzle:
+ return MarkAsSimd128(node), VisitS8x16Swizzle(node);
case IrOpcode::kS8x16Shuffle:
return MarkAsSimd128(node), VisitS8x16Shuffle(node);
case IrOpcode::kS1x2AnyTrue:
@@ -2286,8 +2302,8 @@ void InstructionSelector::VisitFloat64Tanh(Node* node) {
VisitFloat64Ieee754Unop(node, kIeee754Float64Tanh);
}
-void InstructionSelector::EmitTableSwitch(const SwitchInfo& sw,
- InstructionOperand& index_operand) {
+void InstructionSelector::EmitTableSwitch(
+ const SwitchInfo& sw, InstructionOperand const& index_operand) {
OperandGenerator g(this);
size_t input_count = 2 + sw.value_range();
DCHECK_LE(sw.value_range(), std::numeric_limits<size_t>::max() - 2);
@@ -2304,8 +2320,8 @@ void InstructionSelector::EmitTableSwitch(const SwitchInfo& sw,
Emit(kArchTableSwitch, 0, nullptr, input_count, inputs, 0, nullptr);
}
-void InstructionSelector::EmitLookupSwitch(const SwitchInfo& sw,
- InstructionOperand& value_operand) {
+void InstructionSelector::EmitLookupSwitch(
+ const SwitchInfo& sw, InstructionOperand const& value_operand) {
OperandGenerator g(this);
std::vector<CaseInfo> cases = sw.CasesSortedByOriginalOrder();
size_t input_count = 2 + sw.case_count() * 2;
@@ -2322,7 +2338,7 @@ void InstructionSelector::EmitLookupSwitch(const SwitchInfo& sw,
}
void InstructionSelector::EmitBinarySearchSwitch(
- const SwitchInfo& sw, InstructionOperand& value_operand) {
+ const SwitchInfo& sw, InstructionOperand const& value_operand) {
OperandGenerator g(this);
size_t input_count = 2 + sw.case_count() * 2;
DCHECK_LE(sw.case_count(), (std::numeric_limits<size_t>::max() - 2) / 2);
@@ -2607,21 +2623,25 @@ void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
#if !V8_TARGET_ARCH_X64
#if !V8_TARGET_ARCH_ARM64
+#if !V8_TARGET_ARCH_IA32
void InstructionSelector::VisitF64x2Splat(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF64x2ExtractLane(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF64x2ReplaceLane(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF64x2Abs(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF64x2Neg(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitF64x2Sqrt(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitS8x16Swizzle(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF64x2Add(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF64x2Sub(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF64x2Mul(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF64x2Div(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitF64x2Min(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitF64x2Max(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF64x2Eq(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF64x2Ne(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF64x2Lt(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF64x2Le(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitF64x2Min(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitF64x2Max(Node* node) { UNIMPLEMENTED(); }
+#endif // !V8_TARGET_ARCH_IA32
void InstructionSelector::VisitI64x2Splat(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI64x2ExtractLane(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI64x2ReplaceLane(Node* node) { UNIMPLEMENTED(); }
@@ -2630,6 +2650,7 @@ void InstructionSelector::VisitI64x2Shl(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI64x2ShrS(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI64x2Add(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI64x2Sub(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI64x2Mul(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI64x2Eq(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI64x2Ne(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI64x2ShrU(Node* node) { UNIMPLEMENTED(); }
@@ -2639,8 +2660,11 @@ void InstructionSelector::VisitI64x2GtU(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI64x2GeU(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitS1x2AnyTrue(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitS1x2AllTrue(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitF64x2Qfma(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitF64x2Qfms(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitF32x4Qfma(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitF32x4Qfms(Node* node) { UNIMPLEMENTED(); }
#endif // !V8_TARGET_ARCH_ARM64
-void InstructionSelector::VisitI64x2Mul(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI64x2MinS(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI64x2MaxS(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI64x2MinU(Node* node) { UNIMPLEMENTED(); }
diff --git a/deps/v8/src/compiler/backend/instruction-selector.h b/deps/v8/src/compiler/backend/instruction-selector.h
index eb3e098427..e951c90f95 100644
--- a/deps/v8/src/compiler/backend/instruction-selector.h
+++ b/deps/v8/src/compiler/backend/instruction-selector.h
@@ -502,15 +502,12 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
FeedbackSource const& feedback,
Node* frame_state);
- void EmitTableSwitch(
- const SwitchInfo& sw,
- InstructionOperand& index_operand); // NOLINT(runtime/references)
- void EmitLookupSwitch(
- const SwitchInfo& sw,
- InstructionOperand& value_operand); // NOLINT(runtime/references)
- void EmitBinarySearchSwitch(
- const SwitchInfo& sw,
- InstructionOperand& value_operand); // NOLINT(runtime/references)
+ void EmitTableSwitch(const SwitchInfo& sw,
+ InstructionOperand const& index_operand);
+ void EmitLookupSwitch(const SwitchInfo& sw,
+ InstructionOperand const& value_operand);
+ void EmitBinarySearchSwitch(const SwitchInfo& sw,
+ InstructionOperand const& value_operand);
void TryRename(InstructionOperand* op);
int GetRename(int virtual_register);
diff --git a/deps/v8/src/compiler/backend/instruction.cc b/deps/v8/src/compiler/backend/instruction.cc
index 06158b0c72..076f1b596e 100644
--- a/deps/v8/src/compiler/backend/instruction.cc
+++ b/deps/v8/src/compiler/backend/instruction.cc
@@ -168,7 +168,6 @@ std::ostream& operator<<(std::ostream& os, const InstructionOperand& op) {
return os << "[immediate:" << imm.indexed_value() << "]";
}
}
- case InstructionOperand::EXPLICIT:
case InstructionOperand::ALLOCATED: {
LocationOperand allocated = LocationOperand::cast(op);
if (op.IsStackSlot()) {
@@ -192,9 +191,6 @@ std::ostream& operator<<(std::ostream& os, const InstructionOperand& op) {
os << "[" << Simd128Register::from_code(allocated.register_code())
<< "|R";
}
- if (allocated.IsExplicit()) {
- os << "|E";
- }
switch (allocated.representation()) {
case MachineRepresentation::kNone:
os << "|-";
@@ -294,17 +290,6 @@ void ParallelMove::PrepareInsertAfter(
if (replacement != nullptr) move->set_source(replacement->source());
}
-ExplicitOperand::ExplicitOperand(LocationKind kind, MachineRepresentation rep,
- int index)
- : LocationOperand(EXPLICIT, kind, rep, index) {
- DCHECK_IMPLIES(kind == REGISTER && !IsFloatingPoint(rep),
- GetRegConfig()->IsAllocatableGeneralCode(index));
- DCHECK_IMPLIES(kind == REGISTER && rep == MachineRepresentation::kFloat32,
- GetRegConfig()->IsAllocatableFloatCode(index));
- DCHECK_IMPLIES(kind == REGISTER && (rep == MachineRepresentation::kFloat64),
- GetRegConfig()->IsAllocatableDoubleCode(index));
-}
-
Instruction::Instruction(InstructionCode opcode)
: opcode_(opcode),
bit_field_(OutputCountField::encode(0) | InputCountField::encode(0) |
diff --git a/deps/v8/src/compiler/backend/instruction.h b/deps/v8/src/compiler/backend/instruction.h
index f5f7f64c51..321f069531 100644
--- a/deps/v8/src/compiler/backend/instruction.h
+++ b/deps/v8/src/compiler/backend/instruction.h
@@ -43,9 +43,8 @@ class V8_EXPORT_PRIVATE InstructionOperand {
CONSTANT,
IMMEDIATE,
// Location operand kinds.
- EXPLICIT,
ALLOCATED,
- FIRST_LOCATION_OPERAND_KIND = EXPLICIT
+ FIRST_LOCATION_OPERAND_KIND = ALLOCATED
// Location operand kinds must be last.
};
@@ -68,11 +67,6 @@ class V8_EXPORT_PRIVATE InstructionOperand {
// embedded directly in instructions, e.g. small integers and on some
// platforms Objects.
INSTRUCTION_OPERAND_PREDICATE(Immediate, IMMEDIATE)
- // ExplicitOperands do not participate in register allocation. They are
- // created by the instruction selector for direct access to registers and
- // stack slots, completely bypassing the register allocator. They are never
- // associated with a virtual register
- INSTRUCTION_OPERAND_PREDICATE(Explicit, EXPLICIT)
// AllocatedOperands are registers or stack slots that are assigned by the
// register allocator and are always associated with a virtual register.
INSTRUCTION_OPERAND_PREDICATE(Allocated, ALLOCATED)
@@ -515,19 +509,6 @@ class LocationOperand : public InstructionOperand {
using IndexField = BitField64<int32_t, 35, 29>;
};
-class V8_EXPORT_PRIVATE ExplicitOperand
- : public NON_EXPORTED_BASE(LocationOperand) {
- public:
- ExplicitOperand(LocationKind kind, MachineRepresentation rep, int index);
-
- static ExplicitOperand* New(Zone* zone, LocationKind kind,
- MachineRepresentation rep, int index) {
- return InstructionOperand::New(zone, ExplicitOperand(kind, rep, index));
- }
-
- INSTRUCTION_OPERAND_CASTS(ExplicitOperand, EXPLICIT)
-};
-
class AllocatedOperand : public LocationOperand {
public:
AllocatedOperand(LocationKind kind, MachineRepresentation rep, int index)
@@ -643,7 +624,7 @@ uint64_t InstructionOperand::GetCanonicalizedValue() const {
}
return InstructionOperand::KindField::update(
LocationOperand::RepresentationField::update(this->value_, canonical),
- LocationOperand::EXPLICIT);
+ LocationOperand::ALLOCATED);
}
return this->value_;
}
@@ -776,11 +757,11 @@ class V8_EXPORT_PRIVATE Instruction final {
public:
size_t OutputCount() const { return OutputCountField::decode(bit_field_); }
const InstructionOperand* OutputAt(size_t i) const {
- DCHECK(i < OutputCount());
+ DCHECK_LT(i, OutputCount());
return &operands_[i];
}
InstructionOperand* OutputAt(size_t i) {
- DCHECK(i < OutputCount());
+ DCHECK_LT(i, OutputCount());
return &operands_[i];
}
@@ -790,21 +771,21 @@ class V8_EXPORT_PRIVATE Instruction final {
size_t InputCount() const { return InputCountField::decode(bit_field_); }
const InstructionOperand* InputAt(size_t i) const {
- DCHECK(i < InputCount());
+ DCHECK_LT(i, InputCount());
return &operands_[OutputCount() + i];
}
InstructionOperand* InputAt(size_t i) {
- DCHECK(i < InputCount());
+ DCHECK_LT(i, InputCount());
return &operands_[OutputCount() + i];
}
size_t TempCount() const { return TempCountField::decode(bit_field_); }
const InstructionOperand* TempAt(size_t i) const {
- DCHECK(i < TempCount());
+ DCHECK_LT(i, TempCount());
return &operands_[OutputCount() + InputCount() + i];
}
InstructionOperand* TempAt(size_t i) {
- DCHECK(i < TempCount());
+ DCHECK_LT(i, TempCount());
return &operands_[OutputCount() + InputCount() + i];
}
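
The DCHECK(i < ...) to DCHECK_LT(i, ...) conversions above are behavior-preserving; the two-argument form additionally prints both operand values on failure, which makes out-of-bounds accesses in these accessors self-describing:

    DCHECK(i < InputCount());    // failure shows only the stringified condition
    DCHECK_LT(i, InputCount());  // failure also shows values, e.g. "(3 vs. 2)"
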
diff --git a/deps/v8/src/compiler/backend/jump-threading.cc b/deps/v8/src/compiler/backend/jump-threading.cc
index dfb917a58c..ee195bf51e 100644
--- a/deps/v8/src/compiler/backend/jump-threading.cc
+++ b/deps/v8/src/compiler/backend/jump-threading.cc
@@ -69,11 +69,11 @@ bool IsBlockWithBranchPoisoning(InstructionSequence* code,
} // namespace
bool JumpThreading::ComputeForwarding(Zone* local_zone,
- ZoneVector<RpoNumber>& result,
+ ZoneVector<RpoNumber>* result,
InstructionSequence* code,
bool frame_at_start) {
ZoneStack<RpoNumber> stack(local_zone);
- JumpThreadingState state = {false, result, stack};
+ JumpThreadingState state = {false, *result, stack};
state.Clear(code->InstructionBlockCount());
// Iterate over the blocks forward, pushing the blocks onto the stack.
@@ -135,15 +135,15 @@ bool JumpThreading::ComputeForwarding(Zone* local_zone,
}
#ifdef DEBUG
- for (RpoNumber num : result) {
+ for (RpoNumber num : *result) {
DCHECK(num.IsValid());
}
#endif
if (FLAG_trace_turbo_jt) {
- for (int i = 0; i < static_cast<int>(result.size()); i++) {
+ for (int i = 0; i < static_cast<int>(result->size()); i++) {
TRACE("B%d ", i);
- int to = result[i].ToInt();
+ int to = (*result)[i].ToInt();
if (i != to) {
TRACE("-> B%d\n", to);
} else {
@@ -156,7 +156,7 @@ bool JumpThreading::ComputeForwarding(Zone* local_zone,
}
void JumpThreading::ApplyForwarding(Zone* local_zone,
- ZoneVector<RpoNumber>& result,
+ ZoneVector<RpoNumber> const& result,
InstructionSequence* code) {
if (!FLAG_turbo_jt) return;
diff --git a/deps/v8/src/compiler/backend/jump-threading.h b/deps/v8/src/compiler/backend/jump-threading.h
index ce60ebcb2e..ce9e394924 100644
--- a/deps/v8/src/compiler/backend/jump-threading.h
+++ b/deps/v8/src/compiler/backend/jump-threading.h
@@ -17,17 +17,14 @@ class V8_EXPORT_PRIVATE JumpThreading {
public:
// Compute the forwarding map of basic blocks to their ultimate destination.
// Returns {true} if there is at least one block that is forwarded.
- static bool ComputeForwarding(
- Zone* local_zone,
- ZoneVector<RpoNumber>& result, // NOLINT(runtime/references)
- InstructionSequence* code, bool frame_at_start);
+ static bool ComputeForwarding(Zone* local_zone, ZoneVector<RpoNumber>* result,
+ InstructionSequence* code, bool frame_at_start);
// Rewrite the instructions to forward jumps and branches.
// May also negate some branches.
- static void ApplyForwarding(
- Zone* local_zone,
- ZoneVector<RpoNumber>& forwarding, // NOLINT(runtime/references)
- InstructionSequence* code);
+ static void ApplyForwarding(Zone* local_zone,
+ ZoneVector<RpoNumber> const& forwarding,
+ InstructionSequence* code);
};
} // namespace compiler
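
jump-threading.h above is a compact example of the signature convention this patch applies wherever a NOLINT(runtime/references) suppression used to sit: arguments the callee mutates become pointers, so mutation is visible at the call site, and arguments it only reads become const references. Distilled from the hunk itself:

    // Before: mutable reference, flagged by the linter.
    static bool ComputeForwarding(
        Zone* local_zone,
        ZoneVector<RpoNumber>& result,  // NOLINT(runtime/references)
        InstructionSequence* code, bool frame_at_start);

    // After: out-parameter as pointer, read-only data as const reference.
    static bool ComputeForwarding(Zone* local_zone, ZoneVector<RpoNumber>* result,
                                  InstructionSequence* code, bool frame_at_start);
    static void ApplyForwarding(Zone* local_zone,
                                ZoneVector<RpoNumber> const& forwarding,
                                InstructionSequence* code);
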
diff --git a/deps/v8/src/compiler/backend/mips/code-generator-mips.cc b/deps/v8/src/compiler/backend/mips/code-generator-mips.cc
index 239075392a..ee23402e69 100644
--- a/deps/v8/src/compiler/backend/mips/code-generator-mips.cc
+++ b/deps/v8/src/compiler/backend/mips/code-generator-mips.cc
@@ -265,34 +265,33 @@ Condition FlagsConditionToConditionTst(FlagsCondition condition) {
UNREACHABLE();
}
-FPUCondition FlagsConditionToConditionCmpFPU(
- bool& predicate, // NOLINT(runtime/references)
- FlagsCondition condition) {
+FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate,
+ FlagsCondition condition) {
switch (condition) {
case kEqual:
- predicate = true;
+ *predicate = true;
return EQ;
case kNotEqual:
- predicate = false;
+ *predicate = false;
return EQ;
case kUnsignedLessThan:
- predicate = true;
+ *predicate = true;
return OLT;
case kUnsignedGreaterThanOrEqual:
- predicate = false;
+ *predicate = false;
return OLT;
case kUnsignedLessThanOrEqual:
- predicate = true;
+ *predicate = true;
return OLE;
case kUnsignedGreaterThan:
- predicate = false;
+ *predicate = false;
return OLE;
case kUnorderedEqual:
case kUnorderedNotEqual:
- predicate = true;
+ *predicate = true;
break;
default:
- predicate = true;
+ *predicate = true;
break;
}
UNREACHABLE();
@@ -303,9 +302,9 @@ FPUCondition FlagsConditionToConditionCmpFPU(
<< "\""; \
UNIMPLEMENTED();
-void EmitWordLoadPoisoningIfNeeded(
- CodeGenerator* codegen, InstructionCode opcode, Instruction* instr,
- MipsOperandConverter& i) { // NOLINT(runtime/references)
+void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
+ InstructionCode opcode, Instruction* instr,
+ MipsOperandConverter const& i) {
const MemoryAccessMode access_mode =
static_cast<MemoryAccessMode>(MiscField::decode(opcode));
if (access_mode == kMemoryAccessPoisoned) {
@@ -780,12 +779,25 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kArchCallCFunction: {
int const num_parameters = MiscField::decode(instr->opcode());
- Label return_location;
- if (linkage()->GetIncomingDescriptor()->IsWasmCapiFunction()) {
+ Label start_call;
+ bool isWasmCapiFunction =
+ linkage()->GetIncomingDescriptor()->IsWasmCapiFunction();
+ // Offset from start_call to the return address.
+ int offset = 40;
+#if V8_HOST_ARCH_MIPS
+ if (__ emit_debug_code()) {
+ offset += 16;
+ }
+#endif
+ if (isWasmCapiFunction) {
// Put the return address in a stack slot.
- __ LoadAddress(kScratchReg, &return_location);
- __ sw(kScratchReg,
- MemOperand(fp, WasmExitFrameConstants::kCallingPCOffset));
+ __ mov(kScratchReg, ra);
+ __ bind(&start_call);
+ __ nal();
+ __ nop();
+ __ Addu(ra, ra, offset - 8); // 8 = nop + nal
+ __ sw(ra, MemOperand(fp, WasmExitFrameConstants::kCallingPCOffset));
+ __ mov(ra, kScratchReg);
}
if (instr->InputAt(0)->IsImmediate()) {
ExternalReference ref = i.InputExternalReference(0);
@@ -794,7 +806,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Register func = i.InputRegister(0);
__ CallCFunction(func, num_parameters);
}
- __ bind(&return_location);
+ if (isWasmCapiFunction) {
+ CHECK_EQ(offset, __ SizeOfCodeGeneratedSince(&start_call));
+ }
+
RecordSafepoint(instr->reference_map(), Safepoint::kNoLazyDeopt);
frame_access_state()->SetFrameAccessToDefault();
// Ideally, we should decrement SP delta to match the change of stack
@@ -1179,7 +1194,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
FPURegister right = i.InputOrZeroSingleRegister(1);
bool predicate;
FPUCondition cc =
- FlagsConditionToConditionCmpFPU(predicate, instr->flags_condition());
+ FlagsConditionToConditionCmpFPU(&predicate, instr->flags_condition());
if ((left == kDoubleRegZero || right == kDoubleRegZero) &&
!__ IsDoubleZeroRegSet()) {
@@ -1239,7 +1254,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
FPURegister right = i.InputOrZeroDoubleRegister(1);
bool predicate;
FPUCondition cc =
- FlagsConditionToConditionCmpFPU(predicate, instr->flags_condition());
+ FlagsConditionToConditionCmpFPU(&predicate, instr->flags_condition());
if ((left == kDoubleRegZero || right == kDoubleRegZero) &&
!__ IsDoubleZeroRegSet()) {
__ Move(kDoubleRegZero, 0.0);
@@ -2038,6 +2053,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ bnegi_w(i.OutputSimd128Register(), i.InputSimd128Register(0), 31);
break;
}
+ case kMipsF32x4Sqrt: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ fsqrt_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
case kMipsF32x4RecipApprox: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ frcp_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
@@ -3026,7 +3046,7 @@ void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm,
} else if (instr->arch_opcode() == kMipsCmpS ||
instr->arch_opcode() == kMipsCmpD) {
bool predicate;
- FlagsConditionToConditionCmpFPU(predicate, condition);
+ FlagsConditionToConditionCmpFPU(&predicate, condition);
if (predicate) {
__ BranchTrueF(tlabel);
} else {
@@ -3116,7 +3136,7 @@ void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition,
case kMipsCmpS:
case kMipsCmpD: {
bool predicate;
- FlagsConditionToConditionCmpFPU(predicate, condition);
+ FlagsConditionToConditionCmpFPU(&predicate, condition);
if (predicate) {
__ LoadZeroIfFPUCondition(kSpeculationPoisonRegister);
} else {
@@ -3314,7 +3334,7 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
__ Move(kDoubleRegZero, 0.0);
}
bool predicate;
- FlagsConditionToConditionCmpFPU(predicate, condition);
+ FlagsConditionToConditionCmpFPU(&predicate, condition);
if (!IsMipsArchVariant(kMips32r6)) {
__ li(result, Operand(1));
if (predicate) {
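
On the fixed offset in the MIPS kArchCallCFunction path above: nal sets ra to the address just past its delay slot, i.e. start_call + 8, so Addu(ra, ra, offset - 8) yields start_call + offset, the return address of the C call, and CHECK_EQ(offset, SizeOfCodeGeneratedSince(&start_call)) verifies that distance at code-generation time. A back-of-the-envelope check of the constants (the instruction count is my reading of the sequence, not authoritative):

    constexpr int kInstrSize = 4;  // every MIPS32 instruction is 4 bytes
    // Release build: 10 instructions from start_call to the return address.
    static_assert(10 * kInstrSize == 40, "matches 'int offset = 40'");
    // emit_debug_code() adds 4 instructions inside CallCFunction.
    static_assert(40 + 4 * kInstrSize == 56, "matches 'offset += 16'");
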
diff --git a/deps/v8/src/compiler/backend/mips/instruction-codes-mips.h b/deps/v8/src/compiler/backend/mips/instruction-codes-mips.h
index e8020d9e89..af0774f468 100644
--- a/deps/v8/src/compiler/backend/mips/instruction-codes-mips.h
+++ b/deps/v8/src/compiler/backend/mips/instruction-codes-mips.h
@@ -159,6 +159,7 @@ namespace compiler {
V(MipsI32x4MinU) \
V(MipsF32x4Abs) \
V(MipsF32x4Neg) \
+ V(MipsF32x4Sqrt) \
V(MipsF32x4RecipApprox) \
V(MipsF32x4RecipSqrtApprox) \
V(MipsF32x4Add) \
diff --git a/deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc b/deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc
index 4e6aef52f4..ba17ad2581 100644
--- a/deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc
+++ b/deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc
@@ -54,6 +54,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMipsF32x4Div:
case kMipsF32x4Ne:
case kMipsF32x4Neg:
+ case kMipsF32x4Sqrt:
case kMipsF32x4RecipApprox:
case kMipsF32x4RecipSqrtApprox:
case kMipsF32x4ReplaceLane:
diff --git a/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc b/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc
index bb47262c6c..7ee5c7c2c7 100644
--- a/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc
+++ b/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/base/adapters.h"
#include "src/base/bits.h"
#include "src/compiler/backend/instruction-selector-impl.h"
#include "src/compiler/node-matchers.h"
@@ -781,6 +780,10 @@ void InstructionSelector::VisitWord32ReverseBytes(Node* node) {
g.UseRegister(node->InputAt(0)));
}
+void InstructionSelector::VisitSimd128ReverseBytes(Node* node) {
+ UNREACHABLE();
+}
+
void InstructionSelector::VisitWord32Ctz(Node* node) {
MipsOperandGenerator g(this);
Emit(kMipsCtz, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
@@ -2015,6 +2018,7 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(F32x4UConvertI32x4, kMipsF32x4UConvertI32x4) \
V(F32x4Abs, kMipsF32x4Abs) \
V(F32x4Neg, kMipsF32x4Neg) \
+ V(F32x4Sqrt, kMipsF32x4Sqrt) \
V(F32x4RecipApprox, kMipsF32x4RecipApprox) \
V(F32x4RecipSqrtApprox, kMipsF32x4RecipSqrtApprox) \
V(I32x4SConvertF32x4, kMipsI32x4SConvertF32x4) \
diff --git a/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc b/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc
index 5682bed71a..9cec463e87 100644
--- a/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc
+++ b/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc
@@ -278,42 +278,41 @@ Condition FlagsConditionToConditionOvf(FlagsCondition condition) {
UNREACHABLE();
}
-FPUCondition FlagsConditionToConditionCmpFPU(
- bool& predicate, // NOLINT(runtime/references)
- FlagsCondition condition) {
+FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate,
+ FlagsCondition condition) {
switch (condition) {
case kEqual:
- predicate = true;
+ *predicate = true;
return EQ;
case kNotEqual:
- predicate = false;
+ *predicate = false;
return EQ;
case kUnsignedLessThan:
- predicate = true;
+ *predicate = true;
return OLT;
case kUnsignedGreaterThanOrEqual:
- predicate = false;
+ *predicate = false;
return OLT;
case kUnsignedLessThanOrEqual:
- predicate = true;
+ *predicate = true;
return OLE;
case kUnsignedGreaterThan:
- predicate = false;
+ *predicate = false;
return OLE;
case kUnorderedEqual:
case kUnorderedNotEqual:
- predicate = true;
+ *predicate = true;
break;
default:
- predicate = true;
+ *predicate = true;
break;
}
UNREACHABLE();
}
-void EmitWordLoadPoisoningIfNeeded(
- CodeGenerator* codegen, InstructionCode opcode, Instruction* instr,
- MipsOperandConverter& i) { // NOLINT(runtime/references)
+void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
+ InstructionCode opcode, Instruction* instr,
+ MipsOperandConverter const& i) {
const MemoryAccessMode access_mode =
static_cast<MemoryAccessMode>(MiscField::decode(opcode));
if (access_mode == kMemoryAccessPoisoned) {
@@ -758,12 +757,25 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kArchCallCFunction: {
int const num_parameters = MiscField::decode(instr->opcode());
- Label return_location;
- if (linkage()->GetIncomingDescriptor()->IsWasmCapiFunction()) {
+ Label start_call;
+ bool isWasmCapiFunction =
+ linkage()->GetIncomingDescriptor()->IsWasmCapiFunction();
+ // Offset from start_call to the return address.
+ int offset = 48;
+#if V8_HOST_ARCH_MIPS64
+ if (__ emit_debug_code()) {
+ offset += 16;
+ }
+#endif
+ if (isWasmCapiFunction) {
// Put the return address in a stack slot.
- __ LoadAddress(kScratchReg, &return_location);
- __ sd(kScratchReg,
- MemOperand(fp, WasmExitFrameConstants::kCallingPCOffset));
+ __ mov(kScratchReg, ra);
+ __ bind(&start_call);
+ __ nal();
+ __ nop();
+ __ Daddu(ra, ra, offset - 8); // 8 = nop + nal
+ __ sd(ra, MemOperand(fp, WasmExitFrameConstants::kCallingPCOffset));
+ __ mov(ra, kScratchReg);
}
if (instr->InputAt(0)->IsImmediate()) {
ExternalReference ref = i.InputExternalReference(0);
@@ -772,7 +784,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Register func = i.InputRegister(0);
__ CallCFunction(func, num_parameters);
}
- __ bind(&return_location);
+ if (isWasmCapiFunction) {
+ CHECK_EQ(offset, __ SizeOfCodeGeneratedSince(&start_call));
+ }
+
RecordSafepoint(instr->reference_map(), Safepoint::kNoLazyDeopt);
frame_access_state()->SetFrameAccessToDefault();
// Ideally, we should decrement SP delta to match the change of stack
@@ -1276,7 +1291,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
FPURegister right = i.InputOrZeroSingleRegister(1);
bool predicate;
FPUCondition cc =
- FlagsConditionToConditionCmpFPU(predicate, instr->flags_condition());
+ FlagsConditionToConditionCmpFPU(&predicate, instr->flags_condition());
if ((left == kDoubleRegZero || right == kDoubleRegZero) &&
!__ IsDoubleZeroRegSet()) {
@@ -1339,7 +1354,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
FPURegister right = i.InputOrZeroDoubleRegister(1);
bool predicate;
FPUCondition cc =
- FlagsConditionToConditionCmpFPU(predicate, instr->flags_condition());
+ FlagsConditionToConditionCmpFPU(&predicate, instr->flags_condition());
if ((left == kDoubleRegZero || right == kDoubleRegZero) &&
!__ IsDoubleZeroRegSet()) {
__ Move(kDoubleRegZero, 0.0);
@@ -2233,6 +2248,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ ftrunc_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
+ case kMips64F32x4Sqrt: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ fsqrt_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
case kMips64I32x4Neg: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
@@ -3151,7 +3171,7 @@ void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm,
} else if (instr->arch_opcode() == kMips64CmpS ||
instr->arch_opcode() == kMips64CmpD) {
bool predicate;
- FlagsConditionToConditionCmpFPU(predicate, condition);
+ FlagsConditionToConditionCmpFPU(&predicate, condition);
if (predicate) {
__ BranchTrueF(tlabel);
} else {
@@ -3261,7 +3281,7 @@ void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition,
case kMips64CmpS:
case kMips64CmpD: {
bool predicate;
- FlagsConditionToConditionCmpFPU(predicate, condition);
+ FlagsConditionToConditionCmpFPU(&predicate, condition);
if (predicate) {
__ LoadZeroIfFPUCondition(kSpeculationPoisonRegister);
} else {
@@ -3470,7 +3490,7 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
__ Move(kDoubleRegZero, 0.0);
}
bool predicate;
- FlagsConditionToConditionCmpFPU(predicate, condition);
+ FlagsConditionToConditionCmpFPU(&predicate, condition);
if (kArchVariant != kMips64r6) {
__ li(result, Operand(1));
if (predicate) {
diff --git a/deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h b/deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h
index edc8924757..bcf3532b57 100644
--- a/deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h
+++ b/deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h
@@ -189,6 +189,7 @@ namespace compiler {
V(Mips64I32x4MinU) \
V(Mips64F32x4Abs) \
V(Mips64F32x4Neg) \
+ V(Mips64F32x4Sqrt) \
V(Mips64F32x4RecipApprox) \
V(Mips64F32x4RecipSqrtApprox) \
V(Mips64F32x4Add) \
diff --git a/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc b/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc
index 880b424c41..fe2d33d1db 100644
--- a/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc
+++ b/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc
@@ -82,6 +82,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMips64F32x4Div:
case kMips64F32x4Ne:
case kMips64F32x4Neg:
+ case kMips64F32x4Sqrt:
case kMips64F32x4RecipApprox:
case kMips64F32x4RecipSqrtApprox:
case kMips64F32x4ReplaceLane:
diff --git a/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc b/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc
index 9c717ab1e9..dfc0ff5bad 100644
--- a/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc
+++ b/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/base/adapters.h"
#include "src/base/bits.h"
#include "src/compiler/backend/instruction-selector-impl.h"
#include "src/compiler/node-matchers.h"
@@ -823,6 +822,10 @@ void InstructionSelector::VisitWord32ReverseBytes(Node* node) {
g.UseRegister(node->InputAt(0)));
}
+void InstructionSelector::VisitSimd128ReverseBytes(Node* node) {
+ UNREACHABLE();
+}
+
void InstructionSelector::VisitWord32Ctz(Node* node) {
Mips64OperandGenerator g(this);
Emit(kMips64Ctz, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
@@ -2678,6 +2681,7 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(F32x4UConvertI32x4, kMips64F32x4UConvertI32x4) \
V(F32x4Abs, kMips64F32x4Abs) \
V(F32x4Neg, kMips64F32x4Neg) \
+ V(F32x4Sqrt, kMips64F32x4Sqrt) \
V(F32x4RecipApprox, kMips64F32x4RecipApprox) \
V(F32x4RecipSqrtApprox, kMips64F32x4RecipSqrtApprox) \
V(I32x4SConvertF32x4, kMips64I32x4SConvertF32x4) \
diff --git a/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc b/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc
index 5c69bc34a1..dde1804adb 100644
--- a/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc
+++ b/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc
@@ -263,9 +263,8 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
UNREACHABLE();
}
-void EmitWordLoadPoisoningIfNeeded(
- CodeGenerator* codegen, Instruction* instr,
- PPCOperandConverter& i) { // NOLINT(runtime/references)
+void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, Instruction* instr,
+ PPCOperandConverter const& i) {
const MemoryAccessMode access_mode =
static_cast<MemoryAccessMode>(MiscField::decode(instr->opcode()));
if (access_mode == kMemoryAccessPoisoned) {
@@ -1024,7 +1023,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Label start_call;
bool isWasmCapiFunction =
linkage()->GetIncomingDescriptor()->IsWasmCapiFunction();
+#if defined(_AIX)
+ // AIX/PPC64BE Linux uses a function descriptor
+ // and emits 2 extra Load instructions under CallCFunctionHelper.
+ constexpr int offset = 11 * kInstrSize;
+#else
constexpr int offset = 9 * kInstrSize;
+#endif
if (isWasmCapiFunction) {
__ mflr(r0);
__ bind(&start_call);
@@ -1043,9 +1048,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
// TODO(miladfar): In the above block, kScratchReg must be populated with
// the strictly-correct PC, which is the return address at this spot. The
- // offset is set to 36 (9 * kInstrSize) right now, which is counted from
- // where we are binding to the label and ends at this spot. If failed,
- // replace it with the correct offset suggested. More info on f5ab7d3.
+ // offset is set to 36 (9 * kInstrSize) on pLinux and 44 on AIX, which is
+ // counted from where we are binding to the label and ends at this spot.
+ // If this check fails, replace it with the suggested correct offset.
+ // More info in f5ab7d3.
if (isWasmCapiFunction)
CHECK_EQ(offset, __ SizeOfCodeGeneratedSince(&start_call));
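
The PPC variant of the same fix, just above, only adjusts the distance: on AIX (a function-descriptor ABI) CallCFunctionHelper dereferences the descriptor with two extra loads before branching, so the span from start_call to the return address grows from 9 to 11 instructions. In numbers (assuming kInstrSize == 4 on PPC):

    constexpr int kInstrSize = 4;
    constexpr int kLinuxOffset = 9 * kInstrSize;        // 36 on PPC64 Linux
    constexpr int kDescriptorOffset = 11 * kInstrSize;  // 44 on AIX
    static_assert(kDescriptorOffset - kLinuxOffset == 2 * kInstrSize,
                  "two extra loads for the function descriptor");
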
diff --git a/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc b/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc
index ef8490a726..2ffd6495d7 100644
--- a/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc
+++ b/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/base/adapters.h"
+#include "src/base/iterator.h"
#include "src/compiler/backend/instruction-selector-impl.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
@@ -926,6 +926,12 @@ void InstructionSelector::VisitWord32ReverseBytes(Node* node) {
g.UseRegister(node->InputAt(0)));
}
+void InstructionSelector::VisitSimd128ReverseBytes(Node* node) {
+ // TODO(miladfar): Implement the ppc selector for reversing SIMD bytes.
+ // Check if the input node is a Load and do a Load Reverse at once.
+ UNIMPLEMENTED();
+}
+
void InstructionSelector::VisitInt32Add(Node* node) {
VisitBinop<Int32BinopMatcher>(this, node, kPPC_Add32, kInt16Imm);
}
@@ -2283,6 +2289,8 @@ void InstructionSelector::VisitF32x4Sub(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF32x4Mul(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitF32x4Sqrt(Node* node) { UNIMPLEMENTED(); }
+
void InstructionSelector::VisitF32x4Div(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF32x4Min(Node* node) { UNIMPLEMENTED(); }
diff --git a/deps/v8/src/compiler/backend/register-allocator-verifier.cc b/deps/v8/src/compiler/backend/register-allocator-verifier.cc
index 53349c9c2b..17e0b8ca75 100644
--- a/deps/v8/src/compiler/backend/register-allocator-verifier.cc
+++ b/deps/v8/src/compiler/backend/register-allocator-verifier.cc
@@ -92,7 +92,7 @@ RegisterAllocatorVerifier::RegisterAllocatorVerifier(
void RegisterAllocatorVerifier::VerifyInput(
const OperandConstraint& constraint) {
CHECK_NE(kSameAsFirst, constraint.type_);
- if (constraint.type_ != kImmediate && constraint.type_ != kExplicit) {
+ if (constraint.type_ != kImmediate) {
CHECK_NE(InstructionOperand::kInvalidVirtualRegister,
constraint.virtual_register_);
}
@@ -102,14 +102,12 @@ void RegisterAllocatorVerifier::VerifyTemp(
const OperandConstraint& constraint) {
CHECK_NE(kSameAsFirst, constraint.type_);
CHECK_NE(kImmediate, constraint.type_);
- CHECK_NE(kExplicit, constraint.type_);
CHECK_NE(kConstant, constraint.type_);
}
void RegisterAllocatorVerifier::VerifyOutput(
const OperandConstraint& constraint) {
CHECK_NE(kImmediate, constraint.type_);
- CHECK_NE(kExplicit, constraint.type_);
CHECK_NE(InstructionOperand::kInvalidVirtualRegister,
constraint.virtual_register_);
}
@@ -149,8 +147,6 @@ void RegisterAllocatorVerifier::BuildConstraint(const InstructionOperand* op,
constraint->type_ = kConstant;
constraint->value_ = ConstantOperand::cast(op)->virtual_register();
constraint->virtual_register_ = constraint->value_;
- } else if (op->IsExplicit()) {
- constraint->type_ = kExplicit;
} else if (op->IsImmediate()) {
const ImmediateOperand* imm = ImmediateOperand::cast(op);
int value = imm->type() == ImmediateOperand::INLINE ? imm->inline_value()
@@ -235,9 +231,6 @@ void RegisterAllocatorVerifier::CheckConstraint(
case kFPRegister:
CHECK_WITH_MSG(op->IsFPRegister(), caller_info_);
return;
- case kExplicit:
- CHECK_WITH_MSG(op->IsExplicit(), caller_info_);
- return;
case kFixedRegister:
case kRegisterAndSlot:
CHECK_WITH_MSG(op->IsRegister(), caller_info_);
@@ -503,8 +496,7 @@ void RegisterAllocatorVerifier::VerifyGapMoves() {
instr_constraint.operand_constraints_;
size_t count = 0;
for (size_t i = 0; i < instr->InputCount(); ++i, ++count) {
- if (op_constraints[count].type_ == kImmediate ||
- op_constraints[count].type_ == kExplicit) {
+ if (op_constraints[count].type_ == kImmediate) {
continue;
}
int virtual_register = op_constraints[count].virtual_register_;
diff --git a/deps/v8/src/compiler/backend/register-allocator-verifier.h b/deps/v8/src/compiler/backend/register-allocator-verifier.h
index 68e69c0d16..7110c2eb42 100644
--- a/deps/v8/src/compiler/backend/register-allocator-verifier.h
+++ b/deps/v8/src/compiler/backend/register-allocator-verifier.h
@@ -188,7 +188,6 @@ class RegisterAllocatorVerifier final : public ZoneObject {
kRegisterOrSlot,
kRegisterOrSlotFP,
kRegisterOrSlotOrConstant,
- kExplicit,
kSameAsFirst,
kRegisterAndSlot
};
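
Read together, the instruction.{h,cc} and verifier hunks above remove the EXPLICIT operand kind outright: the predicate, the constraint kind, the printer branch, and the ExplicitOperand class all go, and the only producer (UseExplicit in the operand generator) was deleted earlier in this patch. Fixed registers are instead expressed through existing UnallocatedOperand policies, which carry a real virtual register and therefore remain visible to the allocator and the verifier. A sketch of the replacement idiom, modeled on the new TempFpRegister (register name illustrative):

    // Old (removed): bypassed the register allocator entirely.
    //   InstructionOperand op = g.UseExplicit(location);
    // New: a fixed-policy unallocated operand with a fresh vreg.
    UnallocatedOperand op(UnallocatedOperand::FIXED_FP_REGISTER,
                          xmm1.code(), sequence()->NextVirtualRegister());
    sequence()->MarkAsRepresentation(MachineRepresentation::kSimd128,
                                     op.virtual_register());
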
diff --git a/deps/v8/src/compiler/backend/register-allocator.cc b/deps/v8/src/compiler/backend/register-allocator.cc
index 21eef0485c..945554eb32 100644
--- a/deps/v8/src/compiler/backend/register-allocator.cc
+++ b/deps/v8/src/compiler/backend/register-allocator.cc
@@ -6,7 +6,7 @@
#include <iomanip>
-#include "src/base/adapters.h"
+#include "src/base/iterator.h"
#include "src/base/small-vector.h"
#include "src/codegen/assembler-inl.h"
#include "src/codegen/tick-counter.h"
@@ -317,7 +317,6 @@ UsePositionHintType UsePosition::HintTypeForOperand(
switch (op.kind()) {
case InstructionOperand::CONSTANT:
case InstructionOperand::IMMEDIATE:
- case InstructionOperand::EXPLICIT:
return UsePositionHintType::kNone;
case InstructionOperand::UNALLOCATED:
return UsePositionHintType::kUnresolved;
@@ -797,12 +796,13 @@ LifetimePosition LiveRange::NextEndAfter(LifetimePosition position) const {
return start_search->end();
}
-LifetimePosition LiveRange::NextStartAfter(LifetimePosition position) const {
+LifetimePosition LiveRange::NextStartAfter(LifetimePosition position) {
UseInterval* start_search = FirstSearchIntervalForPosition(position);
while (start_search->start() < position) {
start_search = start_search->next();
}
- return start_search->start();
+ next_start_ = start_search->start();
+ return next_start_;
}
LifetimePosition LiveRange::FirstIntersection(LiveRange* other) const {
@@ -1940,8 +1940,8 @@ void ConstraintBuilder::MeetConstraintsBefore(int instr_index) {
// Handle fixed input operands of second instruction.
for (size_t i = 0; i < second->InputCount(); i++) {
InstructionOperand* input = second->InputAt(i);
- if (input->IsImmediate() || input->IsExplicit()) {
- continue; // Ignore immediates and explicitly reserved registers.
+ if (input->IsImmediate()) {
+ continue; // Ignore immediates.
}
UnallocatedOperand* cur_input = UnallocatedOperand::cast(input);
if (cur_input->HasFixedPolicy()) {
@@ -2323,8 +2323,8 @@ void LiveRangeBuilder::ProcessInstructions(const InstructionBlock* block,
for (size_t i = 0; i < instr->InputCount(); i++) {
InstructionOperand* input = instr->InputAt(i);
- if (input->IsImmediate() || input->IsExplicit()) {
- continue; // Ignore immediates and explicitly reserved registers.
+ if (input->IsImmediate()) {
+ continue; // Ignore immediates.
}
LifetimePosition use_pos;
if (input->IsUnallocated() &&
@@ -2504,10 +2504,10 @@ void LiveRangeBuilder::ProcessPhis(const InstructionBlock* block,
predecessor_hint_preference |= kNotDeferredBlockPreference;
}
- // - Prefer hints from allocated (or explicit) operands.
+ // - Prefer hints from allocated operands.
//
- // Already-allocated or explicit operands are typically assigned using
- // the parallel moves on the last instruction. For example:
+ // Already-allocated operands are typically assigned using the parallel
+ // moves on the last instruction. For example:
//
// gap (v101 = [x0|R|w32]) (v100 = v101)
// ArchJmp
@@ -2515,7 +2515,7 @@ void LiveRangeBuilder::ProcessPhis(const InstructionBlock* block,
// phi: v100 = v101 v102
//
// We have already found the END move, so look for a matching START move
- // from an allocated (or explicit) operand.
+ // from an allocated operand.
//
// Note that we cannot simply look up data()->live_ranges()[vreg] here
// because the live ranges are still being built when this function is
@@ -2527,7 +2527,7 @@ void LiveRangeBuilder::ProcessPhis(const InstructionBlock* block,
for (MoveOperands* move : *moves) {
InstructionOperand& to = move->destination();
if (predecessor_hint->Equals(to)) {
- if (move->source().IsAllocated() || move->source().IsExplicit()) {
+ if (move->source().IsAllocated()) {
predecessor_hint_preference |= kMoveIsAllocatedPreference;
}
break;
@@ -3095,11 +3095,11 @@ LinearScanAllocator::LinearScanAllocator(RegisterAllocationData* data,
: RegisterAllocator(data, kind),
unhandled_live_ranges_(local_zone),
active_live_ranges_(local_zone),
- inactive_live_ranges_(local_zone),
+ inactive_live_ranges_(num_registers(), InactiveLiveRangeQueue(local_zone),
+ local_zone),
next_active_ranges_change_(LifetimePosition::Invalid()),
next_inactive_ranges_change_(LifetimePosition::Invalid()) {
active_live_ranges().reserve(8);
- inactive_live_ranges().reserve(8);
}
void LinearScanAllocator::MaybeSpillPreviousRanges(LiveRange* begin_range,
@@ -3143,15 +3143,15 @@ void LinearScanAllocator::MaybeUndoPreviousSplit(LiveRange* range) {
}
}
-void LinearScanAllocator::SpillNotLiveRanges(RangeWithRegisterSet& to_be_live,
+void LinearScanAllocator::SpillNotLiveRanges(RangeWithRegisterSet* to_be_live,
LifetimePosition position,
SpillMode spill_mode) {
for (auto it = active_live_ranges().begin();
it != active_live_ranges().end();) {
LiveRange* active_range = *it;
TopLevelLiveRange* toplevel = (*it)->TopLevel();
- auto found = to_be_live.find({toplevel, kUnassignedRegister});
- if (found == to_be_live.end()) {
+ auto found = to_be_live->find({toplevel, kUnassignedRegister});
+ if (found == to_be_live->end()) {
// Is not contained in {to_be_live}, spill it.
// Fixed registers are exempt from this. They might have been
// added from inactive at the block boundary but we know that
@@ -3207,7 +3207,7 @@ void LinearScanAllocator::SpillNotLiveRanges(RangeWithRegisterSet& to_be_live,
} else {
// This range is contained in {to_be_live}, so we can keep it.
int expected_register = (*found).expected_register;
- to_be_live.erase(found);
+ to_be_live->erase(found);
if (expected_register == active_range->assigned_register()) {
// Was live and in the correct register, simply pass through.
TRACE("Keeping %d:%d in %s\n", toplevel->vreg(),
@@ -3238,31 +3238,22 @@ LiveRange* LinearScanAllocator::AssignRegisterOnReload(LiveRange* range,
// give reloading registers precedence. That way we would compute the
// intersection for the entire future.
LifetimePosition new_end = range->End();
- for (const auto inactive : inactive_live_ranges()) {
- if (kSimpleFPAliasing || !check_fp_aliasing()) {
- if (inactive->assigned_register() != reg) continue;
- } else {
- bool conflict = inactive->assigned_register() == reg;
- if (!conflict) {
- int alias_base_index = -1;
- int aliases = data()->config()->GetAliases(range->representation(), reg,
- inactive->representation(),
- &alias_base_index);
- DCHECK(aliases > 0 || (aliases == 0 && alias_base_index == -1));
- while (aliases-- && !conflict) {
- int aliased_reg = alias_base_index + aliases;
- if (aliased_reg == reg) {
- conflict = true;
- }
- }
- }
- if (!conflict) continue;
+ for (int cur_reg = 0; cur_reg < num_registers(); ++cur_reg) {
+ if ((kSimpleFPAliasing || !check_fp_aliasing()) && cur_reg != reg) {
+ continue;
}
- for (auto interval = inactive->first_interval(); interval != nullptr;
- interval = interval->next()) {
- if (interval->start() > new_end) break;
- if (interval->end() <= range->Start()) continue;
- if (new_end > interval->start()) new_end = interval->start();
+ for (const auto cur_inactive : inactive_live_ranges(cur_reg)) {
+ if (!kSimpleFPAliasing && check_fp_aliasing() &&
+ !data()->config()->AreAliases(cur_inactive->representation(), cur_reg,
+ range->representation(), reg)) {
+ continue;
+ }
+ for (auto interval = cur_inactive->first_interval(); interval != nullptr;
+ interval = interval->next()) {
+ if (interval->start() > new_end) break;
+ if (interval->end() <= range->Start()) continue;
+ if (new_end > interval->start()) new_end = interval->start();
+ }
}
}
if (new_end != range->End()) {
@@ -3275,8 +3266,8 @@ LiveRange* LinearScanAllocator::AssignRegisterOnReload(LiveRange* range,
return range;
}
-void LinearScanAllocator::ReloadLiveRanges(RangeWithRegisterSet& to_be_live,
- LifetimePosition position) {
+void LinearScanAllocator::ReloadLiveRanges(
+ RangeWithRegisterSet const& to_be_live, LifetimePosition position) {
// Assumption: All ranges in {to_be_live} are currently spilled and there are
// no conflicting registers in the active ranges.
// The former is ensured by SpillNotLiveRanges, the latter is by construction
@@ -3558,11 +3549,17 @@ void LinearScanAllocator::UpdateDeferredFixedRanges(SpillMode spill_mode,
Min(updated->End(), next_active_ranges_change_);
});
}
- for (auto inactive : inactive_live_ranges()) {
- split_conflicting(range, inactive, [this](LiveRange* updated) {
- next_inactive_ranges_change_ =
- Min(updated->End(), next_inactive_ranges_change_);
- });
+ for (int reg = 0; reg < num_registers(); ++reg) {
+ if ((kSimpleFPAliasing || !check_fp_aliasing()) &&
+ reg != range->assigned_register()) {
+ continue;
+ }
+ for (auto inactive : inactive_live_ranges(reg)) {
+ split_conflicting(range, inactive, [this](LiveRange* updated) {
+ next_inactive_ranges_change_ =
+ Min(updated->End(), next_inactive_ranges_change_);
+ });
+ }
}
};
if (mode() == GENERAL_REGISTERS) {
@@ -3600,12 +3597,14 @@ void LinearScanAllocator::UpdateDeferredFixedRanges(SpillMode spill_mode,
}
} else {
// Remove all ranges.
- for (auto it = inactive_live_ranges().begin();
- it != inactive_live_ranges().end();) {
- if ((*it)->TopLevel()->IsDeferredFixed()) {
- it = inactive_live_ranges().erase(it);
- } else {
- ++it;
+ for (int reg = 0; reg < num_registers(); ++reg) {
+ for (auto it = inactive_live_ranges(reg).begin();
+ it != inactive_live_ranges(reg).end();) {
+ if ((*it)->TopLevel()->IsDeferredFixed()) {
+ it = inactive_live_ranges(reg).erase(it);
+ } else {
+ ++it;
+ }
}
}
}
@@ -3636,7 +3635,9 @@ bool LinearScanAllocator::HasNonDeferredPredecessor(InstructionBlock* block) {
void LinearScanAllocator::AllocateRegisters() {
DCHECK(unhandled_live_ranges().empty());
DCHECK(active_live_ranges().empty());
- DCHECK(inactive_live_ranges().empty());
+ for (int reg = 0; reg < num_registers(); ++reg) {
+ DCHECK(inactive_live_ranges(reg).empty());
+ }
SplitAndSpillRangesDefinedByMemoryOperand();
data()->ResetSpillState();
@@ -3853,7 +3854,7 @@ void LinearScanAllocator::AllocateRegisters() {
}
if (!no_change_required) {
- SpillNotLiveRanges(to_be_live, next_block_boundary, spill_mode);
+ SpillNotLiveRanges(&to_be_live, next_block_boundary, spill_mode);
ReloadLiveRanges(to_be_live, next_block_boundary);
}
@@ -3941,9 +3942,10 @@ void LinearScanAllocator::AddToActive(LiveRange* range) {
void LinearScanAllocator::AddToInactive(LiveRange* range) {
TRACE("Add live range %d:%d to inactive\n", range->TopLevel()->vreg(),
range->relative_id());
- inactive_live_ranges().push_back(range);
next_inactive_ranges_change_ = std::min(
next_inactive_ranges_change_, range->NextStartAfter(range->Start()));
+ DCHECK(range->HasRegisterAssigned());
+ inactive_live_ranges(range->assigned_register()).insert(range);
}
void LinearScanAllocator::AddToUnhandled(LiveRange* range) {
@@ -3966,30 +3968,36 @@ ZoneVector<LiveRange*>::iterator LinearScanAllocator::ActiveToHandled(
ZoneVector<LiveRange*>::iterator LinearScanAllocator::ActiveToInactive(
const ZoneVector<LiveRange*>::iterator it, LifetimePosition position) {
LiveRange* range = *it;
- inactive_live_ranges().push_back(range);
TRACE("Moving live range %d:%d from active to inactive\n",
(range)->TopLevel()->vreg(), range->relative_id());
+ LifetimePosition next_active = range->NextStartAfter(position);
next_inactive_ranges_change_ =
- std::min(next_inactive_ranges_change_, range->NextStartAfter(position));
+ std::min(next_inactive_ranges_change_, next_active);
+ DCHECK(range->HasRegisterAssigned());
+ inactive_live_ranges(range->assigned_register()).insert(range);
return active_live_ranges().erase(it);
}
-ZoneVector<LiveRange*>::iterator LinearScanAllocator::InactiveToHandled(
- ZoneVector<LiveRange*>::iterator it) {
+LinearScanAllocator::InactiveLiveRangeQueue::iterator
+LinearScanAllocator::InactiveToHandled(InactiveLiveRangeQueue::iterator it) {
+ LiveRange* range = *it;
TRACE("Moving live range %d:%d from inactive to handled\n",
- (*it)->TopLevel()->vreg(), (*it)->relative_id());
- return inactive_live_ranges().erase(it);
+ range->TopLevel()->vreg(), range->relative_id());
+ int reg = range->assigned_register();
+ return inactive_live_ranges(reg).erase(it);
}
-ZoneVector<LiveRange*>::iterator LinearScanAllocator::InactiveToActive(
- ZoneVector<LiveRange*>::iterator it, LifetimePosition position) {
+LinearScanAllocator::InactiveLiveRangeQueue::iterator
+LinearScanAllocator::InactiveToActive(InactiveLiveRangeQueue::iterator it,
+ LifetimePosition position) {
LiveRange* range = *it;
active_live_ranges().push_back(range);
TRACE("Moving live range %d:%d from inactive to active\n",
range->TopLevel()->vreg(), range->relative_id());
next_active_ranges_change_ =
std::min(next_active_ranges_change_, range->NextEndAfter(position));
- return inactive_live_ranges().erase(it);
+ int reg = range->assigned_register();
+ return inactive_live_ranges(reg).erase(it);
}
void LinearScanAllocator::ForwardStateTo(LifetimePosition position) {
@@ -4012,18 +4020,25 @@ void LinearScanAllocator::ForwardStateTo(LifetimePosition position) {
if (position >= next_inactive_ranges_change_) {
next_inactive_ranges_change_ = LifetimePosition::MaxPosition();
- for (auto it = inactive_live_ranges().begin();
- it != inactive_live_ranges().end();) {
- LiveRange* cur_inactive = *it;
- if (cur_inactive->End() <= position) {
- it = InactiveToHandled(it);
- } else if (cur_inactive->Covers(position)) {
- it = InactiveToActive(it, position);
- } else {
- next_inactive_ranges_change_ =
- std::min(next_inactive_ranges_change_,
- cur_inactive->NextStartAfter(position));
- ++it;
+ for (int reg = 0; reg < num_registers(); ++reg) {
+ ZoneVector<LiveRange*> reorder(data()->allocation_zone());
+ for (auto it = inactive_live_ranges(reg).begin();
+ it != inactive_live_ranges(reg).end();) {
+ LiveRange* cur_inactive = *it;
+ if (cur_inactive->End() <= position) {
+ it = InactiveToHandled(it);
+ } else if (cur_inactive->Covers(position)) {
+ it = InactiveToActive(it, position);
+ } else {
+ next_inactive_ranges_change_ =
+ std::min(next_inactive_ranges_change_,
+ cur_inactive->NextStartAfter(position));
+ it = inactive_live_ranges(reg).erase(it);
+ reorder.push_back(cur_inactive);
+ }
+ }
+ for (LiveRange* range : reorder) {
+ inactive_live_ranges(reg).insert(range);
}
}
}
@@ -4094,31 +4109,34 @@ void LinearScanAllocator::FindFreeRegistersForRange(
}
}
- for (LiveRange* cur_inactive : inactive_live_ranges()) {
- DCHECK(cur_inactive->End() > range->Start());
- int cur_reg = cur_inactive->assigned_register();
- // No need to carry out intersections, when this register won't be
- // interesting to this range anyway.
- // TODO(mtrofin): extend to aliased ranges, too.
- if ((kSimpleFPAliasing || !check_fp_aliasing()) &&
- positions[cur_reg] < range->Start()) {
- continue;
- }
-
- LifetimePosition next_intersection = cur_inactive->FirstIntersection(range);
- if (!next_intersection.IsValid()) continue;
- if (kSimpleFPAliasing || !check_fp_aliasing()) {
- positions[cur_reg] = Min(positions[cur_reg], next_intersection);
- TRACE("Register %s is free until pos %d (2)\n", RegisterName(cur_reg),
- Min(positions[cur_reg], next_intersection).value());
- } else {
- int alias_base_index = -1;
- int aliases = data()->config()->GetAliases(
- cur_inactive->representation(), cur_reg, rep, &alias_base_index);
- DCHECK(aliases > 0 || (aliases == 0 && alias_base_index == -1));
- while (aliases--) {
- int aliased_reg = alias_base_index + aliases;
- positions[aliased_reg] = Min(positions[aliased_reg], next_intersection);
+ for (int cur_reg = 0; cur_reg < num_regs; ++cur_reg) {
+ for (LiveRange* cur_inactive : inactive_live_ranges(cur_reg)) {
+ DCHECK_GT(cur_inactive->End(), range->Start());
+ CHECK_EQ(cur_inactive->assigned_register(), cur_reg);
+ // No need to carry out intersections when this register won't be
+ // interesting to this range anyway.
+ // TODO(mtrofin): extend to aliased ranges, too.
+ if ((kSimpleFPAliasing || !check_fp_aliasing()) &&
+ positions[cur_reg] <= cur_inactive->NextStart()) {
+ break;
+ }
+ LifetimePosition next_intersection =
+ cur_inactive->FirstIntersection(range);
+ if (!next_intersection.IsValid()) continue;
+ if (kSimpleFPAliasing || !check_fp_aliasing()) {
+ positions[cur_reg] = std::min(positions[cur_reg], next_intersection);
+ TRACE("Register %s is free until pos %d (2)\n", RegisterName(cur_reg),
+ positions[cur_reg].value());
+ } else {
+ int alias_base_index = -1;
+ int aliases = data()->config()->GetAliases(
+ cur_inactive->representation(), cur_reg, rep, &alias_base_index);
+ DCHECK(aliases > 0 || (aliases == 0 && alias_base_index == -1));
+ while (aliases--) {
+ int aliased_reg = alias_base_index + aliases;
+ positions[aliased_reg] =
+ std::min(positions[aliased_reg], next_intersection);
+ }
}
}
}
@@ -4337,46 +4355,46 @@ void LinearScanAllocator::AllocateBlockedReg(LiveRange* current,
}
}
- for (LiveRange* range : inactive_live_ranges()) {
- DCHECK(range->End() > current->Start());
- int cur_reg = range->assigned_register();
- bool is_fixed = range->TopLevel()->IsFixed();
-
- // Don't perform costly intersections if they are guaranteed to not update
- // block_pos or use_pos.
- // TODO(mtrofin): extend to aliased ranges, too.
- if ((kSimpleFPAliasing || !check_fp_aliasing())) {
- if (is_fixed) {
- if (block_pos[cur_reg] < range->Start()) continue;
- } else {
- if (use_pos[cur_reg] < range->Start()) continue;
+ for (int cur_reg = 0; cur_reg < num_registers(); ++cur_reg) {
+ for (LiveRange* range : inactive_live_ranges(cur_reg)) {
+ DCHECK(range->End() > current->Start());
+ DCHECK_EQ(range->assigned_register(), cur_reg);
+ bool is_fixed = range->TopLevel()->IsFixed();
+
+ // Don't perform costly intersections if they are guaranteed to not update
+ // block_pos or use_pos.
+ // TODO(mtrofin): extend to aliased ranges, too.
+ if ((kSimpleFPAliasing || !check_fp_aliasing())) {
+ DCHECK_LE(use_pos[cur_reg], block_pos[cur_reg]);
+ if (block_pos[cur_reg] <= range->NextStart()) break;
+ if (!is_fixed && use_pos[cur_reg] <= range->NextStart()) continue;
}
- }
- LifetimePosition next_intersection = range->FirstIntersection(current);
- if (!next_intersection.IsValid()) continue;
+ LifetimePosition next_intersection = range->FirstIntersection(current);
+ if (!next_intersection.IsValid()) continue;
- if (kSimpleFPAliasing || !check_fp_aliasing()) {
- if (is_fixed) {
- block_pos[cur_reg] = Min(block_pos[cur_reg], next_intersection);
- use_pos[cur_reg] = Min(block_pos[cur_reg], use_pos[cur_reg]);
- } else {
- use_pos[cur_reg] = Min(use_pos[cur_reg], next_intersection);
- }
- } else {
- int alias_base_index = -1;
- int aliases = data()->config()->GetAliases(
- range->representation(), cur_reg, rep, &alias_base_index);
- DCHECK(aliases > 0 || (aliases == 0 && alias_base_index == -1));
- while (aliases--) {
- int aliased_reg = alias_base_index + aliases;
+ if (kSimpleFPAliasing || !check_fp_aliasing()) {
if (is_fixed) {
- block_pos[aliased_reg] =
- Min(block_pos[aliased_reg], next_intersection);
- use_pos[aliased_reg] =
- Min(block_pos[aliased_reg], use_pos[aliased_reg]);
+ block_pos[cur_reg] = Min(block_pos[cur_reg], next_intersection);
+ use_pos[cur_reg] = Min(block_pos[cur_reg], use_pos[cur_reg]);
} else {
- use_pos[aliased_reg] = Min(use_pos[aliased_reg], next_intersection);
+ use_pos[cur_reg] = Min(use_pos[cur_reg], next_intersection);
+ }
+ } else {
+ int alias_base_index = -1;
+ int aliases = data()->config()->GetAliases(
+ range->representation(), cur_reg, rep, &alias_base_index);
+ DCHECK(aliases > 0 || (aliases == 0 && alias_base_index == -1));
+ while (aliases--) {
+ int aliased_reg = alias_base_index + aliases;
+ if (is_fixed) {
+ block_pos[aliased_reg] =
+ Min(block_pos[aliased_reg], next_intersection);
+ use_pos[aliased_reg] =
+ Min(block_pos[aliased_reg], use_pos[aliased_reg]);
+ } else {
+ use_pos[aliased_reg] = Min(use_pos[aliased_reg], next_intersection);
+ }
}
}
}
@@ -4490,40 +4508,38 @@ void LinearScanAllocator::SplitAndSpillIntersecting(LiveRange* current,
it = ActiveToHandled(it);
}
- for (auto it = inactive_live_ranges().begin();
- it != inactive_live_ranges().end();) {
- LiveRange* range = *it;
- DCHECK(range->End() > current->Start());
- if (range->TopLevel()->IsFixed()) {
- ++it;
- continue;
- }
+ for (int cur_reg = 0; cur_reg < num_registers(); ++cur_reg) {
if (kSimpleFPAliasing || !check_fp_aliasing()) {
- if (range->assigned_register() != reg) {
+ if (cur_reg != reg) continue;
+ }
+ for (auto it = inactive_live_ranges(cur_reg).begin();
+ it != inactive_live_ranges(cur_reg).end();) {
+ LiveRange* range = *it;
+ if (!kSimpleFPAliasing && check_fp_aliasing() &&
+ !data()->config()->AreAliases(current->representation(), reg,
+ range->representation(), cur_reg)) {
++it;
continue;
}
- } else {
- if (!data()->config()->AreAliases(current->representation(), reg,
- range->representation(),
- range->assigned_register())) {
+ DCHECK(range->End() > current->Start());
+ if (range->TopLevel()->IsFixed()) {
++it;
continue;
}
- }
- LifetimePosition next_intersection = range->FirstIntersection(current);
- if (next_intersection.IsValid()) {
- UsePosition* next_pos = range->NextRegisterPosition(current->Start());
- if (next_pos == nullptr) {
- SpillAfter(range, split_pos, spill_mode);
+ LifetimePosition next_intersection = range->FirstIntersection(current);
+ if (next_intersection.IsValid()) {
+ UsePosition* next_pos = range->NextRegisterPosition(current->Start());
+ if (next_pos == nullptr) {
+ SpillAfter(range, split_pos, spill_mode);
+ } else {
+ next_intersection = Min(next_intersection, next_pos->pos());
+ SpillBetween(range, split_pos, next_intersection, spill_mode);
+ }
+ it = InactiveToHandled(it);
} else {
- next_intersection = Min(next_intersection, next_pos->pos());
- SpillBetween(range, split_pos, next_intersection, spill_mode);
+ ++it;
}
- it = InactiveToHandled(it);
- } else {
- ++it;
}
}
}
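
The allocator change above replaces the single inactive-range vector with one multiset per register, ordered by each range's cached next start. Sorted order is what lets FindFreeRegistersForRange and AllocateBlockedReg break out of a scan as soon as one range starts too late to matter, since every later entry starts later still. A minimal standalone sketch of that pattern, with plain ints for lifetime positions and hypothetical names rather than V8's actual types:

#include <iostream>
#include <set>
#include <vector>

// Stand-in for a live range: just the fields the ordering needs.
struct Range {
  int id;
  int next_start;  // cached start of the next interval
};

struct ByNextStart {
  bool operator()(const Range* a, const Range* b) const {
    return a->next_start < b->next_start;
  }
};

using InactiveQueue = std::multiset<Range*, ByNextStart>;

int main() {
  Range r1{1, 5}, r2{2, 12}, r3{3, 40};
  std::vector<InactiveQueue> inactive(2);  // one ordered set per register
  inactive[0] = {&r1, &r3};
  inactive[1] = {&r2};

  int free_until = 10;  // register 0 is known to be free until position 10
  for (const Range* r : inactive[0]) {
    // Sorted by next_start, so once one range starts too late to
    // matter, every remaining range does too: stop scanning.
    if (r->next_start >= free_until) break;
    std::cout << "range " << r->id << " may conflict\n";
  }
}
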
diff --git a/deps/v8/src/compiler/backend/register-allocator.h b/deps/v8/src/compiler/backend/register-allocator.h
index bc7b09d147..17d664e507 100644
--- a/deps/v8/src/compiler/backend/register-allocator.h
+++ b/deps/v8/src/compiler/backend/register-allocator.h
@@ -335,7 +335,11 @@ class RegisterAllocationData final : public ZoneObject {
return result;
}
- void ResetSpillState() { spill_state_.clear(); }
+ void ResetSpillState() {
+ for (auto& state : spill_state_) {
+ state.clear();
+ }
+ }
TickCounter* tick_counter() { return tick_counter_; }
@@ -626,9 +630,10 @@ class V8_EXPORT_PRIVATE LiveRange : public NON_EXPORTED_BASE(ZoneObject) {
bool ShouldBeAllocatedBefore(const LiveRange* other) const;
bool CanCover(LifetimePosition position) const;
bool Covers(LifetimePosition position) const;
- LifetimePosition NextStartAfter(LifetimePosition position) const;
+ LifetimePosition NextStartAfter(LifetimePosition position);
LifetimePosition NextEndAfter(LifetimePosition position) const;
LifetimePosition FirstIntersection(LiveRange* other) const;
+ LifetimePosition NextStart() const { return next_start_; }
void VerifyChildStructure() const {
VerifyIntervals();
@@ -689,6 +694,8 @@ class V8_EXPORT_PRIVATE LiveRange : public NON_EXPORTED_BASE(ZoneObject) {
// Cache the last position splintering stopped at.
mutable UsePosition* splitting_pointer_;
LiveRangeBundle* bundle_ = nullptr;
+ // Next interval start, relative to the current linear scan position.
+ LifetimePosition next_start_;
DISALLOW_COPY_AND_ASSIGN(LiveRange);
};
@@ -1298,29 +1305,39 @@ class LinearScanAllocator final : public RegisterAllocator {
LifetimePosition begin_pos,
LiveRange* end_range);
void MaybeUndoPreviousSplit(LiveRange* range);
- void SpillNotLiveRanges(
- RangeWithRegisterSet& to_be_live, // NOLINT(runtime/references)
- LifetimePosition position, SpillMode spill_mode);
+ void SpillNotLiveRanges(RangeWithRegisterSet* to_be_live,
+ LifetimePosition position, SpillMode spill_mode);
LiveRange* AssignRegisterOnReload(LiveRange* range, int reg);
- void ReloadLiveRanges(
- RangeWithRegisterSet& to_be_live, // NOLINT(runtime/references)
- LifetimePosition position);
+ void ReloadLiveRanges(RangeWithRegisterSet const& to_be_live,
+ LifetimePosition position);
void UpdateDeferredFixedRanges(SpillMode spill_mode, InstructionBlock* block);
bool BlockIsDeferredOrImmediatePredecessorIsNotDeferred(
const InstructionBlock* block);
bool HasNonDeferredPredecessor(InstructionBlock* block);
- struct LiveRangeOrdering {
+ struct UnhandledLiveRangeOrdering {
bool operator()(const LiveRange* a, const LiveRange* b) const {
return a->ShouldBeAllocatedBefore(b);
}
};
- using LiveRangeQueue = ZoneMultiset<LiveRange*, LiveRangeOrdering>;
- LiveRangeQueue& unhandled_live_ranges() { return unhandled_live_ranges_; }
+
+ struct InactiveLiveRangeOrdering {
+ bool operator()(const LiveRange* a, const LiveRange* b) const {
+ return a->NextStart() < b->NextStart();
+ }
+ };
+
+ using UnhandledLiveRangeQueue =
+ ZoneMultiset<LiveRange*, UnhandledLiveRangeOrdering>;
+ using InactiveLiveRangeQueue =
+ ZoneMultiset<LiveRange*, InactiveLiveRangeOrdering>;
+ UnhandledLiveRangeQueue& unhandled_live_ranges() {
+ return unhandled_live_ranges_;
+ }
ZoneVector<LiveRange*>& active_live_ranges() { return active_live_ranges_; }
- ZoneVector<LiveRange*>& inactive_live_ranges() {
- return inactive_live_ranges_;
+ InactiveLiveRangeQueue& inactive_live_ranges(int reg) {
+ return inactive_live_ranges_[reg];
}
void SetLiveRangeAssignedRegister(LiveRange* range, int reg);
@@ -1333,10 +1350,10 @@ class LinearScanAllocator final : public RegisterAllocator {
ZoneVector<LiveRange*>::iterator it);
ZoneVector<LiveRange*>::iterator ActiveToInactive(
ZoneVector<LiveRange*>::iterator it, LifetimePosition position);
- ZoneVector<LiveRange*>::iterator InactiveToHandled(
- ZoneVector<LiveRange*>::iterator it);
- ZoneVector<LiveRange*>::iterator InactiveToActive(
- ZoneVector<LiveRange*>::iterator it, LifetimePosition position);
+ InactiveLiveRangeQueue::iterator InactiveToHandled(
+ InactiveLiveRangeQueue::iterator it);
+ InactiveLiveRangeQueue::iterator InactiveToActive(
+ InactiveLiveRangeQueue::iterator it, LifetimePosition position);
void ForwardStateTo(LifetimePosition position);
@@ -1386,9 +1403,9 @@ class LinearScanAllocator final : public RegisterAllocator {
void PrintRangeOverview(std::ostream& os);
- LiveRangeQueue unhandled_live_ranges_;
+ UnhandledLiveRangeQueue unhandled_live_ranges_;
ZoneVector<LiveRange*> active_live_ranges_;
- ZoneVector<LiveRange*> inactive_live_ranges_;
+ ZoneVector<InactiveLiveRangeQueue> inactive_live_ranges_;
// Approximates the position at which the set of ranges will change next.
// Used to avoid scanning for updates even if none are present.
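
One subtlety the reorder vector in ForwardStateTo deals with: std::multiset assumes an element's ordering key never changes while it is stored, and NextStartAfter updates the cached next_start_ that InactiveLiveRangeOrdering compares. A range therefore has to be erased before its key moves and re-inserted afterwards. A toy sketch of that safe-update pattern (simplified types, not V8's):

#include <cassert>
#include <set>

struct Range { int next_start; };
struct ByNextStart {
  bool operator()(const Range* a, const Range* b) const {
    return a->next_start < b->next_start;
  }
};

int main() {
  Range a{5}, b{20};
  std::multiset<Range*, ByNextStart> q{&a, &b};

  // Mutating a.next_start while &a sits in q would corrupt the set's
  // ordering invariant. Erase first, update, then re-insert -- the
  // same dance the reorder vector performs in ForwardStateTo.
  q.erase(q.find(&a));
  a.next_start = 30;
  q.insert(&a);

  assert((*q.begin())->next_start == 20);  // &b now sorts first
}
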
diff --git a/deps/v8/src/compiler/backend/s390/code-generator-s390.cc b/deps/v8/src/compiler/backend/s390/code-generator-s390.cc
index 4c2d862fc4..d0f97eca57 100644
--- a/deps/v8/src/compiler/backend/s390/code-generator-s390.cc
+++ b/deps/v8/src/compiler/backend/s390/code-generator-s390.cc
@@ -1246,9 +1246,8 @@ void AdjustStackPointerForTailCall(
}
}
-void EmitWordLoadPoisoningIfNeeded(
- CodeGenerator* codegen, Instruction* instr,
- S390OperandConverter& i) { // NOLINT(runtime/references)
+void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, Instruction* instr,
+ S390OperandConverter const& i) {
const MemoryAccessMode access_mode =
static_cast<MemoryAccessMode>(MiscField::decode(instr->opcode()));
if (access_mode == kMemoryAccessPoisoned) {
diff --git a/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc b/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc
index 7f3277fc68..7b002fe6d3 100644
--- a/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc
+++ b/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/base/adapters.h"
#include "src/compiler/backend/instruction-selector-impl.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
@@ -436,68 +435,64 @@ void VisitTryTruncateDouble(InstructionSelector* selector, ArchOpcode opcode,
#endif
template <class CanCombineWithLoad>
-void GenerateRightOperands(
- InstructionSelector* selector, Node* node, Node* right,
- InstructionCode& opcode, // NOLINT(runtime/references)
- OperandModes& operand_mode, // NOLINT(runtime/references)
- InstructionOperand* inputs,
- size_t& input_count, // NOLINT(runtime/references)
- CanCombineWithLoad canCombineWithLoad) {
+void GenerateRightOperands(InstructionSelector* selector, Node* node,
+ Node* right, InstructionCode* opcode,
+ OperandModes* operand_mode,
+ InstructionOperand* inputs, size_t* input_count,
+ CanCombineWithLoad canCombineWithLoad) {
S390OperandGenerator g(selector);
- if ((operand_mode & OperandMode::kAllowImmediate) &&
- g.CanBeImmediate(right, operand_mode)) {
- inputs[input_count++] = g.UseImmediate(right);
+ if ((*operand_mode & OperandMode::kAllowImmediate) &&
+ g.CanBeImmediate(right, *operand_mode)) {
+ inputs[(*input_count)++] = g.UseImmediate(right);
// Can only be RI or RRI
- operand_mode &= OperandMode::kAllowImmediate;
- } else if (operand_mode & OperandMode::kAllowMemoryOperand) {
+ *operand_mode &= OperandMode::kAllowImmediate;
+ } else if (*operand_mode & OperandMode::kAllowMemoryOperand) {
NodeMatcher mright(right);
if (mright.IsLoad() && selector->CanCover(node, right) &&
canCombineWithLoad(SelectLoadOpcode(right))) {
AddressingMode mode = g.GetEffectiveAddressMemoryOperand(
- right, inputs, &input_count, OpcodeImmMode(opcode));
- opcode |= AddressingModeField::encode(mode);
- operand_mode &= ~OperandMode::kAllowImmediate;
- if (operand_mode & OperandMode::kAllowRM)
- operand_mode &= ~OperandMode::kAllowDistinctOps;
- } else if (operand_mode & OperandMode::kAllowRM) {
- DCHECK(!(operand_mode & OperandMode::kAllowRRM));
- inputs[input_count++] = g.UseAnyExceptImmediate(right);
+ right, inputs, input_count, OpcodeImmMode(*opcode));
+ *opcode |= AddressingModeField::encode(mode);
+ *operand_mode &= ~OperandMode::kAllowImmediate;
+ if (*operand_mode & OperandMode::kAllowRM)
+ *operand_mode &= ~OperandMode::kAllowDistinctOps;
+ } else if (*operand_mode & OperandMode::kAllowRM) {
+ DCHECK(!(*operand_mode & OperandMode::kAllowRRM));
+ inputs[(*input_count)++] = g.UseAnyExceptImmediate(right);
// Can not be Immediate
- operand_mode &=
+ *operand_mode &=
~OperandMode::kAllowImmediate & ~OperandMode::kAllowDistinctOps;
- } else if (operand_mode & OperandMode::kAllowRRM) {
- DCHECK(!(operand_mode & OperandMode::kAllowRM));
- inputs[input_count++] = g.UseAnyExceptImmediate(right);
+ } else if (*operand_mode & OperandMode::kAllowRRM) {
+ DCHECK(!(*operand_mode & OperandMode::kAllowRM));
+ inputs[(*input_count)++] = g.UseAnyExceptImmediate(right);
// Can not be Immediate
- operand_mode &= ~OperandMode::kAllowImmediate;
+ *operand_mode &= ~OperandMode::kAllowImmediate;
} else {
UNREACHABLE();
}
} else {
- inputs[input_count++] = g.UseRegister(right);
+ inputs[(*input_count)++] = g.UseRegister(right);
// Can only be RR or RRR
- operand_mode &= OperandMode::kAllowRRR;
+ *operand_mode &= OperandMode::kAllowRRR;
}
}
template <class CanCombineWithLoad>
-void GenerateBinOpOperands(
- InstructionSelector* selector, Node* node, Node* left, Node* right,
- InstructionCode& opcode, // NOLINT(runtime/references)
- OperandModes& operand_mode, // NOLINT(runtime/references)
- InstructionOperand* inputs,
- size_t& input_count, // NOLINT(runtime/references)
- CanCombineWithLoad canCombineWithLoad) {
+void GenerateBinOpOperands(InstructionSelector* selector, Node* node,
+ Node* left, Node* right, InstructionCode* opcode,
+ OperandModes* operand_mode,
+ InstructionOperand* inputs, size_t* input_count,
+ CanCombineWithLoad canCombineWithLoad) {
S390OperandGenerator g(selector);
// left is always a register
InstructionOperand const left_input = g.UseRegister(left);
- inputs[input_count++] = left_input;
+ inputs[(*input_count)++] = left_input;
if (left == right) {
- inputs[input_count++] = left_input;
+ inputs[(*input_count)++] = left_input;
// Can only be RR or RRR
- operand_mode &= OperandMode::kAllowRRR;
+ *operand_mode &= OperandMode::kAllowRRR;
} else {
GenerateRightOperands(selector, node, right, opcode, operand_mode, inputs,
input_count, canCombineWithLoad);
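
The signature rewrites in this hunk, and the matching ones elsewhere in the patch, swap mutable reference parameters for pointers, which is why the NOLINT(runtime/references) suppressions disappear: that lint rule implements the Google C++ style guide's requirement that output parameters be pointers, so mutation is visible at the call site. A small illustration of the convention, using a made-up helper rather than code from this patch:

#include <cstddef>
#include <iostream>

// Hypothetical helper: output parameters are pointers, so the caller's
// "&count" makes the mutation visible; a non-const reference would not.
void AppendInput(int value, int* inputs, size_t* input_count) {
  inputs[(*input_count)++] = value;
}

int main() {
  int inputs[4];
  size_t count = 0;
  AppendInput(7, inputs, &count);  // the &count flags a write-back
  AppendInput(9, inputs, &count);
  std::cout << count << " inputs\n";  // prints: 2 inputs
}
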
@@ -575,8 +570,8 @@ void VisitUnaryOp(InstructionSelector* selector, Node* node,
size_t output_count = 0;
Node* input = node->InputAt(0);
- GenerateRightOperands(selector, node, input, opcode, operand_mode, inputs,
- input_count, canCombineWithLoad);
+ GenerateRightOperands(selector, node, input, &opcode, &operand_mode, inputs,
+ &input_count, canCombineWithLoad);
bool input_is_word32 = ProduceWord32Result(input);
@@ -631,8 +626,8 @@ void VisitBinOp(InstructionSelector* selector, Node* node,
std::swap(left, right);
}
- GenerateBinOpOperands(selector, node, left, right, opcode, operand_mode,
- inputs, input_count, canCombineWithLoad);
+ GenerateBinOpOperands(selector, node, left, right, &opcode, &operand_mode,
+ inputs, &input_count, canCombineWithLoad);
bool left_is_word32 = ProduceWord32Result(left);
@@ -1175,6 +1170,12 @@ void InstructionSelector::VisitWord32ReverseBytes(Node* node) {
g.UseRegister(node->InputAt(0)));
}
+void InstructionSelector::VisitSimd128ReverseBytes(Node* node) {
+ // TODO(miladfar): Implement the s390 selector for reversing SIMD bytes.
+ // Check if the input node is a Load and do a Load Reverse at once.
+ UNIMPLEMENTED();
+}
+
template <class Matcher, ArchOpcode neg_opcode>
static inline bool TryMatchNegFromSub(InstructionSelector* selector,
Node* node) {
@@ -2691,6 +2692,8 @@ void InstructionSelector::VisitF32x4Sub(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF32x4Mul(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitF32x4Sqrt(Node* node) { UNIMPLEMENTED(); }
+
void InstructionSelector::VisitF32x4Div(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF32x4Min(Node* node) { UNIMPLEMENTED(); }
diff --git a/deps/v8/src/compiler/backend/x64/code-generator-x64.cc b/deps/v8/src/compiler/backend/x64/code-generator-x64.cc
index a4f82b153b..44da872f26 100644
--- a/deps/v8/src/compiler/backend/x64/code-generator-x64.cc
+++ b/deps/v8/src/compiler/backend/x64/code-generator-x64.cc
@@ -361,7 +361,6 @@ class WasmProtectedInstructionTrap final : public WasmOutOfLineTrap {
void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen,
InstructionCode opcode, Instruction* instr,
- X64OperandConverter& i, // NOLINT(runtime/references)
int pc) {
const MemoryAccessMode access_mode =
static_cast<MemoryAccessMode>(MiscField::decode(opcode));
@@ -370,9 +369,9 @@ void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen,
}
}
-void EmitWordLoadPoisoningIfNeeded(
- CodeGenerator* codegen, InstructionCode opcode, Instruction* instr,
- X64OperandConverter& i) { // NOLINT(runtime/references)
+void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
+ InstructionCode opcode, Instruction* instr,
+ X64OperandConverter const& i) {
const MemoryAccessMode access_mode =
static_cast<MemoryAccessMode>(MiscField::decode(opcode));
if (access_mode == kMemoryAccessPoisoned) {
@@ -1876,30 +1875,30 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Subsd(i.InputDoubleRegister(0), kScratchDoubleReg);
break;
case kX64Movsxbl:
- EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset());
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
ASSEMBLE_MOVX(movsxbl);
__ AssertZeroExtended(i.OutputRegister());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kX64Movzxbl:
- EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset());
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
ASSEMBLE_MOVX(movzxbl);
__ AssertZeroExtended(i.OutputRegister());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kX64Movsxbq:
- EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset());
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
ASSEMBLE_MOVX(movsxbq);
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kX64Movzxbq:
- EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset());
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
ASSEMBLE_MOVX(movzxbq);
__ AssertZeroExtended(i.OutputRegister());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kX64Movb: {
- EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset());
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
size_t index = 0;
Operand operand = i.MemoryOperand(&index);
if (HasImmediateInput(instr, index)) {
@@ -1911,29 +1910,29 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64Movsxwl:
- EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset());
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
ASSEMBLE_MOVX(movsxwl);
__ AssertZeroExtended(i.OutputRegister());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kX64Movzxwl:
- EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset());
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
ASSEMBLE_MOVX(movzxwl);
__ AssertZeroExtended(i.OutputRegister());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kX64Movsxwq:
- EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset());
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
ASSEMBLE_MOVX(movsxwq);
break;
case kX64Movzxwq:
- EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset());
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
ASSEMBLE_MOVX(movzxwq);
__ AssertZeroExtended(i.OutputRegister());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kX64Movw: {
- EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset());
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
size_t index = 0;
Operand operand = i.MemoryOperand(&index);
if (HasImmediateInput(instr, index)) {
@@ -1945,7 +1944,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64Movl:
- EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset());
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
if (instr->HasOutput()) {
if (HasAddressingMode(instr)) {
__ movl(i.OutputRegister(), i.MemoryOperand());
@@ -1969,7 +1968,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kX64Movsxlq:
- EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset());
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
ASSEMBLE_MOVX(movsxlq);
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
@@ -2021,7 +2020,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64Movq:
- EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset());
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
if (instr->HasOutput()) {
__ movq(i.OutputRegister(), i.MemoryOperand());
} else {
@@ -2036,7 +2035,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kX64Movss:
- EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset());
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
if (instr->HasOutput()) {
__ Movss(i.OutputDoubleRegister(), i.MemoryOperand());
} else {
@@ -2046,7 +2045,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
case kX64Movsd: {
- EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset());
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
if (instr->HasOutput()) {
const MemoryAccessMode access_mode =
static_cast<MemoryAccessMode>(MiscField::decode(opcode));
@@ -2069,7 +2068,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kX64Movdqu: {
CpuFeatureScope sse_scope(tasm(), SSSE3);
- EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset());
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
if (instr->HasOutput()) {
__ Movdqu(i.OutputSimd128Register(), i.MemoryOperand());
} else {
@@ -2293,6 +2292,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ movq(i.OutputDoubleRegister(), kScratchRegister);
break;
}
+ case kX64F64x2Sqrt: {
+ __ Sqrtpd(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
case kX64F64x2Add: {
ASSEMBLE_SSE_BINOP(addpd);
break;
@@ -2350,22 +2353,48 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kX64F64x2Eq: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ cmpeqpd(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ __ Cmpeqpd(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64F64x2Ne: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ cmpneqpd(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ __ Cmpneqpd(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64F64x2Lt: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ cmpltpd(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ __ Cmpltpd(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64F64x2Le: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ cmplepd(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ __ Cmplepd(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64F64x2Qfma: {
+ if (CpuFeatures::IsSupported(FMA3)) {
+ CpuFeatureScope fma3_scope(tasm(), FMA3);
+ __ vfmadd231pd(i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(2));
+ } else {
+ XMMRegister tmp = i.TempSimd128Register(0);
+ __ movapd(tmp, i.InputSimd128Register(2));
+ __ mulpd(tmp, i.InputSimd128Register(1));
+ __ addpd(i.OutputSimd128Register(), tmp);
+ }
+ break;
+ }
+ case kX64F64x2Qfms: {
+ if (CpuFeatures::IsSupported(FMA3)) {
+ CpuFeatureScope fma3_scope(tasm(), FMA3);
+ __ vfnmadd231pd(i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(2));
+ } else {
+ XMMRegister tmp = i.TempSimd128Register(0);
+ __ movapd(tmp, i.InputSimd128Register(2));
+ __ mulpd(tmp, i.InputSimd128Register(1));
+ __ subpd(i.OutputSimd128Register(), tmp);
+ }
break;
}
// TODO(gdeepti): Get rid of redundant moves for F32x4Splat/Extract below
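
The new kX64F64x2Qfma/Qfms cases back the experimental wasm QFMA operations: on FMA3 hardware the vfmadd231pd form fuses the multiply and add with a single rounding, while the movapd/mulpd/addpd fallback rounds twice, a difference the proposal tolerates. A scalar sketch of the two behaviours, assuming only the standard library:

#include <cmath>
#include <cstdio>

int main() {
  double b = 1.0 + std::ldexp(1.0, -27);  // 1 + 2^-27
  double c = b;
  double a = -1.0;
  // Fused: b * c + a with one rounding, as vfmadd231pd does per lane.
  double fused = std::fma(b, c, a);
  // Fallback: the product is rounded before the add, as in the
  // mulpd + addpd sequence; the low bits of b * c are lost.
  double split = b * c + a;
  std::printf("fused = %.17g\nsplit = %.17g\n", fused, split);
}
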
@@ -2445,6 +2474,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
}
+ case kX64F32x4Sqrt: {
+ __ sqrtps(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
case kX64F32x4RecipApprox: {
__ rcpps(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
@@ -2538,6 +2571,32 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ cmpleps(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
+ case kX64F32x4Qfma: {
+ if (CpuFeatures::IsSupported(FMA3)) {
+ CpuFeatureScope fma3_scope(tasm(), FMA3);
+ __ vfmadd231ps(i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(2));
+ } else {
+ XMMRegister tmp = i.TempSimd128Register(0);
+ __ movaps(tmp, i.InputSimd128Register(2));
+ __ mulps(tmp, i.InputSimd128Register(1));
+ __ addps(i.OutputSimd128Register(), tmp);
+ }
+ break;
+ }
+ case kX64F32x4Qfms: {
+ if (CpuFeatures::IsSupported(FMA3)) {
+ CpuFeatureScope fma3_scope(tasm(), FMA3);
+ __ vfnmadd231ps(i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(2));
+ } else {
+ XMMRegister tmp = i.TempSimd128Register(0);
+ __ movaps(tmp, i.InputSimd128Register(2));
+ __ mulps(tmp, i.InputSimd128Register(1));
+ __ subps(i.OutputSimd128Register(), tmp);
+ }
+ break;
+ }
case kX64I64x2Splat: {
CpuFeatureScope sse_scope(tasm(), SSE3);
XMMRegister dst = i.OutputSimd128Register();
@@ -2577,7 +2636,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kX64I64x2Shl: {
XMMRegister tmp = i.TempSimd128Register(0);
- __ movq(tmp, i.InputRegister(1));
+ Register shift = i.InputRegister(1);
+ // Take shift value modulo 64.
+ __ andq(shift, Immediate(63));
+ __ movq(tmp, shift);
__ psllq(i.OutputSimd128Register(), tmp);
break;
}
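
The shift cases in this file now mask the shift count first because WebAssembly defines SIMD shifts as taking the count modulo the lane width, whereas an unmasked psllq/psrlq zeroes the vector for counts of 64 or more. A scalar sketch of the required semantics for a 64-bit lane:

#include <cstdint>
#include <cstdio>

// Wasm semantics: the shift count is taken modulo the lane width,
// mirroring the andq(shift, Immediate(63)) emitted for i64x2 shifts.
uint64_t I64x2ShlLane(uint64_t lane, uint64_t count) {
  return lane << (count & 63);
}

int main() {
  // A count of 65 behaves like a count of 1. Without the mask, psllq
  // would zero the whole lane for any count of 64 or more.
  std::printf("%llu\n", (unsigned long long)I64x2ShlLane(1, 65));  // 2
}
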
@@ -2588,6 +2650,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
XMMRegister dst = i.OutputSimd128Register();
XMMRegister src = i.InputSimd128Register(0);
Register tmp = i.ToRegister(instr->TempAt(0));
+ // Modulo 64 not required as sarq_cl will mask cl to 6 bits.
// lower quadword
__ pextrq(tmp, src, 0x0);
@@ -2640,15 +2703,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (CpuFeatures::IsSupported(SSE4_2)) {
CpuFeatureScope sse_scope_4_2(tasm(), SSE4_2);
XMMRegister dst = i.OutputSimd128Register();
- XMMRegister src = i.InputSimd128Register(1);
+ XMMRegister src0 = i.InputSimd128Register(0);
+ XMMRegister src1 = i.InputSimd128Register(1);
XMMRegister tmp = i.TempSimd128Register(0);
- DCHECK_EQ(dst, i.InputSimd128Register(0));
- DCHECK_EQ(src, xmm0);
+ DCHECK_EQ(tmp, xmm0);
- __ movaps(tmp, src);
- __ pcmpgtq(src, dst);
- __ blendvpd(tmp, dst); // implicit use of xmm0 as mask
- __ movaps(dst, tmp);
+ __ movaps(tmp, src1);
+ __ pcmpgtq(tmp, src0);
+ __ movaps(dst, src1);
+ __ blendvpd(dst, src0); // implicit use of xmm0 as mask
} else {
CpuFeatureScope sse_scope_4_1(tasm(), SSE4_1);
XMMRegister dst = i.OutputSimd128Register();
@@ -2689,11 +2752,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
XMMRegister src = i.InputSimd128Register(1);
XMMRegister tmp = i.TempSimd128Register(0);
DCHECK_EQ(dst, i.InputSimd128Register(0));
- DCHECK_EQ(src, xmm0);
+ DCHECK_EQ(tmp, xmm0);
__ movaps(tmp, src);
- __ pcmpgtq(src, dst);
- __ blendvpd(dst, tmp); // implicit use of xmm0 as mask
+ __ pcmpgtq(tmp, dst);
+ __ blendvpd(dst, src); // implicit use of xmm0 as mask
break;
}
case kX64I64x2Eq: {
@@ -2732,7 +2795,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kX64I64x2ShrU: {
XMMRegister tmp = i.TempSimd128Register(0);
- __ movq(tmp, i.InputRegister(1));
+ Register shift = i.InputRegister(1);
+ // Take shift value modulo 64.
+ __ andq(shift, Immediate(63));
+ __ movq(tmp, shift);
__ psrlq(i.OutputSimd128Register(), tmp);
break;
}
@@ -2740,24 +2806,23 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
CpuFeatureScope sse_scope_4_2(tasm(), SSE4_2);
CpuFeatureScope sse_scope_4_1(tasm(), SSE4_1);
XMMRegister dst = i.OutputSimd128Register();
- XMMRegister src = i.InputSimd128Register(1);
- XMMRegister src_tmp = i.TempSimd128Register(0);
- XMMRegister dst_tmp = i.TempSimd128Register(1);
- DCHECK_EQ(dst, i.InputSimd128Register(0));
- DCHECK_EQ(src, xmm0);
+ XMMRegister src0 = i.InputSimd128Register(0);
+ XMMRegister src1 = i.InputSimd128Register(1);
+ XMMRegister tmp0 = i.TempSimd128Register(0);
+ XMMRegister tmp1 = i.TempSimd128Register(1);
+ DCHECK_EQ(tmp1, xmm0);
- __ movaps(src_tmp, src);
- __ movaps(dst_tmp, dst);
+ __ movaps(dst, src1);
+ __ movaps(tmp0, src0);
- __ pcmpeqd(src, src);
- __ psllq(src, 63);
+ __ pcmpeqd(tmp1, tmp1);
+ __ psllq(tmp1, 63);
- __ pxor(dst_tmp, src);
- __ pxor(src, src_tmp);
+ __ pxor(tmp0, tmp1);
+ __ pxor(tmp1, dst);
- __ pcmpgtq(src, dst_tmp);
- __ blendvpd(src_tmp, dst); // implicit use of xmm0 as mask
- __ movaps(dst, src_tmp);
+ __ pcmpgtq(tmp1, tmp0);
+ __ blendvpd(dst, src0); // implicit use of xmm0 as mask
break;
}
case kX64I64x2MaxU: {
@@ -2765,22 +2830,21 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
CpuFeatureScope sse_scope_4_1(tasm(), SSE4_1);
XMMRegister dst = i.OutputSimd128Register();
XMMRegister src = i.InputSimd128Register(1);
- XMMRegister src_tmp = i.TempSimd128Register(0);
- XMMRegister dst_tmp = i.TempSimd128Register(1);
+ XMMRegister dst_tmp = i.TempSimd128Register(0);
+ XMMRegister tmp = i.TempSimd128Register(1);
DCHECK_EQ(dst, i.InputSimd128Register(0));
- DCHECK_EQ(src, xmm0);
+ DCHECK_EQ(tmp, xmm0);
- __ movaps(src_tmp, src);
__ movaps(dst_tmp, dst);
- __ pcmpeqd(src, src);
- __ psllq(src, 63);
+ __ pcmpeqd(tmp, tmp);
+ __ psllq(tmp, 63);
- __ pxor(dst_tmp, src);
- __ pxor(src, src_tmp);
+ __ pxor(dst_tmp, tmp);
+ __ pxor(tmp, src);
- __ pcmpgtq(src, dst_tmp);
- __ blendvpd(dst, src_tmp); // implicit use of xmm0 as mask
+ __ pcmpgtq(tmp, dst_tmp);
+ __ blendvpd(dst, src); // implicit use of xmm0 as mask
break;
}
case kX64I64x2GtU: {
@@ -2820,11 +2884,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kX64I32x4Splat: {
XMMRegister dst = i.OutputSimd128Register();
if (HasRegisterInput(instr, 0)) {
- __ movd(dst, i.InputRegister(0));
+ __ Movd(dst, i.InputRegister(0));
} else {
- __ movd(dst, i.InputOperand(0));
+ __ Movd(dst, i.InputOperand(0));
}
- __ pshufd(dst, dst, 0x0);
+ __ Pshufd(dst, dst, 0x0);
break;
}
case kX64I32x4ExtractLane: {
@@ -2878,28 +2942,34 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
XMMRegister dst = i.OutputSimd128Register();
XMMRegister src = i.InputSimd128Register(0);
if (dst == src) {
- __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ psignd(dst, kScratchDoubleReg);
+ __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ Psignd(dst, kScratchDoubleReg);
} else {
- __ pxor(dst, dst);
- __ psubd(dst, src);
+ __ Pxor(dst, dst);
+ __ Psubd(dst, src);
}
break;
}
case kX64I32x4Shl: {
XMMRegister tmp = i.TempSimd128Register(0);
- __ movq(tmp, i.InputRegister(1));
- __ pslld(i.OutputSimd128Register(), tmp);
+ Register shift = i.InputRegister(1);
+ // Take shift value modulo 32.
+ __ andq(shift, Immediate(31));
+ __ Movq(tmp, shift);
+ __ Pslld(i.OutputSimd128Register(), tmp);
break;
}
case kX64I32x4ShrS: {
XMMRegister tmp = i.TempSimd128Register(0);
- __ movq(tmp, i.InputRegister(1));
- __ psrad(i.OutputSimd128Register(), tmp);
+ Register shift = i.InputRegister(1);
+ // Take shift value modulo 32.
+ __ andq(shift, Immediate(31));
+ __ Movq(tmp, shift);
+ __ Psrad(i.OutputSimd128Register(), tmp);
break;
}
case kX64I32x4Add: {
- __ paddd(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ __ Paddd(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64I32x4AddHoriz: {
@@ -2908,45 +2978,45 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64I32x4Sub: {
- __ psubd(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ __ Psubd(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64I32x4Mul: {
CpuFeatureScope sse_scope(tasm(), SSE4_1);
- __ pmulld(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ __ Pmulld(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64I32x4MinS: {
CpuFeatureScope sse_scope(tasm(), SSE4_1);
- __ pminsd(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ __ Pminsd(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64I32x4MaxS: {
CpuFeatureScope sse_scope(tasm(), SSE4_1);
- __ pmaxsd(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ __ Pmaxsd(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64I32x4Eq: {
- __ pcmpeqd(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ __ Pcmpeqd(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64I32x4Ne: {
XMMRegister tmp = i.TempSimd128Register(0);
- __ pcmpeqd(i.OutputSimd128Register(), i.InputSimd128Register(1));
- __ pcmpeqd(tmp, tmp);
- __ pxor(i.OutputSimd128Register(), tmp);
+ __ Pcmpeqd(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ __ Pcmpeqd(tmp, tmp);
+ __ Pxor(i.OutputSimd128Register(), tmp);
break;
}
case kX64I32x4GtS: {
- __ pcmpgtd(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ __ Pcmpgtd(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64I32x4GeS: {
CpuFeatureScope sse_scope(tasm(), SSE4_1);
XMMRegister dst = i.OutputSimd128Register();
XMMRegister src = i.InputSimd128Register(1);
- __ pminsd(dst, src);
- __ pcmpeqd(dst, src);
+ __ Pminsd(dst, src);
+ __ Pcmpeqd(dst, src);
break;
}
case kX64I32x4UConvertF32x4: {
@@ -2992,18 +3062,21 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kX64I32x4ShrU: {
XMMRegister tmp = i.TempSimd128Register(0);
- __ movq(tmp, i.InputRegister(1));
- __ psrld(i.OutputSimd128Register(), tmp);
+ Register shift = i.InputRegister(1);
+ // Take shift value modulo 32.
+ __ andq(shift, Immediate(31));
+ __ Movq(tmp, shift);
+ __ Psrld(i.OutputSimd128Register(), tmp);
break;
}
case kX64I32x4MinU: {
CpuFeatureScope sse_scope(tasm(), SSE4_1);
- __ pminud(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ __ Pminud(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64I32x4MaxU: {
CpuFeatureScope sse_scope(tasm(), SSE4_1);
- __ pmaxud(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ __ Pmaxud(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64I32x4GtU: {
@@ -3011,18 +3084,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
XMMRegister dst = i.OutputSimd128Register();
XMMRegister src = i.InputSimd128Register(1);
XMMRegister tmp = i.TempSimd128Register(0);
- __ pmaxud(dst, src);
- __ pcmpeqd(dst, src);
- __ pcmpeqd(tmp, tmp);
- __ pxor(dst, tmp);
+ __ Pmaxud(dst, src);
+ __ Pcmpeqd(dst, src);
+ __ Pcmpeqd(tmp, tmp);
+ __ Pxor(dst, tmp);
break;
}
case kX64I32x4GeU: {
CpuFeatureScope sse_scope(tasm(), SSE4_1);
XMMRegister dst = i.OutputSimd128Register();
XMMRegister src = i.InputSimd128Register(1);
- __ pminud(dst, src);
- __ pcmpeqd(dst, src);
+ __ Pminud(dst, src);
+ __ Pcmpeqd(dst, src);
break;
}
case kX64S128Zero: {
@@ -3044,17 +3117,16 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kX64I16x8ExtractLane: {
CpuFeatureScope sse_scope(tasm(), SSE4_1);
Register dst = i.OutputRegister();
- __ pextrw(dst, i.InputSimd128Register(0), i.InputInt8(1));
- __ movsxwl(dst, dst);
+ __ Pextrw(dst, i.InputSimd128Register(0), i.InputInt8(1));
break;
}
case kX64I16x8ReplaceLane: {
CpuFeatureScope sse_scope(tasm(), SSE4_1);
if (HasRegisterInput(instr, 2)) {
- __ pinsrw(i.OutputSimd128Register(), i.InputRegister(2),
+ __ Pinsrw(i.OutputSimd128Register(), i.InputRegister(2),
i.InputInt8(1));
} else {
- __ pinsrw(i.OutputSimd128Register(), i.InputOperand(2), i.InputInt8(1));
+ __ Pinsrw(i.OutputSimd128Register(), i.InputOperand(2), i.InputInt8(1));
}
break;
}
@@ -3085,13 +3157,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kX64I16x8Shl: {
XMMRegister tmp = i.TempSimd128Register(0);
- __ movq(tmp, i.InputRegister(1));
+ Register shift = i.InputRegister(1);
+ // Take shift value modulo 16.
+ __ andq(shift, Immediate(15));
+ __ movq(tmp, shift);
__ psllw(i.OutputSimd128Register(), tmp);
break;
}
case kX64I16x8ShrS: {
XMMRegister tmp = i.TempSimd128Register(0);
- __ movq(tmp, i.InputRegister(1));
+ Register shift = i.InputRegister(1);
+ // Take shift value modulo 16.
+ __ andq(shift, Immediate(15));
+ __ movq(tmp, shift);
__ psraw(i.OutputSimd128Register(), tmp);
break;
}
@@ -3173,7 +3251,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kX64I16x8ShrU: {
XMMRegister tmp = i.TempSimd128Register(0);
- __ movq(tmp, i.InputRegister(1));
+ Register shift = i.InputRegister(1);
+ // Take shift value modulo 16.
+ __ andq(shift, Immediate(15));
+ __ movq(tmp, shift);
__ psrlw(i.OutputSimd128Register(), tmp);
break;
}
@@ -3230,28 +3311,27 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
CpuFeatureScope sse_scope(tasm(), SSSE3);
XMMRegister dst = i.OutputSimd128Register();
if (HasRegisterInput(instr, 0)) {
- __ movd(dst, i.InputRegister(0));
+ __ Movd(dst, i.InputRegister(0));
} else {
- __ movd(dst, i.InputOperand(0));
+ __ Movd(dst, i.InputOperand(0));
}
- __ xorps(kScratchDoubleReg, kScratchDoubleReg);
- __ pshufb(dst, kScratchDoubleReg);
+ __ Xorps(kScratchDoubleReg, kScratchDoubleReg);
+ __ Pshufb(dst, kScratchDoubleReg);
break;
}
case kX64I8x16ExtractLane: {
CpuFeatureScope sse_scope(tasm(), SSE4_1);
Register dst = i.OutputRegister();
- __ pextrb(dst, i.InputSimd128Register(0), i.InputInt8(1));
- __ movsxbl(dst, dst);
+ __ Pextrb(dst, i.InputSimd128Register(0), i.InputInt8(1));
break;
}
case kX64I8x16ReplaceLane: {
CpuFeatureScope sse_scope(tasm(), SSE4_1);
if (HasRegisterInput(instr, 2)) {
- __ pinsrb(i.OutputSimd128Register(), i.InputRegister(2),
+ __ Pinsrb(i.OutputSimd128Register(), i.InputRegister(2),
i.InputInt8(1));
} else {
- __ pinsrb(i.OutputSimd128Register(), i.InputOperand(2), i.InputInt8(1));
+ __ Pinsrb(i.OutputSimd128Register(), i.InputOperand(2), i.InputInt8(1));
}
break;
}
@@ -3279,15 +3359,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// Temp registers for shift mask and additional moves to XMM registers.
Register tmp = i.ToRegister(instr->TempAt(0));
XMMRegister tmp_simd = i.TempSimd128Register(1);
+ Register shift = i.InputRegister(1);
// Mask off the unwanted bits before word-shifting.
__ pcmpeqw(kScratchDoubleReg, kScratchDoubleReg);
- __ movq(tmp, i.InputRegister(1));
+ // Take shift value modulo 8.
+ __ andq(shift, Immediate(7));
+ __ movq(tmp, shift);
__ addq(tmp, Immediate(8));
__ movq(tmp_simd, tmp);
__ psrlw(kScratchDoubleReg, tmp_simd);
__ packuswb(kScratchDoubleReg, kScratchDoubleReg);
__ pand(dst, kScratchDoubleReg);
- __ movq(tmp_simd, i.InputRegister(1));
+ __ movq(tmp_simd, shift);
__ psllw(dst, tmp_simd);
break;
}
@@ -3302,6 +3385,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ punpcklbw(dst, dst);
// Prepare shift value
__ movq(tmp, i.InputRegister(1));
+ // Take shift value modulo 8.
+ __ andq(tmp, Immediate(7));
__ addq(tmp, Immediate(8));
__ movq(tmp_simd, tmp);
__ psraw(kScratchDoubleReg, tmp_simd);
@@ -3414,6 +3499,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ punpcklbw(dst, dst);
// Prepare shift value
__ movq(tmp, i.InputRegister(1));
+ // Take shift value modulo 8.
+ __ andq(tmp, Immediate(7));
__ addq(tmp, Immediate(8));
__ movq(tmp_simd, tmp);
__ psrlw(kScratchDoubleReg, tmp_simd);
@@ -3422,7 +3509,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64I8x16AddSaturateU: {
- __ paddusb(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ __ Paddusb(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64I8x16SubSaturateU: {
@@ -3487,10 +3574,24 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kX64S128Select: {
// Mask used here is stored in dst.
XMMRegister dst = i.OutputSimd128Register();
- __ movaps(kScratchDoubleReg, i.InputSimd128Register(1));
- __ xorps(kScratchDoubleReg, i.InputSimd128Register(2));
- __ andps(dst, kScratchDoubleReg);
- __ xorps(dst, i.InputSimd128Register(2));
+ __ Movaps(kScratchDoubleReg, i.InputSimd128Register(1));
+ __ Xorps(kScratchDoubleReg, i.InputSimd128Register(2));
+ __ Andps(dst, kScratchDoubleReg);
+ __ Xorps(dst, i.InputSimd128Register(2));
+ break;
+ }
+ case kX64S8x16Swizzle: {
+ CpuFeatureScope sse_scope(tasm(), SSSE3);
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ XMMRegister dst = i.OutputSimd128Register();
+ XMMRegister mask = i.TempSimd128Register(0);
+
+ // Out-of-range indices should return 0. Adding 0x70 with unsigned
+ // saturation sets the top bit for any index > 15, so pshufb zeroes that lane.
+ __ Move(mask, static_cast<uint32_t>(0x70707070));
+ __ Pshufd(mask, mask, 0x0);
+ __ Paddusb(mask, i.InputSimd128Register(1));
+ __ Pshufb(dst, mask);
break;
}
case kX64S8x16Shuffle: {
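
The kX64S8x16Swizzle sequence above leans on pshufb's documented rule that a mask byte with its top bit set writes zero to the destination lane: adding 0x70 per byte with unsigned saturation (paddusb) pushes every index above 15 into that top-bit range while leaving valid indices selectable. A byte-wise sketch of the trick in plain C++, without intrinsics:

#include <cstdint>
#include <cstdio>

// Emulates one lane of pshufb after the paddusb(mask, 0x70) fix-up.
uint8_t SwizzleLane(const uint8_t table[16], uint8_t index) {
  // paddusb: unsigned saturating add, so 200 + 0x70 sticks at 255.
  unsigned sum = index + 0x70u;
  uint8_t adjusted = sum > 255 ? 255 : (uint8_t)sum;
  // pshufb: top bit set -> lane becomes 0; else select via low 4 bits.
  return (adjusted & 0x80) ? 0 : table[adjusted & 0x0F];
}

int main() {
  uint8_t table[16];
  for (int j = 0; j < 16; ++j) table[j] = 100 + j;
  std::printf("%d %d %d\n",
              SwizzleLane(table, 3),     // in range: prints 103
              SwizzleLane(table, 16),    // 16 + 0x70 = 0x80: zeroed
              SwizzleLane(table, 200));  // saturates to 255: zeroed
}
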
@@ -3507,10 +3608,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
SetupShuffleMaskOnStack(tasm(), mask);
- __ pshufb(dst, Operand(rsp, 0));
+ __ Pshufb(dst, Operand(rsp, 0));
} else { // two input operands
DCHECK_EQ(6, instr->InputCount());
- ASSEMBLE_SIMD_INSTR(movups, kScratchDoubleReg, 0);
+ ASSEMBLE_SIMD_INSTR(Movups, kScratchDoubleReg, 0);
uint32_t mask[4] = {};
for (int j = 5; j > 1; j--) {
uint32_t lanes = i.InputUint32(j);
@@ -3520,13 +3621,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
}
SetupShuffleMaskOnStack(tasm(), mask);
- __ pshufb(kScratchDoubleReg, Operand(rsp, 0));
+ __ Pshufb(kScratchDoubleReg, Operand(rsp, 0));
uint32_t mask1[4] = {};
if (instr->InputAt(1)->IsSimd128Register()) {
XMMRegister src1 = i.InputSimd128Register(1);
if (src1 != dst) __ movups(dst, src1);
} else {
- __ movups(dst, i.InputOperand(1));
+ __ Movups(dst, i.InputOperand(1));
}
for (int j = 5; j > 1; j--) {
uint32_t lanes = i.InputUint32(j);
@@ -3536,8 +3637,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
}
SetupShuffleMaskOnStack(tasm(), mask1);
- __ pshufb(dst, Operand(rsp, 0));
- __ por(dst, kScratchDoubleReg);
+ __ Pshufb(dst, Operand(rsp, 0));
+ __ Por(dst, kScratchDoubleReg);
}
__ movq(rsp, tmp);
break;
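
A recurring change in this file replaces lowercase assembler calls (movd, pshufb, ...) with their capitalized macro-assembler counterparts, which, per the TurboAssembler convention, emit the VEX-encoded AVX form when the CPU supports it and fall back to the legacy SSE encoding otherwise, avoiding SSE/AVX transition stalls. A rough sketch of that dispatch shape, with a made-up emitter rather than V8's actual TurboAssembler:

#include <cstdio>

struct Emitter {
  bool has_avx;
  void pshufb() { std::printf("emit SSE pshufb\n"); }    // legacy encoding
  void vpshufb() { std::printf("emit AVX vpshufb\n"); }  // VEX encoding
  // Capitalized macro op: picks the best available encoding.
  void Pshufb() {
    if (has_avx) {
      vpshufb();
    } else {
      pshufb();
    }
  }
};

int main() {
  Emitter sse{false}, avx{true};
  sse.Pshufb();  // falls back to the SSE form
  avx.Pshufb();  // uses the VEX form, no SSE/AVX transition penalty
}
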
diff --git a/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h b/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h
index 8a0a45a916..e390c6922c 100644
--- a/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h
+++ b/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h
@@ -160,6 +160,7 @@ namespace compiler {
V(X64F64x2ReplaceLane) \
V(X64F64x2Abs) \
V(X64F64x2Neg) \
+ V(X64F64x2Sqrt) \
V(X64F64x2Add) \
V(X64F64x2Sub) \
V(X64F64x2Mul) \
@@ -170,6 +171,8 @@ namespace compiler {
V(X64F64x2Ne) \
V(X64F64x2Lt) \
V(X64F64x2Le) \
+ V(X64F64x2Qfma) \
+ V(X64F64x2Qfms) \
V(X64F32x4Splat) \
V(X64F32x4ExtractLane) \
V(X64F32x4ReplaceLane) \
@@ -177,6 +180,7 @@ namespace compiler {
V(X64F32x4UConvertI32x4) \
V(X64F32x4Abs) \
V(X64F32x4Neg) \
+ V(X64F32x4Sqrt) \
V(X64F32x4RecipApprox) \
V(X64F32x4RecipSqrtApprox) \
V(X64F32x4Add) \
@@ -190,6 +194,8 @@ namespace compiler {
V(X64F32x4Ne) \
V(X64F32x4Lt) \
V(X64F32x4Le) \
+ V(X64F32x4Qfma) \
+ V(X64F32x4Qfms) \
V(X64I64x2Splat) \
V(X64I64x2ExtractLane) \
V(X64I64x2ReplaceLane) \
@@ -300,6 +306,7 @@ namespace compiler {
V(X64S128Or) \
V(X64S128Xor) \
V(X64S128Select) \
+ V(X64S8x16Swizzle) \
V(X64S8x16Shuffle) \
V(X64S32x4Swizzle) \
V(X64S32x4Shuffle) \
diff --git a/deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc b/deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc
index e9fa450c38..28a935fd91 100644
--- a/deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc
+++ b/deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc
@@ -129,6 +129,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64F64x2ReplaceLane:
case kX64F64x2Abs:
case kX64F64x2Neg:
+ case kX64F64x2Sqrt:
case kX64F64x2Add:
case kX64F64x2Sub:
case kX64F64x2Mul:
@@ -139,6 +140,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64F64x2Ne:
case kX64F64x2Lt:
case kX64F64x2Le:
+ case kX64F64x2Qfma:
+ case kX64F64x2Qfms:
case kX64F32x4Splat:
case kX64F32x4ExtractLane:
case kX64F32x4ReplaceLane:
@@ -148,6 +151,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64F32x4RecipSqrtApprox:
case kX64F32x4Abs:
case kX64F32x4Neg:
+ case kX64F32x4Sqrt:
case kX64F32x4Add:
case kX64F32x4AddHoriz:
case kX64F32x4Sub:
@@ -159,6 +163,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64F32x4Ne:
case kX64F32x4Lt:
case kX64F32x4Le:
+ case kX64F32x4Qfma:
+ case kX64F32x4Qfms:
case kX64I64x2Splat:
case kX64I64x2ExtractLane:
case kX64I64x2ReplaceLane:
@@ -275,6 +281,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64S1x4AllTrue:
case kX64S1x8AnyTrue:
case kX64S1x8AllTrue:
+ case kX64S8x16Swizzle:
case kX64S8x16Shuffle:
case kX64S32x4Swizzle:
case kX64S32x4Shuffle:
diff --git a/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc b/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc
index 5379074bac..f5d05fdd85 100644
--- a/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc
+++ b/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc
@@ -4,7 +4,7 @@
#include <algorithm>
-#include "src/base/adapters.h"
+#include "src/base/iterator.h"
#include "src/base/overflowing-math.h"
#include "src/compiler/backend/instruction-selector-impl.h"
#include "src/compiler/node-matchers.h"
@@ -250,9 +250,21 @@ ArchOpcode GetLoadOpcode(LoadRepresentation load_rep) {
#else
UNREACHABLE();
#endif
+#ifdef V8_COMPRESS_POINTERS
+ case MachineRepresentation::kTaggedSigned:
+ opcode = kX64MovqDecompressTaggedSigned;
+ break;
+ case MachineRepresentation::kTaggedPointer:
+ opcode = kX64MovqDecompressTaggedPointer;
+ break;
+ case MachineRepresentation::kTagged:
+ opcode = kX64MovqDecompressAnyTagged;
+ break;
+#else
case MachineRepresentation::kTaggedSigned: // Fall through.
case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged: // Fall through.
+#endif
case MachineRepresentation::kWord64:
opcode = kX64Movq;
break;
@@ -288,7 +300,8 @@ ArchOpcode GetStoreOpcode(StoreRepresentation store_rep) {
#endif
case MachineRepresentation::kTaggedSigned: // Fall through.
case MachineRepresentation::kTaggedPointer: // Fall through.
- case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kTagged:
+ return kX64MovqCompressTagged;
case MachineRepresentation::kWord64:
return kX64Movq;
case MachineRepresentation::kSimd128: // Fall through.
@@ -875,6 +888,10 @@ void InstructionSelector::VisitWord32ReverseBytes(Node* node) {
Emit(kX64Bswap32, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)));
}
+void InstructionSelector::VisitSimd128ReverseBytes(Node* node) {
+ UNREACHABLE();
+}
+
void InstructionSelector::VisitInt32Add(Node* node) {
X64OperandGenerator g(this);
@@ -1843,17 +1860,15 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
node->op()->HasProperty(Operator::kCommutative));
}
-// Shared routine for 64-bit word comparison operations.
-void VisitWord64Compare(InstructionSelector* selector, Node* node,
- FlagsContinuation* cont) {
- X64OperandGenerator g(selector);
+void VisitWord64EqualImpl(InstructionSelector* selector, Node* node,
+ FlagsContinuation* cont) {
if (selector->CanUseRootsRegister()) {
+ X64OperandGenerator g(selector);
const RootsTable& roots_table = selector->isolate()->roots_table();
RootIndex root_index;
HeapObjectBinopMatcher m(node);
if (m.right().HasValue() &&
roots_table.IsRootHandle(m.right().Value(), &root_index)) {
- if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
InstructionCode opcode =
kX64Cmp | AddressingModeField::encode(kMode_Root);
return VisitCompare(
@@ -1861,18 +1876,30 @@ void VisitWord64Compare(InstructionSelector* selector, Node* node,
g.TempImmediate(
TurboAssemblerBase::RootRegisterOffsetForRootIndex(root_index)),
g.UseRegister(m.left().node()), cont);
- } else if (m.left().HasValue() &&
- roots_table.IsRootHandle(m.left().Value(), &root_index)) {
+ }
+ }
+ VisitWordCompare(selector, node, kX64Cmp, cont);
+}
+
+void VisitWord32EqualImpl(InstructionSelector* selector, Node* node,
+ FlagsContinuation* cont) {
+ if (COMPRESS_POINTERS_BOOL && selector->CanUseRootsRegister()) {
+ X64OperandGenerator g(selector);
+ const RootsTable& roots_table = selector->isolate()->roots_table();
+ RootIndex root_index;
+ CompressedHeapObjectBinopMatcher m(node);
+ if (m.right().HasValue() &&
+ roots_table.IsRootHandle(m.right().Value(), &root_index)) {
InstructionCode opcode =
- kX64Cmp | AddressingModeField::encode(kMode_Root);
+ kX64Cmp32 | AddressingModeField::encode(kMode_Root);
return VisitCompare(
selector, opcode,
g.TempImmediate(
TurboAssemblerBase::RootRegisterOffsetForRootIndex(root_index)),
- g.UseRegister(m.right().node()), cont);
+ g.UseRegister(m.left().node()), cont);
}
}
- VisitWordCompare(selector, node, kX64Cmp, cont);
+ VisitWordCompare(selector, node, kX64Cmp32, cont);
}
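
VisitWord32EqualImpl exists because, with pointer compression enabled, a tagged value and a root living in the same 4 GiB cage compare equal exactly when their low 32 bits do, so a kX64Cmp32 against the root table entry suffices. A rough sketch of the idea, using a made-up cage base rather than V8's real layout:

#include <cstdint>
#include <cstdio>

int main() {
  // Hypothetical 4 GiB cage: tagged pointers are stored as 32-bit
  // offsets from the base, so equality needs only a 32-bit compare.
  uint64_t base = 0x0000700000000000;
  uint64_t root = base + 0x1234;   // full pointer to some root object
  uint32_t compressed_root = (uint32_t)root;  // what the root table holds

  uint64_t value = base + 0x1234;  // a tagged value inside the same cage
  bool equal = (uint32_t)value == compressed_root;  // kX64Cmp32's job
  std::printf("%s\n", equal ? "equal" : "not equal");
}
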
// Shared routine for comparison with zero.
@@ -2048,7 +2075,7 @@ void InstructionSelector::VisitWordCompareZero(Node* user, Node* value,
switch (value->opcode()) {
case IrOpcode::kWord32Equal:
cont->OverwriteAndNegateIfEqual(kEqual);
- return VisitWordCompare(this, value, kX64Cmp32, cont);
+ return VisitWord32EqualImpl(this, value, cont);
case IrOpcode::kInt32LessThan:
cont->OverwriteAndNegateIfEqual(kSignedLessThan);
return VisitWordCompare(this, value, kX64Cmp32, cont);
@@ -2071,7 +2098,7 @@ void InstructionSelector::VisitWordCompareZero(Node* user, Node* value,
if (CanCover(user, value)) {
switch (value->opcode()) {
case IrOpcode::kInt64Sub:
- return VisitWord64Compare(this, value, cont);
+ return VisitWordCompare(this, value, kX64Cmp, cont);
case IrOpcode::kWord64And:
return VisitWordCompare(this, value, kX64Test, cont);
default:
@@ -2080,20 +2107,20 @@ void InstructionSelector::VisitWordCompareZero(Node* user, Node* value,
}
return VisitCompareZero(this, user, value, kX64Cmp, cont);
}
- return VisitWord64Compare(this, value, cont);
+ return VisitWord64EqualImpl(this, value, cont);
}
case IrOpcode::kInt64LessThan:
cont->OverwriteAndNegateIfEqual(kSignedLessThan);
- return VisitWord64Compare(this, value, cont);
+ return VisitWordCompare(this, value, kX64Cmp, cont);
case IrOpcode::kInt64LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
- return VisitWord64Compare(this, value, cont);
+ return VisitWordCompare(this, value, kX64Cmp, cont);
case IrOpcode::kUint64LessThan:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
- return VisitWord64Compare(this, value, cont);
+ return VisitWordCompare(this, value, kX64Cmp, cont);
case IrOpcode::kUint64LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
- return VisitWord64Compare(this, value, cont);
+ return VisitWordCompare(this, value, kX64Cmp, cont);
case IrOpcode::kFloat32Equal:
cont->OverwriteAndNegateIfEqual(kUnorderedEqual);
return VisitFloat32Compare(this, value, cont);
@@ -2221,7 +2248,7 @@ void InstructionSelector::VisitWord32Equal(Node* const node) {
if (m.right().Is(0)) {
return VisitWordCompareZero(m.node(), m.left().node(), &cont);
}
- VisitWordCompare(this, node, kX64Cmp32, &cont);
+ VisitWord32EqualImpl(this, node, &cont);
}
void InstructionSelector::VisitInt32LessThan(Node* node) {
@@ -2246,7 +2273,7 @@ void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
VisitWordCompare(this, node, kX64Cmp32, &cont);
}
-void InstructionSelector::VisitWord64Equal(Node* const node) {
+void InstructionSelector::VisitWord64Equal(Node* node) {
FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
Int64BinopMatcher m(node);
if (m.right().Is(0)) {
@@ -2256,7 +2283,7 @@ void InstructionSelector::VisitWord64Equal(Node* const node) {
if (CanCover(user, value)) {
switch (value->opcode()) {
case IrOpcode::kInt64Sub:
- return VisitWord64Compare(this, value, &cont);
+ return VisitWordCompare(this, value, kX64Cmp, &cont);
case IrOpcode::kWord64And:
return VisitWordCompare(this, value, kX64Test, &cont);
default:
@@ -2264,7 +2291,7 @@ void InstructionSelector::VisitWord64Equal(Node* const node) {
}
}
}
- VisitWord64Compare(this, node, &cont);
+ VisitWord64EqualImpl(this, node, &cont);
}
void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
@@ -2287,24 +2314,24 @@ void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
void InstructionSelector::VisitInt64LessThan(Node* node) {
FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
- VisitWord64Compare(this, node, &cont);
+ VisitWordCompare(this, node, kX64Cmp, &cont);
}
void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
FlagsContinuation cont =
FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
- VisitWord64Compare(this, node, &cont);
+ VisitWordCompare(this, node, kX64Cmp, &cont);
}
void InstructionSelector::VisitUint64LessThan(Node* node) {
FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
- VisitWord64Compare(this, node, &cont);
+ VisitWordCompare(this, node, kX64Cmp, &cont);
}
void InstructionSelector::VisitUint64LessThanOrEqual(Node* node) {
FlagsContinuation cont =
FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
- VisitWord64Compare(this, node, &cont);
+ VisitWordCompare(this, node, kX64Cmp, &cont);
}
void InstructionSelector::VisitFloat32Equal(Node* node) {
@@ -2685,9 +2712,11 @@ VISIT_ATOMIC_BINOP(Xor)
V(I8x16GtU)
#define SIMD_UNOP_LIST(V) \
+ V(F64x2Sqrt) \
V(F32x4SConvertI32x4) \
V(F32x4Abs) \
V(F32x4Neg) \
+ V(F32x4Sqrt) \
V(F32x4RecipApprox) \
V(F32x4RecipSqrtApprox) \
V(I64x2Neg) \
@@ -2872,6 +2901,27 @@ void InstructionSelector::VisitF32x4UConvertI32x4(Node* node) {
g.UseRegister(node->InputAt(0)));
}
+#define VISIT_SIMD_QFMOP(Opcode) \
+ void InstructionSelector::Visit##Opcode(Node* node) { \
+ X64OperandGenerator g(this); \
+ if (CpuFeatures::IsSupported(FMA3)) { \
+ Emit(kX64##Opcode, g.DefineSameAsFirst(node), \
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)), \
+ g.UseRegister(node->InputAt(2))); \
+ } else { \
+ InstructionOperand temps[] = {g.TempSimd128Register()}; \
+ Emit(kX64##Opcode, g.DefineSameAsFirst(node), \
+ g.UseUniqueRegister(node->InputAt(0)), \
+ g.UseUniqueRegister(node->InputAt(1)), \
+ g.UseRegister(node->InputAt(2)), arraysize(temps), temps); \
+ } \
+ }
+VISIT_SIMD_QFMOP(F64x2Qfma)
+VISIT_SIMD_QFMOP(F64x2Qfms)
+VISIT_SIMD_QFMOP(F32x4Qfma)
+VISIT_SIMD_QFMOP(F32x4Qfms)
+#undef VISIT_SIMD_QFMOP
+
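The VISIT_SIMD_QFMOP macro above picks a true fused multiply-add when FMA3 is available and otherwise falls back to a separate multiply and add through a unique temp register. A scalar analogy of the numeric difference (one rounding versus two), illustrative only:

#include <cmath>
#include <cstdio>

int main() {
  float a = 1.0f + 1e-3f;
  float fused = std::fmaf(a, a, -a * a);  // product not rounded before add
  float split = a * a + (-a * a);         // two roundings: exactly 0
  std::printf("fused=%g split=%g\n", fused, split);
}

The fused result exposes the rounding error of a * a, which the non-FMA fallback cannot reproduce exactly.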
void InstructionSelector::VisitI64x2ShrS(Node* node) {
X64OperandGenerator g(this);
InstructionOperand temps[] = {g.TempRegister()};
@@ -2893,10 +2943,10 @@ void InstructionSelector::VisitI64x2Mul(Node* node) {
void InstructionSelector::VisitI64x2MinS(Node* node) {
X64OperandGenerator g(this);
if (this->IsSupported(SSE4_2)) {
- InstructionOperand temps[] = {g.TempSimd128Register()};
- Emit(kX64I64x2MinS, g.DefineSameAsFirst(node),
- g.UseRegister(node->InputAt(0)), g.UseFixed(node->InputAt(1), xmm0),
- arraysize(temps), temps);
+ InstructionOperand temps[] = {g.TempFpRegister(xmm0)};
+ Emit(kX64I64x2MinS, g.DefineAsRegister(node),
+ g.UseUniqueRegister(node->InputAt(0)),
+ g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps);
} else {
InstructionOperand temps[] = {g.TempSimd128Register(), g.TempRegister(),
g.TempRegister()};
@@ -2908,27 +2958,27 @@ void InstructionSelector::VisitI64x2MinS(Node* node) {
void InstructionSelector::VisitI64x2MaxS(Node* node) {
X64OperandGenerator g(this);
- InstructionOperand temps[] = {g.TempSimd128Register()};
+ InstructionOperand temps[] = {g.TempFpRegister(xmm0)};
Emit(kX64I64x2MaxS, g.DefineSameAsFirst(node),
- g.UseRegister(node->InputAt(0)), g.UseFixed(node->InputAt(1), xmm0),
+ g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)),
arraysize(temps), temps);
}
void InstructionSelector::VisitI64x2MinU(Node* node) {
X64OperandGenerator g(this);
InstructionOperand temps[] = {g.TempSimd128Register(),
- g.TempSimd128Register()};
- Emit(kX64I64x2MinU, g.DefineSameAsFirst(node),
- g.UseRegister(node->InputAt(0)), g.UseFixed(node->InputAt(1), xmm0),
- arraysize(temps), temps);
+ g.TempFpRegister(xmm0)};
+ Emit(kX64I64x2MinU, g.DefineAsRegister(node),
+ g.UseUniqueRegister(node->InputAt(0)),
+ g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps);
}
void InstructionSelector::VisitI64x2MaxU(Node* node) {
X64OperandGenerator g(this);
InstructionOperand temps[] = {g.TempSimd128Register(),
- g.TempSimd128Register()};
+ g.TempFpRegister(xmm0)};
Emit(kX64I64x2MaxU, g.DefineSameAsFirst(node),
- g.UseRegister(node->InputAt(0)), g.UseFixed(node->InputAt(1), xmm0),
+ g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)),
arraysize(temps), temps);
}
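The I64x2 min/max changes above stop fixing an input to xmm0 and instead reserve xmm0 as a temp, because SSE4.2's BLENDVPD reads its selection mask implicitly from xmm0. A small intrinsics sketch of the compare-then-blend pattern these instructions implement (assumes an x86-64 compiler with SSE4.2 enabled, e.g. -msse4.2; not V8 code):

#include <immintrin.h>
#include <cstdint>
#include <cstdio>

// i64x2 signed min as "compare, then blend"; the blend's mask lives in xmm0.
__m128i I64x2MinS(__m128i a, __m128i b) {
  __m128i mask = _mm_cmpgt_epi64(a, b);   // SSE4.2: a > b per 64-bit lane
  return _mm_castpd_si128(_mm_blendv_pd(  // pick b where a > b, else a
      _mm_castsi128_pd(a), _mm_castsi128_pd(b), _mm_castsi128_pd(mask)));
}

int main() {
  __m128i a = _mm_set_epi64x(5, -7);  // lanes: high=5, low=-7
  __m128i b = _mm_set_epi64x(3, -2);  // lanes: high=3, low=-2
  alignas(16) int64_t out[2];
  _mm_store_si128(reinterpret_cast<__m128i*>(out), I64x2MinS(a, b));
  std::printf("%lld %lld\n", (long long)out[0], (long long)out[1]);  // -7 3
}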
@@ -3256,6 +3306,14 @@ void InstructionSelector::VisitS8x16Shuffle(Node* node) {
Emit(opcode, 1, &dst, input_count, inputs, temp_count, temps);
}
+void InstructionSelector::VisitS8x16Swizzle(Node* node) {
+ X64OperandGenerator g(this);
+ InstructionOperand temps[] = {g.TempSimd128Register()};
+ Emit(kX64S8x16Swizzle, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)),
+ arraysize(temps), temps);
+}
+
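VisitS8x16Swizzle maps the wasm byte swizzle onto kX64S8x16Swizzle (essentially PSHUFB plus an index fixup, hence the unique register and the temp). A scalar reference of the intended semantics, where any index of 16 or more yields zero; illustrative code, not V8's:

#include <array>
#include <cstdint>
#include <cstdio>

std::array<uint8_t, 16> Swizzle(const std::array<uint8_t, 16>& src,
                                const std::array<uint8_t, 16>& idx) {
  std::array<uint8_t, 16> out{};
  for (int i = 0; i < 16; ++i) {
    out[i] = idx[i] < 16 ? src[idx[i]] : 0;  // out-of-range lanes become 0
  }
  return out;
}

int main() {
  std::array<uint8_t, 16> src{}, idx{};
  for (uint8_t i = 0; i < 16; ++i) { src[i] = 'a' + i; idx[i] = 15 - i; }
  idx[0] = 200;  // out of range, selects 0
  auto out = Swizzle(src, idx);
  for (uint8_t b : out) std::printf("%d ", b);
  std::printf("\n");
}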
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
diff --git a/deps/v8/src/compiler/bytecode-graph-builder.cc b/deps/v8/src/compiler/bytecode-graph-builder.cc
index b1051be571..17472a305d 100644
--- a/deps/v8/src/compiler/bytecode-graph-builder.cc
+++ b/deps/v8/src/compiler/bytecode-graph-builder.cc
@@ -24,7 +24,7 @@
#include "src/objects/literal-objects-inl.h"
#include "src/objects/objects-inl.h"
#include "src/objects/smi.h"
-#include "src/objects/template-objects-inl.h"
+#include "src/objects/template-objects.h"
namespace v8 {
namespace internal {
@@ -215,6 +215,9 @@ class BytecodeGraphBuilder {
FeedbackSlot slot);
JSTypeHintLowering::LoweringResult TryBuildSimplifiedConstruct(
const Operator* op, Node* const* args, int arg_count, FeedbackSlot slot);
+ JSTypeHintLowering::LoweringResult TryBuildSimplifiedGetIterator(
+ const Operator* op, Node* receiver, FeedbackSlot load_slot,
+ FeedbackSlot call_slot);
JSTypeHintLowering::LoweringResult TryBuildSimplifiedLoadNamed(
const Operator* op, Node* receiver, FeedbackSlot slot);
JSTypeHintLowering::LoweringResult TryBuildSimplifiedLoadKeyed(
@@ -945,7 +948,7 @@ BytecodeGraphBuilder::BytecodeGraphBuilder(
bytecode_array().parameter_count(), bytecode_array().register_count(),
shared_info.object())),
bytecode_iterator_(
- base::make_unique<OffHeapBytecodeArray>(bytecode_array())),
+ std::make_unique<OffHeapBytecodeArray>(bytecode_array())),
bytecode_analysis_(broker_->GetBytecodeAnalysis(
bytecode_array().object(), osr_offset,
flags & BytecodeGraphBuilderFlag::kAnalyzeEnvironmentLiveness,
@@ -971,12 +974,12 @@ BytecodeGraphBuilder::BytecodeGraphBuilder(
if (FLAG_concurrent_inlining) {
// With concurrent inlining on, the source position address doesn't change
// because it's been copied from the heap.
- source_position_iterator_ = base::make_unique<SourcePositionTableIterator>(
+ source_position_iterator_ = std::make_unique<SourcePositionTableIterator>(
Vector<const byte>(bytecode_array().source_positions_address(),
bytecode_array().source_positions_size()));
} else {
// Otherwise, we need to access the table through a handle.
- source_position_iterator_ = base::make_unique<SourcePositionTableIterator>(
+ source_position_iterator_ = std::make_unique<SourcePositionTableIterator>(
handle(bytecode_array().object()->SourcePositionTableIfCollected(),
isolate()));
}
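These hunks are part of a mechanical migration from the old base::make_unique shim to C++14's std::make_unique. A minimal usage sketch of the replacement:

#include <memory>
#include <vector>

int main() {
  // Previously spelled base::make_unique<...>(...) throughout V8.
  auto v = std::make_unique<std::vector<int>>(3, 42);
  return (*v)[0] == 42 ? 0 : 1;
}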
@@ -2087,12 +2090,13 @@ void BytecodeGraphBuilder::VisitCloneObject() {
void BytecodeGraphBuilder::VisitGetTemplateObject() {
DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining);
- FeedbackSlot slot = bytecode_iterator().GetSlotOperand(1);
- ObjectRef description(
+ FeedbackSource source =
+ CreateFeedbackSource(bytecode_iterator().GetIndexOperand(1));
+ TemplateObjectDescriptionRef description(
broker(), bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
- JSArrayRef template_object =
- shared_info().GetTemplateObject(description, feedback_vector(), slot);
- environment()->BindAccumulator(jsgraph()->Constant(template_object));
+ Node* template_object = NewNode(javascript()->GetTemplateObject(
+ description.object(), shared_info().object(), source));
+ environment()->BindAccumulator(template_object);
}
Node* const* BytecodeGraphBuilder::GetCallArgumentsFromRegisters(
@@ -3297,19 +3301,21 @@ void BytecodeGraphBuilder::VisitForInStep() {
void BytecodeGraphBuilder::VisitGetIterator() {
PrepareEagerCheckpoint();
- Node* object =
+ Node* receiver =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
- FeedbackSource feedback =
+ FeedbackSource load_feedback =
CreateFeedbackSource(bytecode_iterator().GetIndexOperand(1));
- const Operator* op = javascript()->GetIterator(feedback);
+ FeedbackSource call_feedback =
+ CreateFeedbackSource(bytecode_iterator().GetIndexOperand(2));
+ const Operator* op = javascript()->GetIterator(load_feedback, call_feedback);
- JSTypeHintLowering::LoweringResult lowering =
- TryBuildSimplifiedLoadNamed(op, object, feedback.slot);
+ JSTypeHintLowering::LoweringResult lowering = TryBuildSimplifiedGetIterator(
+ op, receiver, load_feedback.slot, call_feedback.slot);
if (lowering.IsExit()) return;
DCHECK(!lowering.Changed());
- Node* node = NewNode(op, object);
- environment()->BindAccumulator(node, Environment::kAttachFrameState);
+ Node* iterator = NewNode(op, receiver);
+ environment()->BindAccumulator(iterator, Environment::kAttachFrameState);
}
void BytecodeGraphBuilder::VisitSuspendGenerator() {
@@ -3776,6 +3782,20 @@ BytecodeGraphBuilder::TryBuildSimplifiedConstruct(const Operator* op,
}
JSTypeHintLowering::LoweringResult
+BytecodeGraphBuilder::TryBuildSimplifiedGetIterator(const Operator* op,
+ Node* receiver,
+ FeedbackSlot load_slot,
+ FeedbackSlot call_slot) {
+ Node* effect = environment()->GetEffectDependency();
+ Node* control = environment()->GetControlDependency();
+ JSTypeHintLowering::LoweringResult early_reduction =
+ type_hint_lowering().ReduceGetIteratorOperation(
+ op, receiver, effect, control, load_slot, call_slot);
+ ApplyEarlyReduction(early_reduction);
+ return early_reduction;
+}
+
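TryBuildSimplifiedGetIterator threads two feedback slots through the lowering because GetIterator is really a property load followed by a call. A behavior sketch of that two-step protocol in plain C++; all names here are invented stand-ins:

#include <cstdio>
#include <functional>
#include <map>
#include <string>

struct Value { int payload = 0; };
using Method = std::function<Value(Value&)>;

Value GetIterator(Value& receiver, const std::map<std::string, Method>& proto) {
  const Method& m = proto.at("@@iterator");  // 1) load, feeds the load slot
  return m(receiver);                        // 2) call, feeds the call slot
}

int main() {
  std::map<std::string, Method> proto{
      {"@@iterator", [](Value& r) { return Value{r.payload + 1}; }}};
  Value v{41};
  std::printf("%d\n", GetIterator(v, proto).payload);  // 42
}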
+JSTypeHintLowering::LoweringResult
BytecodeGraphBuilder::TryBuildSimplifiedLoadNamed(const Operator* op,
Node* receiver,
FeedbackSlot slot) {
diff --git a/deps/v8/src/compiler/c-linkage.cc b/deps/v8/src/compiler/c-linkage.cc
index 428ba058a7..4c576b771a 100644
--- a/deps/v8/src/compiler/c-linkage.cc
+++ b/deps/v8/src/compiler/c-linkage.cc
@@ -27,7 +27,7 @@ namespace {
// == x64 ====================================================================
// ===========================================================================
-#ifdef _WIN64
+#ifdef V8_TARGET_OS_WIN
// == x64 windows ============================================================
#define STACK_SHADOW_WORDS 4
#define PARAM_REGISTERS rcx, rdx, r8, r9
@@ -39,12 +39,12 @@ namespace {
(1 << xmm9.code()) | (1 << xmm10.code()) | (1 << xmm11.code()) | \
(1 << xmm12.code()) | (1 << xmm13.code()) | (1 << xmm14.code()) | \
(1 << xmm15.code())
-#else
+#else // V8_TARGET_OS_WIN
// == x64 other ==============================================================
#define PARAM_REGISTERS rdi, rsi, rdx, rcx, r8, r9
#define CALLEE_SAVE_REGISTERS \
rbx.bit() | r12.bit() | r13.bit() | r14.bit() | r15.bit()
-#endif
+#endif // V8_TARGET_OS_WIN
#elif V8_TARGET_ARCH_ARM
// ===========================================================================
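Switching the guard from _WIN64 to V8_TARGET_OS_WIN selects the C calling convention by compilation target rather than by host, which matters when cross-compiling. A sketch listing the integer parameter registers of the two x64 ABIs involved:

#include <cstdio>

int main() {
  // Windows x64 also reserves 4 shadow words on the stack
  // (STACK_SHADOW_WORDS above).
  const char* win64[] = {"rcx", "rdx", "r8", "r9"};
  const char* sysv[] = {"rdi", "rsi", "rdx", "rcx", "r8", "r9"};
  for (const char* r : win64) std::printf("win64 param: %s\n", r);
  for (const char* r : sysv) std::printf("sysv  param: %s\n", r);
}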
diff --git a/deps/v8/src/compiler/code-assembler.cc b/deps/v8/src/compiler/code-assembler.cc
index 4f18011463..5b89e1b663 100644
--- a/deps/v8/src/compiler/code-assembler.cc
+++ b/deps/v8/src/compiler/code-assembler.cc
@@ -29,6 +29,7 @@ namespace internal {
constexpr MachineType MachineTypeOf<Smi>::value;
constexpr MachineType MachineTypeOf<Object>::value;
+constexpr MachineType MachineTypeOf<MaybeObject>::value;
namespace compiler {
@@ -1349,8 +1350,8 @@ void CodeAssembler::TailCallStubImpl(const CallInterfaceDescriptor& descriptor,
Node* CodeAssembler::CallStubRImpl(StubCallMode call_mode,
const CallInterfaceDescriptor& descriptor,
- size_t result_size, Node* target,
- SloppyTNode<Object> context,
+ size_t result_size, TNode<Object> target,
+ TNode<Object> context,
std::initializer_list<Node*> args) {
DCHECK(call_mode == StubCallMode::kCallCodeObject ||
call_mode == StubCallMode::kCallBuiltinPointer);
@@ -1369,7 +1370,7 @@ Node* CodeAssembler::CallStubRImpl(StubCallMode call_mode,
inputs.data());
}
-Node* CodeAssembler::TailCallStubThenBytecodeDispatchImpl(
+void CodeAssembler::TailCallStubThenBytecodeDispatchImpl(
const CallInterfaceDescriptor& descriptor, Node* target, Node* context,
std::initializer_list<Node*> args) {
constexpr size_t kMaxNumArgs = 6;
@@ -1389,33 +1390,33 @@ Node* CodeAssembler::TailCallStubThenBytecodeDispatchImpl(
for (auto arg : args) inputs.Add(arg);
inputs.Add(context);
- return raw_assembler()->TailCallN(call_descriptor, inputs.size(),
- inputs.data());
+ raw_assembler()->TailCallN(call_descriptor, inputs.size(), inputs.data());
}
template <class... TArgs>
-Node* CodeAssembler::TailCallBytecodeDispatch(
- const CallInterfaceDescriptor& descriptor, Node* target, TArgs... args) {
+void CodeAssembler::TailCallBytecodeDispatch(
+ const CallInterfaceDescriptor& descriptor, TNode<RawPtrT> target,
+ TArgs... args) {
DCHECK_EQ(descriptor.GetParameterCount(), sizeof...(args));
auto call_descriptor = Linkage::GetBytecodeDispatchCallDescriptor(
zone(), descriptor, descriptor.GetStackParameterCount());
Node* nodes[] = {target, args...};
CHECK_EQ(descriptor.GetParameterCount() + 1, arraysize(nodes));
- return raw_assembler()->TailCallN(call_descriptor, arraysize(nodes), nodes);
+ raw_assembler()->TailCallN(call_descriptor, arraysize(nodes), nodes);
}
// Instantiate TailCallBytecodeDispatch() for argument counts used by
 // CSA-generated code.
-template V8_EXPORT_PRIVATE Node* CodeAssembler::TailCallBytecodeDispatch(
- const CallInterfaceDescriptor& descriptor, Node* target, Node*, Node*,
- Node*, Node*);
-
-TNode<Object> CodeAssembler::TailCallJSCode(TNode<Code> code,
- TNode<Context> context,
- TNode<JSFunction> function,
- TNode<Object> new_target,
- TNode<Int32T> arg_count) {
+template V8_EXPORT_PRIVATE void CodeAssembler::TailCallBytecodeDispatch(
+ const CallInterfaceDescriptor& descriptor, TNode<RawPtrT> target,
+ TNode<Object>, TNode<IntPtrT>, TNode<BytecodeArray>,
+ TNode<ExternalReference>);
+
+void CodeAssembler::TailCallJSCode(TNode<Code> code, TNode<Context> context,
+ TNode<JSFunction> function,
+ TNode<Object> new_target,
+ TNode<Int32T> arg_count) {
JSTrampolineDescriptor descriptor;
auto call_descriptor = Linkage::GetStubCallDescriptor(
zone(), descriptor, descriptor.GetStackParameterCount(),
@@ -1423,8 +1424,7 @@ TNode<Object> CodeAssembler::TailCallJSCode(TNode<Code> code,
Node* nodes[] = {code, function, new_target, arg_count, context};
CHECK_EQ(descriptor.GetParameterCount() + 2, arraysize(nodes));
- return UncheckedCast<Object>(
- raw_assembler()->TailCallN(call_descriptor, arraysize(nodes), nodes));
+ raw_assembler()->TailCallN(call_descriptor, arraysize(nodes), nodes);
}
Node* CodeAssembler::CallCFunctionN(Signature<MachineType>* signature,
@@ -1914,7 +1914,7 @@ CodeAssemblerScopedExceptionHandler::CodeAssemblerScopedExceptionHandler(
compatibility_label_(label),
exception_(exception) {
if (has_handler_) {
- label_ = base::make_unique<CodeAssemblerExceptionHandlerLabel>(
+ label_ = std::make_unique<CodeAssemblerExceptionHandlerLabel>(
assembler, CodeAssemblerLabel::kDeferred);
assembler_->state()->PushExceptionHandler(label_.get());
}
diff --git a/deps/v8/src/compiler/code-assembler.h b/deps/v8/src/compiler/code-assembler.h
index c9adb1601d..036b00b14d 100644
--- a/deps/v8/src/compiler/code-assembler.h
+++ b/deps/v8/src/compiler/code-assembler.h
@@ -17,6 +17,7 @@
#include "src/codegen/code-factory.h"
#include "src/codegen/machine-type.h"
#include "src/codegen/source-position.h"
+#include "src/codegen/tnode.h"
#include "src/heap/heap.h"
#include "src/objects/arguments.h"
#include "src/objects/data-handler.h"
@@ -79,210 +80,6 @@ TORQUE_STRUCT_LIST_GENERATOR(MAKE_FORWARD_DECLARATION, UNUSED)
template <typename T>
class Signature;
-struct UntaggedT {};
-
-struct IntegralT : UntaggedT {};
-
-struct WordT : IntegralT {
- static const MachineRepresentation kMachineRepresentation =
- (kSystemPointerSize == 4) ? MachineRepresentation::kWord32
- : MachineRepresentation::kWord64;
-};
-
-struct RawPtrT : WordT {
- static constexpr MachineType kMachineType = MachineType::Pointer();
-};
-
-template <class To>
-struct RawPtr : RawPtrT {};
-
-struct Word32T : IntegralT {
- static const MachineRepresentation kMachineRepresentation =
- MachineRepresentation::kWord32;
-};
-struct Int32T : Word32T {
- static constexpr MachineType kMachineType = MachineType::Int32();
-};
-struct Uint32T : Word32T {
- static constexpr MachineType kMachineType = MachineType::Uint32();
-};
-struct Int16T : Int32T {
- static constexpr MachineType kMachineType = MachineType::Int16();
-};
-struct Uint16T : Uint32T, Int32T {
- static constexpr MachineType kMachineType = MachineType::Uint16();
-};
-struct Int8T : Int16T {
- static constexpr MachineType kMachineType = MachineType::Int8();
-};
-struct Uint8T : Uint16T, Int16T {
- static constexpr MachineType kMachineType = MachineType::Uint8();
-};
-
-struct Word64T : IntegralT {
- static const MachineRepresentation kMachineRepresentation =
- MachineRepresentation::kWord64;
-};
-struct Int64T : Word64T {
- static constexpr MachineType kMachineType = MachineType::Int64();
-};
-struct Uint64T : Word64T {
- static constexpr MachineType kMachineType = MachineType::Uint64();
-};
-
-struct IntPtrT : WordT {
- static constexpr MachineType kMachineType = MachineType::IntPtr();
-};
-struct UintPtrT : WordT {
- static constexpr MachineType kMachineType = MachineType::UintPtr();
-};
-
-struct Float32T : UntaggedT {
- static const MachineRepresentation kMachineRepresentation =
- MachineRepresentation::kFloat32;
- static constexpr MachineType kMachineType = MachineType::Float32();
-};
-
-struct Float64T : UntaggedT {
- static const MachineRepresentation kMachineRepresentation =
- MachineRepresentation::kFloat64;
- static constexpr MachineType kMachineType = MachineType::Float64();
-};
-
-#ifdef V8_COMPRESS_POINTERS
-using TaggedT = Int32T;
-#else
-using TaggedT = IntPtrT;
-#endif
-
-// Result of a comparison operation.
-struct BoolT : Word32T {};
-
-// Value type of a Turbofan node with two results.
-template <class T1, class T2>
-struct PairT {};
-
-inline constexpr MachineType CommonMachineType(MachineType type1,
- MachineType type2) {
- return (type1 == type2) ? type1
- : ((type1.IsTagged() && type2.IsTagged())
- ? MachineType::AnyTagged()
- : MachineType::None());
-}
-
-template <class Type, class Enable = void>
-struct MachineTypeOf {
- static constexpr MachineType value = Type::kMachineType;
-};
-
-template <class Type, class Enable>
-constexpr MachineType MachineTypeOf<Type, Enable>::value;
-
-template <>
-struct MachineTypeOf<Object> {
- static constexpr MachineType value = MachineType::AnyTagged();
-};
-template <>
-struct MachineTypeOf<MaybeObject> {
- static constexpr MachineType value = MachineType::AnyTagged();
-};
-template <>
-struct MachineTypeOf<Smi> {
- static constexpr MachineType value = MachineType::TaggedSigned();
-};
-template <class HeapObjectSubtype>
-struct MachineTypeOf<HeapObjectSubtype,
- typename std::enable_if<std::is_base_of<
- HeapObject, HeapObjectSubtype>::value>::type> {
- static constexpr MachineType value = MachineType::TaggedPointer();
-};
-
-template <class HeapObjectSubtype>
-constexpr MachineType MachineTypeOf<
- HeapObjectSubtype, typename std::enable_if<std::is_base_of<
- HeapObject, HeapObjectSubtype>::value>::type>::value;
-
-template <class Type, class Enable = void>
-struct MachineRepresentationOf {
- static const MachineRepresentation value = Type::kMachineRepresentation;
-};
-template <class T>
-struct MachineRepresentationOf<
- T, typename std::enable_if<std::is_base_of<Object, T>::value>::type> {
- static const MachineRepresentation value =
- MachineTypeOf<T>::value.representation();
-};
-template <class T>
-struct MachineRepresentationOf<
- T, typename std::enable_if<std::is_base_of<MaybeObject, T>::value>::type> {
- static const MachineRepresentation value =
- MachineTypeOf<T>::value.representation();
-};
-
-template <class T>
-struct is_valid_type_tag {
- static const bool value = std::is_base_of<Object, T>::value ||
- std::is_base_of<UntaggedT, T>::value ||
- std::is_base_of<MaybeObject, T>::value ||
- std::is_same<ExternalReference, T>::value;
- static const bool is_tagged = std::is_base_of<Object, T>::value ||
- std::is_base_of<MaybeObject, T>::value;
-};
-
-template <class T1, class T2>
-struct is_valid_type_tag<PairT<T1, T2>> {
- static const bool value =
- is_valid_type_tag<T1>::value && is_valid_type_tag<T2>::value;
- static const bool is_tagged = false;
-};
-
-template <class T1, class T2>
-struct UnionT;
-
-template <class T1, class T2>
-struct is_valid_type_tag<UnionT<T1, T2>> {
- static const bool is_tagged =
- is_valid_type_tag<T1>::is_tagged && is_valid_type_tag<T2>::is_tagged;
- static const bool value = is_tagged;
-};
-
-template <class T1, class T2>
-struct UnionT {
- static constexpr MachineType kMachineType =
- CommonMachineType(MachineTypeOf<T1>::value, MachineTypeOf<T2>::value);
- static const MachineRepresentation kMachineRepresentation =
- kMachineType.representation();
- static_assert(kMachineRepresentation != MachineRepresentation::kNone,
- "no common representation");
- static_assert(is_valid_type_tag<T1>::is_tagged &&
- is_valid_type_tag<T2>::is_tagged,
- "union types are only possible for tagged values");
-};
-
-using Number = UnionT<Smi, HeapNumber>;
-using Numeric = UnionT<Number, BigInt>;
-
-// A pointer to a builtin function, used by Torque's function pointers.
-using BuiltinPtr = Smi;
-
-class int31_t {
- public:
- int31_t() : value_(0) {}
- int31_t(int value) : value_(value) { // NOLINT(runtime/explicit)
- DCHECK_EQ((value & 0x80000000) != 0, (value & 0x40000000) != 0);
- }
- int31_t& operator=(int value) {
- DCHECK_EQ((value & 0x80000000) != 0, (value & 0x40000000) != 0);
- value_ = value;
- return *this;
- }
- int32_t value() const { return value_; }
- operator int32_t() const { return value_; }
-
- private:
- int32_t value_;
-};
-
#define ENUM_ELEMENT(Name) k##Name,
#define ENUM_STRUCT_ELEMENT(NAME, Name, name) k##Name,
enum class ObjectType {
@@ -334,6 +131,7 @@ class Undetectable;
class UniqueName;
class WasmCapiFunctionData;
class WasmExceptionObject;
+class WasmExceptionPackage;
class WasmExceptionTag;
class WasmExportedFunctionData;
class WasmGlobalObject;
@@ -396,143 +194,6 @@ using CodeAssemblerVariableList = ZoneVector<CodeAssemblerVariable*>;
using CodeAssemblerCallback = std::function<void()>;
-template <class T, class U>
-struct is_subtype {
- static const bool value = std::is_base_of<U, T>::value;
-};
-template <class T1, class T2, class U>
-struct is_subtype<UnionT<T1, T2>, U> {
- static const bool value =
- is_subtype<T1, U>::value && is_subtype<T2, U>::value;
-};
-template <class T, class U1, class U2>
-struct is_subtype<T, UnionT<U1, U2>> {
- static const bool value =
- is_subtype<T, U1>::value || is_subtype<T, U2>::value;
-};
-template <class T1, class T2, class U1, class U2>
-struct is_subtype<UnionT<T1, T2>, UnionT<U1, U2>> {
- static const bool value =
- (is_subtype<T1, U1>::value || is_subtype<T1, U2>::value) &&
- (is_subtype<T2, U1>::value || is_subtype<T2, U2>::value);
-};
-
-template <class T, class U>
-struct types_have_common_values {
- static const bool value = is_subtype<T, U>::value || is_subtype<U, T>::value;
-};
-template <class U>
-struct types_have_common_values<BoolT, U> {
- static const bool value = types_have_common_values<Word32T, U>::value;
-};
-template <class U>
-struct types_have_common_values<Uint32T, U> {
- static const bool value = types_have_common_values<Word32T, U>::value;
-};
-template <class U>
-struct types_have_common_values<Int32T, U> {
- static const bool value = types_have_common_values<Word32T, U>::value;
-};
-template <class U>
-struct types_have_common_values<Uint64T, U> {
- static const bool value = types_have_common_values<Word64T, U>::value;
-};
-template <class U>
-struct types_have_common_values<Int64T, U> {
- static const bool value = types_have_common_values<Word64T, U>::value;
-};
-template <class U>
-struct types_have_common_values<IntPtrT, U> {
- static const bool value = types_have_common_values<WordT, U>::value;
-};
-template <class U>
-struct types_have_common_values<UintPtrT, U> {
- static const bool value = types_have_common_values<WordT, U>::value;
-};
-template <class T1, class T2, class U>
-struct types_have_common_values<UnionT<T1, T2>, U> {
- static const bool value = types_have_common_values<T1, U>::value ||
- types_have_common_values<T2, U>::value;
-};
-
-template <class T, class U1, class U2>
-struct types_have_common_values<T, UnionT<U1, U2>> {
- static const bool value = types_have_common_values<T, U1>::value ||
- types_have_common_values<T, U2>::value;
-};
-template <class T1, class T2, class U1, class U2>
-struct types_have_common_values<UnionT<T1, T2>, UnionT<U1, U2>> {
- static const bool value = types_have_common_values<T1, U1>::value ||
- types_have_common_values<T1, U2>::value ||
- types_have_common_values<T2, U1>::value ||
- types_have_common_values<T2, U2>::value;
-};
-
-template <class T>
-struct types_have_common_values<T, MaybeObject> {
- static const bool value = types_have_common_values<T, Object>::value;
-};
-
-template <class T>
-struct types_have_common_values<MaybeObject, T> {
- static const bool value = types_have_common_values<Object, T>::value;
-};
-
-// TNode<T> is an SSA value with the static type tag T, which is one of the
-// following:
-// - a subclass of internal::Object represents a tagged type
-// - a subclass of internal::UntaggedT represents an untagged type
-// - ExternalReference
-// - PairT<T1, T2> for an operation returning two values, with types T1
-// and T2
-// - UnionT<T1, T2> represents either a value of type T1 or of type T2.
-template <class T>
-class TNode {
- public:
- template <class U,
- typename std::enable_if<is_subtype<U, T>::value, int>::type = 0>
- TNode(const TNode<U>& other) : node_(other) {
- LazyTemplateChecks();
- }
- TNode() : TNode(nullptr) {}
-
- TNode operator=(TNode other) {
- DCHECK_NOT_NULL(other.node_);
- node_ = other.node_;
- return *this;
- }
-
- operator compiler::Node*() const { return node_; }
-
- static TNode UncheckedCast(compiler::Node* node) { return TNode(node); }
-
- protected:
- explicit TNode(compiler::Node* node) : node_(node) { LazyTemplateChecks(); }
-
- private:
- // These checks shouldn't be checked before TNode is actually used.
- void LazyTemplateChecks() {
- static_assert(is_valid_type_tag<T>::value, "invalid type tag");
- }
-
- compiler::Node* node_;
-};
-
-// SloppyTNode<T> is a variant of TNode<T> and allows implicit casts from
-// Node*. It is intended for function arguments as long as some call sites
-// still use untyped Node* arguments.
-// TODO(tebbi): Delete this class once transition is finished.
-template <class T>
-class SloppyTNode : public TNode<T> {
- public:
- SloppyTNode(compiler::Node* node) // NOLINT(runtime/explicit)
- : TNode<T>(node) {}
- template <class U, typename std::enable_if<is_subtype<U, T>::value,
- int>::type = 0>
- SloppyTNode(const TNode<U>& other) // NOLINT(runtime/explicit)
- : TNode<T>(other) {}
-};
-
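The large deletions above move the TNode/SloppyTNode type-tag machinery into src/codegen/tnode.h (now included near the top of this header). The core idea is a phantom-typed wrapper over an untyped node pointer; a minimal self-contained sketch of it, with simplified names rather than the real definitions:

#include <type_traits>

struct Node;  // opaque IR node

template <class T>
class TNodeSketch {
 public:
  // Implicit conversion is only allowed along the static subtype relation.
  template <class U,
            std::enable_if_t<std::is_base_of<T, U>::value, int> = 0>
  TNodeSketch(const TNodeSketch<U>& other) : node_(other.raw()) {}
  explicit TNodeSketch(Node* node) : node_(node) {}
  Node* raw() const { return node_; }

 private:
  Node* node_;
};

struct Object {};
struct Smi : Object {};

int main() {
  TNodeSketch<Smi> smi(nullptr);
  TNodeSketch<Object> obj = smi;  // ok: Smi is a subtype of Object
  // TNodeSketch<Smi> bad = obj;  // would not compile: no implicit downcast
  return obj.raw() == nullptr ? 0 : 1;
}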
template <class... Types>
class CodeAssemblerParameterizedLabel;
@@ -627,7 +288,7 @@ TNode<Float64T> Float64Add(TNode<Float64T> a, TNode<Float64T> b);
V(Float64ExtractLowWord32, Uint32T, Float64T) \
V(Float64ExtractHighWord32, Uint32T, Float64T) \
V(BitcastTaggedToWord, IntPtrT, Object) \
- V(BitcastTaggedSignedToWord, IntPtrT, Smi) \
+ V(BitcastTaggedToWordForTagAndSmiBits, IntPtrT, AnyTaggedT) \
V(BitcastMaybeObjectToWord, IntPtrT, MaybeObject) \
V(BitcastWordToTagged, Object, WordT) \
V(BitcastWordToTaggedSigned, Smi, WordT) \
@@ -641,6 +302,7 @@ TNode<Float64T> Float64Add(TNode<Float64T> a, TNode<Float64T> b);
V(ChangeInt32ToInt64, Int64T, Int32T) \
V(ChangeUint32ToFloat64, Float64T, Word32T) \
V(ChangeUint32ToUint64, Uint64T, Word32T) \
+ V(ChangeTaggedToCompressed, TaggedT, AnyTaggedT) \
V(BitcastInt32ToFloat32, Float32T, Word32T) \
V(BitcastFloat32ToInt32, Uint32T, Float32T) \
V(RoundFloat64ToInt32, Int32T, Float64T) \
@@ -1187,8 +849,12 @@ class V8_EXPORT_PRIVATE CodeAssembler {
TNode<RawPtrT> RawPtrAdd(TNode<RawPtrT> left, TNode<IntPtrT> right) {
return ReinterpretCast<RawPtrT>(IntPtrAdd(left, right));
}
- TNode<RawPtrT> RawPtrAdd(TNode<IntPtrT> left, TNode<RawPtrT> right) {
- return ReinterpretCast<RawPtrT>(IntPtrAdd(left, right));
+ TNode<RawPtrT> RawPtrSub(TNode<RawPtrT> left, TNode<IntPtrT> right) {
+ return ReinterpretCast<RawPtrT>(IntPtrSub(left, right));
+ }
+ TNode<IntPtrT> RawPtrSub(TNode<RawPtrT> left, TNode<RawPtrT> right) {
+ return Signed(
+ IntPtrSub(static_cast<Node*>(left), static_cast<Node*>(right)));
}
TNode<WordT> WordShl(SloppyTNode<WordT> value, int shift);
@@ -1243,7 +909,7 @@ class V8_EXPORT_PRIVATE CodeAssembler {
template <class Dummy = void>
TNode<IntPtrT> BitcastTaggedToWord(TNode<Smi> node) {
static_assert(sizeof(Dummy) < 0,
- "Should use BitcastTaggedSignedToWord instead.");
+ "Should use BitcastTaggedToWordForTagAndSmiBits instead.");
}
   // Changes a double to an intptr_t for pointer arithmetic outside of Smi range.
@@ -1363,26 +1029,26 @@ class V8_EXPORT_PRIVATE CodeAssembler {
void TailCallStub(Callable const& callable, SloppyTNode<Object> context,
TArgs... args) {
TNode<Code> target = HeapConstant(callable.code());
- return TailCallStub(callable.descriptor(), target, context, args...);
+ TailCallStub(callable.descriptor(), target, context, args...);
}
template <class... TArgs>
void TailCallStub(const CallInterfaceDescriptor& descriptor,
SloppyTNode<Code> target, SloppyTNode<Object> context,
TArgs... args) {
- return TailCallStubImpl(descriptor, target, context, {args...});
+ TailCallStubImpl(descriptor, target, context, {args...});
}
template <class... TArgs>
- Node* TailCallBytecodeDispatch(const CallInterfaceDescriptor& descriptor,
- Node* target, TArgs... args);
+ void TailCallBytecodeDispatch(const CallInterfaceDescriptor& descriptor,
+ TNode<RawPtrT> target, TArgs... args);
template <class... TArgs>
- Node* TailCallStubThenBytecodeDispatch(
+ void TailCallStubThenBytecodeDispatch(
const CallInterfaceDescriptor& descriptor, Node* target, Node* context,
TArgs... args) {
- return TailCallStubThenBytecodeDispatchImpl(descriptor, target, context,
- {args...});
+ TailCallStubThenBytecodeDispatchImpl(descriptor, target, context,
+ {args...});
}
// Tailcalls to the given code object with JSCall linkage. The JS arguments
@@ -1392,14 +1058,13 @@ class V8_EXPORT_PRIVATE CodeAssembler {
   // Note that no arguments adaptation is going on here - all the JavaScript
// arguments are left on the stack unmodified. Therefore, this tail call can
// only be used after arguments adaptation has been performed already.
- TNode<Object> TailCallJSCode(TNode<Code> code, TNode<Context> context,
- TNode<JSFunction> function,
- TNode<Object> new_target,
- TNode<Int32T> arg_count);
+ void TailCallJSCode(TNode<Code> code, TNode<Context> context,
+ TNode<JSFunction> function, TNode<Object> new_target,
+ TNode<Int32T> arg_count);
template <class... TArgs>
- Node* CallJS(Callable const& callable, Node* context, Node* function,
- Node* receiver, TArgs... args) {
+ TNode<Object> CallJS(Callable const& callable, Node* context, Node* function,
+ Node* receiver, TArgs... args) {
int argc = static_cast<int>(sizeof...(args));
TNode<Int32T> arity = Int32Constant(argc);
return CallStub(callable, context, function, arity, receiver, args...);
@@ -1511,15 +1176,14 @@ class V8_EXPORT_PRIVATE CodeAssembler {
TNode<Code> target, TNode<Object> context,
std::initializer_list<Node*> args);
- Node* TailCallStubThenBytecodeDispatchImpl(
+ void TailCallStubThenBytecodeDispatchImpl(
const CallInterfaceDescriptor& descriptor, Node* target, Node* context,
std::initializer_list<Node*> args);
Node* CallStubRImpl(StubCallMode call_mode,
const CallInterfaceDescriptor& descriptor,
- size_t result_size, Node* target,
- SloppyTNode<Object> context,
- std::initializer_list<Node*> args);
+ size_t result_size, TNode<Object> target,
+ TNode<Object> context, std::initializer_list<Node*> args);
// These two don't have definitions and are here only for catching use cases
// where the cast is not necessary.
@@ -1810,7 +1474,7 @@ class V8_EXPORT_PRIVATE CodeAssemblerScopedExceptionHandler {
} // namespace compiler
-#if defined(V8_HOST_ARCH_32_BIT) || defined(V8_COMPRESS_POINTERS)
+#if defined(V8_HOST_ARCH_32_BIT)
#define BINT_IS_SMI
using BInt = Smi;
#elif defined(V8_HOST_ARCH_64_BIT)
diff --git a/deps/v8/src/compiler/compilation-dependencies.cc b/deps/v8/src/compiler/compilation-dependencies.cc
index 592d85440c..33990dfa48 100644
--- a/deps/v8/src/compiler/compilation-dependencies.cc
+++ b/deps/v8/src/compiler/compilation-dependencies.cc
@@ -5,6 +5,7 @@
#include "src/compiler/compilation-dependencies.h"
#include "src/compiler/compilation-dependency.h"
+#include "src/execution/protectors.h"
#include "src/handles/handles-inl.h"
#include "src/objects/allocation-site-inl.h"
#include "src/objects/objects-inl.h"
@@ -155,7 +156,7 @@ class FieldRepresentationDependency final : public CompilationDependency {
public:
// TODO(neis): Once the concurrent compiler frontend is always-on, we no
// longer need to explicitly store the representation.
- FieldRepresentationDependency(const MapRef& owner, int descriptor,
+ FieldRepresentationDependency(const MapRef& owner, InternalIndex descriptor,
Representation representation)
: owner_(owner),
descriptor_(descriptor),
@@ -180,7 +181,7 @@ class FieldRepresentationDependency final : public CompilationDependency {
private:
MapRef owner_;
- int descriptor_;
+ InternalIndex descriptor_;
Representation representation_;
};
@@ -188,7 +189,7 @@ class FieldTypeDependency final : public CompilationDependency {
public:
// TODO(neis): Once the concurrent compiler frontend is always-on, we no
// longer need to explicitly store the type.
- FieldTypeDependency(const MapRef& owner, int descriptor,
+ FieldTypeDependency(const MapRef& owner, InternalIndex descriptor,
const ObjectRef& type)
: owner_(owner), descriptor_(descriptor), type_(type) {
DCHECK(owner_.equals(owner_.FindFieldOwner(descriptor_)));
@@ -210,13 +211,13 @@ class FieldTypeDependency final : public CompilationDependency {
private:
MapRef owner_;
- int descriptor_;
+ InternalIndex descriptor_;
ObjectRef type_;
};
class FieldConstnessDependency final : public CompilationDependency {
public:
- FieldConstnessDependency(const MapRef& owner, int descriptor)
+ FieldConstnessDependency(const MapRef& owner, InternalIndex descriptor)
: owner_(owner), descriptor_(descriptor) {
DCHECK(owner_.equals(owner_.FindFieldOwner(descriptor_)));
DCHECK_EQ(PropertyConstness::kConst,
@@ -238,7 +239,7 @@ class FieldConstnessDependency final : public CompilationDependency {
private:
MapRef owner_;
- int descriptor_;
+ InternalIndex descriptor_;
};
class GlobalPropertyDependency final : public CompilationDependency {
@@ -282,12 +283,12 @@ class GlobalPropertyDependency final : public CompilationDependency {
class ProtectorDependency final : public CompilationDependency {
public:
explicit ProtectorDependency(const PropertyCellRef& cell) : cell_(cell) {
- DCHECK_EQ(cell_.value().AsSmi(), Isolate::kProtectorValid);
+ DCHECK_EQ(cell_.value().AsSmi(), Protectors::kProtectorValid);
}
bool IsValid() const override {
Handle<PropertyCell> cell = cell_.object();
- return cell->value() == Smi::FromInt(Isolate::kProtectorValid);
+ return cell->value() == Smi::FromInt(Protectors::kProtectorValid);
}
void Install(const MaybeObjectHandle& code) const override {
@@ -404,7 +405,7 @@ AllocationType CompilationDependencies::DependOnPretenureMode(
}
PropertyConstness CompilationDependencies::DependOnFieldConstness(
- const MapRef& map, int descriptor) {
+ const MapRef& map, InternalIndex descriptor) {
MapRef owner = map.FindFieldOwner(descriptor);
PropertyConstness constness =
owner.GetPropertyDetails(descriptor).constness();
@@ -426,13 +427,13 @@ PropertyConstness CompilationDependencies::DependOnFieldConstness(
return PropertyConstness::kConst;
}
-void CompilationDependencies::DependOnFieldRepresentation(const MapRef& map,
- int descriptor) {
+void CompilationDependencies::DependOnFieldRepresentation(
+ const MapRef& map, InternalIndex descriptor) {
RecordDependency(FieldRepresentationDependencyOffTheRecord(map, descriptor));
}
void CompilationDependencies::DependOnFieldType(const MapRef& map,
- int descriptor) {
+ InternalIndex descriptor) {
RecordDependency(FieldTypeDependencyOffTheRecord(map, descriptor));
}
@@ -444,7 +445,7 @@ void CompilationDependencies::DependOnGlobalProperty(
}
bool CompilationDependencies::DependOnProtector(const PropertyCellRef& cell) {
- if (cell.value().AsSmi() != Isolate::kProtectorValid) return false;
+ if (cell.value().AsSmi() != Protectors::kProtectorValid) return false;
RecordDependency(new (zone_) ProtectorDependency(cell));
return true;
}
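DependOnProtector now checks the cell against Protectors::kProtectorValid instead of the old Isolate constant. The pattern itself is unchanged: record that the cell currently holds the valid sentinel, and treat dependent code as invalid once anything flips the cell. An illustrative sketch, not V8's actual classes:

#include <cstdio>

constexpr int kProtectorValid = 1;
constexpr int kProtectorInvalid = 0;

struct PropertyCell { int value = kProtectorValid; };

struct ProtectorDependency {
  const PropertyCell* cell;
  bool IsValid() const { return cell->value == kProtectorValid; }
};

int main() {
  PropertyCell no_elements;
  ProtectorDependency dep{&no_elements};
  std::printf("%d\n", dep.IsValid());      // 1
  no_elements.value = kProtectorInvalid;   // e.g. a prototype was patched
  std::printf("%d\n", dep.IsValid());      // 0
}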
@@ -632,7 +633,7 @@ CompilationDependencies::TransitionDependencyOffTheRecord(
CompilationDependency const*
CompilationDependencies::FieldRepresentationDependencyOffTheRecord(
- const MapRef& map, int descriptor) const {
+ const MapRef& map, InternalIndex descriptor) const {
MapRef owner = map.FindFieldOwner(descriptor);
PropertyDetails details = owner.GetPropertyDetails(descriptor);
DCHECK(details.representation().Equals(
@@ -642,8 +643,8 @@ CompilationDependencies::FieldRepresentationDependencyOffTheRecord(
}
CompilationDependency const*
-CompilationDependencies::FieldTypeDependencyOffTheRecord(const MapRef& map,
- int descriptor) const {
+CompilationDependencies::FieldTypeDependencyOffTheRecord(
+ const MapRef& map, InternalIndex descriptor) const {
MapRef owner = map.FindFieldOwner(descriptor);
ObjectRef type = owner.GetFieldType(descriptor);
DCHECK(type.equals(map.GetFieldType(descriptor)));
diff --git a/deps/v8/src/compiler/compilation-dependencies.h b/deps/v8/src/compiler/compilation-dependencies.h
index cb6cea0685..0b1612487e 100644
--- a/deps/v8/src/compiler/compilation-dependencies.h
+++ b/deps/v8/src/compiler/compilation-dependencies.h
@@ -55,11 +55,11 @@ class V8_EXPORT_PRIVATE CompilationDependencies : public ZoneObject {
// Record the assumption that the field representation of a field does not
// change. The field is identified by the arguments.
- void DependOnFieldRepresentation(const MapRef& map, int descriptor);
+ void DependOnFieldRepresentation(const MapRef& map, InternalIndex descriptor);
// Record the assumption that the field type of a field does not change. The
// field is identified by the arguments.
- void DependOnFieldType(const MapRef& map, int descriptor);
+ void DependOnFieldType(const MapRef& map, InternalIndex descriptor);
// Return a field's constness and, if kConst, record the assumption that it
// remains kConst. The field is identified by the arguments.
@@ -68,7 +68,8 @@ class V8_EXPORT_PRIVATE CompilationDependencies : public ZoneObject {
// kConst if the map is stable (and register stability dependency in that
// case). This is to ensure that fast elements kind transitions cannot be
// used to mutate fields without deoptimization of the dependent code.
- PropertyConstness DependOnFieldConstness(const MapRef& map, int descriptor);
+ PropertyConstness DependOnFieldConstness(const MapRef& map,
+ InternalIndex descriptor);
// Record the assumption that neither {cell}'s {CellType} changes, nor the
// {IsReadOnly()} flag of {cell}'s {PropertyDetails}.
@@ -119,9 +120,9 @@ class V8_EXPORT_PRIVATE CompilationDependencies : public ZoneObject {
CompilationDependency const* TransitionDependencyOffTheRecord(
const MapRef& target_map) const;
CompilationDependency const* FieldRepresentationDependencyOffTheRecord(
- const MapRef& map, int descriptor) const;
+ const MapRef& map, InternalIndex descriptor) const;
CompilationDependency const* FieldTypeDependencyOffTheRecord(
- const MapRef& map, int descriptor) const;
+ const MapRef& map, InternalIndex descriptor) const;
// Exposed only for testing purposes.
bool AreValid() const;
diff --git a/deps/v8/src/compiler/decompression-elimination.cc b/deps/v8/src/compiler/decompression-elimination.cc
index 537744652b..5c0f6b1cfa 100644
--- a/deps/v8/src/compiler/decompression-elimination.cc
+++ b/deps/v8/src/compiler/decompression-elimination.cc
@@ -67,7 +67,6 @@ Reduction DecompressionElimination::ReduceCompress(Node* node) {
Node* input_node = node->InputAt(0);
IrOpcode::Value input_opcode = input_node->opcode();
if (IrOpcode::IsDecompressOpcode(input_opcode)) {
- DCHECK(IsValidDecompress(node->opcode(), input_opcode));
DCHECK_EQ(input_node->InputCount(), 1);
return Replace(input_node->InputAt(0));
} else if (IsReducibleConstantOpcode(input_opcode)) {
@@ -167,6 +166,42 @@ Reduction DecompressionElimination::ReduceTypedStateValues(Node* node) {
return any_change ? Changed(node) : NoChange();
}
+Reduction DecompressionElimination::ReduceWord32Equal(Node* node) {
+ DCHECK_EQ(node->opcode(), IrOpcode::kWord32Equal);
+
+ DCHECK_EQ(node->InputCount(), 2);
+ Node* lhs = node->InputAt(0);
+ Node* rhs = node->InputAt(1);
+
+ if (!IrOpcode::IsCompressOpcode(lhs->opcode()) ||
+ !IrOpcode::IsCompressOpcode(rhs->opcode())) {
+ return NoChange();
+ }
+ // Input nodes for compress operation.
+ lhs = lhs->InputAt(0);
+ rhs = rhs->InputAt(0);
+
+ bool changed = false;
+
+ if (lhs->opcode() == IrOpcode::kBitcastWordToTaggedSigned) {
+ Node* input = lhs->InputAt(0);
+ if (IsReducibleConstantOpcode(input->opcode())) {
+ node->ReplaceInput(0, GetCompressedConstant(input));
+ changed = true;
+ }
+ }
+
+ if (rhs->opcode() == IrOpcode::kBitcastWordToTaggedSigned) {
+ Node* input = rhs->InputAt(0);
+ if (IsReducibleConstantOpcode(input->opcode())) {
+ node->ReplaceInput(1, GetCompressedConstant(input));
+ changed = true;
+ }
+ }
+
+ return changed ? Changed(node) : NoChange();
+}
+
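ReduceWord32Equal relies on the fact that, under 4-byte pointer compression, 32-bit equality of compressed values coincides with equality of the original tagged values inside one cage, so constant inputs can be swapped for their compressed form. A tiny sketch of that invariant, with made-up addresses:

#include <cstdint>
#include <cstdio>

// Compression keeps only the low 32 bits (the offset within the cage).
uint32_t Compress(uint64_t tagged) { return static_cast<uint32_t>(tagged); }

int main() {
  uint64_t cage_base = 0x200000000000ull;  // hypothetical cage base
  uint64_t a = cage_base + 0x1234;
  uint64_t b = cage_base + 0x1234;
  uint64_t c = cage_base + 0x5678;
  // Equal exactly when the uncompressed values are equal within the cage.
  std::printf("%d %d\n", Compress(a) == Compress(b),
              Compress(a) == Compress(c));  // 1 0
}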
Reduction DecompressionElimination::ReduceWord64Equal(Node* node) {
DCHECK_EQ(node->opcode(), IrOpcode::kWord64Equal);
@@ -220,6 +255,8 @@ Reduction DecompressionElimination::Reduce(Node* node) {
return ReducePhi(node);
case IrOpcode::kTypedStateValues:
return ReduceTypedStateValues(node);
+ case IrOpcode::kWord32Equal:
+ return ReduceWord32Equal(node);
case IrOpcode::kWord64Equal:
return ReduceWord64Equal(node);
default:
diff --git a/deps/v8/src/compiler/decompression-elimination.h b/deps/v8/src/compiler/decompression-elimination.h
index 85a6c98aa0..6b2be009c6 100644
--- a/deps/v8/src/compiler/decompression-elimination.h
+++ b/deps/v8/src/compiler/decompression-elimination.h
@@ -65,6 +65,11 @@ class V8_EXPORT_PRIVATE DecompressionElimination final
// value of that constant.
Reduction ReduceWord64Equal(Node* node);
+  // This is a workaround for the load elimination tests.
+  // Replaces Compress -> BitcastWordToTaggedSigned -> ReducibleConstant
+  // with CompressedConstant on both inputs of a Word32Equal operation.
+ Reduction ReduceWord32Equal(Node* node);
+
Graph* graph() const { return graph_; }
MachineOperatorBuilder* machine() const { return machine_; }
CommonOperatorBuilder* common() const { return common_; }
diff --git a/deps/v8/src/compiler/effect-control-linearizer.cc b/deps/v8/src/compiler/effect-control-linearizer.cc
index 8dfe356c34..ceff453164 100644
--- a/deps/v8/src/compiler/effect-control-linearizer.cc
+++ b/deps/v8/src/compiler/effect-control-linearizer.cc
@@ -187,8 +187,11 @@ class EffectControlLinearizer {
Node* LowerMaybeGrowFastElements(Node* node, Node* frame_state);
void LowerTransitionElementsKind(Node* node);
Node* LowerLoadFieldByIndex(Node* node);
+ Node* LowerLoadMessage(Node* node);
Node* LowerLoadTypedElement(Node* node);
Node* LowerLoadDataViewElement(Node* node);
+ Node* LowerLoadStackArgument(Node* node);
+ void LowerStoreMessage(Node* node);
void LowerStoreTypedElement(Node* node);
void LowerStoreDataViewElement(Node* node);
void LowerStoreSignedSmallElement(Node* node);
@@ -227,6 +230,8 @@ class EffectControlLinearizer {
Node* LowerStringComparison(Callable const& callable, Node* node);
Node* IsElementsKindGreaterThan(Node* kind, ElementsKind reference_kind);
+ Node* BuildTypedArrayDataPointer(Node* base, Node* external);
+
Node* ChangeInt32ToCompressedSmi(Node* value);
Node* ChangeInt32ToSmi(Node* value);
Node* ChangeInt32ToIntPtr(Node* value);
@@ -247,6 +252,7 @@ class EffectControlLinearizer {
Node* SmiShiftBitsConstant();
void TransitionElementsTo(Node* node, Node* array, ElementsKind from,
ElementsKind to);
+ void ConnectUnreachableToEnd(Node* effect, Node* control);
Factory* factory() const { return isolate()->factory(); }
Isolate* isolate() const { return jsgraph()->isolate(); }
@@ -308,19 +314,8 @@ struct PendingEffectPhi {
: effect_phi(effect_phi), block(block) {}
};
-void ConnectUnreachableToEnd(Node* effect, Node* control, JSGraph* jsgraph) {
- Graph* graph = jsgraph->graph();
- CommonOperatorBuilder* common = jsgraph->common();
- if (effect->opcode() == IrOpcode::kDead) return;
- if (effect->opcode() != IrOpcode::kUnreachable) {
- effect = graph->NewNode(common->Unreachable(), effect, control);
- }
- Node* throw_node = graph->NewNode(common->Throw(), effect, control);
- NodeProperties::MergeControlToEnd(graph, common, throw_node);
-}
-
void UpdateEffectPhi(Node* node, BasicBlock* block,
- BlockEffectControlMap* block_effects, JSGraph* jsgraph) {
+ BlockEffectControlMap* block_effects) {
// Update all inputs to an effect phi with the effects from the given
// block->effect map.
DCHECK_EQ(IrOpcode::kEffectPhi, node->opcode());
@@ -607,7 +602,7 @@ void EffectControlLinearizer::Run() {
// record the effect phi for later processing.
pending_effect_phis.push_back(PendingEffectPhi(effect_phi, block));
} else {
- UpdateEffectPhi(effect_phi, block, &block_effects, jsgraph());
+ UpdateEffectPhi(effect_phi, block, &block_effects);
}
}
@@ -649,7 +644,7 @@ void EffectControlLinearizer::Run() {
if (control->opcode() == IrOpcode::kLoop) {
pending_effect_phis.push_back(PendingEffectPhi(effect, block));
} else {
- UpdateEffectPhi(effect, block, &block_effects, jsgraph());
+ UpdateEffectPhi(effect, block, &block_effects);
}
} else if (control->opcode() == IrOpcode::kIfException) {
// The IfException is connected into the effect chain, so we need
@@ -734,7 +729,7 @@ void EffectControlLinearizer::Run() {
// during the first pass (because they could have incoming back edges).
for (const PendingEffectPhi& pending_effect_phi : pending_effect_phis) {
UpdateEffectPhi(pending_effect_phi.effect_phi, pending_effect_phi.block,
- &block_effects, jsgraph());
+ &block_effects);
}
}
@@ -828,7 +823,7 @@ void EffectControlLinearizer::ProcessNode(Node* node, Node** frame_state,
// Break the effect chain on {Unreachable} and reconnect to the graph end.
// Mark the following code for deletion by connecting to the {Dead} node.
if (node->opcode() == IrOpcode::kUnreachable) {
- ConnectUnreachableToEnd(*effect, *control, jsgraph());
+ ConnectUnreachableToEnd(*effect, *control);
*effect = *control = jsgraph()->Dead();
}
}
@@ -1243,6 +1238,12 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kTransitionElementsKind:
LowerTransitionElementsKind(node);
break;
+ case IrOpcode::kLoadMessage:
+ result = LowerLoadMessage(node);
+ break;
+ case IrOpcode::kStoreMessage:
+ LowerStoreMessage(node);
+ break;
case IrOpcode::kLoadFieldByIndex:
result = LowerLoadFieldByIndex(node);
break;
@@ -1252,6 +1253,9 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kLoadDataViewElement:
result = LowerLoadDataViewElement(node);
break;
+ case IrOpcode::kLoadStackArgument:
+ result = LowerLoadStackArgument(node);
+ break;
case IrOpcode::kStoreTypedElement:
LowerStoreTypedElement(node);
break;
@@ -1325,6 +1329,13 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
return true;
}
+void EffectControlLinearizer::ConnectUnreachableToEnd(Node* effect,
+ Node* control) {
+ DCHECK_EQ(effect->opcode(), IrOpcode::kUnreachable);
+ Node* throw_node = graph()->NewNode(common()->Throw(), effect, control);
+ NodeProperties::MergeControlToEnd(graph(), common(), throw_node);
+}
+
#define __ gasm()->
Node* EffectControlLinearizer::LowerChangeFloat64ToTagged(Node* node) {
@@ -1601,7 +1612,7 @@ Node* EffectControlLinearizer::LowerTruncateTaggedToBit(Node* node) {
__ Bind(&if_smi);
{
// If {value} is a Smi, then we only need to check that it's not zero.
- __ Goto(&done, __ Word32Equal(__ IntPtrEqual(value, __ IntPtrConstant(0)),
+ __ Goto(&done, __ Word32Equal(__ TaggedEqual(value, __ SmiConstant(0)),
__ Int32Constant(0)));
}
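VisitGetTemplateObject no longer materializes the template object at compile time; it emits a JSGetTemplateObject operator keyed by a feedback slot, and the runtime caches the created array per call site so tagged templates observe a stable identity. A memoization sketch of that caching; this is illustrative and not V8's FeedbackVector:

#include <cstdio>
#include <memory>
#include <unordered_map>
#include <vector>

using TemplateObject = std::vector<const char*>;
std::unordered_map<int, std::shared_ptr<TemplateObject>> g_feedback;

std::shared_ptr<TemplateObject> GetTemplateObject(int slot) {
  auto& entry = g_feedback[slot];
  if (!entry) {  // first evaluation of this call site creates the object
    entry = std::make_shared<TemplateObject>(TemplateObject{"raw", "strings"});
  }
  return entry;
}

int main() {
  auto first = GetTemplateObject(7);
  auto second = GetTemplateObject(7);
  std::printf("%d\n", first == second);  // 1: same identity
}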
@@ -1952,7 +1963,7 @@ Node* EffectControlLinearizer::LowerCheckReceiverOrNullOrUndefined(
__ LoadField(AccessBuilder::ForMapInstanceType(), value_map);
// Rule out all primitives except oddballs (true, false, undefined, null).
- STATIC_ASSERT(LAST_PRIMITIVE_TYPE == ODDBALL_TYPE);
+ STATIC_ASSERT(LAST_PRIMITIVE_HEAP_OBJECT_TYPE == ODDBALL_TYPE);
STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
Node* check0 = __ Uint32LessThanOrEqual(__ Uint32Constant(ODDBALL_TYPE),
value_instance_type);
@@ -2028,9 +2039,8 @@ Node* EffectControlLinearizer::LowerStringConcat(Node* node) {
callable.descriptor().GetStackParameterCount(), CallDescriptor::kNoFlags,
Operator::kNoDeopt | Operator::kNoWrite | Operator::kNoThrow);
- Node* value =
- __ Call(call_descriptor, jsgraph()->HeapConstant(callable.code()), lhs,
- rhs, __ NoContextConstant());
+ Node* value = __ Call(call_descriptor, __ HeapConstant(callable.code()), lhs,
+ rhs, __ NoContextConstant());
return value;
}
@@ -2112,8 +2122,7 @@ Node* EffectControlLinearizer::LowerCheckedInt32Div(Node* node,
// Check if {lhs} is kMinInt and {rhs} is -1, in which case we'd have
// to return -kMinInt, which is not representable as Word32.
- Node* check_lhs_minint = graph()->NewNode(machine()->Word32Equal(), lhs,
- __ Int32Constant(kMinInt));
+ Node* check_lhs_minint = __ Word32Equal(lhs, __ Int32Constant(kMinInt));
__ Branch(check_lhs_minint, &if_lhs_minint, &if_lhs_notminint);
__ Bind(&if_lhs_minint);
@@ -2760,7 +2769,7 @@ Node* EffectControlLinearizer::LowerChangeUint64ToBigInt(Node* node) {
DCHECK(machine()->Is64());
Node* value = node->InputAt(0);
- Node* map = jsgraph()->HeapConstant(factory()->bigint_map());
+ Node* map = __ HeapConstant(factory()->bigint_map());
// BigInts with value 0 must be of size 0 (canonical form).
auto if_zerodigits = __ MakeLabel();
auto if_onedigit = __ MakeLabel();
@@ -2963,10 +2972,11 @@ Node* EffectControlLinearizer::LowerObjectIsArrayBufferView(Node* node) {
Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
Node* value_instance_type =
__ LoadField(AccessBuilder::ForMapInstanceType(), value_map);
- STATIC_ASSERT(JS_TYPED_ARRAY_TYPE + 1 == JS_DATA_VIEW_TYPE);
Node* vfalse = __ Uint32LessThan(
- __ Int32Sub(value_instance_type, __ Int32Constant(JS_TYPED_ARRAY_TYPE)),
- __ Int32Constant(2));
+ __ Int32Sub(value_instance_type,
+ __ Int32Constant(FIRST_JS_ARRAY_BUFFER_VIEW_TYPE)),
+ __ Int32Constant(LAST_JS_ARRAY_BUFFER_VIEW_TYPE -
+ FIRST_JS_ARRAY_BUFFER_VIEW_TYPE + 1));
__ Goto(&done, vfalse);
__ Bind(&if_smi);
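The rewritten instance-type test uses the named JS_ARRAY_BUFFER_VIEW range instead of hard-coding that JS_DATA_VIEW_TYPE immediately follows JS_TYPED_ARRAY_TYPE, and keeps the usual unsigned-subtraction trick: one unsigned compare covers the whole closed range. In scalar form, with hypothetical type codes:

#include <cstdint>
#include <cstdio>

// x is in [first, last] iff (unsigned)(x - first) < last - first + 1;
// values below first wrap around to a huge unsigned number.
bool InRange(uint32_t x, uint32_t first, uint32_t last) {
  return x - first < last - first + 1u;
}

int main() {
  constexpr uint32_t kFirst = 1060, kLast = 1061;  // invented type codes
  std::printf("%d %d %d\n", InRange(1060, kFirst, kLast),
              InRange(1061, kFirst, kLast),
              InRange(1059, kFirst, kLast));  // 1 1 0
}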
@@ -3521,7 +3531,7 @@ Node* EffectControlLinearizer::LowerArgumentsFrame(Node* node) {
__ Load(MachineType::Pointer(), frame,
__ IntPtrConstant(StandardFrameConstants::kCallerFPOffset));
Node* parent_frame_type = __ Load(
- MachineType::TypeCompressedTagged(), parent_frame,
+ MachineType::IntPtr(), parent_frame,
__ IntPtrConstant(CommonFrameConstants::kContextOrFrameTypeOffset));
__ GotoIf(__ IntPtrEqual(parent_frame_type,
@@ -3541,7 +3551,7 @@ Node* EffectControlLinearizer::LowerNewDoubleElements(Node* node) {
auto done = __ MakeLabel(MachineRepresentation::kTaggedPointer);
Node* zero_length = __ IntPtrEqual(length, __ IntPtrConstant(0));
__ GotoIf(zero_length, &done,
- jsgraph()->HeapConstant(factory()->empty_fixed_array()));
+ __ HeapConstant(factory()->empty_fixed_array()));
// Compute the effective size of the backing store.
Node* size = __ IntAdd(__ WordShl(length, __ IntPtrConstant(kDoubleSizeLog2)),
@@ -3589,7 +3599,7 @@ Node* EffectControlLinearizer::LowerNewSmiOrObjectElements(Node* node) {
auto done = __ MakeLabel(MachineRepresentation::kTaggedPointer);
Node* zero_length = __ IntPtrEqual(length, __ IntPtrConstant(0));
__ GotoIf(zero_length, &done,
- jsgraph()->HeapConstant(factory()->empty_fixed_array()));
+ __ HeapConstant(factory()->empty_fixed_array()));
// Compute the effective size of the backing store.
Node* size = __ IntAdd(__ WordShl(length, __ IntPtrConstant(kTaggedSizeLog2)),
@@ -3671,10 +3681,9 @@ Node* EffectControlLinearizer::LowerNewConsString(Node* node) {
__ Branch(__ Word32Equal(encoding, __ Int32Constant(kTwoByteStringTag)),
&if_twobyte, &if_onebyte);
__ Bind(&if_onebyte);
- __ Goto(&done,
- jsgraph()->HeapConstant(factory()->cons_one_byte_string_map()));
+ __ Goto(&done, __ HeapConstant(factory()->cons_one_byte_string_map()));
__ Bind(&if_twobyte);
- __ Goto(&done, jsgraph()->HeapConstant(factory()->cons_string_map()));
+ __ Goto(&done, __ HeapConstant(factory()->cons_string_map()));
__ Bind(&done);
Node* result_map = done.PhiAt(0);
@@ -4287,9 +4296,8 @@ Node* EffectControlLinearizer::LowerBigIntAdd(Node* node, Node* frame_state) {
graph()->zone(), callable.descriptor(),
callable.descriptor().GetStackParameterCount(), CallDescriptor::kNoFlags,
Operator::kFoldable | Operator::kNoThrow);
- Node* value =
- __ Call(call_descriptor, jsgraph()->HeapConstant(callable.code()), lhs,
- rhs, __ NoContextConstant());
+ Node* value = __ Call(call_descriptor, __ HeapConstant(callable.code()), lhs,
+ rhs, __ NoContextConstant());
// Check for exception sentinel: Smi is returned to signal BigIntTooBig.
__ DeoptimizeIf(DeoptimizeReason::kBigIntTooBig, FeedbackSource{},
@@ -4305,9 +4313,8 @@ Node* EffectControlLinearizer::LowerBigIntNegate(Node* node) {
graph()->zone(), callable.descriptor(),
callable.descriptor().GetStackParameterCount(), CallDescriptor::kNoFlags,
Operator::kFoldable | Operator::kNoThrow);
- Node* value =
- __ Call(call_descriptor, jsgraph()->HeapConstant(callable.code()),
- node->InputAt(0), __ NoContextConstant());
+ Node* value = __ Call(call_descriptor, __ HeapConstant(callable.code()),
+ node->InputAt(0), __ NoContextConstant());
return value;
}
@@ -4746,6 +4753,20 @@ void EffectControlLinearizer::LowerTransitionElementsKind(Node* node) {
__ Bind(&done);
}
+Node* EffectControlLinearizer::LowerLoadMessage(Node* node) {
+ Node* offset = node->InputAt(0);
+ Node* object_pattern =
+ __ LoadField(AccessBuilder::ForExternalIntPtr(), offset);
+ return __ BitcastWordToTagged(object_pattern);
+}
+
+void EffectControlLinearizer::LowerStoreMessage(Node* node) {
+ Node* offset = node->InputAt(0);
+ Node* object = node->InputAt(1);
+ Node* object_pattern = __ BitcastTaggedToWord(object);
+ __ StoreField(AccessBuilder::ForExternalIntPtr(), offset, object_pattern);
+}
+
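LowerLoadMessage/LowerStoreMessage move a tagged pointer through an off-heap intptr-sized slot by bitcasting between the tagged and word representations. A plain C++ sketch of the same round trip, with invented types:

#include <cstdint>

struct HeapObjectStub { int dummy; };

uintptr_t g_pending_message_slot = 0;  // stands in for the external slot

void StoreMessage(HeapObjectStub* msg) {
  g_pending_message_slot = reinterpret_cast<uintptr_t>(msg);  // tagged -> word
}

HeapObjectStub* LoadMessage() {
  // word -> tagged
  return reinterpret_cast<HeapObjectStub*>(g_pending_message_slot);
}

int main() {
  HeapObjectStub msg{42};
  StoreMessage(&msg);
  return LoadMessage()->dummy == 42 ? 0 : 1;
}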
Node* EffectControlLinearizer::LowerLoadFieldByIndex(Node* node) {
Node* object = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -4801,6 +4822,7 @@ Node* EffectControlLinearizer::LowerLoadFieldByIndex(Node* node) {
// architectures, or a mutable HeapNumber.
__ Bind(&if_double);
{
+ auto loaded_field = __ MakeLabel(MachineRepresentation::kTagged);
auto done_double = __ MakeLabel(MachineRepresentation::kFloat64);
index = __ WordSar(index, one);
@@ -4818,10 +4840,9 @@ Node* EffectControlLinearizer::LowerLoadFieldByIndex(Node* node) {
Node* result = __ Load(MachineType::Float64(), object, offset);
__ Goto(&done_double, result);
} else {
- Node* result =
+ Node* field =
__ Load(MachineType::TypeCompressedTagged(), object, offset);
- result = __ LoadField(AccessBuilder::ForHeapNumberValue(), result);
- __ Goto(&done_double, result);
+ __ Goto(&loaded_field, field);
}
}
@@ -4834,10 +4855,24 @@ Node* EffectControlLinearizer::LowerLoadFieldByIndex(Node* node) {
__ IntPtrConstant(kTaggedSizeLog2)),
__ IntPtrConstant((FixedArray::kHeaderSize - kTaggedSize) -
kHeapObjectTag));
- Node* result =
+ Node* field =
__ Load(MachineType::TypeCompressedTagged(), properties, offset);
- result = __ LoadField(AccessBuilder::ForHeapNumberValue(), result);
- __ Goto(&done_double, result);
+ __ Goto(&loaded_field, field);
+ }
+
+ __ Bind(&loaded_field);
+ {
+ Node* field = loaded_field.PhiAt(0);
+ // We may have transitioned in-place away from double, so check that
+ // this is a HeapNumber -- otherwise the load is fine and we don't need
+ // to copy anything anyway.
+ __ GotoIf(ObjectIsSmi(field), &done, field);
+ Node* field_map = __ LoadField(AccessBuilder::ForMap(), field);
+ __ GotoIfNot(__ TaggedEqual(field_map, __ HeapNumberMapConstant()), &done,
+ field);
+
+ Node* value = __ LoadField(AccessBuilder::ForHeapNumberValue(), field);
+ __ Goto(&done_double, value);
}
__ Bind(&done_double);
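
Note on the LowerLoadFieldByIndex hunks above: both double-field load paths now funnel into a shared loaded_field label, and the value is only unboxed when it really is a HeapNumber, because a map can transition in place away from double representation. A minimal standalone sketch of that check, with stub types and hypothetical names standing in for V8's tagged values:

    #include <cassert>
    #include <variant>

    // Stub tagged values (hypothetical): a Smi, a heap number, or some
    // other heap object.
    struct Smi { int value; };
    struct HeapNumber { double value; };
    struct OtherObject {};
    using Tagged = std::variant<Smi, HeapNumber, OtherObject>;

    // Sketch of the loaded_field logic: only a HeapNumber needs its double
    // payload copied out; a Smi or non-number object is returned unchanged.
    Tagged LoadFieldByIndex(const Tagged& field) {
      if (std::holds_alternative<HeapNumber>(field)) {
        // done_double path: re-box a fresh HeapNumber from the raw double.
        return HeapNumber{std::get<HeapNumber>(field).value};
      }
      return field;  // The load is already fine; nothing to copy.
    }

    int main() {
      assert(std::get<Smi>(LoadFieldByIndex(Tagged{Smi{3}})).value == 3);
      assert(std::get<HeapNumber>(LoadFieldByIndex(Tagged{HeapNumber{1.5}}))
                 .value == 1.5);
      return 0;
    }
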
@@ -4988,6 +5023,35 @@ void EffectControlLinearizer::LowerStoreDataViewElement(Node* node) {
done.PhiAt(0));
}
+// Compute the data pointer, handling the case where the {external} pointer
+// is the effective data pointer (i.e. the {base} is Smi zero).
+Node* EffectControlLinearizer::BuildTypedArrayDataPointer(Node* base,
+ Node* external) {
+ if (IntPtrMatcher(base).Is(0)) {
+ return external;
+ } else {
+ if (COMPRESS_POINTERS_BOOL) {
+ // TurboFan does not support loading of compressed fields without
+ // decompression, so we add the following operations to work around that.
+ // We can't load the base value as word32 because then the value would
+ // not be marked as tagged in the pointer map and would not survive GC.
+ // Compress the base value back in order to be able to decompress it by
+ // doing an unsafe add below. Both the decompression and the compression
+ // will be removed by the decompression elimination pass.
+ base = __ ChangeTaggedToCompressed(base);
+ base = __ BitcastTaggedToWord(base);
+ // Zero-extend Tagged_t to UintPtr according to the current compression
+ // scheme so that the addition with |external_pointer| (which already
+ // contains the compensated offset value) will decompress the tagged
+ // value. See JSTypedArray::ExternalPointerCompensationForOnHeapArray()
+ // for details.
+ base = ChangeUint32ToUintPtr(base);
+ }
+ return __ UnsafePointerAdd(base, external);
+ }
+}
+
Node* EffectControlLinearizer::LowerLoadTypedElement(Node* node) {
ExternalArrayType array_type = ExternalArrayTypeOf(node->op());
Node* buffer = node->InputAt(0);
@@ -4999,17 +5063,22 @@ Node* EffectControlLinearizer::LowerLoadTypedElement(Node* node) {
// ArrayBuffer (if there's any) as long as we are still operating on it.
__ Retain(buffer);
- // Compute the effective storage pointer, handling the case where the
- // {external} pointer is the effective storage pointer (i.e. the {base}
- // is Smi zero).
- Node* storage = IntPtrMatcher(base).Is(0)
- ? external
- : __ UnsafePointerAdd(base, external);
+ Node* data_ptr = BuildTypedArrayDataPointer(base, external);
// Perform the actual typed element access.
return __ LoadElement(AccessBuilder::ForTypedArrayElement(
array_type, true, LoadSensitivity::kCritical),
- storage, index);
+ data_ptr, index);
+}
+
+Node* EffectControlLinearizer::LowerLoadStackArgument(Node* node) {
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+
+ Node* argument =
+ __ LoadElement(AccessBuilder::ForStackArgument(), base, index);
+
+ return __ BitcastWordToTagged(argument);
}
void EffectControlLinearizer::LowerStoreTypedElement(Node* node) {
@@ -5024,16 +5093,11 @@ void EffectControlLinearizer::LowerStoreTypedElement(Node* node) {
// ArrayBuffer (if there's any) as long as we are still operating on it.
__ Retain(buffer);
- // Compute the effective storage pointer, handling the case where the
- // {external} pointer is the effective storage pointer (i.e. the {base}
- // is Smi zero).
- Node* storage = IntPtrMatcher(base).Is(0)
- ? external
- : __ UnsafePointerAdd(base, external);
+ Node* data_ptr = BuildTypedArrayDataPointer(base, external);
// Perform the actual typed element access.
__ StoreElement(AccessBuilder::ForTypedArrayElement(array_type, true),
- storage, index, value);
+ data_ptr, index, value);
}
void EffectControlLinearizer::TransitionElementsTo(Node* node, Node* array,
@@ -5402,7 +5466,7 @@ void EffectControlLinearizer::LowerRuntimeAbort(Node* node) {
auto call_descriptor = Linkage::GetRuntimeCallDescriptor(
graph()->zone(), id, 1, properties, CallDescriptor::kNoFlags);
__ Call(call_descriptor, __ CEntryStubConstant(1),
- jsgraph()->SmiConstant(static_cast<int>(reason)),
+ __ SmiConstant(static_cast<int>(reason)),
__ ExternalConstant(ExternalReference::Create(id)),
__ Int32Constant(1), __ NoContextConstant());
}
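
The new BuildTypedArrayDataPointer helper above centralizes the typed-array storage-pointer computation that LowerLoadTypedElement and LowerStoreTypedElement previously duplicated: when {base} is Smi zero (off-heap backing store), {external} already is the data pointer; otherwise the pointer is base + external, where under pointer compression the base is first re-compressed and zero-extended so that the compensated {external} offset decompresses it during the add. A standalone sketch of that arithmetic (plain C++ integers instead of TurboFan nodes; compression modeled as truncation to 32 bits):

    #include <cassert>
    #include <cstdint>

    // Sketch of BuildTypedArrayDataPointer. "Compression" is modeled as
    // keeping the low 32 bits; {external} is assumed to already contain
    // the compensated offset for the on-heap case (see
    // JSTypedArray::ExternalPointerCompensationForOnHeapArray()).
    uintptr_t TypedArrayDataPointer(uintptr_t base, uintptr_t external,
                                    bool compress_pointers) {
      if (base == 0) {
        // {base} is Smi zero: {external} is already the effective pointer.
        return external;
      }
      if (compress_pointers) {
        // Re-compress the base, then zero-extend; adding the compensated
        // {external} offset decompresses it again.
        base = static_cast<uint32_t>(base);
      }
      return base + external;  // UnsafePointerAdd
    }

    int main() {
      // Off-heap case: base is zero, external is the backing-store address.
      assert(TypedArrayDataPointer(0, 0x1000, true) == 0x1000);
      // On-heap, uncompressed case: a plain addition.
      assert(TypedArrayDataPointer(0x2000, 16, false) == 0x2010);
      return 0;
    }
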
diff --git a/deps/v8/src/compiler/escape-analysis-reducer.cc b/deps/v8/src/compiler/escape-analysis-reducer.cc
index 18ae069b21..b2fb8d10ce 100644
--- a/deps/v8/src/compiler/escape-analysis-reducer.cc
+++ b/deps/v8/src/compiler/escape-analysis-reducer.cc
@@ -326,9 +326,8 @@ void EscapeAnalysisReducer::Finalize() {
TypeCache::Get()->kArgumentsLengthType);
NodeProperties::ReplaceValueInput(load, arguments_frame, 0);
NodeProperties::ReplaceValueInput(load, offset, 1);
- NodeProperties::ChangeOp(load,
- jsgraph()->simplified()->LoadElement(
- AccessBuilder::ForStackArgument()));
+ NodeProperties::ChangeOp(
+ load, jsgraph()->simplified()->LoadStackArgument());
break;
}
case IrOpcode::kLoadField: {
diff --git a/deps/v8/src/compiler/frame-states.cc b/deps/v8/src/compiler/frame-states.cc
index 9478c08c6c..576f6ce542 100644
--- a/deps/v8/src/compiler/frame-states.cc
+++ b/deps/v8/src/compiler/frame-states.cc
@@ -137,13 +137,17 @@ Node* CreateStubBuiltinContinuationFrameState(
// Stack parameters first. Depending on {mode}, final parameters are added
// by the deoptimizer and aren't explicitly passed in the frame state.
int stack_parameter_count =
- descriptor.GetParameterCount() - DeoptimizerParameterCountFor(mode);
- // Reserving space in the vector, except for the case where
- // stack_parameter_count is -1.
- actual_parameters.reserve(stack_parameter_count >= 0
- ? stack_parameter_count +
- descriptor.GetRegisterParameterCount()
- : 0);
+ descriptor.GetStackParameterCount() - DeoptimizerParameterCountFor(mode);
+
+ // Ensure the parameters added by the deoptimizer are passed on the stack.
+ // This check prevents using TFS builtins as continuations during lazy
+ // deopt; use a TFC or TFJ builtin as the lazy-deopt continuation instead,
+ // since those pass the result parameter on the stack.
+ DCHECK_GE(stack_parameter_count, 0);
+
+ // Reserving space in the vector.
+ actual_parameters.reserve(stack_parameter_count +
+ descriptor.GetRegisterParameterCount());
for (int i = 0; i < stack_parameter_count; ++i) {
actual_parameters.push_back(
parameters[descriptor.GetRegisterParameterCount() + i]);
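
The frame-states change above makes the reservation exact: stack_parameter_count is the descriptor's stack parameter count minus the parameters the deoptimizer adds itself, and the new DCHECK_GE rules out the negative case that previously collapsed the reservation to zero. A worked numeric sketch with hypothetical counts:

    #include <cassert>
    #include <vector>

    int main() {
      // Hypothetical builtin descriptor: 3 stack parameters, 2 register
      // parameters, and a mode in which the deoptimizer adds 1 final
      // parameter itself (DeoptimizerParameterCountFor(mode) == 1).
      int descriptor_stack_params = 3;
      int register_params = 2;
      int deoptimizer_params = 1;

      int stack_parameter_count =
          descriptor_stack_params - deoptimizer_params;
      assert(stack_parameter_count >= 0);  // the new DCHECK_GE

      std::vector<int> actual_parameters;
      actual_parameters.reserve(stack_parameter_count + register_params);
      assert(actual_parameters.capacity() >= 4);  // 2 stack + 2 register
      return 0;
    }
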
diff --git a/deps/v8/src/compiler/functional-list.h b/deps/v8/src/compiler/functional-list.h
index 2345f1d360..6af63030f8 100644
--- a/deps/v8/src/compiler/functional-list.h
+++ b/deps/v8/src/compiler/functional-list.h
@@ -90,6 +90,8 @@ class FunctionalList {
size_t Size() const { return elements_ ? elements_->size : 0; }
+ void Clear() { elements_ = nullptr; }
+
class iterator {
public:
explicit iterator(Cons* cur) : current_(cur) {}
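
The new FunctionalList::Clear() above is O(1) because FunctionalList is a persistent cons list: dropping the head pointer does not disturb other lists that still share the tail cells. An illustrative sketch of that behavior (shared_ptr stands in for V8's zone allocation; names are hypothetical):

    #include <cassert>
    #include <cstddef>
    #include <memory>

    // Illustrative persistent cons list; V8's FunctionalList is
    // zone-allocated, but the sharing behavior is the same.
    template <typename T>
    class MiniFunctionalList {
      struct Cons {
        Cons(T value, std::shared_ptr<Cons> rest, size_t size)
            : value(value), rest(std::move(rest)), size(size) {}
        T value;
        std::shared_ptr<Cons> rest;
        size_t size;
      };
      std::shared_ptr<Cons> elements_;

     public:
      void PushFront(T value) {
        elements_ = std::make_shared<Cons>(value, elements_, Size() + 1);
      }
      size_t Size() const { return elements_ ? elements_->size : 0; }
      // O(1): forget the head; copies of this list still share the cells.
      void Clear() { elements_ = nullptr; }
    };

    int main() {
      MiniFunctionalList<int> a;
      a.PushFront(1);
      MiniFunctionalList<int> b = a;  // b shares the tail with a
      a.PushFront(2);
      a.Clear();
      assert(a.Size() == 0 && b.Size() == 1);  // b is unaffected
      return 0;
    }
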
diff --git a/deps/v8/src/compiler/graph-assembler.cc b/deps/v8/src/compiler/graph-assembler.cc
index b4ad81ecda..5c167db980 100644
--- a/deps/v8/src/compiler/graph-assembler.cc
+++ b/deps/v8/src/compiler/graph-assembler.cc
@@ -99,6 +99,10 @@ Node* GraphAssembler::IntPtrEqual(Node* left, Node* right) {
}
Node* GraphAssembler::TaggedEqual(Node* left, Node* right) {
+ if (COMPRESS_POINTERS_BOOL) {
+ return Word32Equal(ChangeTaggedToCompressed(left),
+ ChangeTaggedToCompressed(right));
+ }
return WordEqual(left, right);
}
@@ -232,10 +236,10 @@ Node* GraphAssembler::BitcastTaggedToWord(Node* value) {
current_effect_, current_control_);
}
-Node* GraphAssembler::BitcastTaggedSignedToWord(Node* value) {
+Node* GraphAssembler::BitcastTaggedToWordForTagAndSmiBits(Node* value) {
return current_effect_ =
- graph()->NewNode(machine()->BitcastTaggedSignedToWord(), value,
- current_effect_, current_control_);
+ graph()->NewNode(machine()->BitcastTaggedToWordForTagAndSmiBits(),
+ value, current_effect_, current_control_);
}
Node* GraphAssembler::Word32PoisonOnSpeculation(Node* value) {
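
With pointer compression enabled, GraphAssembler::TaggedEqual above can compare only the compressed 32-bit representations, since all tagged values live in the same cage and therefore agree on the upper bits. A standalone sketch (assumes a 64-bit build; compression modeled as truncation):

    #include <cassert>
    #include <cstdint>

    // Sketch: within one pointer-compression cage, two tagged values are
    // equal iff their low 32 bits are equal, so a Word32Equal on the
    // compressed representations suffices.
    bool TaggedEqualCompressed(uint64_t left, uint64_t right) {
      return static_cast<uint32_t>(left) == static_cast<uint32_t>(right);
    }

    int main() {
      uint64_t cage_base = 0x400000000000ULL;  // shared upper bits
      uint64_t a = cage_base + 0x1234;
      uint64_t b = cage_base + 0x1234;
      assert(TaggedEqualCompressed(a, b));
      assert(!TaggedEqualCompressed(a, b + 8));
      return 0;
    }
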
diff --git a/deps/v8/src/compiler/graph-assembler.h b/deps/v8/src/compiler/graph-assembler.h
index 0088f867c5..d2df5a75f3 100644
--- a/deps/v8/src/compiler/graph-assembler.h
+++ b/deps/v8/src/compiler/graph-assembler.h
@@ -233,7 +233,7 @@ class GraphAssembler {
Node* ToNumber(Node* value);
Node* BitcastWordToTagged(Node* value);
Node* BitcastTaggedToWord(Node* value);
- Node* BitcastTaggedSignedToWord(Node* value);
+ Node* BitcastTaggedToWordForTagAndSmiBits(Node* value);
Node* Allocate(AllocationType allocation, Node* size);
Node* LoadField(FieldAccess const&, Node* object);
Node* LoadElement(ElementAccess const&, Node* object, Node* index);
diff --git a/deps/v8/src/compiler/graph-visualizer.cc b/deps/v8/src/compiler/graph-visualizer.cc
index 85123261db..dddba7d36f 100644
--- a/deps/v8/src/compiler/graph-visualizer.cc
+++ b/deps/v8/src/compiler/graph-visualizer.cc
@@ -163,7 +163,6 @@ void JsonPrintInlinedFunctionInfo(
void JsonPrintAllSourceWithPositions(std::ostream& os,
OptimizedCompilationInfo* info,
Isolate* isolate) {
- AllowDeferredHandleDereference allow_deference_for_print_code;
os << "\"sources\" : {";
Handle<Script> script =
(info->shared_info().is_null() ||
@@ -1055,15 +1054,9 @@ std::ostream& operator<<(std::ostream& os, const InstructionOperandAsJSON& o) {
}
break;
}
- case InstructionOperand::EXPLICIT:
case InstructionOperand::ALLOCATED: {
const LocationOperand* allocated = LocationOperand::cast(op);
- os << "\"type\": ";
- if (allocated->IsExplicit()) {
- os << "\"explicit\", ";
- } else {
- os << "\"allocated\", ";
- }
+ os << "\"type\": \"allocated\", ";
os << "\"text\": \"";
if (op->IsStackSlot()) {
os << "stack:" << allocated->index();
diff --git a/deps/v8/src/compiler/heap-refs.h b/deps/v8/src/compiler/heap-refs.h
index 9b1aa53eb9..c6322ebe69 100644
--- a/deps/v8/src/compiler/heap-refs.h
+++ b/deps/v8/src/compiler/heap-refs.h
@@ -29,7 +29,6 @@ class NativeContext;
class ScriptContextTable;
namespace compiler {
-
// Whether we are loading a property or storing to a property.
// For a store during literal creation, do not walk up the prototype chain.
enum class AccessMode { kLoad, kStore, kStoreInLiteral, kHas };
@@ -95,10 +94,12 @@ enum class OddballType : uint8_t {
V(PropertyCell) \
V(SharedFunctionInfo) \
V(SourceTextModule) \
+ V(TemplateObjectDescription) \
/* Subtypes of Object */ \
V(HeapObject)
class CompilationDependencies;
+struct FeedbackSource;
class JSHeapBroker;
class ObjectData;
class PerIsolateCompilerCache;
@@ -163,8 +164,8 @@ class V8_EXPORT_PRIVATE ObjectRef {
private:
friend class FunctionTemplateInfoRef;
friend class JSArrayData;
- friend class JSGlobalProxyRef;
- friend class JSGlobalProxyData;
+ friend class JSGlobalObjectData;
+ friend class JSGlobalObjectRef;
friend class JSHeapBroker;
friend class JSObjectData;
friend class StringData;
@@ -329,8 +330,6 @@ class V8_EXPORT_PRIVATE JSFunctionRef : public JSObjectRef {
SharedFunctionInfoRef shared() const;
FeedbackVectorRef feedback_vector() const;
int InitialMapInstanceSizeWithMinSlack() const;
-
- bool IsSerializedForCompilation() const;
};
class JSRegExpRef : public JSObjectRef {
@@ -344,6 +343,8 @@ class JSRegExpRef : public JSObjectRef {
ObjectRef source() const;
ObjectRef flags() const;
ObjectRef last_index() const;
+
+ void SerializeAsRegExpBoilerplate();
};
class HeapNumberRef : public HeapObjectRef {
@@ -496,7 +497,6 @@ class FeedbackVectorRef : public HeapObjectRef {
double invocation_count() const;
void Serialize();
- ObjectRef get(FeedbackSlot slot) const;
FeedbackCellRef GetClosureFeedbackCell(int index) const;
};
@@ -535,6 +535,9 @@ class AllocationSiteRef : public HeapObjectRef {
//
// If PointsToLiteral() is false, then IsFastLiteral() is also false.
bool IsFastLiteral() const;
+
+ void SerializeBoilerplate();
+
// We only serialize boilerplate if IsFastLiteral is true.
base::Optional<JSObjectRef> boilerplate() const;
@@ -585,7 +588,6 @@ class V8_EXPORT_PRIVATE MapRef : public HeapObjectRef {
bool is_migration_target() const;
bool supports_fast_array_iteration() const;
bool supports_fast_array_resize() const;
- bool IsMapOfTargetGlobalProxy() const;
bool is_abandoned_prototype_map() const;
OddballType oddball_type() const;
@@ -609,15 +611,15 @@ class V8_EXPORT_PRIVATE MapRef : public HeapObjectRef {
// Concerning the underlying instance_descriptors:
void SerializeOwnDescriptors();
- void SerializeOwnDescriptor(int descriptor_index);
- bool serialized_own_descriptor(int descriptor_index) const;
- MapRef FindFieldOwner(int descriptor_index) const;
- PropertyDetails GetPropertyDetails(int descriptor_index) const;
- NameRef GetPropertyKey(int descriptor_index) const;
- FieldIndex GetFieldIndexFor(int descriptor_index) const;
- ObjectRef GetFieldType(int descriptor_index) const;
- bool IsUnboxedDoubleField(int descriptor_index) const;
- ObjectRef GetStrongValue(int descriptor_number) const;
+ void SerializeOwnDescriptor(InternalIndex descriptor_index);
+ bool serialized_own_descriptor(InternalIndex descriptor_index) const;
+ MapRef FindFieldOwner(InternalIndex descriptor_index) const;
+ PropertyDetails GetPropertyDetails(InternalIndex descriptor_index) const;
+ NameRef GetPropertyKey(InternalIndex descriptor_index) const;
+ FieldIndex GetFieldIndexFor(InternalIndex descriptor_index) const;
+ ObjectRef GetFieldType(InternalIndex descriptor_index) const;
+ bool IsUnboxedDoubleField(InternalIndex descriptor_index) const;
+ ObjectRef GetStrongValue(InternalIndex descriptor_number) const;
void SerializeRootMap();
base::Optional<MapRef> FindRootMap() const;
@@ -727,7 +729,6 @@ class BytecodeArrayRef : public FixedArrayBaseRef {
Address handler_table_address() const;
int handler_table_size() const;
- bool IsSerializedForCompilation() const;
void SerializeForCompilation();
};
@@ -769,7 +770,8 @@ class ScopeInfoRef : public HeapObjectRef {
V(bool, is_safe_to_skip_arguments_adaptor) \
V(bool, IsInlineable) \
V(int, StartPosition) \
- V(bool, is_compiled)
+ V(bool, is_compiled) \
+ V(bool, IsUserJavaScript)
class V8_EXPORT_PRIVATE SharedFunctionInfoRef : public HeapObjectRef {
public:
@@ -791,7 +793,7 @@ class V8_EXPORT_PRIVATE SharedFunctionInfoRef : public HeapObjectRef {
// wraps the retrieval of the template object and creates it if
// necessary.
JSArrayRef GetTemplateObject(
- ObjectRef description, FeedbackVectorRef vector, FeedbackSlot slot,
+ TemplateObjectDescriptionRef description, FeedbackSource const& source,
SerializationPolicy policy = SerializationPolicy::kAssumeSerialized);
void SerializeFunctionTemplateInfo();
@@ -826,7 +828,7 @@ class JSTypedArrayRef : public JSObjectRef {
bool is_on_heap() const;
size_t length() const;
- void* external_pointer() const;
+ void* data_ptr() const;
void Serialize();
bool serialized() const;
@@ -845,6 +847,13 @@ class SourceTextModuleRef : public HeapObjectRef {
base::Optional<CellRef> GetCell(int cell_index) const;
};
+class TemplateObjectDescriptionRef : public HeapObjectRef {
+ public:
+ DEFINE_REF_CONSTRUCTOR(TemplateObjectDescription, HeapObjectRef)
+
+ Handle<TemplateObjectDescription> object() const;
+};
+
class CellRef : public HeapObjectRef {
public:
DEFINE_REF_CONSTRUCTOR(Cell, HeapObjectRef)
@@ -859,13 +868,8 @@ class JSGlobalObjectRef : public JSObjectRef {
DEFINE_REF_CONSTRUCTOR(JSGlobalObject, JSObjectRef)
Handle<JSGlobalObject> object() const;
-};
-
-class JSGlobalProxyRef : public JSObjectRef {
- public:
- DEFINE_REF_CONSTRUCTOR(JSGlobalProxy, JSObjectRef)
- Handle<JSGlobalProxy> object() const;
+ bool IsDetached() const;
// If {serialize} is false:
// If the property is known to exist as a property cell (on the global
@@ -879,6 +883,13 @@ class JSGlobalProxyRef : public JSObjectRef {
SerializationPolicy::kAssumeSerialized) const;
};
+class JSGlobalProxyRef : public JSObjectRef {
+ public:
+ DEFINE_REF_CONSTRUCTOR(JSGlobalProxy, JSObjectRef)
+
+ Handle<JSGlobalProxy> object() const;
+};
+
class CodeRef : public HeapObjectRef {
public:
DEFINE_REF_CONSTRUCTOR(Code, HeapObjectRef)
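
A recurring change in heap-refs.h above is the migration of the descriptor accessors from raw int to InternalIndex, a strongly typed index that cannot be silently mixed with other integers and that supports range iteration (see InternalIndex::Range used elsewhere in this patch). An illustrative miniature of such a wrapper (not the real class):

    #include <cassert>

    // Illustrative strongly-typed index in the spirit of InternalIndex:
    // it wraps an int, exposes is_found()/as_int(), and iterates over
    // [0, size) via Range().
    class MiniInternalIndex {
     public:
      explicit MiniInternalIndex(int value) : value_(value) {}
      static MiniInternalIndex NotFound() { return MiniInternalIndex(-1); }
      bool is_found() const { return value_ >= 0; }
      int as_int() const { return value_; }

      class Range {
       public:
        explicit Range(int size) : size_(size) {}
        class iterator {
         public:
          explicit iterator(int i) : i_(i) {}
          MiniInternalIndex operator*() const { return MiniInternalIndex(i_); }
          iterator& operator++() { ++i_; return *this; }
          bool operator!=(const iterator& o) const { return i_ != o.i_; }
         private:
          int i_;
        };
        iterator begin() const { return iterator(0); }
        iterator end() const { return iterator(size_); }
       private:
        int size_;
      };

     private:
      int value_;
    };

    int main() {
      int sum = 0;
      for (MiniInternalIndex i : MiniInternalIndex::Range(3)) sum += i.as_int();
      assert(sum == 3);  // 0 + 1 + 2
      assert(!MiniInternalIndex::NotFound().is_found());
      return 0;
    }
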
diff --git a/deps/v8/src/compiler/int64-lowering.h b/deps/v8/src/compiler/int64-lowering.h
index 1e2a36089b..0190d3a9c4 100644
--- a/deps/v8/src/compiler/int64-lowering.h
+++ b/deps/v8/src/compiler/int64-lowering.h
@@ -5,6 +5,8 @@
#ifndef V8_COMPILER_INT64_LOWERING_H_
#define V8_COMPILER_INT64_LOWERING_H_
+#include <memory>
+
#include "src/common/globals.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/graph.h"
diff --git a/deps/v8/src/compiler/js-call-reducer.cc b/deps/v8/src/compiler/js-call-reducer.cc
index 0b7b4a65f4..d400fa2673 100644
--- a/deps/v8/src/compiler/js-call-reducer.cc
+++ b/deps/v8/src/compiler/js-call-reducer.cc
@@ -473,10 +473,10 @@ Reduction JSCallReducer::ReduceFunctionPrototypeBind(Node* node) {
if (receiver_map.NumberOfOwnDescriptors() < minimum_nof_descriptors) {
return inference.NoChange();
}
- if (!receiver_map.serialized_own_descriptor(
- JSFunction::kLengthDescriptorIndex) ||
- !receiver_map.serialized_own_descriptor(
- JSFunction::kNameDescriptorIndex)) {
+ const InternalIndex kLengthIndex(JSFunction::kLengthDescriptorIndex);
+ const InternalIndex kNameIndex(JSFunction::kNameDescriptorIndex);
+ if (!receiver_map.serialized_own_descriptor(kLengthIndex) ||
+ !receiver_map.serialized_own_descriptor(kNameIndex)) {
TRACE_BROKER_MISSING(broker(),
"serialized descriptors on map " << receiver_map);
return inference.NoChange();
@@ -485,14 +485,10 @@ Reduction JSCallReducer::ReduceFunctionPrototypeBind(Node* node) {
StringRef length_string(broker(), roots.length_string_handle());
StringRef name_string(broker(), roots.name_string_handle());
- if (!receiver_map.GetPropertyKey(JSFunction::kLengthDescriptorIndex)
- .equals(length_string) ||
- !receiver_map.GetStrongValue(JSFunction::kLengthDescriptorIndex)
- .IsAccessorInfo() ||
- !receiver_map.GetPropertyKey(JSFunction::kNameDescriptorIndex)
- .equals(name_string) ||
- !receiver_map.GetStrongValue(JSFunction::kNameDescriptorIndex)
- .IsAccessorInfo()) {
+ if (!receiver_map.GetPropertyKey(kLengthIndex).equals(length_string) ||
+ !receiver_map.GetStrongValue(kLengthIndex).IsAccessorInfo() ||
+ !receiver_map.GetPropertyKey(kNameIndex).equals(name_string) ||
+ !receiver_map.GetStrongValue(kNameIndex).IsAccessorInfo()) {
return inference.NoChange();
}
}
@@ -3013,12 +3009,13 @@ Reduction JSCallReducer::ReduceCallOrConstructWithArrayLikeOrSpread(
node->opcode() == IrOpcode::kJSConstructWithArrayLike ||
node->opcode() == IrOpcode::kJSConstructWithSpread);
- // Check if {arguments_list} is an arguments object, and {node} is the only
- // value user of {arguments_list} (except for value uses in frame states).
Node* arguments_list = NodeProperties::GetValueInput(node, arity);
if (arguments_list->opcode() != IrOpcode::kJSCreateArguments) {
return NoChange();
}
+
+ // Check if {node} is the only value user of {arguments_list} (except for
+ // value uses in frame states). If not, we give up for now.
for (Edge edge : arguments_list->use_edges()) {
if (!NodeProperties::IsValueEdge(edge)) continue;
Node* const user = edge.from();
@@ -3704,7 +3701,7 @@ Reduction JSCallReducer::ReduceJSCall(Node* node,
case Builtins::kMapIteratorPrototypeNext:
return ReduceCollectionIteratorPrototypeNext(
node, OrderedHashMap::kEntrySize, factory()->empty_ordered_hash_map(),
- FIRST_MAP_ITERATOR_TYPE, LAST_MAP_ITERATOR_TYPE);
+ FIRST_JS_MAP_ITERATOR_TYPE, LAST_JS_MAP_ITERATOR_TYPE);
case Builtins::kSetPrototypeEntries:
return ReduceCollectionIteration(node, CollectionKind::kSet,
IterationKind::kEntries);
@@ -3716,7 +3713,7 @@ Reduction JSCallReducer::ReduceJSCall(Node* node,
case Builtins::kSetIteratorPrototypeNext:
return ReduceCollectionIteratorPrototypeNext(
node, OrderedHashSet::kEntrySize, factory()->empty_ordered_hash_set(),
- FIRST_SET_ITERATOR_TYPE, LAST_SET_ITERATOR_TYPE);
+ FIRST_JS_SET_ITERATOR_TYPE, LAST_JS_SET_ITERATOR_TYPE);
case Builtins::kDatePrototypeGetTime:
return ReduceDatePrototypeGetTime(node);
case Builtins::kDateNow:
@@ -5676,8 +5673,6 @@ Reduction JSCallReducer::ReducePromiseConstructor(Node* node) {
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
- if (!FLAG_experimental_inline_promise_constructor) return NoChange();
-
// Only handle builtins Promises, not subclasses.
if (target != new_target) return NoChange();
diff --git a/deps/v8/src/compiler/js-context-specialization.cc b/deps/v8/src/compiler/js-context-specialization.cc
index 035e8b7ceb..409fc6c9a1 100644
--- a/deps/v8/src/compiler/js-context-specialization.cc
+++ b/deps/v8/src/compiler/js-context-specialization.cc
@@ -38,7 +38,7 @@ Reduction JSContextSpecialization::ReduceParameter(Node* node) {
// Constant-fold the function parameter {node}.
Handle<JSFunction> function;
if (closure().ToHandle(&function)) {
- Node* value = jsgraph()->HeapConstant(function);
+ Node* value = jsgraph()->Constant(JSFunctionRef(broker_, function));
return Replace(value);
}
}
diff --git a/deps/v8/src/compiler/js-create-lowering.cc b/deps/v8/src/compiler/js-create-lowering.cc
index cb52ccaccb..6ab54d793a 100644
--- a/deps/v8/src/compiler/js-create-lowering.cc
+++ b/deps/v8/src/compiler/js-create-lowering.cc
@@ -18,6 +18,7 @@
#include "src/compiler/operator-properties.h"
#include "src/compiler/simplified-operator.h"
#include "src/compiler/state-values-utils.h"
+#include "src/execution/protectors.h"
#include "src/objects/arguments.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/heap-number.h"
@@ -26,6 +27,7 @@
#include "src/objects/js-promise.h"
#include "src/objects/js-regexp-inl.h"
#include "src/objects/objects-inl.h"
+#include "src/objects/template-objects.h"
namespace v8 {
namespace internal {
@@ -84,6 +86,8 @@ Reduction JSCreateLowering::Reduce(Node* node) {
return ReduceJSCreateLiteralArrayOrObject(node);
case IrOpcode::kJSCreateLiteralRegExp:
return ReduceJSCreateLiteralRegExp(node);
+ case IrOpcode::kJSGetTemplateObject:
+ return ReduceJSGetTemplateObject(node);
case IrOpcode::kJSCreateEmptyLiteralArray:
return ReduceJSCreateEmptyLiteralArray(node);
case IrOpcode::kJSCreateEmptyLiteralObject:
@@ -640,10 +644,10 @@ Reduction JSCreateLowering::ReduceJSCreateArray(Node* node) {
allocation = dependencies()->DependOnPretenureMode(*site_ref);
dependencies()->DependOnElementsKind(*site_ref);
} else {
- CellRef array_constructor_protector(
+ PropertyCellRef array_constructor_protector(
broker(), factory()->array_constructor_protector());
- can_inline_call =
- array_constructor_protector.value().AsSmi() == Isolate::kProtectorValid;
+ can_inline_call = array_constructor_protector.value().AsSmi() ==
+ Protectors::kProtectorValid;
}
if (arity == 0) {
@@ -1073,15 +1077,10 @@ Reduction JSCreateLowering::ReduceJSCreateLiteralArrayOrObject(Node* node) {
CreateLiteralParameters const& p = CreateLiteralParametersOf(node->op());
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
-
- FeedbackVectorRef feedback_vector(broker(), p.feedback().vector);
- ObjectRef feedback = feedback_vector.get(p.feedback().slot);
- // TODO(turbofan): we should consider creating a ProcessedFeedback for
- // allocation sites/boiler plates so that we use GetFeedback here. Then
- // we can eventually get rid of the additional copy of feedback slots that
- // we currently have in FeedbackVectorData.
- if (feedback.IsAllocationSite()) {
- AllocationSiteRef site = feedback.AsAllocationSite();
+ ProcessedFeedback const& feedback =
+ broker()->GetFeedbackForArrayOrObjectLiteral(p.feedback());
+ if (!feedback.IsInsufficient()) {
+ AllocationSiteRef site = feedback.AsLiteral().value();
if (site.IsFastLiteral()) {
AllocationType allocation = AllocationType::kYoung;
if (FLAG_allocation_site_pretenuring) {
@@ -1095,20 +1094,17 @@ Reduction JSCreateLowering::ReduceJSCreateLiteralArrayOrObject(Node* node) {
return Replace(value);
}
}
+
return NoChange();
}
Reduction JSCreateLowering::ReduceJSCreateEmptyLiteralArray(Node* node) {
DCHECK_EQ(IrOpcode::kJSCreateEmptyLiteralArray, node->opcode());
FeedbackParameter const& p = FeedbackParameterOf(node->op());
- FeedbackVectorRef fv(broker(), p.feedback().vector);
- ObjectRef feedback = fv.get(p.feedback().slot);
- // TODO(turbofan): we should consider creating a ProcessedFeedback for
- // allocation sites/boiler plates so that we use GetFeedback here. Then
- // we can eventually get rid of the additional copy of feedback slots that
- // we currently have in FeedbackVectorData.
- if (feedback.IsAllocationSite()) {
- AllocationSiteRef site = feedback.AsAllocationSite();
+ ProcessedFeedback const& feedback =
+ broker()->GetFeedbackForArrayOrObjectLiteral(p.feedback());
+ if (!feedback.IsInsufficient()) {
+ AllocationSiteRef site = feedback.AsLiteral().value();
DCHECK(!site.PointsToLiteral());
MapRef initial_map =
native_context().GetInitialJSArrayMap(site.GetElementsKind());
@@ -1162,22 +1158,30 @@ Reduction JSCreateLowering::ReduceJSCreateLiteralRegExp(Node* node) {
CreateLiteralParameters const& p = CreateLiteralParametersOf(node->op());
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
-
- FeedbackVectorRef feedback_vector(broker(), p.feedback().vector);
- ObjectRef feedback = feedback_vector.get(p.feedback().slot);
- // TODO(turbofan): we should consider creating a ProcessedFeedback for
- // allocation sites/boiler plates so that we use GetFeedback here. Then
- // we can eventually get rid of the additional copy of feedback slots that
- // we currently have in FeedbackVectorData.
- if (feedback.IsJSRegExp()) {
- JSRegExpRef boilerplate = feedback.AsJSRegExp();
- Node* value = effect = AllocateLiteralRegExp(effect, control, boilerplate);
+ ProcessedFeedback const& feedback =
+ broker()->GetFeedbackForRegExpLiteral(p.feedback());
+ if (!feedback.IsInsufficient()) {
+ JSRegExpRef literal = feedback.AsRegExpLiteral().value();
+ Node* value = effect = AllocateLiteralRegExp(effect, control, literal);
ReplaceWithValue(node, value, effect, control);
return Replace(value);
}
return NoChange();
}
+Reduction JSCreateLowering::ReduceJSGetTemplateObject(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSGetTemplateObject, node->opcode());
+ GetTemplateObjectParameters const& parameters =
+ GetTemplateObjectParametersOf(node->op());
+ SharedFunctionInfoRef shared(broker(), parameters.shared());
+ JSArrayRef template_object = shared.GetTemplateObject(
+ TemplateObjectDescriptionRef(broker(), parameters.description()),
+ parameters.feedback());
+ Node* value = jsgraph()->Constant(template_object);
+ ReplaceWithValue(node, value);
+ return Replace(value);
+}
+
Reduction JSCreateLowering::ReduceJSCreateFunctionContext(Node* node) {
DCHECK_EQ(IrOpcode::kJSCreateFunctionContext, node->opcode());
const CreateFunctionContextParameters& parameters =
@@ -1628,7 +1632,7 @@ Node* JSCreateLowering::AllocateFastLiteral(Node* effect, Node* control,
ZoneVector<std::pair<FieldAccess, Node*>> inobject_fields(zone());
inobject_fields.reserve(boilerplate_map.GetInObjectProperties());
int const boilerplate_nof = boilerplate_map.NumberOfOwnDescriptors();
- for (int i = 0; i < boilerplate_nof; ++i) {
+ for (InternalIndex i : InternalIndex::Range(boilerplate_nof)) {
PropertyDetails const property_details =
boilerplate_map.GetPropertyDetails(i);
if (property_details.location() != kField) continue;
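
After this change, the literal reducers in js-create-lowering.cc share one shape: ask the broker for ProcessedFeedback, bail out with NoChange if it is insufficient, and otherwise consume the typed value. A stub-typed sketch of that control flow (all names hypothetical except the pattern itself):

    #include <cassert>
    #include <optional>

    // Stub standing in for ProcessedFeedback: either "insufficient" or a
    // typed value (an int standing in for an AllocationSiteRef).
    struct ProcessedFeedback {
      std::optional<int> literal;  // nullopt == insufficient feedback
      bool IsInsufficient() const { return !literal.has_value(); }
    };

    enum class Reduction { kNoChange, kReplaced };

    Reduction ReduceLiteral(const ProcessedFeedback& feedback) {
      if (feedback.IsInsufficient()) return Reduction::kNoChange;
      int site = *feedback.literal;  // feedback.AsLiteral().value()
      (void)site;                    // ... allocate the fast literal here ...
      return Reduction::kReplaced;
    }

    int main() {
      assert(ReduceLiteral({std::nullopt}) == Reduction::kNoChange);
      assert(ReduceLiteral({7}) == Reduction::kReplaced);
      return 0;
    }
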
diff --git a/deps/v8/src/compiler/js-create-lowering.h b/deps/v8/src/compiler/js-create-lowering.h
index 44a3b213b7..2fb28ebfd4 100644
--- a/deps/v8/src/compiler/js-create-lowering.h
+++ b/deps/v8/src/compiler/js-create-lowering.h
@@ -67,6 +67,7 @@ class V8_EXPORT_PRIVATE JSCreateLowering final
Reduction ReduceJSCreateCatchContext(Node* node);
Reduction ReduceJSCreateBlockContext(Node* node);
Reduction ReduceJSCreateGeneratorObject(Node* node);
+ Reduction ReduceJSGetTemplateObject(Node* node);
Reduction ReduceNewArray(
Node* node, Node* length, MapRef initial_map, ElementsKind elements_kind,
AllocationType allocation,
diff --git a/deps/v8/src/compiler/js-generic-lowering.cc b/deps/v8/src/compiler/js-generic-lowering.cc
index d2a9b675f9..d419a804a5 100644
--- a/deps/v8/src/compiler/js-generic-lowering.cc
+++ b/deps/v8/src/compiler/js-generic-lowering.cc
@@ -236,14 +236,15 @@ void JSGenericLowering::LowerJSLoadGlobal(Node* node) {
}
void JSGenericLowering::LowerJSGetIterator(Node* node) {
- CallDescriptor::Flags flags = FrameStateFlagForCall(node);
- const PropertyAccess& p = PropertyAccessOf(node->op());
- node->InsertInput(zone(), 1, jsgraph()->SmiConstant(p.feedback().index()));
- Node* vector = jsgraph()->HeapConstant(p.feedback().vector);
- node->InsertInput(zone(), 2, vector);
- Callable callable =
- Builtins::CallableFor(isolate(), Builtins::kGetIteratorWithFeedback);
- ReplaceWithStubCall(node, callable, flags);
+ // TODO(v8:9625): Currently, the GetIterator operator is desugared in the
+ // native context specialization phase, so the following generic lowering
+ // is never reachable. We could add a check in native context
+ // specialization to avoid desugaring the GetIterator operator in the
+ // megamorphic-feedback case and, here, add a call to the
+ // 'GetIteratorWithFeedback' builtin. This would reduce the size of the
+ // compiled code, since it would insert one call to the builtin instead of
+ // the two calls resulting from the generic lowering of the LoadNamed and
+ // Call operators.
+ UNREACHABLE();
}
void JSGenericLowering::LowerJSStoreProperty(Node* node) {
@@ -561,6 +562,10 @@ void JSGenericLowering::LowerJSCreateLiteralArray(Node* node) {
}
}
+void JSGenericLowering::LowerJSGetTemplateObject(Node* node) {
+ UNREACHABLE(); // Eliminated in native context specialization.
+}
+
void JSGenericLowering::LowerJSCreateEmptyLiteralArray(Node* node) {
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
FeedbackParameter const& p = FeedbackParameterOf(node->op());
diff --git a/deps/v8/src/compiler/js-heap-broker.cc b/deps/v8/src/compiler/js-heap-broker.cc
index 05048f7f4b..9a725eb4e9 100644
--- a/deps/v8/src/compiler/js-heap-broker.cc
+++ b/deps/v8/src/compiler/js-heap-broker.cc
@@ -16,6 +16,7 @@
#include "src/compiler/bytecode-analysis.h"
#include "src/compiler/graph-reducer.h"
#include "src/compiler/per-isolate-compiler-cache.h"
+#include "src/execution/protectors-inl.h"
#include "src/init/bootstrapper.h"
#include "src/objects/allocation-site-inl.h"
#include "src/objects/api-callbacks.h"
@@ -86,6 +87,11 @@ class ObjectData : public ZoneObject {
ObjectDataKind kind() const { return kind_; }
bool is_smi() const { return kind_ == kSmi; }
+#ifdef DEBUG
+ enum class Usage { kUnused, kOnlyIdentityUsed, kDataUsed };
+ mutable Usage used_status = Usage::kUnused;
+#endif // DEBUG
+
private:
Handle<Object> const object_;
ObjectDataKind const kind_;
@@ -420,7 +426,7 @@ class JSTypedArrayData : public JSObjectData {
bool is_on_heap() const { return is_on_heap_; }
size_t length() const { return length_; }
- void* external_pointer() const { return external_pointer_; }
+ void* data_ptr() const { return data_ptr_; }
void Serialize(JSHeapBroker* broker);
bool serialized() const { return serialized_; }
@@ -430,7 +436,7 @@ class JSTypedArrayData : public JSObjectData {
private:
bool const is_on_heap_;
size_t const length_;
- void* const external_pointer_;
+ void* const data_ptr_;
bool serialized_ = false;
HeapObjectData* buffer_ = nullptr;
@@ -441,7 +447,7 @@ JSTypedArrayData::JSTypedArrayData(JSHeapBroker* broker, ObjectData** storage,
: JSObjectData(broker, storage, object),
is_on_heap_(object->is_on_heap()),
length_(object->length()),
- external_pointer_(object->external_pointer()) {}
+ data_ptr_(object->DataPtr()) {}
void JSTypedArrayData::Serialize(JSHeapBroker* broker) {
if (serialized_) return;
@@ -833,8 +839,7 @@ bool IsFastLiteralHelper(Handle<JSObject> boilerplate, int max_depth,
// Check the in-object properties.
Handle<DescriptorArray> descriptors(boilerplate->map().instance_descriptors(),
isolate);
- int limit = boilerplate->map().NumberOfOwnDescriptors();
- for (int i = 0; i < limit; i++) {
+ for (InternalIndex i : boilerplate->map().IterateOwnDescriptors()) {
PropertyDetails details = descriptors->GetDetails(i);
if (details.location() != kField) continue;
DCHECK_EQ(kData, details.kind());
@@ -962,9 +967,6 @@ class MapData : public HeapObjectData {
bool supports_fast_array_resize() const {
return supports_fast_array_resize_;
}
- bool IsMapOfTargetGlobalProxy() const {
- return is_map_of_target_global_proxy_;
- }
bool is_abandoned_prototype_map() const {
return is_abandoned_prototype_map_;
}
@@ -979,9 +981,10 @@ class MapData : public HeapObjectData {
// Serialize a single (or all) own slot(s) of the descriptor array and recurse
// on field owner(s).
- void SerializeOwnDescriptor(JSHeapBroker* broker, int descriptor_index);
+ void SerializeOwnDescriptor(JSHeapBroker* broker,
+ InternalIndex descriptor_index);
void SerializeOwnDescriptors(JSHeapBroker* broker);
- ObjectData* GetStrongValue(int descriptor_index) const;
+ ObjectData* GetStrongValue(InternalIndex descriptor_index) const;
DescriptorArrayData* instance_descriptors() const {
return instance_descriptors_;
}
@@ -1027,7 +1030,6 @@ class MapData : public HeapObjectData {
int const unused_property_fields_;
bool const supports_fast_array_iteration_;
bool const supports_fast_array_resize_;
- bool const is_map_of_target_global_proxy_;
bool const is_abandoned_prototype_map_;
bool serialized_elements_kind_generalizations_ = false;
@@ -1109,8 +1111,9 @@ bool IsReadOnlyLengthDescriptor(Isolate* isolate, Handle<Map> jsarray_map) {
DCHECK(!jsarray_map->is_dictionary_map());
Handle<Name> length_string = isolate->factory()->length_string();
DescriptorArray descriptors = jsarray_map->instance_descriptors();
- int number = descriptors.Search(*length_string, *jsarray_map);
- DCHECK_NE(DescriptorArray::kNotFound, number);
+ // TODO(jkummerow): We could skip the search and hardcode number == 0.
+ InternalIndex number = descriptors.Search(*length_string, *jsarray_map);
+ DCHECK(number.is_found());
return descriptors.GetDetails(number).IsReadOnly();
}
@@ -1120,7 +1123,7 @@ bool SupportsFastArrayIteration(Isolate* isolate, Handle<Map> map) {
map->prototype().IsJSArray() &&
isolate->IsAnyInitialArrayPrototype(
handle(JSArray::cast(map->prototype()), isolate)) &&
- isolate->IsNoElementsProtectorIntact();
+ Protectors::IsNoElementsIntact(isolate);
}
bool SupportsFastArrayResize(Isolate* isolate, Handle<Map> map) {
@@ -1154,8 +1157,6 @@ MapData::MapData(JSHeapBroker* broker, ObjectData** storage, Handle<Map> object)
SupportsFastArrayIteration(broker->isolate(), object)),
supports_fast_array_resize_(
SupportsFastArrayResize(broker->isolate(), object)),
- is_map_of_target_global_proxy_(
- object->IsMapOfGlobalProxy(broker->target_native_context().object())),
is_abandoned_prototype_map_(object->is_abandoned_prototype_map()),
elements_kind_generalizations_(broker->zone()) {}
@@ -1268,7 +1269,6 @@ class FeedbackVectorData : public HeapObjectData {
double invocation_count() const { return invocation_count_; }
void Serialize(JSHeapBroker* broker);
- const ZoneVector<ObjectData*>& feedback() { return feedback_; }
FeedbackCellData* GetClosureFeedbackCell(JSHeapBroker* broker,
int index) const;
@@ -1276,7 +1276,6 @@ class FeedbackVectorData : public HeapObjectData {
double const invocation_count_;
bool serialized_ = false;
- ZoneVector<ObjectData*> feedback_;
ZoneVector<ObjectData*> closure_feedback_cell_array_;
};
@@ -1285,7 +1284,6 @@ FeedbackVectorData::FeedbackVectorData(JSHeapBroker* broker,
Handle<FeedbackVector> object)
: HeapObjectData(broker, storage, object),
invocation_count_(object->invocation_count()),
- feedback_(broker->zone()),
closure_feedback_cell_array_(broker->zone()) {}
FeedbackCellData* FeedbackVectorData::GetClosureFeedbackCell(
@@ -1309,26 +1307,6 @@ void FeedbackVectorData::Serialize(JSHeapBroker* broker) {
TraceScope tracer(broker, this, "FeedbackVectorData::Serialize");
Handle<FeedbackVector> vector = Handle<FeedbackVector>::cast(object());
- DCHECK(feedback_.empty());
- feedback_.reserve(vector->length());
- for (int i = 0; i < vector->length(); ++i) {
- MaybeObject value = vector->get(i);
- ObjectData* slot_value =
- value->IsObject() ? broker->GetOrCreateData(value->cast<Object>())
- : nullptr;
- feedback_.push_back(slot_value);
- if (slot_value == nullptr) continue;
-
- if (slot_value->IsAllocationSite() &&
- slot_value->AsAllocationSite()->IsFastLiteral()) {
- slot_value->AsAllocationSite()->SerializeBoilerplate(broker);
- } else if (slot_value->IsJSRegExp()) {
- slot_value->AsJSRegExp()->SerializeAsRegExpBoilerplate(broker);
- }
- }
- DCHECK_EQ(vector->length(), feedback_.size());
- TRACE(broker, "Copied " << feedback_.size() << " slots");
-
DCHECK(closure_feedback_cell_array_.empty());
int length = vector->closure_feedback_cell_array().length();
closure_feedback_cell_array_.reserve(length);
@@ -1496,10 +1474,6 @@ class BytecodeArrayData : public FixedArrayBaseData {
return *(Handle<Smi>::cast(constant_pool_[index]->object()));
}
- bool IsSerializedForCompilation() const {
- return is_serialized_for_compilation_;
- }
-
void SerializeForCompilation(JSHeapBroker* broker) {
if (is_serialized_for_compilation_) return;
@@ -1843,23 +1817,15 @@ class JSGlobalObjectData : public JSObjectData {
public:
JSGlobalObjectData(JSHeapBroker* broker, ObjectData** storage,
Handle<JSGlobalObject> object);
-};
-
-JSGlobalObjectData::JSGlobalObjectData(JSHeapBroker* broker,
- ObjectData** storage,
- Handle<JSGlobalObject> object)
- : JSObjectData(broker, storage, object) {}
-
-class JSGlobalProxyData : public JSObjectData {
- public:
- JSGlobalProxyData(JSHeapBroker* broker, ObjectData** storage,
- Handle<JSGlobalProxy> object);
+ bool IsDetached() const { return is_detached_; }
PropertyCellData* GetPropertyCell(
JSHeapBroker* broker, NameData* name,
SerializationPolicy policy = SerializationPolicy::kAssumeSerialized);
private:
+ bool const is_detached_;
+
// Properties that either
// (1) are known to exist as property cells on the global object, or
// (2) are known not to (possibly they don't exist at all).
@@ -1867,9 +1833,22 @@ class JSGlobalProxyData : public JSObjectData {
ZoneVector<std::pair<NameData*, PropertyCellData*>> properties_;
};
+JSGlobalObjectData::JSGlobalObjectData(JSHeapBroker* broker,
+ ObjectData** storage,
+ Handle<JSGlobalObject> object)
+ : JSObjectData(broker, storage, object),
+ is_detached_(object->IsDetached()),
+ properties_(broker->zone()) {}
+
+class JSGlobalProxyData : public JSObjectData {
+ public:
+ JSGlobalProxyData(JSHeapBroker* broker, ObjectData** storage,
+ Handle<JSGlobalProxy> object);
+};
+
JSGlobalProxyData::JSGlobalProxyData(JSHeapBroker* broker, ObjectData** storage,
Handle<JSGlobalProxy> object)
- : JSObjectData(broker, storage, object), properties_(broker->zone()) {}
+ : JSObjectData(broker, storage, object) {}
namespace {
base::Optional<PropertyCellRef> GetPropertyCellFromHeap(JSHeapBroker* broker,
@@ -1888,7 +1867,7 @@ base::Optional<PropertyCellRef> GetPropertyCellFromHeap(JSHeapBroker* broker,
}
} // namespace
-PropertyCellData* JSGlobalProxyData::GetPropertyCell(
+PropertyCellData* JSGlobalObjectData::GetPropertyCell(
JSHeapBroker* broker, NameData* name, SerializationPolicy policy) {
CHECK_NOT_NULL(name);
for (auto const& p : properties_) {
@@ -1911,6 +1890,13 @@ PropertyCellData* JSGlobalProxyData::GetPropertyCell(
return result;
}
+class TemplateObjectDescriptionData : public HeapObjectData {
+ public:
+ TemplateObjectDescriptionData(JSHeapBroker* broker, ObjectData** storage,
+ Handle<TemplateObjectDescription> object)
+ : HeapObjectData(broker, storage, object) {}
+};
+
class CodeData : public HeapObjectData {
public:
CodeData(JSHeapBroker* broker, ObjectData** storage, Handle<Code> object)
@@ -2001,20 +1987,20 @@ void MapData::SerializeOwnDescriptors(JSHeapBroker* broker) {
Handle<Map> map = Handle<Map>::cast(object());
int const number_of_own = map->NumberOfOwnDescriptors();
- for (int i = 0; i < number_of_own; ++i) {
+ for (InternalIndex i : InternalIndex::Range(number_of_own)) {
SerializeOwnDescriptor(broker, i);
}
}
-ObjectData* MapData::GetStrongValue(int descriptor_index) const {
- auto data = instance_descriptors_->contents().find(descriptor_index);
+ObjectData* MapData::GetStrongValue(InternalIndex descriptor_index) const {
+ auto data = instance_descriptors_->contents().find(descriptor_index.as_int());
if (data == instance_descriptors_->contents().end()) return nullptr;
return data->second.value;
}
void MapData::SerializeOwnDescriptor(JSHeapBroker* broker,
- int descriptor_index) {
+ InternalIndex descriptor_index) {
TraceScope tracer(broker, this, "MapData::SerializeOwnDescriptor");
Handle<Map> map = Handle<Map>::cast(object());
@@ -2025,8 +2011,8 @@ void MapData::SerializeOwnDescriptor(JSHeapBroker* broker,
ZoneMap<int, PropertyDescriptor>& contents =
instance_descriptors()->contents();
- CHECK_LT(descriptor_index, map->NumberOfOwnDescriptors());
- if (contents.find(descriptor_index) != contents.end()) return;
+ CHECK_LT(descriptor_index.as_int(), map->NumberOfOwnDescriptors());
+ if (contents.find(descriptor_index.as_int()) != contents.end()) return;
Isolate* const isolate = broker->isolate();
auto descriptors =
@@ -2051,14 +2037,14 @@ void MapData::SerializeOwnDescriptor(JSHeapBroker* broker,
broker->GetOrCreateData(descriptors->GetFieldType(descriptor_index));
d.is_unboxed_double_field = map->IsUnboxedDoubleField(d.field_index);
}
- contents[descriptor_index] = d;
+ contents[descriptor_index.as_int()] = d;
if (d.details.location() == kField) {
// Recurse on the owner map.
d.field_owner->SerializeOwnDescriptor(broker, descriptor_index);
}
- TRACE(broker, "Copied descriptor " << descriptor_index << " into "
+ TRACE(broker, "Copied descriptor " << descriptor_index.as_int() << " into "
<< instance_descriptors_ << " ("
<< contents.size() << " total)");
}
@@ -2146,8 +2132,7 @@ void JSObjectData::SerializeRecursiveAsBoilerplate(JSHeapBroker* broker,
// Check the in-object properties.
Handle<DescriptorArray> descriptors(boilerplate->map().instance_descriptors(),
isolate);
- int const limit = boilerplate->map().NumberOfOwnDescriptors();
- for (int i = 0; i < limit; i++) {
+ for (InternalIndex i : boilerplate->map().IterateOwnDescriptors()) {
PropertyDetails details = descriptors->GetDetails(i);
if (details.location() != kField) continue;
DCHECK_EQ(kData, details.kind());
@@ -2210,6 +2195,12 @@ void JSRegExpData::SerializeAsRegExpBoilerplate(JSHeapBroker* broker) {
}
bool ObjectRef::equals(const ObjectRef& other) const {
+#ifdef DEBUG
+ if (broker()->mode() == JSHeapBroker::kSerialized &&
+ data_->used_status == ObjectData::Usage::kUnused) {
+ data_->used_status = ObjectData::Usage::kOnlyIdentityUsed;
+ }
+#endif // DEBUG
return data_ == other.data_;
}
@@ -2269,7 +2260,7 @@ JSHeapBroker::JSHeapBroker(Isolate* isolate, Zone* broker_zone,
TRACE(this, "Constructing heap broker");
}
-std::ostream& JSHeapBroker::Trace() {
+std::ostream& JSHeapBroker::Trace() const {
return trace_out_ << "[" << this << "] "
<< std::string(trace_indentation_ * 2, ' ');
}
@@ -2280,10 +2271,92 @@ void JSHeapBroker::StopSerializing() {
mode_ = kSerialized;
}
+#ifdef DEBUG
+void JSHeapBroker::PrintRefsAnalysis() const {
+ // Usage counts
+ size_t used_total = 0, unused_total = 0, identity_used_total = 0;
+ for (RefsMap::Entry* ref = refs_->Start(); ref != nullptr;
+ ref = refs_->Next(ref)) {
+ switch (ref->value->used_status) {
+ case ObjectData::Usage::kUnused:
+ ++unused_total;
+ break;
+ case ObjectData::Usage::kOnlyIdentityUsed:
+ ++identity_used_total;
+ break;
+ case ObjectData::Usage::kDataUsed:
+ ++used_total;
+ break;
+ }
+ }
+
+ // Ref types analysis
+ TRACE_BROKER_MEMORY(
+ this, "Refs: " << refs_->occupancy() << "; data used: " << used_total
+ << "; only identity used: " << identity_used_total
+ << "; unused: " << unused_total);
+ size_t used_smis = 0, unused_smis = 0, identity_used_smis = 0;
+ size_t used[LAST_TYPE + 1] = {0};
+ size_t unused[LAST_TYPE + 1] = {0};
+ size_t identity_used[LAST_TYPE + 1] = {0};
+ for (RefsMap::Entry* ref = refs_->Start(); ref != nullptr;
+ ref = refs_->Next(ref)) {
+ if (ref->value->is_smi()) {
+ switch (ref->value->used_status) {
+ case ObjectData::Usage::kUnused:
+ ++unused_smis;
+ break;
+ case ObjectData::Usage::kOnlyIdentityUsed:
+ ++identity_used_smis;
+ break;
+ case ObjectData::Usage::kDataUsed:
+ ++used_smis;
+ break;
+ }
+ } else {
+ InstanceType instance_type =
+ static_cast<const HeapObjectData*>(ref->value)
+ ->map()
+ ->instance_type();
+ CHECK_LE(FIRST_TYPE, instance_type);
+ CHECK_LE(instance_type, LAST_TYPE);
+ switch (ref->value->used_status) {
+ case ObjectData::Usage::kUnused:
+ ++unused[instance_type];
+ break;
+ case ObjectData::Usage::kOnlyIdentityUsed:
+ ++identity_used[instance_type];
+ break;
+ case ObjectData::Usage::kDataUsed:
+ ++used[instance_type];
+ break;
+ }
+ }
+ }
+
+ TRACE_BROKER_MEMORY(
+ this, "Smis: " << used_smis + identity_used_smis + unused_smis
+ << "; data used: " << used_smis << "; only identity used: "
+ << identity_used_smis << "; unused: " << unused_smis);
+ for (uint16_t i = FIRST_TYPE; i <= LAST_TYPE; ++i) {
+ size_t total = used[i] + identity_used[i] + unused[i];
+ if (total == 0) continue;
+ TRACE_BROKER_MEMORY(
+ this, InstanceType(i) << ": " << total << "; data used: " << used[i]
+ << "; only identity used: " << identity_used[i]
+ << "; unused: " << unused[i]);
+ }
+}
+#endif // DEBUG
+
void JSHeapBroker::Retire() {
CHECK_EQ(mode_, kSerialized);
TRACE(this, "Retiring");
mode_ = kRetired;
+
+#ifdef DEBUG
+ PrintRefsAnalysis();
+#endif // DEBUG
}
bool JSHeapBroker::SerializingAllowed() const { return mode() == kSerializing; }
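
The DEBUG-only Usage field added to ObjectData (kUnused / kOnlyIdentityUsed / kDataUsed) is what PrintRefsAnalysis aggregates when the broker retires, reporting how many serialized refs had their data used, were only compared by identity, or were never touched. A small standalone sketch of the same bucketing over hypothetical data (the real code walks the refs_ map):

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    // Sketch of the usage bucketing done by JSHeapBroker::PrintRefsAnalysis.
    enum class Usage { kUnused, kOnlyIdentityUsed, kDataUsed };

    int main() {
      // Hypothetical usage statuses, standing in for refs_ entries.
      std::vector<Usage> refs = {Usage::kDataUsed, Usage::kUnused,
                                 Usage::kOnlyIdentityUsed, Usage::kDataUsed};
      size_t used = 0, identity_used = 0, unused = 0;
      for (Usage u : refs) {
        switch (u) {
          case Usage::kUnused: ++unused; break;
          case Usage::kOnlyIdentityUsed: ++identity_used; break;
          case Usage::kDataUsed: ++used; break;
        }
      }
      std::printf("Refs: %zu; data used: %zu; only identity used: %zu; "
                  "unused: %zu\n",
                  refs.size(), used, identity_used, unused);
      return 0;
    }
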
@@ -2473,6 +2546,7 @@ void JSHeapBroker::InitializeAndStartSerializing(
GetOrCreateData(f->empty_fixed_array());
GetOrCreateData(f->empty_string());
GetOrCreateData(f->eval_context_map());
+ GetOrCreateData(f->exec_string());
GetOrCreateData(f->false_string());
GetOrCreateData(f->false_value());
GetOrCreateData(f->fixed_array_map());
@@ -2480,11 +2554,13 @@ void JSHeapBroker::InitializeAndStartSerializing(
GetOrCreateData(f->fixed_double_array_map());
GetOrCreateData(f->function_context_map());
GetOrCreateData(f->function_string());
+ GetOrCreateData(f->has_instance_symbol());
GetOrCreateData(f->heap_number_map());
GetOrCreateData(f->length_string());
GetOrCreateData(f->many_closures_cell_map());
GetOrCreateData(f->minus_zero_value());
GetOrCreateData(f->name_dictionary_map());
+ GetOrCreateData(f->name_string());
GetOrCreateData(f->NaN_string());
GetOrCreateData(f->null_map());
GetOrCreateData(f->null_string());
@@ -2495,6 +2571,7 @@ void JSHeapBroker::InitializeAndStartSerializing(
GetOrCreateData(f->optimized_out());
GetOrCreateData(f->optimized_out_map());
GetOrCreateData(f->property_array_map());
+ GetOrCreateData(f->prototype_string());
GetOrCreateData(f->ReflectHas_string());
GetOrCreateData(f->ReflectGet_string());
GetOrCreateData(f->sloppy_arguments_elements_map());
@@ -2505,6 +2582,7 @@ void JSHeapBroker::InitializeAndStartSerializing(
GetOrCreateData(f->termination_exception_map());
GetOrCreateData(f->the_hole_map());
GetOrCreateData(f->the_hole_value());
+ GetOrCreateData(f->then_string());
GetOrCreateData(f->true_string());
GetOrCreateData(f->true_value());
GetOrCreateData(f->undefined_map());
@@ -2517,7 +2595,9 @@ void JSHeapBroker::InitializeAndStartSerializing(
GetOrCreateData(f->array_buffer_detaching_protector())
->AsPropertyCell()
->Serialize(this);
- GetOrCreateData(f->array_constructor_protector())->AsCell()->Serialize(this);
+ GetOrCreateData(f->array_constructor_protector())
+ ->AsPropertyCell()
+ ->Serialize(this);
GetOrCreateData(f->array_iterator_protector())
->AsPropertyCell()
->Serialize(this);
@@ -2537,7 +2617,9 @@ void JSHeapBroker::InitializeAndStartSerializing(
GetOrCreateData(f->promise_then_protector())
->AsPropertyCell()
->Serialize(this);
- GetOrCreateData(f->string_length_protector())->AsCell()->Serialize(this);
+ GetOrCreateData(f->string_length_protector())
+ ->AsPropertyCell()
+ ->Serialize(this);
// - CEntry stub
GetOrCreateData(
CodeFactory::CEntry(isolate(), 1, kDontSaveFPRegs, kArgvOnStack, true));
@@ -2719,16 +2801,6 @@ bool MapRef::supports_fast_array_resize() const {
return data()->AsMap()->supports_fast_array_resize();
}
-bool MapRef::IsMapOfTargetGlobalProxy() const {
- if (broker()->mode() == JSHeapBroker::kDisabled) {
- AllowHandleDereference allow_handle_dereference;
- AllowHandleAllocation handle_allocation;
- return object()->IsMapOfGlobalProxy(
- broker()->target_native_context().object());
- }
- return data()->AsMap()->IsMapOfTargetGlobalProxy();
-}
-
int JSFunctionRef::InitialMapInstanceSizeWithMinSlack() const {
if (broker()->mode() == JSHeapBroker::kDisabled) {
AllowHandleDereference allow_handle_dereference;
@@ -2785,18 +2857,6 @@ OddballType MapRef::oddball_type() const {
return OddballType::kOther;
}
-ObjectRef FeedbackVectorRef::get(FeedbackSlot slot) const {
- if (broker()->mode() == JSHeapBroker::kDisabled) {
- AllowHandleAllocation handle_allocation;
- AllowHandleDereference handle_dereference;
- Handle<Object> value(object()->Get(slot)->cast<Object>(),
- broker()->isolate());
- return ObjectRef(broker(), value);
- }
- int i = FeedbackVector::GetIndex(slot);
- return ObjectRef(broker(), data()->AsFeedbackVector()->feedback().at(i));
-}
-
FeedbackCellRef FeedbackVectorRef::GetClosureFeedbackCell(int index) const {
if (broker()->mode() == JSHeapBroker::kDisabled) {
AllowHandleAllocation handle_allocation;
@@ -2854,6 +2914,11 @@ bool AllocationSiteRef::IsFastLiteral() const {
return data()->AsAllocationSite()->IsFastLiteral();
}
+void AllocationSiteRef::SerializeBoilerplate() {
+ CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
+ data()->AsAllocationSite()->SerializeBoilerplate(broker());
+}
+
void JSObjectRef::SerializeElements() {
CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
data()->AsJSObject()->SerializeElements(broker());
@@ -2880,13 +2945,13 @@ void JSObjectRef::EnsureElementsTenured() {
CHECK(data()->AsJSObject()->cow_or_empty_elements_tenured());
}
-FieldIndex MapRef::GetFieldIndexFor(int descriptor_index) const {
+FieldIndex MapRef::GetFieldIndexFor(InternalIndex descriptor_index) const {
if (broker()->mode() == JSHeapBroker::kDisabled) {
AllowHandleDereference allow_handle_dereference;
return FieldIndex::ForDescriptor(*object(), descriptor_index);
}
DescriptorArrayData* descriptors = data()->AsMap()->instance_descriptors();
- return descriptors->contents().at(descriptor_index).field_index;
+ return descriptors->contents().at(descriptor_index.as_int()).field_index;
}
int MapRef::GetInObjectPropertyOffset(int i) const {
@@ -2897,16 +2962,17 @@ int MapRef::GetInObjectPropertyOffset(int i) const {
return (GetInObjectPropertiesStartInWords() + i) * kTaggedSize;
}
-PropertyDetails MapRef::GetPropertyDetails(int descriptor_index) const {
+PropertyDetails MapRef::GetPropertyDetails(
+ InternalIndex descriptor_index) const {
if (broker()->mode() == JSHeapBroker::kDisabled) {
AllowHandleDereference allow_handle_dereference;
return object()->instance_descriptors().GetDetails(descriptor_index);
}
DescriptorArrayData* descriptors = data()->AsMap()->instance_descriptors();
- return descriptors->contents().at(descriptor_index).details;
+ return descriptors->contents().at(descriptor_index.as_int()).details;
}
-NameRef MapRef::GetPropertyKey(int descriptor_index) const {
+NameRef MapRef::GetPropertyKey(InternalIndex descriptor_index) const {
if (broker()->mode() == JSHeapBroker::kDisabled) {
AllowHandleAllocation handle_allocation;
AllowHandleDereference allow_handle_dereference;
@@ -2916,7 +2982,8 @@ NameRef MapRef::GetPropertyKey(int descriptor_index) const {
broker()->isolate()));
}
DescriptorArrayData* descriptors = data()->AsMap()->instance_descriptors();
- return NameRef(broker(), descriptors->contents().at(descriptor_index).key);
+ return NameRef(broker(),
+ descriptors->contents().at(descriptor_index.as_int()).key);
}
bool MapRef::IsFixedCowArrayMap() const {
@@ -2926,10 +2993,10 @@ bool MapRef::IsFixedCowArrayMap() const {
}
bool MapRef::IsPrimitiveMap() const {
- return instance_type() <= LAST_PRIMITIVE_TYPE;
+ return instance_type() <= LAST_PRIMITIVE_HEAP_OBJECT_TYPE;
}
-MapRef MapRef::FindFieldOwner(int descriptor_index) const {
+MapRef MapRef::FindFieldOwner(InternalIndex descriptor_index) const {
if (broker()->mode() == JSHeapBroker::kDisabled) {
AllowHandleAllocation handle_allocation;
AllowHandleDereference allow_handle_dereference;
@@ -2939,11 +3006,12 @@ MapRef MapRef::FindFieldOwner(int descriptor_index) const {
return MapRef(broker(), owner);
}
DescriptorArrayData* descriptors = data()->AsMap()->instance_descriptors();
- return MapRef(broker(),
- descriptors->contents().at(descriptor_index).field_owner);
+ return MapRef(
+ broker(),
+ descriptors->contents().at(descriptor_index.as_int()).field_owner);
}
-ObjectRef MapRef::GetFieldType(int descriptor_index) const {
+ObjectRef MapRef::GetFieldType(InternalIndex descriptor_index) const {
if (broker()->mode() == JSHeapBroker::kDisabled) {
AllowHandleAllocation handle_allocation;
AllowHandleDereference allow_handle_dereference;
@@ -2953,18 +3021,21 @@ ObjectRef MapRef::GetFieldType(int descriptor_index) const {
return ObjectRef(broker(), field_type);
}
DescriptorArrayData* descriptors = data()->AsMap()->instance_descriptors();
- return ObjectRef(broker(),
- descriptors->contents().at(descriptor_index).field_type);
+ return ObjectRef(
+ broker(),
+ descriptors->contents().at(descriptor_index.as_int()).field_type);
}
-bool MapRef::IsUnboxedDoubleField(int descriptor_index) const {
+bool MapRef::IsUnboxedDoubleField(InternalIndex descriptor_index) const {
if (broker()->mode() == JSHeapBroker::kDisabled) {
AllowHandleDereference allow_handle_dereference;
return object()->IsUnboxedDoubleField(
FieldIndex::ForDescriptor(*object(), descriptor_index));
}
DescriptorArrayData* descriptors = data()->AsMap()->instance_descriptors();
- return descriptors->contents().at(descriptor_index).is_unboxed_double_field;
+ return descriptors->contents()
+ .at(descriptor_index.as_int())
+ .is_unboxed_double_field;
}
uint16_t StringRef::GetFirstChar() {
@@ -3074,11 +3145,6 @@ Smi BytecodeArrayRef::GetConstantAtIndexAsSmi(int index) const {
return data()->AsBytecodeArray()->GetConstantAtIndexAsSmi(index);
}
-bool BytecodeArrayRef::IsSerializedForCompilation() const {
- if (broker()->mode() == JSHeapBroker::kDisabled) return true;
- return data()->AsBytecodeArray()->IsSerializedForCompilation();
-}
-
void BytecodeArrayRef::SerializeForCompilation() {
if (broker()->mode() == JSHeapBroker::kDisabled) return;
data()->AsBytecodeArray()->SerializeForCompilation(broker());
@@ -3191,6 +3257,8 @@ BIMODAL_ACCESSOR(JSFunction, Object, prototype)
BIMODAL_ACCESSOR(JSFunction, SharedFunctionInfo, shared)
BIMODAL_ACCESSOR(JSFunction, FeedbackVector, feedback_vector)
+BIMODAL_ACCESSOR_C(JSGlobalObject, bool, IsDetached)
+
BIMODAL_ACCESSOR_C(JSTypedArray, bool, is_on_heap)
BIMODAL_ACCESSOR_C(JSTypedArray, size_t, length)
BIMODAL_ACCESSOR(JSTypedArray, HeapObject, buffer)
@@ -3345,7 +3413,7 @@ BIMODAL_ACCESSOR_C(String, int, length)
BIMODAL_ACCESSOR(FeedbackCell, HeapObject, value)
-ObjectRef MapRef::GetStrongValue(int descriptor_index) const {
+ObjectRef MapRef::GetStrongValue(InternalIndex descriptor_index) const {
if (broker()->mode() == JSHeapBroker::kDisabled) {
AllowHandleDereference allow_handle_dereference;
return ObjectRef(broker(),
@@ -3376,12 +3444,12 @@ base::Optional<MapRef> MapRef::FindRootMap() const {
return base::nullopt;
}
-void* JSTypedArrayRef::external_pointer() const {
+void* JSTypedArrayRef::data_ptr() const {
if (broker()->mode() == JSHeapBroker::kDisabled) {
AllowHandleDereference allow_handle_dereference;
- return object()->external_pointer();
+ return object()->DataPtr();
}
- return data()->AsJSTypedArray()->external_pointer();
+ return data()->AsJSTypedArray()->data_ptr();
}
bool MapRef::IsInobjectSlackTrackingInProgress() const {
@@ -3774,12 +3842,37 @@ ObjectRef JSRegExpRef::source() const {
return ObjectRef(broker(), ObjectRef::data()->AsJSRegExp()->source());
}
-Handle<Object> ObjectRef::object() const { return data_->object(); }
+void JSRegExpRef::SerializeAsRegExpBoilerplate() {
+ CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
+ JSObjectRef::data()->AsJSRegExp()->SerializeAsRegExpBoilerplate(broker());
+}
+Handle<Object> ObjectRef::object() const {
+#ifdef DEBUG
+ if (broker()->mode() == JSHeapBroker::kSerialized &&
+ data_->used_status == ObjectData::Usage::kUnused) {
+ data_->used_status = ObjectData::Usage::kOnlyIdentityUsed;
+ }
+#endif // DEBUG
+ return data_->object();
+}
+
+#ifdef DEBUG
#define DEF_OBJECT_GETTER(T) \
Handle<T> T##Ref::object() const { \
+ if (broker()->mode() == JSHeapBroker::kSerialized && \
+ data_->used_status == ObjectData::Usage::kUnused) { \
+ data_->used_status = ObjectData::Usage::kOnlyIdentityUsed; \
+ } \
return Handle<T>(reinterpret_cast<Address*>(data_->object().address())); \
}
+#else
+#define DEF_OBJECT_GETTER(T) \
+ Handle<T> T##Ref::object() const { \
+ return Handle<T>(reinterpret_cast<Address*>(data_->object().address())); \
+ }
+#endif // DEBUG
+
HEAP_BROKER_OBJECT_LIST(DEF_OBJECT_GETTER)
#undef DEF_OBJECT_GETTER
@@ -3791,7 +3884,12 @@ ObjectData* ObjectRef::data() const {
CHECK_NE(data_->kind(), kSerializedHeapObject);
return data_;
case JSHeapBroker::kSerializing:
+ CHECK_NE(data_->kind(), kUnserializedHeapObject);
+ return data_;
case JSHeapBroker::kSerialized:
+#ifdef DEBUG
+ data_->used_status = ObjectData::Usage::kDataUsed;
+#endif // DEBUG
CHECK_NE(data_->kind(), kUnserializedHeapObject);
return data_;
case JSHeapBroker::kRetired:
@@ -3857,60 +3955,50 @@ bool JSFunctionRef::serialized() const {
return data()->AsJSFunction()->serialized();
}
-bool JSFunctionRef::IsSerializedForCompilation() const {
- if (broker()->mode() == JSHeapBroker::kDisabled) {
- return handle(object()->shared(), broker()->isolate())->HasBytecodeArray();
- }
-
- // We get a crash if we try to access the shared() getter without
- // checking for `serialized` first. Also it's possible to have a
- // JSFunctionRef without a feedback vector.
- return serialized() && has_feedback_vector() &&
- shared().IsSerializedForCompilation(feedback_vector());
-}
-
JSArrayRef SharedFunctionInfoRef::GetTemplateObject(
- ObjectRef description, FeedbackVectorRef vector, FeedbackSlot slot,
+ TemplateObjectDescriptionRef description, FeedbackSource const& source,
SerializationPolicy policy) {
- // Look in the feedback vector for the array. A Smi indicates that it's
- // not yet cached here.
- ObjectRef candidate = vector.get(slot);
- if (!candidate.IsSmi()) {
- return candidate.AsJSArray();
+ // First, see if we have processed feedback from the vector, respecting
+ // the serialization policy.
+ ProcessedFeedback const& feedback =
+ policy == SerializationPolicy::kSerializeIfNeeded
+ ? broker()->ProcessFeedbackForTemplateObject(source)
+ : broker()->GetFeedbackForTemplateObject(source);
+
+ if (!feedback.IsInsufficient()) {
+ return feedback.AsTemplateObject().value();
}
if (broker()->mode() == JSHeapBroker::kDisabled) {
AllowHandleAllocation handle_allocation;
AllowHandleDereference allow_handle_dereference;
- Handle<TemplateObjectDescription> tod =
- Handle<TemplateObjectDescription>::cast(description.object());
Handle<JSArray> template_object =
TemplateObjectDescription::GetTemplateObject(
- broker()->isolate(), broker()->target_native_context().object(),
- tod, object(), slot.ToInt());
+ isolate(), broker()->target_native_context().object(),
+ description.object(), object(), source.slot.ToInt());
return JSArrayRef(broker(), template_object);
}
- JSArrayData* array = data()->AsSharedFunctionInfo()->GetTemplateObject(slot);
+ JSArrayData* array =
+ data()->AsSharedFunctionInfo()->GetTemplateObject(source.slot);
if (array != nullptr) return JSArrayRef(broker(), array);
CHECK_EQ(policy, SerializationPolicy::kSerializeIfNeeded);
CHECK(broker()->SerializingAllowed());
- Handle<TemplateObjectDescription> tod =
- Handle<TemplateObjectDescription>::cast(description.object());
Handle<JSArray> template_object =
TemplateObjectDescription::GetTemplateObject(
- broker()->isolate(), broker()->target_native_context().object(), tod,
- object(), slot.ToInt());
+ broker()->isolate(), broker()->target_native_context().object(),
+ description.object(), object(), source.slot.ToInt());
array = broker()->GetOrCreateData(template_object)->AsJSArray();
- data()->AsSharedFunctionInfo()->SetTemplateObject(slot, array);
+ data()->AsSharedFunctionInfo()->SetTemplateObject(source.slot, array);
return JSArrayRef(broker(), array);
}
void SharedFunctionInfoRef::SetSerializedForCompilation(
FeedbackVectorRef feedback) {
CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
+ CHECK(HasBytecodeArray());
data()->AsSharedFunctionInfo()->SetSerializedForCompilation(broker(),
feedback);
}
@@ -3937,7 +4025,7 @@ SharedFunctionInfoRef::function_template_info() const {
bool SharedFunctionInfoRef::IsSerializedForCompilation(
FeedbackVectorRef feedback) const {
- if (broker()->mode() == JSHeapBroker::kDisabled) return true;
+ if (broker()->mode() == JSHeapBroker::kDisabled) return HasBytecodeArray();
return data()->AsSharedFunctionInfo()->IsSerializedForCompilation(feedback);
}
@@ -3953,19 +4041,19 @@ void MapRef::SerializeOwnDescriptors() {
data()->AsMap()->SerializeOwnDescriptors(broker());
}
-void MapRef::SerializeOwnDescriptor(int descriptor_index) {
+void MapRef::SerializeOwnDescriptor(InternalIndex descriptor_index) {
if (broker()->mode() == JSHeapBroker::kDisabled) return;
CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
data()->AsMap()->SerializeOwnDescriptor(broker(), descriptor_index);
}
-bool MapRef::serialized_own_descriptor(int descriptor_index) const {
- CHECK_LT(descriptor_index, NumberOfOwnDescriptors());
+bool MapRef::serialized_own_descriptor(InternalIndex descriptor_index) const {
+ CHECK_LT(descriptor_index.as_int(), NumberOfOwnDescriptors());
if (broker()->mode() == JSHeapBroker::kDisabled) return true;
DescriptorArrayData* desc_array_data =
data()->AsMap()->instance_descriptors();
if (!desc_array_data) return false;
- return desc_array_data->contents().find(descriptor_index) !=
+ return desc_array_data->contents().find(descriptor_index.as_int()) !=
desc_array_data->contents().end();
}
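[Editor's illustration] The hunks above migrate raw int descriptor indices to a dedicated InternalIndex type. A minimal standalone sketch of such a wrapper follows; it is illustrative only, and the real v8::internal::InternalIndex has a larger API than the as_int() accessor assumed here.

#include <cstddef>

// Wrapping the raw descriptor index in a distinct type keeps it from being
// confused with other integer indices at compile time.
class InternalIndex {
 public:
  explicit constexpr InternalIndex(std::size_t value) : value_(value) {}
  constexpr int as_int() const { return static_cast<int>(value_); }
  constexpr bool operator==(InternalIndex other) const {
    return value_ == other.value_;
  }

 private:
  std::size_t value_;
};

int main() {
  InternalIndex descriptor_index(3);
  return descriptor_index.as_int() == 3 ? 0 : 1;
}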
@@ -4027,14 +4115,14 @@ void FunctionTemplateInfoRef::SerializeCallCode() {
data()->AsFunctionTemplateInfo()->SerializeCallCode(broker());
}
-base::Optional<PropertyCellRef> JSGlobalProxyRef::GetPropertyCell(
+base::Optional<PropertyCellRef> JSGlobalObjectRef::GetPropertyCell(
NameRef const& name, SerializationPolicy policy) const {
if (broker()->mode() == JSHeapBroker::kDisabled) {
return GetPropertyCellFromHeap(broker(), name.object());
}
PropertyCellData* property_cell_data =
- data()->AsJSGlobalProxy()->GetPropertyCell(broker(),
- name.data()->AsName(), policy);
+ data()->AsJSGlobalObject()->GetPropertyCell(
+ broker(), name.data()->AsName(), policy);
if (property_cell_data == nullptr) return base::nullopt;
return PropertyCellRef(broker(), property_cell_data);
}
@@ -4115,7 +4203,6 @@ GlobalAccessFeedback::GlobalAccessFeedback(PropertyCellRef cell,
GlobalAccessFeedback::GlobalAccessFeedback(FeedbackSlotKind slot_kind)
: ProcessedFeedback(kGlobalAccess, slot_kind),
- cell_or_context_(base::nullopt),
index_and_immutable_(0 /* doesn't matter */) {
DCHECK(IsGlobalICKind(slot_kind));
}
@@ -4265,6 +4352,7 @@ void JSHeapBroker::SetFeedback(FeedbackSource const& source,
}
bool JSHeapBroker::HasFeedback(FeedbackSource const& source) const {
+ DCHECK(source.IsValid());
return feedback_.find(source) != feedback_.end();
}
@@ -4315,7 +4403,6 @@ ProcessedFeedback const& JSHeapBroker::ReadFeedbackForPropertyAccess(
MapHandles maps;
nexus.ExtractMaps(&maps);
- DCHECK_NE(nexus.ic_state(), PREMONOMORPHIC);
if (!maps.empty()) {
maps = GetRelevantReceiverMaps(isolate(), maps);
if (maps.empty()) return *new (zone()) InsufficientFeedback(kind);
@@ -4424,6 +4511,47 @@ ProcessedFeedback const& JSHeapBroker::ReadFeedbackForInstanceOf(
return *new (zone()) InstanceOfFeedback(optional_constructor, nexus.kind());
}
+ProcessedFeedback const& JSHeapBroker::ReadFeedbackForArrayOrObjectLiteral(
+ FeedbackSource const& source) {
+ FeedbackNexus nexus(source.vector, source.slot);
+ HeapObject object;
+ if (nexus.IsUninitialized() || !nexus.GetFeedback()->GetHeapObject(&object)) {
+ return *new (zone()) InsufficientFeedback(nexus.kind());
+ }
+
+ AllocationSiteRef site(this, handle(object, isolate()));
+ if (site.IsFastLiteral()) {
+ site.SerializeBoilerplate();
+ }
+
+ return *new (zone()) LiteralFeedback(site, nexus.kind());
+}
+
+ProcessedFeedback const& JSHeapBroker::ReadFeedbackForRegExpLiteral(
+ FeedbackSource const& source) {
+ FeedbackNexus nexus(source.vector, source.slot);
+ HeapObject object;
+ if (nexus.IsUninitialized() || !nexus.GetFeedback()->GetHeapObject(&object)) {
+ return *new (zone()) InsufficientFeedback(nexus.kind());
+ }
+
+ JSRegExpRef regexp(this, handle(object, isolate()));
+ regexp.SerializeAsRegExpBoilerplate();
+ return *new (zone()) RegExpLiteralFeedback(regexp, nexus.kind());
+}
+
+ProcessedFeedback const& JSHeapBroker::ReadFeedbackForTemplateObject(
+ FeedbackSource const& source) {
+ FeedbackNexus nexus(source.vector, source.slot);
+ HeapObject object;
+ if (nexus.IsUninitialized() || !nexus.GetFeedback()->GetHeapObject(&object)) {
+ return *new (zone()) InsufficientFeedback(nexus.kind());
+ }
+
+ JSArrayRef array(this, handle(object, isolate()));
+ return *new (zone()) TemplateObjectFeedback(array, nexus.kind());
+}
+
ProcessedFeedback const& JSHeapBroker::ReadFeedbackForCall(
FeedbackSource const& source) {
FeedbackNexus nexus(source.vector, source.slot);
@@ -4495,6 +4623,50 @@ ProcessedFeedback const& JSHeapBroker::GetFeedbackForGlobalAccess(
: ProcessFeedbackForGlobalAccess(source);
}
+ProcessedFeedback const& JSHeapBroker::GetFeedbackForArrayOrObjectLiteral(
+ FeedbackSource const& source) {
+ return FLAG_concurrent_inlining
+ ? GetFeedback(source)
+ : ProcessFeedbackForArrayOrObjectLiteral(source);
+}
+
+ProcessedFeedback const& JSHeapBroker::GetFeedbackForRegExpLiteral(
+ FeedbackSource const& source) {
+ return FLAG_concurrent_inlining ? GetFeedback(source)
+ : ProcessFeedbackForRegExpLiteral(source);
+}
+
+ProcessedFeedback const& JSHeapBroker::GetFeedbackForTemplateObject(
+ FeedbackSource const& source) {
+ return FLAG_concurrent_inlining ? GetFeedback(source)
+ : ProcessFeedbackForTemplateObject(source);
+}
+
+ProcessedFeedback const& JSHeapBroker::ProcessFeedbackForArrayOrObjectLiteral(
+ FeedbackSource const& source) {
+ if (HasFeedback(source)) return GetFeedback(source);
+ ProcessedFeedback const& feedback =
+ ReadFeedbackForArrayOrObjectLiteral(source);
+ SetFeedback(source, &feedback);
+ return feedback;
+}
+
+ProcessedFeedback const& JSHeapBroker::ProcessFeedbackForRegExpLiteral(
+ FeedbackSource const& source) {
+ if (HasFeedback(source)) return GetFeedback(source);
+ ProcessedFeedback const& feedback = ReadFeedbackForRegExpLiteral(source);
+ SetFeedback(source, &feedback);
+ return feedback;
+}
+
+ProcessedFeedback const& JSHeapBroker::ProcessFeedbackForTemplateObject(
+ FeedbackSource const& source) {
+ if (HasFeedback(source)) return GetFeedback(source);
+ ProcessedFeedback const& feedback = ReadFeedbackForTemplateObject(source);
+ SetFeedback(source, &feedback);
+ return feedback;
+}
+
ProcessedFeedback const& JSHeapBroker::ProcessFeedbackForBinaryOperation(
FeedbackSource const& source) {
if (HasFeedback(source)) return GetFeedback(source);
@@ -4650,9 +4822,10 @@ void ElementAccessFeedback::AddGroup(TransitionGroup&& group) {
}
std::ostream& operator<<(std::ostream& os, const ObjectRef& ref) {
- if (ref.broker()->mode() == JSHeapBroker::kDisabled) {
- // If the broker is disabled we cannot be in a background thread so it's
- // safe to read the heap.
+ if (ref.broker()->mode() == JSHeapBroker::kDisabled ||
+ !FLAG_concurrent_recompilation) {
+ // We cannot be in a background thread so it's safe to read the heap.
+ AllowHandleDereference allow_handle_dereference;
return os << ref.data() << " {" << ref.object() << "}";
} else {
return os << ref.data();
@@ -4734,6 +4907,21 @@ NamedAccessFeedback const& ProcessedFeedback::AsNamedAccess() const {
return *static_cast<NamedAccessFeedback const*>(this);
}
+LiteralFeedback const& ProcessedFeedback::AsLiteral() const {
+ CHECK_EQ(kLiteral, kind());
+ return *static_cast<LiteralFeedback const*>(this);
+}
+
+RegExpLiteralFeedback const& ProcessedFeedback::AsRegExpLiteral() const {
+ CHECK_EQ(kRegExpLiteral, kind());
+ return *static_cast<RegExpLiteralFeedback const*>(this);
+}
+
+TemplateObjectFeedback const& ProcessedFeedback::AsTemplateObject() const {
+ CHECK_EQ(kTemplateObject, kind());
+ return *static_cast<TemplateObjectFeedback const*>(this);
+}
+
BytecodeAnalysis const& JSHeapBroker::GetBytecodeAnalysis(
Handle<BytecodeArray> bytecode_array, BailoutId osr_bailout_id,
bool analyze_liveness, SerializationPolicy policy) {
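[Editor's illustration] The new literal/regexp/template-object methods above follow the broker's three-layer naming scheme: ReadFeedbackFor* actually inspects the heap, ProcessFeedbackFor* memoizes the result, and GetFeedbackFor* either trusts previously processed data (concurrent inlining) or processes on demand. A self-contained C++ sketch of that shape, with invented names rather than V8 API:

#include <map>
#include <string>

struct Feedback { std::string payload; };

class Broker {
 public:
  explicit Broker(bool concurrent) : concurrent_(concurrent) {}

  // Get*: in concurrent mode, rely on feedback gathered up front (at() would
  // throw if serialization missed a slot); otherwise fall through to Process*.
  Feedback const& GetFeedback(int slot) {
    return concurrent_ ? cache_.at(slot) : ProcessFeedback(slot);
  }

  // Process*: memoize, reading each slot at most once.
  Feedback const& ProcessFeedback(int slot) {
    auto it = cache_.find(slot);
    if (it != cache_.end()) return it->second;
    return cache_.emplace(slot, ReadFeedback(slot)).first->second;
  }

 private:
  // Read*: the only layer that actually touches the (simulated) heap.
  Feedback ReadFeedback(int slot) {
    return Feedback{"feedback for slot " + std::to_string(slot)};
  }

  bool concurrent_;
  std::map<int, Feedback> cache_;
};

int main() {
  Broker broker(false);
  return broker.GetFeedback(7).payload.empty() ? 1 : 0;
}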
diff --git a/deps/v8/src/compiler/js-heap-broker.h b/deps/v8/src/compiler/js-heap-broker.h
index 8c2622bf48..c9667a2fed 100644
--- a/deps/v8/src/compiler/js-heap-broker.h
+++ b/deps/v8/src/compiler/js-heap-broker.h
@@ -34,6 +34,12 @@ std::ostream& operator<<(std::ostream& os, const ObjectRef& ref);
broker->Trace() << x << '\n'; \
} while (false)
+#define TRACE_BROKER_MEMORY(broker, x) \
+ do { \
+ if (broker->tracing_enabled() && FLAG_trace_heap_broker_memory) \
+ broker->Trace() << x << std::endl; \
+ } while (false)
+
#define TRACE_BROKER_MISSING(broker, x) \
do { \
if (broker->tracing_enabled()) \
@@ -86,6 +92,10 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
void Retire();
bool SerializingAllowed() const;
+#ifdef DEBUG
+ void PrintRefsAnalysis() const;
+#endif // DEBUG
+
// Returns nullptr iff handle unknown.
ObjectData* GetData(Handle<Object>) const;
// Never returns nullptr.
@@ -125,6 +135,12 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
FeedbackSource const& source);
ProcessedFeedback const& GetFeedbackForInstanceOf(
FeedbackSource const& source);
+ ProcessedFeedback const& GetFeedbackForArrayOrObjectLiteral(
+ FeedbackSource const& source);
+ ProcessedFeedback const& GetFeedbackForRegExpLiteral(
+ FeedbackSource const& source);
+ ProcessedFeedback const& GetFeedbackForTemplateObject(
+ FeedbackSource const& source);
ProcessedFeedback const& GetFeedbackForPropertyAccess(
FeedbackSource const& source, AccessMode mode,
base::Optional<NameRef> static_name);
@@ -143,6 +159,12 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
ProcessedFeedback const& ProcessFeedbackForPropertyAccess(
FeedbackSource const& source, AccessMode mode,
base::Optional<NameRef> static_name);
+ ProcessedFeedback const& ProcessFeedbackForArrayOrObjectLiteral(
+ FeedbackSource const& source);
+ ProcessedFeedback const& ProcessFeedbackForRegExpLiteral(
+ FeedbackSource const& source);
+ ProcessedFeedback const& ProcessFeedbackForTemplateObject(
+ FeedbackSource const& source);
bool FeedbackIsInsufficient(FeedbackSource const& source) const;
@@ -157,7 +179,7 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
StringRef GetTypedArrayStringTag(ElementsKind kind);
- std::ostream& Trace();
+ std::ostream& Trace() const;
void IncrementTracingIndentation();
void DecrementTracingIndentation();
@@ -182,6 +204,12 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
ProcessedFeedback const& ReadFeedbackForPropertyAccess(
FeedbackSource const& source, AccessMode mode,
base::Optional<NameRef> static_name);
+ ProcessedFeedback const& ReadFeedbackForArrayOrObjectLiteral(
+ FeedbackSource const& source);
+ ProcessedFeedback const& ReadFeedbackForRegExpLiteral(
+ FeedbackSource const& source);
+ ProcessedFeedback const& ReadFeedbackForTemplateObject(
+ FeedbackSource const& source);
void InitializeRefsMap();
void CollectArrayAndObjectPrototypes();
@@ -199,7 +227,7 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
array_and_object_prototypes_;
BrokerMode mode_ = kDisabled;
bool const tracing_enabled_;
- StdoutStream trace_out_;
+ mutable StdoutStream trace_out_;
unsigned trace_indentation_ = 0;
PerIsolateCompilerCache* compiler_cache_ = nullptr;
ZoneUnorderedMap<FeedbackSource, ProcessedFeedback const*,
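[Editor's illustration] A standalone sketch of how the TRACE_BROKER_MEMORY macro added above might be used. Broker and the flag are stand-ins here, not the V8 types; note that the memory variant flushes with std::endl where TRACE_BROKER writes a plain '\n'.

#include <iostream>

static bool FLAG_trace_heap_broker_memory = true;  // stand-in flag

struct Broker {
  bool tracing_enabled() const { return true; }
  std::ostream& Trace() const { return std::cout; }
};

#define TRACE_BROKER_MEMORY(broker, x)                                \
  do {                                                                \
    if ((broker)->tracing_enabled() && FLAG_trace_heap_broker_memory) \
      (broker)->Trace() << x << std::endl;                            \
  } while (false)

int main() {
  Broker broker;
  // std::endl flushes each line, so memory snapshots stay visible even if
  // the process dies before the stream is torn down.
  TRACE_BROKER_MEMORY(&broker, "refs zone: " << 4096 << " bytes");
}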
diff --git a/deps/v8/src/compiler/js-heap-copy-reducer.cc b/deps/v8/src/compiler/js-heap-copy-reducer.cc
index bf4b79bf92..13bd6a1282 100644
--- a/deps/v8/src/compiler/js-heap-copy-reducer.cc
+++ b/deps/v8/src/compiler/js-heap-copy-reducer.cc
@@ -12,6 +12,7 @@
#include "src/heap/factory-inl.h"
#include "src/objects/map.h"
#include "src/objects/scope-info.h"
+#include "src/objects/template-objects.h"
namespace v8 {
namespace internal {
@@ -27,172 +28,145 @@ JSHeapBroker* JSHeapCopyReducer::broker() { return broker_; }
Reduction JSHeapCopyReducer::Reduce(Node* node) {
switch (node->opcode()) {
case IrOpcode::kHeapConstant: {
- if (!FLAG_concurrent_inlining) {
- ObjectRef object(broker(), HeapConstantOf(node->op()));
- if (object.IsJSFunction()) object.AsJSFunction().Serialize();
- if (object.IsJSObject()) {
- object.AsJSObject().SerializeObjectCreateMap();
- }
- if (object.IsSourceTextModule()) {
- object.AsSourceTextModule().Serialize();
- }
+ ObjectRef object(broker(), HeapConstantOf(node->op()));
+ if (object.IsJSFunction()) object.AsJSFunction().Serialize();
+ if (object.IsJSObject()) {
+ object.AsJSObject().SerializeObjectCreateMap();
+ }
+ if (object.IsSourceTextModule()) {
+ object.AsSourceTextModule().Serialize();
}
break;
}
case IrOpcode::kJSCreateArray: {
- if (!FLAG_concurrent_inlining) {
- CreateArrayParameters const& p = CreateArrayParametersOf(node->op());
- Handle<AllocationSite> site;
- if (p.site().ToHandle(&site)) AllocationSiteRef(broker(), site);
- }
+ CreateArrayParameters const& p = CreateArrayParametersOf(node->op());
+ Handle<AllocationSite> site;
+ if (p.site().ToHandle(&site)) AllocationSiteRef(broker(), site);
break;
}
case IrOpcode::kJSCreateArguments: {
- if (!FLAG_concurrent_inlining) {
- Node* const frame_state = NodeProperties::GetFrameStateInput(node);
- FrameStateInfo state_info = FrameStateInfoOf(frame_state->op());
- SharedFunctionInfoRef shared(
- broker(), state_info.shared_info().ToHandleChecked());
- }
+ Node* const frame_state = NodeProperties::GetFrameStateInput(node);
+ FrameStateInfo state_info = FrameStateInfoOf(frame_state->op());
+ SharedFunctionInfoRef shared(broker(),
+ state_info.shared_info().ToHandleChecked());
break;
}
case IrOpcode::kJSCreateBlockContext: {
- if (!FLAG_concurrent_inlining) {
- ScopeInfoRef(broker(), ScopeInfoOf(node->op()));
- }
+ ScopeInfoRef(broker(), ScopeInfoOf(node->op()));
break;
}
case IrOpcode::kJSCreateBoundFunction: {
- if (!FLAG_concurrent_inlining) {
- CreateBoundFunctionParameters const& p =
- CreateBoundFunctionParametersOf(node->op());
- MapRef(broker(), p.map());
- }
+ CreateBoundFunctionParameters const& p =
+ CreateBoundFunctionParametersOf(node->op());
+ MapRef(broker(), p.map());
break;
}
case IrOpcode::kJSCreateCatchContext: {
- if (!FLAG_concurrent_inlining) {
- ScopeInfoRef(broker(), ScopeInfoOf(node->op()));
- }
+ ScopeInfoRef(broker(), ScopeInfoOf(node->op()));
break;
}
case IrOpcode::kJSCreateClosure: {
- if (!FLAG_concurrent_inlining) {
- CreateClosureParameters const& p =
- CreateClosureParametersOf(node->op());
- SharedFunctionInfoRef(broker(), p.shared_info());
- FeedbackCellRef(broker(), p.feedback_cell());
- HeapObjectRef(broker(), p.code());
- }
+ CreateClosureParameters const& p = CreateClosureParametersOf(node->op());
+ SharedFunctionInfoRef(broker(), p.shared_info());
+ FeedbackCellRef(broker(), p.feedback_cell());
+ HeapObjectRef(broker(), p.code());
break;
}
case IrOpcode::kJSCreateEmptyLiteralArray: {
- if (!FLAG_concurrent_inlining) {
- FeedbackParameter const& p = FeedbackParameterOf(node->op());
- FeedbackVectorRef(broker(), p.feedback().vector).Serialize();
+ FeedbackParameter const& p = FeedbackParameterOf(node->op());
+ if (p.feedback().IsValid()) {
+ broker()->ProcessFeedbackForArrayOrObjectLiteral(p.feedback());
}
break;
}
case IrOpcode::kJSCreateFunctionContext: {
- if (!FLAG_concurrent_inlining) {
- CreateFunctionContextParameters const& p =
- CreateFunctionContextParametersOf(node->op());
- ScopeInfoRef(broker(), p.scope_info());
- }
+ CreateFunctionContextParameters const& p =
+ CreateFunctionContextParametersOf(node->op());
+ ScopeInfoRef(broker(), p.scope_info());
break;
}
case IrOpcode::kJSCreateLiteralArray:
case IrOpcode::kJSCreateLiteralObject: {
- if (!FLAG_concurrent_inlining) {
- CreateLiteralParameters const& p =
- CreateLiteralParametersOf(node->op());
- FeedbackVectorRef(broker(), p.feedback().vector).Serialize();
+ CreateLiteralParameters const& p = CreateLiteralParametersOf(node->op());
+ if (p.feedback().IsValid()) {
+ broker()->ProcessFeedbackForArrayOrObjectLiteral(p.feedback());
}
break;
}
case IrOpcode::kJSCreateLiteralRegExp: {
- if (!FLAG_concurrent_inlining) {
- CreateLiteralParameters const& p =
- CreateLiteralParametersOf(node->op());
- FeedbackVectorRef(broker(), p.feedback().vector).Serialize();
+ CreateLiteralParameters const& p = CreateLiteralParametersOf(node->op());
+ if (p.feedback().IsValid()) {
+ broker()->ProcessFeedbackForRegExpLiteral(p.feedback());
}
break;
}
+ case IrOpcode::kJSGetTemplateObject: {
+ GetTemplateObjectParameters const& p =
+ GetTemplateObjectParametersOf(node->op());
+ SharedFunctionInfoRef shared(broker(), p.shared());
+ TemplateObjectDescriptionRef description(broker(), p.description());
+ shared.GetTemplateObject(description, p.feedback(),
+ SerializationPolicy::kSerializeIfNeeded);
+ break;
+ }
case IrOpcode::kJSCreateWithContext: {
- if (!FLAG_concurrent_inlining) {
- ScopeInfoRef(broker(), ScopeInfoOf(node->op()));
- }
+ ScopeInfoRef(broker(), ScopeInfoOf(node->op()));
break;
}
case IrOpcode::kJSLoadNamed: {
- if (!FLAG_concurrent_inlining) {
- NamedAccess const& p = NamedAccessOf(node->op());
- NameRef name(broker(), p.name());
- if (p.feedback().IsValid()) {
- broker()->ProcessFeedbackForPropertyAccess(p.feedback(),
- AccessMode::kLoad, name);
- }
+ NamedAccess const& p = NamedAccessOf(node->op());
+ NameRef name(broker(), p.name());
+ if (p.feedback().IsValid()) {
+ broker()->ProcessFeedbackForPropertyAccess(p.feedback(),
+ AccessMode::kLoad, name);
}
break;
}
case IrOpcode::kJSStoreNamed: {
- if (!FLAG_concurrent_inlining) {
- NamedAccess const& p = NamedAccessOf(node->op());
- NameRef name(broker(), p.name());
- }
+ NamedAccess const& p = NamedAccessOf(node->op());
+ NameRef name(broker(), p.name());
break;
}
case IrOpcode::kStoreField:
case IrOpcode::kLoadField: {
- if (!FLAG_concurrent_inlining) {
- FieldAccess access = FieldAccessOf(node->op());
- Handle<Map> map_handle;
- if (access.map.ToHandle(&map_handle)) {
- MapRef(broker(), map_handle);
- }
- Handle<Name> name_handle;
- if (access.name.ToHandle(&name_handle)) {
- NameRef(broker(), name_handle);
- }
+ FieldAccess access = FieldAccessOf(node->op());
+ Handle<Map> map_handle;
+ if (access.map.ToHandle(&map_handle)) {
+ MapRef(broker(), map_handle);
+ }
+ Handle<Name> name_handle;
+ if (access.name.ToHandle(&name_handle)) {
+ NameRef(broker(), name_handle);
}
break;
}
case IrOpcode::kMapGuard: {
- if (!FLAG_concurrent_inlining) {
- ZoneHandleSet<Map> const& maps = MapGuardMapsOf(node->op());
- for (Handle<Map> map : maps) {
- MapRef(broker(), map);
- }
+ ZoneHandleSet<Map> const& maps = MapGuardMapsOf(node->op());
+ for (Handle<Map> map : maps) {
+ MapRef(broker(), map);
}
break;
}
case IrOpcode::kCheckMaps: {
- if (!FLAG_concurrent_inlining) {
- ZoneHandleSet<Map> const& maps =
- CheckMapsParametersOf(node->op()).maps();
- for (Handle<Map> map : maps) {
- MapRef(broker(), map);
- }
+ ZoneHandleSet<Map> const& maps = CheckMapsParametersOf(node->op()).maps();
+ for (Handle<Map> map : maps) {
+ MapRef(broker(), map);
}
break;
}
case IrOpcode::kCompareMaps: {
- if (!FLAG_concurrent_inlining) {
- ZoneHandleSet<Map> const& maps = CompareMapsParametersOf(node->op());
- for (Handle<Map> map : maps) {
- MapRef(broker(), map);
- }
+ ZoneHandleSet<Map> const& maps = CompareMapsParametersOf(node->op());
+ for (Handle<Map> map : maps) {
+ MapRef(broker(), map);
}
break;
}
case IrOpcode::kJSLoadProperty: {
- if (!FLAG_concurrent_inlining) {
- PropertyAccess const& p = PropertyAccessOf(node->op());
- AccessMode access_mode = AccessMode::kLoad;
- if (p.feedback().IsValid()) {
- broker()->ProcessFeedbackForPropertyAccess(p.feedback(), access_mode,
- base::nullopt);
- }
+ PropertyAccess const& p = PropertyAccessOf(node->op());
+ AccessMode access_mode = AccessMode::kLoad;
+ if (p.feedback().IsValid()) {
+ broker()->ProcessFeedbackForPropertyAccess(p.feedback(), access_mode,
+ base::nullopt);
}
break;
}
diff --git a/deps/v8/src/compiler/js-inlining-heuristic.cc b/deps/v8/src/compiler/js-inlining-heuristic.cc
index ae271b3af9..cc3f321d6b 100644
--- a/deps/v8/src/compiler/js-inlining-heuristic.cc
+++ b/deps/v8/src/compiler/js-inlining-heuristic.cc
@@ -22,9 +22,35 @@ namespace compiler {
} while (false)
namespace {
-bool IsSmall(BytecodeArrayRef bytecode) {
+bool IsSmall(BytecodeArrayRef const& bytecode) {
return bytecode.length() <= FLAG_max_inlined_bytecode_size_small;
}
+
+bool CanConsiderForInlining(JSHeapBroker* broker,
+ SharedFunctionInfoRef const& shared,
+ FeedbackVectorRef const& feedback_vector) {
+ if (!shared.IsInlineable()) return false;
+ DCHECK(shared.HasBytecodeArray());
+ if (!shared.IsSerializedForCompilation(feedback_vector)) {
+ TRACE_BROKER_MISSING(
+ broker, "data for " << shared << " (not serialized for compilation)");
+ return false;
+ }
+ return true;
+}
+
+bool CanConsiderForInlining(JSHeapBroker* broker,
+ JSFunctionRef const& function) {
+ if (!function.has_feedback_vector()) return false;
+ if (!function.serialized()) {
+ TRACE_BROKER_MISSING(
+ broker, "data for " << function << " (cannot consider for inlining)");
+ return false;
+ }
+ return CanConsiderForInlining(broker, function.shared(),
+ function.feedback_vector());
+}
+
} // namespace
JSInliningHeuristic::Candidate JSInliningHeuristic::CollectFunctions(
@@ -38,11 +64,11 @@ JSInliningHeuristic::Candidate JSInliningHeuristic::CollectFunctions(
if (m.HasValue() && m.Ref(broker()).IsJSFunction()) {
out.functions[0] = m.Ref(broker()).AsJSFunction();
JSFunctionRef function = out.functions[0].value();
- if (function.IsSerializedForCompilation()) {
+ if (CanConsiderForInlining(broker(), function)) {
out.bytecode[0] = function.shared().GetBytecodeArray();
+ out.num_functions = 1;
+ return out;
}
- out.num_functions = 1;
- return out;
}
if (m.IsPhi()) {
int const value_input_count = m.node()->op()->ValueInputCount();
@@ -59,7 +85,7 @@ JSInliningHeuristic::Candidate JSInliningHeuristic::CollectFunctions(
out.functions[n] = m.Ref(broker()).AsJSFunction();
JSFunctionRef function = out.functions[n].value();
- if (function.IsSerializedForCompilation()) {
+ if (CanConsiderForInlining(broker(), function)) {
out.bytecode[n] = function.shared().GetBytecodeArray();
}
}
@@ -67,11 +93,14 @@ JSInliningHeuristic::Candidate JSInliningHeuristic::CollectFunctions(
return out;
}
if (m.IsJSCreateClosure()) {
- CreateClosureParameters const& p = CreateClosureParametersOf(m.op());
DCHECK(!out.functions[0].has_value());
- out.shared_info = SharedFunctionInfoRef(broker(), p.shared_info());
- SharedFunctionInfoRef shared_info = out.shared_info.value();
- if (shared_info.HasBytecodeArray()) {
+ CreateClosureParameters const& p = CreateClosureParametersOf(m.op());
+ FeedbackCellRef feedback_cell(broker(), p.feedback_cell());
+ SharedFunctionInfoRef shared_info(broker(), p.shared_info());
+ out.shared_info = shared_info;
+ if (feedback_cell.value().IsFeedbackVector() &&
+ CanConsiderForInlining(broker(), shared_info,
+ feedback_cell.value().AsFeedbackVector())) {
out.bytecode[0] = shared_info.GetBytecodeArray();
}
out.num_functions = 1;
@@ -135,7 +164,8 @@ Reduction JSInliningHeuristic::Reduce(Node* node) {
SharedFunctionInfoRef shared = candidate.functions[i].has_value()
? candidate.functions[i].value().shared()
: candidate.shared_info.value();
- candidate.can_inline_function[i] = shared.IsInlineable();
+ candidate.can_inline_function[i] = candidate.bytecode[i].has_value();
+ CHECK_IMPLIES(candidate.can_inline_function[i], shared.IsInlineable());
// Do not allow direct recursion i.e. f() -> f(). We still allow indirect
// recursion like f() -> g() -> f(). The indirect recursion is helpful in
// cases where f() is a small dispatch function that calls the appropriate
@@ -151,14 +181,12 @@ Reduction JSInliningHeuristic::Reduce(Node* node) {
node->id(), node->op()->mnemonic());
candidate.can_inline_function[i] = false;
}
- // A function reaching this point should always have its bytecode
- // serialized.
- BytecodeArrayRef bytecode = candidate.bytecode[i].value();
if (candidate.can_inline_function[i]) {
can_inline_candidate = true;
+ BytecodeArrayRef bytecode = candidate.bytecode[i].value();
candidate.total_size += bytecode.length();
+ candidate_is_small = candidate_is_small && IsSmall(bytecode);
}
- candidate_is_small = candidate_is_small && IsSmall(bytecode);
}
if (!can_inline_candidate) return NoChange();
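[Editor's illustration] The fix above moves the size accounting under the can_inline_function check, so a candidate's bytecode length contributes to total_size and to the smallness test only when that function can actually be inlined. A toy sketch of the corrected accumulation, with invented types rather than the real V8 heuristic:

#include <optional>
#include <vector>

struct Candidate {
  std::vector<std::optional<int>> bytecode_length;  // nullopt: not inlinable
  int total_size = 0;
};

bool IsSmall(int length, int threshold) { return length <= threshold; }

bool CandidateIsSmall(Candidate& c, int threshold) {
  bool small = true;
  for (auto const& len : c.bytecode_length) {
    if (!len.has_value()) continue;  // skip functions without bytecode
    c.total_size += *len;
    small = small && IsSmall(*len, threshold);
  }
  return small;
}

int main() {
  Candidate c;
  c.bytecode_length = {40, std::nullopt, 90};
  return CandidateIsSmall(c, 50) ? 1 : 0;  // 90 exceeds the threshold
}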
diff --git a/deps/v8/src/compiler/js-inlining.cc b/deps/v8/src/compiler/js-inlining.cc
index 51179f1956..6c071438cc 100644
--- a/deps/v8/src/compiler/js-inlining.cc
+++ b/deps/v8/src/compiler/js-inlining.cc
@@ -321,7 +321,7 @@ base::Optional<SharedFunctionInfoRef> JSInliner::DetermineCallTarget(
// TODO(turbofan): We might consider to eagerly create the feedback vector
// in such a case (in {DetermineCallContext} below) eventually.
- FeedbackCellRef cell(FeedbackCellRef(broker(), p.feedback_cell()));
+ FeedbackCellRef cell(broker(), p.feedback_cell());
if (!cell.value().IsFeedbackVector()) return base::nullopt;
return SharedFunctionInfoRef(broker(), p.shared_info());
@@ -413,11 +413,11 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
Node* exception_target = nullptr;
NodeProperties::IsExceptionalCall(node, &exception_target);
- // JSInliningHeuristic has already filtered candidates without a
- // BytecodeArray by calling SharedFunctionInfoRef::IsInlineable. For the ones
- // passing the IsInlineable check, The broker holds a reference to the
- // bytecode array, which prevents it from getting flushed.
- // Therefore, the following check should always hold true.
+ // JSInliningHeuristic has already filtered candidates without a BytecodeArray
+ // by calling SharedFunctionInfoRef::IsInlineable. For the ones passing the
+ // IsInlineable check, the broker holds a reference to the bytecode array,
+ // which prevents it from getting flushed. Therefore, the following check
+ // should always hold true.
CHECK(shared_info->is_compiled());
if (!FLAG_concurrent_inlining && info_->is_source_positions_enabled()) {
@@ -428,17 +428,10 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
TRACE("Inlining " << *shared_info << " into " << outer_shared_info
<< ((exception_target != nullptr) ? " (inside try-block)"
: ""));
- // Determine the targets feedback vector and its context.
+ // Determine the target's feedback vector and its context.
Node* context;
FeedbackVectorRef feedback_vector = DetermineCallContext(node, &context);
-
- if (FLAG_concurrent_inlining &&
- !shared_info->IsSerializedForCompilation(feedback_vector)) {
- // TODO(neis): Should this be a broker message?
- TRACE("Missed opportunity to inline a function ("
- << *shared_info << " with " << feedback_vector << ")");
- return NoChange();
- }
+ CHECK(shared_info->IsSerializedForCompilation(feedback_vector));
// ----------------------------------------------------------------
// After this point, we've made a decision to inline this function.
diff --git a/deps/v8/src/compiler/js-native-context-specialization.cc b/deps/v8/src/compiler/js-native-context-specialization.cc
index 9f950c808c..80c620034b 100644
--- a/deps/v8/src/compiler/js-native-context-specialization.cc
+++ b/deps/v8/src/compiler/js-native-context-specialization.cc
@@ -784,12 +784,15 @@ Reduction JSNativeContextSpecialization::ReduceGlobalAccess(
Node* node, Node* receiver, Node* value, NameRef const& name,
AccessMode access_mode, Node* key) {
base::Optional<PropertyCellRef> cell =
- native_context().global_proxy_object().GetPropertyCell(name);
+ native_context().global_object().GetPropertyCell(name);
return cell.has_value() ? ReduceGlobalAccess(node, receiver, value, name,
access_mode, key, *cell)
: NoChange();
}
+// TODO(neis): Try to merge this with ReduceNamedAccess by introducing a new
+// PropertyAccessInfo kind for global accesses and using the existing mechanism
+// for building loads/stores.
Reduction JSNativeContextSpecialization::ReduceGlobalAccess(
Node* node, Node* receiver, Node* value, NameRef const& name,
AccessMode access_mode, Node* key, PropertyCellRef const& property_cell) {
@@ -838,15 +841,16 @@ Reduction JSNativeContextSpecialization::ReduceGlobalAccess(
effect = BuildCheckEqualsName(name, key, effect, control);
}
- // Check if we have a {receiver} to validate. If so, we need to check that
- // the {receiver} is actually the JSGlobalProxy for the native context that
- // we are specializing to.
+ // If we have a {receiver} to validate, we do so by checking that its map is
+ // the (target) global proxy's map. This guarantees that the receiver
+ // really is the global proxy.
if (receiver != nullptr) {
- Node* check = graph()->NewNode(simplified()->ReferenceEqual(), receiver,
- jsgraph()->HeapConstant(global_proxy()));
effect = graph()->NewNode(
- simplified()->CheckIf(DeoptimizeReason::kReceiverNotAGlobalProxy),
- check, effect, control);
+ simplified()->CheckMaps(
+ CheckMapsFlag::kNone,
+ ZoneHandleSet<Map>(
+ HeapObjectRef(broker(), global_proxy()).map().object())),
+ receiver, effect, control);
}
if (access_mode == AccessMode::kLoad || access_mode == AccessMode::kHas) {
@@ -1050,28 +1054,6 @@ Reduction JSNativeContextSpecialization::ReduceJSStoreGlobal(Node* node) {
}
}
-void JSNativeContextSpecialization::FilterMapsAndGetPropertyAccessInfos(
- NamedAccessFeedback const& feedback, AccessMode access_mode, Node* receiver,
- Node* effect, ZoneVector<PropertyAccessInfo>* access_infos) {
- ZoneVector<Handle<Map>> receiver_maps(zone());
-
- // Either infer maps from the graph or use the feedback.
- if (!InferReceiverMaps(receiver, effect, &receiver_maps)) {
- receiver_maps = feedback.maps();
- }
- RemoveImpossibleReceiverMaps(receiver, &receiver_maps);
-
- for (Handle<Map> map_handle : receiver_maps) {
- MapRef map(broker(), map_handle);
- if (map.is_deprecated()) continue;
- PropertyAccessInfo access_info = broker()->GetPropertyAccessInfo(
- map, feedback.name(), access_mode, dependencies(),
- FLAG_concurrent_inlining ? SerializationPolicy::kAssumeSerialized
- : SerializationPolicy::kSerializeIfNeeded);
- access_infos->push_back(access_info);
- }
-}
-
Reduction JSNativeContextSpecialization::ReduceNamedAccess(
Node* node, Node* value, NamedAccessFeedback const& feedback,
AccessMode access_mode, Node* key) {
@@ -1081,36 +1063,54 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
node->opcode() == IrOpcode::kJSStoreProperty ||
node->opcode() == IrOpcode::kJSStoreNamedOwn ||
node->opcode() == IrOpcode::kJSStoreDataPropertyInLiteral ||
- node->opcode() == IrOpcode::kJSHasProperty ||
- node->opcode() == IrOpcode::kJSGetIterator);
+ node->opcode() == IrOpcode::kJSHasProperty);
Node* receiver = NodeProperties::GetValueInput(node, 0);
Node* context = NodeProperties::GetContextInput(node);
Node* frame_state = NodeProperties::GetFrameStateInput(node);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
- ZoneVector<PropertyAccessInfo> access_infos_for_feedback(zone());
- ZoneVector<PropertyAccessInfo> access_infos(zone());
- FilterMapsAndGetPropertyAccessInfos(feedback, access_mode, receiver, effect,
- &access_infos_for_feedback);
- AccessInfoFactory access_info_factory(broker(), dependencies(),
- graph()->zone());
- if (!access_info_factory.FinalizePropertyAccessInfos(
- access_infos_for_feedback, access_mode, &access_infos)) {
- return NoChange();
+ // Either infer maps from the graph or use the feedback.
+ ZoneVector<Handle<Map>> receiver_maps(zone());
+ if (!InferReceiverMaps(receiver, effect, &receiver_maps)) {
+ receiver_maps = feedback.maps();
}
+ RemoveImpossibleReceiverMaps(receiver, &receiver_maps);
- // Check if we have an access o.x or o.x=v where o is the current
- // native contexts' global proxy, and turn that into a direct access
- // to the current native context's global object instead.
- if (access_infos.size() == 1 && access_infos[0].receiver_maps().size() == 1) {
- MapRef receiver_map(broker(), access_infos[0].receiver_maps()[0]);
- if (receiver_map.IsMapOfTargetGlobalProxy()) {
+ // Check if we have an access o.x or o.x=v where o is the target native
+ // context's global proxy, and turn that into a direct access to the
+ // corresponding global object instead.
+ if (receiver_maps.size() == 1) {
+ MapRef receiver_map(broker(), receiver_maps[0]);
+ if (receiver_map.equals(
+ broker()->target_native_context().global_proxy_object().map()) &&
+ !broker()->target_native_context().global_object().IsDetached()) {
return ReduceGlobalAccess(node, receiver, value, feedback.name(),
access_mode, key);
}
}
+ ZoneVector<PropertyAccessInfo> access_infos(zone());
+ {
+ ZoneVector<PropertyAccessInfo> access_infos_for_feedback(zone());
+ for (Handle<Map> map_handle : receiver_maps) {
+ MapRef map(broker(), map_handle);
+ if (map.is_deprecated()) continue;
+ PropertyAccessInfo access_info = broker()->GetPropertyAccessInfo(
+ map, feedback.name(), access_mode, dependencies(),
+ FLAG_concurrent_inlining ? SerializationPolicy::kAssumeSerialized
+ : SerializationPolicy::kSerializeIfNeeded);
+ access_infos_for_feedback.push_back(access_info);
+ }
+
+ AccessInfoFactory access_info_factory(broker(), dependencies(),
+ graph()->zone());
+ if (!access_info_factory.FinalizePropertyAccessInfos(
+ access_infos_for_feedback, access_mode, &access_infos)) {
+ return NoChange();
+ }
+ }
+
// Ensure that {key} matches the specified name (if {key} is given).
if (key != nullptr) {
effect = BuildCheckEqualsName(feedback.name(), key, effect, control);
@@ -1332,24 +1332,6 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
return Replace(value);
}
-Reduction JSNativeContextSpecialization::ReduceNamedAccessFromNexus(
- Node* node, Node* value, FeedbackSource const& source, NameRef const& name,
- AccessMode access_mode) {
- DCHECK(node->opcode() == IrOpcode::kJSLoadNamed ||
- node->opcode() == IrOpcode::kJSStoreNamed ||
- node->opcode() == IrOpcode::kJSStoreNamedOwn);
- Node* const receiver = NodeProperties::GetValueInput(node, 0);
-
- // Optimize accesses to the current native context's global proxy.
- HeapObjectMatcher m(receiver);
- if (m.HasValue() &&
- m.Ref(broker()).equals(native_context().global_proxy_object())) {
- return ReduceGlobalAccess(node, nullptr, value, name, access_mode);
- }
-
- return ReducePropertyAccess(node, nullptr, name, value, source, access_mode);
-}
-
Reduction JSNativeContextSpecialization::ReduceJSLoadNamed(Node* node) {
DCHECK_EQ(IrOpcode::kJSLoadNamed, node->opcode());
NamedAccess const& p = NamedAccessOf(node->op());
@@ -1388,18 +1370,134 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadNamed(Node* node) {
}
if (!p.feedback().IsValid()) return NoChange();
- return ReduceNamedAccessFromNexus(node, jsgraph()->Dead(),
- FeedbackSource(p.feedback()), name,
- AccessMode::kLoad);
+ return ReducePropertyAccess(node, nullptr, name, jsgraph()->Dead(),
+ FeedbackSource(p.feedback()), AccessMode::kLoad);
}
Reduction JSNativeContextSpecialization::ReduceJSGetIterator(Node* node) {
DCHECK_EQ(IrOpcode::kJSGetIterator, node->opcode());
- PropertyAccess const& p = PropertyAccessOf(node->op());
- NameRef name(broker(), factory()->iterator_symbol());
+ GetIteratorParameters const& p = GetIteratorParametersOf(node->op());
- return ReducePropertyAccess(node, nullptr, name, jsgraph()->Dead(),
- FeedbackSource(p.feedback()), AccessMode::kLoad);
+ Node* receiver = NodeProperties::GetValueInput(node, 0);
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* frame_state = NodeProperties::GetFrameStateInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ Node* iterator_exception_node = nullptr;
+ Node* if_exception_merge = nullptr;
+ Node* if_exception_effect_phi = nullptr;
+ Node* if_exception_phi = nullptr;
+ bool has_exception_node =
+ NodeProperties::IsExceptionalCall(node, &iterator_exception_node);
+ if (has_exception_node) {
+ // If there exists an IfException node for the current {node}, we need
+ // exception handling for all the desugared nodes. Create a combination
+ // of Merge+Phi+EffectPhi nodes that consumes the exception paths from
+ // all the desugared nodes, including the original exception node.
+ // Usages of the original exception node are then rewired to the newly
+ // created combination of Merge+Phi+EffectPhi. Here, use dead_node as a
+ // placeholder for the original exception node until its uses are rewired.
+
+ Node* dead_node = jsgraph()->Dead();
+ if_exception_merge = graph()->NewNode(common()->Merge(1), dead_node);
+ if_exception_effect_phi =
+ graph()->NewNode(common()->EffectPhi(1), dead_node, if_exception_merge);
+ if_exception_phi =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 1),
+ dead_node, if_exception_merge);
+ ReplaceWithValue(iterator_exception_node, if_exception_phi,
+ if_exception_effect_phi, if_exception_merge);
+ if_exception_merge->ReplaceInput(0, iterator_exception_node);
+ if_exception_effect_phi->ReplaceInput(0, iterator_exception_node);
+ if_exception_phi->ReplaceInput(0, iterator_exception_node);
+ }
+
+ // Load iterator property operator
+ Handle<Name> iterator_symbol = factory()->iterator_symbol();
+ const Operator* load_op =
+ javascript()->LoadNamed(iterator_symbol, p.loadFeedback());
+
+ // Lazy deopt of the load iterator property
+ Node* call_slot = jsgraph()->SmiConstant(p.callFeedback().slot.ToInt());
+ Node* call_feedback = jsgraph()->HeapConstant(p.callFeedback().vector);
+ Node* lazy_deopt_parameters[] = {receiver, call_slot, call_feedback};
+ Node* lazy_deopt_frame_state = CreateStubBuiltinContinuationFrameState(
+ jsgraph(), Builtins::kGetIteratorWithFeedbackLazyDeoptContinuation,
+ context, lazy_deopt_parameters, arraysize(lazy_deopt_parameters),
+ frame_state, ContinuationFrameStateMode::LAZY);
+ Node* load_property = graph()->NewNode(
+ load_op, receiver, context, lazy_deopt_frame_state, effect, control);
+ effect = load_property;
+ control = load_property;
+
+ // Handle exception path for the load named property
+ if (has_exception_node) {
+ control =
+ AppendExceptionHandling(effect, control, if_exception_merge,
+ if_exception_phi, if_exception_effect_phi);
+ }
+
+ // Eager deopt of call iterator property
+ Node* parameters[] = {receiver, load_property, call_slot, call_feedback};
+ Node* eager_deopt_frame_state = CreateStubBuiltinContinuationFrameState(
+ jsgraph(), Builtins::kCallIteratorWithFeedback, context, parameters,
+ arraysize(parameters), frame_state, ContinuationFrameStateMode::EAGER);
+ Node* deopt_checkpoint = graph()->NewNode(
+ common()->Checkpoint(), eager_deopt_frame_state, effect, control);
+ effect = deopt_checkpoint;
+
+ // Call iterator property operator
+ ProcessedFeedback const& feedback =
+ broker()->GetFeedbackForCall(p.callFeedback());
+ SpeculationMode mode = feedback.IsInsufficient()
+ ? SpeculationMode::kDisallowSpeculation
+ : feedback.AsCall().speculation_mode();
+ const Operator* call_op =
+ javascript()->Call(2, CallFrequency(), p.callFeedback(),
+ ConvertReceiverMode::kNotNullOrUndefined, mode);
+ Node* call_property = graph()->NewNode(call_op, load_property, receiver,
+ context, frame_state, effect, control);
+ effect = call_property;
+ control = call_property;
+ if (has_exception_node) {
+ control =
+ AppendExceptionHandling(effect, control, if_exception_merge,
+ if_exception_phi, if_exception_effect_phi);
+ }
+
+ // Check if the call property returns a valid JSReceiver; otherwise throw a
+ // SymbolIteratorInvalid runtime exception.
+ Node* is_receiver =
+ graph()->NewNode(simplified()->ObjectIsReceiver(), call_property);
+ Node* branch_node = graph()->NewNode(
+ common()->Branch(BranchHint::kNone, IsSafetyCheck::kNoSafetyCheck),
+ is_receiver, control);
+ {
+ // Create a version of effect and control for the false path of the branch
+ Node* effect = call_property;
+ Node* control = call_property;
+ Node* if_not_receiver = graph()->NewNode(common()->IfFalse(), branch_node);
+ control = if_not_receiver;
+ const Operator* call_runtime_op =
+ javascript()->CallRuntime(Runtime::kThrowSymbolIteratorInvalid, 0);
+ Node* call_runtime = graph()->NewNode(call_runtime_op, context, frame_state,
+ effect, control);
+ control = call_runtime;
+ effect = call_runtime;
+ if (has_exception_node) {
+ control =
+ AppendExceptionHandling(effect, control, if_exception_merge,
+ if_exception_phi, if_exception_effect_phi);
+ }
+ Node* throw_node =
+ graph()->NewNode(common()->Throw(), call_runtime, control);
+ NodeProperties::MergeControlToEnd(graph(), common(), throw_node);
+ }
+
+ Node* if_receiver = graph()->NewNode(common()->IfTrue(), branch_node);
+ ReplaceWithValue(node, call_property, effect, if_receiver);
+ return Replace(if_receiver);
}
Reduction JSNativeContextSpecialization::ReduceJSStoreNamed(Node* node) {
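[Editor's illustration] To summarize what the new ReduceJSGetIterator desugaring above builds: load the @@iterator property, call it, and throw if the result is not an object. The toy model below mirrors that runtime protocol; all names are invented, none of this is V8 API.

#include <functional>
#include <map>
#include <stdexcept>
#include <string>

struct Value {
  bool is_object = false;
  std::map<std::string, std::function<Value(Value&)>> props;
};

Value GetIterator(Value& obj) {
  // 1. JSLoadNamed of obj[Symbol.iterator] (lazy-deopt frame state attached).
  auto it = obj.props.find("Symbol.iterator");
  if (it == obj.props.end()) throw std::runtime_error("not iterable");
  // 2. JSCall of the loaded method (guarded by an eager-deopt checkpoint).
  Value iterator = it->second(obj);
  // 3. ObjectIsReceiver branch; the false path ends in
  //    Runtime::kThrowSymbolIteratorInvalid.
  if (!iterator.is_object)
    throw std::runtime_error("Symbol.iterator result is not an object");
  return iterator;
}

int main() {
  Value iterable;
  iterable.props["Symbol.iterator"] = [](Value&) {
    Value v;
    v.is_object = true;
    return v;
  };
  return GetIterator(iterable).is_object ? 0 : 1;
}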
@@ -1408,9 +1506,8 @@ Reduction JSNativeContextSpecialization::ReduceJSStoreNamed(Node* node) {
Node* const value = NodeProperties::GetValueInput(node, 1);
if (!p.feedback().IsValid()) return NoChange();
- return ReduceNamedAccessFromNexus(node, value, FeedbackSource(p.feedback()),
- NameRef(broker(), p.name()),
- AccessMode::kStore);
+ return ReducePropertyAccess(node, nullptr, NameRef(broker(), p.name()), value,
+ FeedbackSource(p.feedback()), AccessMode::kStore);
}
Reduction JSNativeContextSpecialization::ReduceJSStoreNamedOwn(Node* node) {
@@ -1419,9 +1516,9 @@ Reduction JSNativeContextSpecialization::ReduceJSStoreNamedOwn(Node* node) {
Node* const value = NodeProperties::GetValueInput(node, 1);
if (!p.feedback().IsValid()) return NoChange();
- return ReduceNamedAccessFromNexus(node, value, FeedbackSource(p.feedback()),
- NameRef(broker(), p.name()),
- AccessMode::kStoreInLiteral);
+ return ReducePropertyAccess(node, nullptr, NameRef(broker(), p.name()), value,
+ FeedbackSource(p.feedback()),
+ AccessMode::kStoreInLiteral);
}
Reduction JSNativeContextSpecialization::ReduceElementAccessOnString(
@@ -1578,9 +1675,7 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
// NoElementsProtector.
for (ElementAccessInfo const& access_info : access_infos) {
if (IsFastElementsKind(access_info.elements_kind())) {
- if (!isolate()->IsNoElementsProtectorIntact()) return NoChange();
- dependencies()->DependOnProtector(
- PropertyCellRef(broker(), factory()->no_elements_protector()));
+ if (!dependencies()->DependOnNoElementsProtector()) return NoChange();
break;
}
}
@@ -1819,8 +1914,7 @@ Reduction JSNativeContextSpecialization::ReducePropertyAccess(
node->opcode() == IrOpcode::kJSHasProperty ||
node->opcode() == IrOpcode::kJSLoadNamed ||
node->opcode() == IrOpcode::kJSStoreNamed ||
- node->opcode() == IrOpcode::kJSStoreNamedOwn ||
- node->opcode() == IrOpcode::kJSGetIterator);
+ node->opcode() == IrOpcode::kJSStoreNamedOwn);
DCHECK_GE(node->op()->ControlOutputCount(), 1);
ProcessedFeedback const& feedback =
@@ -2499,12 +2593,14 @@ JSNativeContextSpecialization::BuildElementAccess(
if (typed_array.has_value()) {
length = jsgraph()->Constant(static_cast<double>(typed_array->length()));
- // Load the (known) base and external pointer for the {receiver}. The
- // {external_pointer} might be invalid if the {buffer} was detached, so
- // we need to make sure that any access is properly guarded.
+ DCHECK(!typed_array->is_on_heap());
+ // Load the (known) data pointer for the {receiver} and set {base_pointer}
+ // and {external_pointer} to the values that allow generating typed
+ // element accesses using the known data pointer.
+ // The data pointer might be invalid if the {buffer} was detached,
+ // so we need to make sure that any access is properly guarded.
base_pointer = jsgraph()->ZeroConstant();
- external_pointer =
- jsgraph()->PointerConstant(typed_array->external_pointer());
+ external_pointer = jsgraph()->PointerConstant(typed_array->data_ptr());
} else {
// Load the {receiver}'s length.
length = effect = graph()->NewNode(
@@ -3168,6 +3264,22 @@ Node* JSNativeContextSpecialization::BuildCheckEqualsName(NameRef const& name,
control);
}
+Node* JSNativeContextSpecialization::AppendExceptionHandling(
+ Node* effect, Node* control, Node* merge, Node* phi, Node* effect_phi) {
+ DCHECK_EQ(effect, control);
+ int input_count = merge->InputCount() + 1;
+ Node* if_exception =
+ graph()->NewNode(common()->IfException(), effect, control);
+ merge->InsertInput(graph()->zone(), 0, if_exception);
+ NodeProperties::ChangeOp(merge, common()->Merge(input_count));
+ phi->InsertInput(graph()->zone(), 0, if_exception);
+ NodeProperties::ChangeOp(
+ phi, common()->Phi(MachineRepresentation::kTagged, input_count));
+ effect_phi->InsertInput(graph()->zone(), 0, if_exception);
+ NodeProperties::ChangeOp(effect_phi, common()->EffectPhi(input_count));
+ return graph()->NewNode(common()->IfSuccess(), control);
+}
+
bool JSNativeContextSpecialization::CanTreatHoleAsUndefined(
ZoneVector<Handle<Map>> const& receiver_maps) {
// Check if all {receiver_maps} have one of the initial Array.prototype
diff --git a/deps/v8/src/compiler/js-native-context-specialization.h b/deps/v8/src/compiler/js-native-context-specialization.h
index a0707b9830..429be0bb24 100644
--- a/deps/v8/src/compiler/js-native-context-specialization.h
+++ b/deps/v8/src/compiler/js-native-context-specialization.h
@@ -101,10 +101,6 @@ class V8_EXPORT_PRIVATE JSNativeContextSpecialization final
base::Optional<NameRef> static_name,
Node* value, FeedbackSource const& source,
AccessMode access_mode);
- Reduction ReduceNamedAccessFromNexus(Node* node, Node* value,
- FeedbackSource const& source,
- NameRef const& name,
- AccessMode access_mode);
Reduction ReduceNamedAccess(Node* node, Node* value,
NamedAccessFeedback const& processed,
AccessMode access_mode, Node* key = nullptr);
@@ -207,6 +203,12 @@ class V8_EXPORT_PRIVATE JSNativeContextSpecialization final
Node* BuildCheckEqualsName(NameRef const& name, Node* value, Node* effect,
Node* control);
+ // Attach a pair of success and exception paths to a given control path.
+ // The exception path is joined to the Merge+Phi+EffectPhi nodes, while the
+ // success path is returned.
+ Node* AppendExceptionHandling(Node* effect, Node* control, Node* merge,
+ Node* phi, Node* effect_phi);
+
// Checks if we can turn the hole into undefined when loading an element
// from an object with one of the {receiver_maps}; sets up appropriate
// code dependencies and might use the array protector cell.
@@ -219,11 +221,6 @@ class V8_EXPORT_PRIVATE JSNativeContextSpecialization final
ElementAccessFeedback const& feedback, Node* receiver,
Node* effect) const;
- void FilterMapsAndGetPropertyAccessInfos(
- NamedAccessFeedback const& feedback, AccessMode access_mode,
- Node* receiver, Node* effect,
- ZoneVector<PropertyAccessInfo>* access_infos);
-
// Try to infer maps for the given {receiver} at the current {effect}.
bool InferReceiverMaps(Node* receiver, Node* effect,
ZoneVector<Handle<Map>>* receiver_maps) const;
diff --git a/deps/v8/src/compiler/js-operator.cc b/deps/v8/src/compiler/js-operator.cc
index d0581b59a5..42e5f90057 100644
--- a/deps/v8/src/compiler/js-operator.cc
+++ b/deps/v8/src/compiler/js-operator.cc
@@ -11,6 +11,7 @@
#include "src/compiler/operator.h"
#include "src/handles/handles-inl.h"
#include "src/objects/objects-inl.h"
+#include "src/objects/template-objects.h"
namespace v8 {
namespace internal {
@@ -284,8 +285,7 @@ bool operator!=(PropertyAccess const& lhs, PropertyAccess const& rhs) {
PropertyAccess const& PropertyAccessOf(const Operator* op) {
DCHECK(op->opcode() == IrOpcode::kJSHasProperty ||
op->opcode() == IrOpcode::kJSLoadProperty ||
- op->opcode() == IrOpcode::kJSStoreProperty ||
- op->opcode() == IrOpcode::kJSGetIterator);
+ op->opcode() == IrOpcode::kJSStoreProperty);
return OpParameter<PropertyAccess>(op);
}
@@ -473,6 +473,34 @@ const CreateBoundFunctionParameters& CreateBoundFunctionParametersOf(
return OpParameter<CreateBoundFunctionParameters>(op);
}
+bool operator==(GetTemplateObjectParameters const& lhs,
+ GetTemplateObjectParameters const& rhs) {
+ return lhs.description().location() == rhs.description().location() &&
+ lhs.shared().location() == rhs.shared().location() &&
+ lhs.feedback() == rhs.feedback();
+}
+
+bool operator!=(GetTemplateObjectParameters const& lhs,
+ GetTemplateObjectParameters const& rhs) {
+ return !(lhs == rhs);
+}
+
+size_t hash_value(GetTemplateObjectParameters const& p) {
+ return base::hash_combine(p.description().location(), p.shared().location(),
+ FeedbackSource::Hash()(p.feedback()));
+}
+
+std::ostream& operator<<(std::ostream& os,
+ GetTemplateObjectParameters const& p) {
+ return os << Brief(*p.description()) << ", " << Brief(*p.shared());
+}
+
+const GetTemplateObjectParameters& GetTemplateObjectParametersOf(
+ const Operator* op) {
+ DCHECK(op->opcode() == IrOpcode::kJSGetTemplateObject);
+ return OpParameter<GetTemplateObjectParameters>(op);
+}
+
bool operator==(CreateClosureParameters const& lhs,
CreateClosureParameters const& rhs) {
return lhs.allocation() == rhs.allocation() &&
@@ -562,6 +590,31 @@ const CloneObjectParameters& CloneObjectParametersOf(const Operator* op) {
return OpParameter<CloneObjectParameters>(op);
}
+std::ostream& operator<<(std::ostream& os, GetIteratorParameters const& p) {
+ return os << p.loadFeedback() << ", " << p.callFeedback();
+}
+
+bool operator==(GetIteratorParameters const& lhs,
+ GetIteratorParameters const& rhs) {
+ return lhs.loadFeedback() == rhs.loadFeedback() &&
+ lhs.callFeedback() == rhs.callFeedback();
+}
+
+bool operator!=(GetIteratorParameters const& lhs,
+ GetIteratorParameters const& rhs) {
+ return !(lhs == rhs);
+}
+
+GetIteratorParameters const& GetIteratorParametersOf(const Operator* op) {
+ DCHECK(op->opcode() == IrOpcode::kJSGetIterator);
+ return OpParameter<GetIteratorParameters>(op);
+}
+
+size_t hash_value(GetIteratorParameters const& p) {
+ return base::hash_combine(FeedbackSource::Hash()(p.loadFeedback()),
+ FeedbackSource::Hash()(p.callFeedback()));
+}
+
size_t hash_value(ForInMode mode) { return static_cast<uint8_t>(mode); }
std::ostream& operator<<(std::ostream& os, ForInMode mode) {
@@ -957,9 +1010,10 @@ const Operator* JSOperatorBuilder::LoadProperty(
access); // parameter
}
-const Operator* JSOperatorBuilder::GetIterator(FeedbackSource const& feedback) {
- PropertyAccess access(LanguageMode::kSloppy, feedback);
- return new (zone()) Operator1<PropertyAccess>( // --
+const Operator* JSOperatorBuilder::GetIterator(
+ FeedbackSource const& load_feedback, FeedbackSource const& call_feedback) {
+ GetIteratorParameters access(load_feedback, call_feedback);
+ return new (zone()) Operator1<GetIteratorParameters>( // --
IrOpcode::kJSGetIterator, Operator::kNoProperties, // opcode
"JSGetIterator", // name
1, 1, 1, 1, 1, 2, // counts
@@ -1257,6 +1311,18 @@ const Operator* JSOperatorBuilder::CreateLiteralObject(
parameters); // parameter
}
+const Operator* JSOperatorBuilder::GetTemplateObject(
+ Handle<TemplateObjectDescription> description,
+ Handle<SharedFunctionInfo> shared, FeedbackSource const& feedback) {
+ GetTemplateObjectParameters parameters(description, shared, feedback);
+ return new (zone()) Operator1<GetTemplateObjectParameters>( // --
+ IrOpcode::kJSGetTemplateObject, // opcode
+ Operator::kEliminatable, // properties
+ "JSGetTemplateObject", // name
+ 0, 1, 1, 1, 1, 0, // counts
+ parameters); // parameter
+}
+
const Operator* JSOperatorBuilder::CloneObject(FeedbackSource const& feedback,
int literal_flags) {
CloneObjectParameters parameters(feedback, literal_flags);
diff --git a/deps/v8/src/compiler/js-operator.h b/deps/v8/src/compiler/js-operator.h
index f795a2f402..47b0fff05a 100644
--- a/deps/v8/src/compiler/js-operator.h
+++ b/deps/v8/src/compiler/js-operator.h
@@ -409,13 +409,13 @@ class StoreGlobalParameters final {
: language_mode_(language_mode), name_(name), feedback_(feedback) {}
LanguageMode language_mode() const { return language_mode_; }
- const FeedbackSource& feedback() const { return feedback_; }
- const Handle<Name>& name() const { return name_; }
+ FeedbackSource const& feedback() const { return feedback_; }
+ Handle<Name> const& name() const { return name_; }
private:
- const LanguageMode language_mode_;
- const Handle<Name> name_;
- const FeedbackSource feedback_;
+ LanguageMode const language_mode_;
+ Handle<Name> const name_;
+ FeedbackSource const feedback_;
};
bool operator==(StoreGlobalParameters const&, StoreGlobalParameters const&);
@@ -598,6 +598,35 @@ std::ostream& operator<<(std::ostream&, CreateClosureParameters const&);
const CreateClosureParameters& CreateClosureParametersOf(const Operator* op);
+class GetTemplateObjectParameters final {
+ public:
+ GetTemplateObjectParameters(Handle<TemplateObjectDescription> description,
+ Handle<SharedFunctionInfo> shared,
+ FeedbackSource const& feedback)
+ : description_(description), shared_(shared), feedback_(feedback) {}
+
+ Handle<TemplateObjectDescription> description() const { return description_; }
+ Handle<SharedFunctionInfo> shared() const { return shared_; }
+ FeedbackSource const& feedback() const { return feedback_; }
+
+ private:
+ Handle<TemplateObjectDescription> const description_;
+ Handle<SharedFunctionInfo> const shared_;
+ FeedbackSource const feedback_;
+};
+
+bool operator==(GetTemplateObjectParameters const&,
+ GetTemplateObjectParameters const&);
+bool operator!=(GetTemplateObjectParameters const&,
+ GetTemplateObjectParameters const&);
+
+size_t hash_value(GetTemplateObjectParameters const&);
+
+std::ostream& operator<<(std::ostream&, GetTemplateObjectParameters const&);
+
+const GetTemplateObjectParameters& GetTemplateObjectParametersOf(
+ const Operator* op);
+
// Defines shared information for the literal that should be created. This is
// used as parameter by JSCreateLiteralArray, JSCreateLiteralObject and
// JSCreateLiteralRegExp operators.
@@ -653,6 +682,31 @@ std::ostream& operator<<(std::ostream&, CloneObjectParameters const&);
const CloneObjectParameters& CloneObjectParametersOf(const Operator* op);
+// Defines the shared information for the iterator symbol that's loaded and
+// called. This is used as a parameter by the JSGetIterator operator.
+class GetIteratorParameters final {
+ public:
+ GetIteratorParameters(const FeedbackSource& load_feedback,
+ const FeedbackSource& call_feedback)
+ : load_feedback_(load_feedback), call_feedback_(call_feedback) {}
+
+ FeedbackSource const& loadFeedback() const { return load_feedback_; }
+ FeedbackSource const& callFeedback() const { return call_feedback_; }
+
+ private:
+ FeedbackSource const load_feedback_;
+ FeedbackSource const call_feedback_;
+};
+
+bool operator==(GetIteratorParameters const&, GetIteratorParameters const&);
+bool operator!=(GetIteratorParameters const&, GetIteratorParameters const&);
+
+size_t hash_value(GetIteratorParameters const&);
+
+std::ostream& operator<<(std::ostream&, GetIteratorParameters const&);
+
+const GetIteratorParameters& GetIteratorParametersOf(const Operator* op);
+
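// Illustrative sketch (editorial, not part of this patch): the two feedback
// sources round-trip through the operator's parameter. Assuming a
// JSOperatorBuilder* javascript and FeedbackSource values load_fb and
// call_fb are in scope:
//
//   const Operator* op = javascript->GetIterator(load_fb, call_fb);
//   GetIteratorParameters const& p = GetIteratorParametersOf(op);
//   DCHECK(p.loadFeedback() == load_fb);
//   DCHECK(p.callFeedback() == call_fb);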
// Descriptor used by the JSForInPrepare and JSForInNext opcodes.
enum class ForInMode : uint8_t {
kUseEnumCacheKeysAndIndices,
@@ -742,7 +796,6 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
const Operator* CreateEmptyLiteralArray(FeedbackSource const& feedback);
const Operator* CreateArrayFromIterable();
const Operator* CreateEmptyLiteralObject();
-
const Operator* CreateLiteralObject(
Handle<ObjectBoilerplateDescription> constant,
FeedbackSource const& feedback, int literal_flags,
@@ -753,6 +806,10 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
FeedbackSource const& feedback,
int literal_flags);
+ const Operator* GetTemplateObject(
+ Handle<TemplateObjectDescription> description,
+ Handle<SharedFunctionInfo> shared, FeedbackSource const& feedback);
+
const Operator* CallForwardVarargs(size_t arity, uint32_t start_index);
const Operator* Call(
size_t arity, CallFrequency const& frequency = CallFrequency(),
@@ -856,7 +913,8 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
const Operator* ParseInt();
const Operator* RegExpTest();
- const Operator* GetIterator(FeedbackSource const& feedback);
+ const Operator* GetIterator(FeedbackSource const& load_feedback,
+ FeedbackSource const& call_feedback);
private:
Zone* zone() const { return zone_; }
diff --git a/deps/v8/src/compiler/js-type-hint-lowering.cc b/deps/v8/src/compiler/js-type-hint-lowering.cc
index e1ff928cec..9a6b367ddf 100644
--- a/deps/v8/src/compiler/js-type-hint-lowering.cc
+++ b/deps/v8/src/compiler/js-type-hint-lowering.cc
@@ -482,12 +482,32 @@ JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceConstructOperation(
return LoweringResult::NoChange();
}
+JSTypeHintLowering::LoweringResult
+JSTypeHintLowering::ReduceGetIteratorOperation(const Operator* op,
+ Node* receiver, Node* effect,
+ Node* control,
+ FeedbackSlot load_slot,
+ FeedbackSlot call_slot) const {
+ DCHECK_EQ(IrOpcode::kJSGetIterator, op->opcode());
+ // Insert soft deopt if the load feedback is invalid.
+ if (Node* node = TryBuildSoftDeopt(
+ load_slot, effect, control,
+ DeoptimizeReason::kInsufficientTypeFeedbackForGenericNamedAccess)) {
+ return LoweringResult::Exit(node);
+ }
+ // Insert soft deopt if the call feedback is invalid.
+ if (Node* node = TryBuildSoftDeopt(
+ call_slot, effect, control,
+ DeoptimizeReason::kInsufficientTypeFeedbackForCall)) {
+ return LoweringResult::Exit(node);
+ }
+ return LoweringResult::NoChange();
+}
+
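// Editorial note (illustrative): the two checks above mirror the two feedback
// slots that GetIterator now carries; insufficient feedback on either the
// Symbol.iterator load or the subsequent call inserts a soft deopt back to
// the interpreter before any further lowering is attempted.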
JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceLoadNamedOperation(
const Operator* op, Node* receiver, Node* effect, Node* control,
FeedbackSlot slot) const {
- // JSGetIterator involves a named load of the Symbol.iterator property.
- DCHECK(op->opcode() == IrOpcode::kJSLoadNamed ||
- op->opcode() == IrOpcode::kJSGetIterator);
+ DCHECK_EQ(IrOpcode::kJSLoadNamed, op->opcode());
if (Node* node = TryBuildSoftDeopt(
slot, effect, control,
DeoptimizeReason::kInsufficientTypeFeedbackForGenericNamedAccess)) {
diff --git a/deps/v8/src/compiler/js-type-hint-lowering.h b/deps/v8/src/compiler/js-type-hint-lowering.h
index 3e46fb2ec2..303e2f8dcf 100644
--- a/deps/v8/src/compiler/js-type-hint-lowering.h
+++ b/deps/v8/src/compiler/js-type-hint-lowering.h
@@ -134,6 +134,13 @@ class JSTypeHintLowering {
int arg_count, Node* effect,
Node* control,
FeedbackSlot slot) const;
+
+ // Potential reduction of property access and call operations.
+ LoweringResult ReduceGetIteratorOperation(const Operator* op, Node* obj,
+ Node* effect, Node* control,
+ FeedbackSlot load_slot,
+ FeedbackSlot call_slot) const;
+
// Potential reduction of property access operations.
LoweringResult ReduceLoadNamedOperation(const Operator* op, Node* obj,
Node* effect, Node* control,
diff --git a/deps/v8/src/compiler/js-typed-lowering.cc b/deps/v8/src/compiler/js-typed-lowering.cc
index 8caafe6aad..035457c62b 100644
--- a/deps/v8/src/compiler/js-typed-lowering.cc
+++ b/deps/v8/src/compiler/js-typed-lowering.cc
@@ -17,6 +17,7 @@
#include "src/compiler/operator-properties.h"
#include "src/compiler/type-cache.h"
#include "src/compiler/types.h"
+#include "src/execution/protectors.h"
#include "src/objects/js-generator.h"
#include "src/objects/module-inl.h"
#include "src/objects/objects-inl.h"
@@ -567,9 +568,10 @@ Reduction JSTypedLowering::ReduceJSAdd(Node* node) {
Node* length =
graph()->NewNode(simplified()->NumberAdd(), left_length, right_length);
- CellRef string_length_protector(broker(),
- factory()->string_length_protector());
- if (string_length_protector.value().AsSmi() == Isolate::kProtectorValid) {
+ PropertyCellRef string_length_protector(
+ broker(), factory()->string_length_protector());
+ if (string_length_protector.value().AsSmi() ==
+ Protectors::kProtectorValid) {
// We can just deoptimize if the {length} is out-of-bounds. Besides
// generating a shorter code sequence than the version below, this
// has the additional benefit of not holding on to the lazy {frame_state}
@@ -2025,8 +2027,7 @@ Reduction JSTypedLowering::ReduceJSLoadMessage(Node* node) {
ExternalReference const ref =
ExternalReference::address_of_pending_message_obj(isolate());
node->ReplaceInput(0, jsgraph()->ExternalConstant(ref));
- NodeProperties::ChangeOp(
- node, simplified()->LoadField(AccessBuilder::ForExternalTaggedValue()));
+ NodeProperties::ChangeOp(node, simplified()->LoadMessage());
return Changed(node);
}
@@ -2037,8 +2038,7 @@ Reduction JSTypedLowering::ReduceJSStoreMessage(Node* node) {
Node* value = NodeProperties::GetValueInput(node, 0);
node->ReplaceInput(0, jsgraph()->ExternalConstant(ref));
node->ReplaceInput(1, value);
- NodeProperties::ChangeOp(
- node, simplified()->StoreField(AccessBuilder::ForExternalTaggedValue()));
+ NodeProperties::ChangeOp(node, simplified()->StoreMessage());
return Changed(node);
}
diff --git a/deps/v8/src/compiler/machine-graph-verifier.cc b/deps/v8/src/compiler/machine-graph-verifier.cc
index 4c7ee1d141..f6b747c04d 100644
--- a/deps/v8/src/compiler/machine-graph-verifier.cc
+++ b/deps/v8/src/compiler/machine-graph-verifier.cc
@@ -241,7 +241,7 @@ class MachineRepresentationInferrer {
MachineType::PointerRepresentation();
break;
case IrOpcode::kBitcastTaggedToWord:
- case IrOpcode::kBitcastTaggedSignedToWord:
+ case IrOpcode::kBitcastTaggedToWordForTagAndSmiBits:
representation_vector_[node->id()] =
MachineType::PointerRepresentation();
break;
@@ -437,7 +437,7 @@ class MachineRepresentationChecker {
MachineRepresentation::kWord64);
break;
case IrOpcode::kBitcastTaggedToWord:
- case IrOpcode::kBitcastTaggedSignedToWord:
+ case IrOpcode::kBitcastTaggedToWordForTagAndSmiBits:
case IrOpcode::kTaggedPoisonOnSpeculation:
CheckValueInputIsTagged(node, 0);
break;
@@ -461,7 +461,7 @@ class MachineRepresentationChecker {
CheckValueInputForFloat64Op(node, 0);
break;
case IrOpcode::kWord64Equal:
- if (Is64()) {
+ if (Is64() && !COMPRESS_POINTERS_BOOL) {
CheckValueInputIsTaggedOrPointer(node, 0);
CheckValueInputIsTaggedOrPointer(node, 1);
if (!is_stub_) {
@@ -1007,6 +1007,13 @@ class MachineRepresentationChecker {
return IsAnyCompressed(actual);
case MachineRepresentation::kTaggedSigned:
case MachineRepresentation::kTaggedPointer:
+ // TODO(tebbi): At the moment, the machine graph doesn't contain
+ // reliable information about whether a node is kTaggedSigned,
+ // kTaggedPointer or kTagged, and often this is context-dependent. We
+ // should at least check for obvious violations: kTaggedSigned where we
+ // expect kTaggedPointer and the other way around; but at the moment,
+ // such violations only occur in dead code.
+ return IsAnyTagged(actual);
case MachineRepresentation::kCompressedSigned:
case MachineRepresentation::kCompressedPointer:
case MachineRepresentation::kFloat32:
diff --git a/deps/v8/src/compiler/machine-operator-reducer.cc b/deps/v8/src/compiler/machine-operator-reducer.cc
index 11124579f6..38013d228c 100644
--- a/deps/v8/src/compiler/machine-operator-reducer.cc
+++ b/deps/v8/src/compiler/machine-operator-reducer.cc
@@ -681,7 +681,7 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
Int64Matcher m(node->InputAt(0));
if (m.HasValue()) return ReplaceInt32(static_cast<int32_t>(m.Value()));
if (m.IsChangeInt32ToInt64()) return Replace(m.node()->InputAt(0));
- if (m.IsBitcastTaggedSignedToWord()) {
+ if (m.IsBitcastTaggedToWordForTagAndSmiBits()) {
Int64Matcher n(m.node()->InputAt(0));
if (n.IsChangeCompressedToTagged()) {
DCHECK(machine()->Is64() && SmiValuesAre31Bits());
@@ -725,7 +725,7 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
case IrOpcode::kFloat64RoundDown:
return ReduceFloat64RoundDown(node);
case IrOpcode::kBitcastTaggedToWord:
- case IrOpcode::kBitcastTaggedSignedToWord: {
+ case IrOpcode::kBitcastTaggedToWordForTagAndSmiBits: {
NodeMatcher m(node->InputAt(0));
if (m.IsBitcastWordToTaggedSigned()) {
RelaxEffectsAndControls(node);
diff --git a/deps/v8/src/compiler/machine-operator.cc b/deps/v8/src/compiler/machine-operator.cc
index 0355534408..b450fb60da 100644
--- a/deps/v8/src/compiler/machine-operator.cc
+++ b/deps/v8/src/compiler/machine-operator.cc
@@ -146,7 +146,8 @@ MachineType AtomicOpType(Operator const* op) {
V(Word64Clz, Operator::kNoProperties, 1, 0, 1) \
V(Word32ReverseBytes, Operator::kNoProperties, 1, 0, 1) \
V(Word64ReverseBytes, Operator::kNoProperties, 1, 0, 1) \
- V(BitcastTaggedSignedToWord, Operator::kNoProperties, 1, 0, 1) \
+ V(Simd128ReverseBytes, Operator::kNoProperties, 1, 0, 1) \
+ V(BitcastTaggedToWordForTagAndSmiBits, Operator::kNoProperties, 1, 0, 1) \
V(BitcastWordToTaggedSigned, Operator::kNoProperties, 1, 0, 1) \
V(BitcastWord32ToCompressedSigned, Operator::kNoProperties, 1, 0, 1) \
V(BitcastCompressedSignedToWord32, Operator::kNoProperties, 1, 0, 1) \
@@ -255,6 +256,7 @@ MachineType AtomicOpType(Operator const* op) {
V(F64x2Splat, Operator::kNoProperties, 1, 0, 1) \
V(F64x2Abs, Operator::kNoProperties, 1, 0, 1) \
V(F64x2Neg, Operator::kNoProperties, 1, 0, 1) \
+ V(F64x2Sqrt, Operator::kNoProperties, 1, 0, 1) \
V(F64x2Add, Operator::kCommutative, 2, 0, 1) \
V(F64x2Sub, Operator::kNoProperties, 2, 0, 1) \
V(F64x2Mul, Operator::kCommutative, 2, 0, 1) \
@@ -265,11 +267,14 @@ MachineType AtomicOpType(Operator const* op) {
V(F64x2Ne, Operator::kCommutative, 2, 0, 1) \
V(F64x2Lt, Operator::kNoProperties, 2, 0, 1) \
V(F64x2Le, Operator::kNoProperties, 2, 0, 1) \
+ V(F64x2Qfma, Operator::kNoProperties, 3, 0, 1) \
+ V(F64x2Qfms, Operator::kNoProperties, 3, 0, 1) \
V(F32x4Splat, Operator::kNoProperties, 1, 0, 1) \
V(F32x4SConvertI32x4, Operator::kNoProperties, 1, 0, 1) \
V(F32x4UConvertI32x4, Operator::kNoProperties, 1, 0, 1) \
V(F32x4Abs, Operator::kNoProperties, 1, 0, 1) \
V(F32x4Neg, Operator::kNoProperties, 1, 0, 1) \
+ V(F32x4Sqrt, Operator::kNoProperties, 1, 0, 1) \
V(F32x4RecipApprox, Operator::kNoProperties, 1, 0, 1) \
V(F32x4RecipSqrtApprox, Operator::kNoProperties, 1, 0, 1) \
V(F32x4Add, Operator::kCommutative, 2, 0, 1) \
@@ -283,6 +288,8 @@ MachineType AtomicOpType(Operator const* op) {
V(F32x4Ne, Operator::kCommutative, 2, 0, 1) \
V(F32x4Lt, Operator::kNoProperties, 2, 0, 1) \
V(F32x4Le, Operator::kNoProperties, 2, 0, 1) \
+ V(F32x4Qfma, Operator::kNoProperties, 3, 0, 1) \
+ V(F32x4Qfms, Operator::kNoProperties, 3, 0, 1) \
V(I64x2Splat, Operator::kNoProperties, 1, 0, 1) \
V(I64x2Neg, Operator::kNoProperties, 1, 0, 1) \
V(I64x2Shl, Operator::kNoProperties, 2, 0, 1) \
@@ -395,6 +402,7 @@ MachineType AtomicOpType(Operator const* op) {
V(S1x8AllTrue, Operator::kNoProperties, 1, 0, 1) \
V(S1x16AnyTrue, Operator::kNoProperties, 1, 0, 1) \
V(S1x16AllTrue, Operator::kNoProperties, 1, 0, 1) \
+ V(S8x16Swizzle, Operator::kNoProperties, 2, 0, 1) \
V(StackPointerGreaterThan, Operator::kNoProperties, 1, 0, 1)
// The format is:
diff --git a/deps/v8/src/compiler/machine-operator.h b/deps/v8/src/compiler/machine-operator.h
index 17db145f58..1bd806eefb 100644
--- a/deps/v8/src/compiler/machine-operator.h
+++ b/deps/v8/src/compiler/machine-operator.h
@@ -239,6 +239,7 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const OptionalOperator Word64ReverseBits();
const Operator* Word32ReverseBytes();
const Operator* Word64ReverseBytes();
+ const Operator* Simd128ReverseBytes();
const OptionalOperator Int32AbsWithOverflow();
const OptionalOperator Int64AbsWithOverflow();
@@ -301,8 +302,13 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
// This operator reinterprets the bits of a tagged pointer as a word.
const Operator* BitcastTaggedToWord();
- // This operator reinterprets the bits of a Smi as a word.
- const Operator* BitcastTaggedSignedToWord();
+ // This operator reinterprets the bits of a tagged value as a word,
+ // preserving the non-pointer bits (all the bits that are not modified by
+ // GC):
+ // 1) smi tag
+ // 2) weak tag
+ // 3) smi payload, if the tagged value is a smi.
+ // Note that it's illegal to "look" at the pointer bits of non-smi values.
+ const Operator* BitcastTaggedToWordForTagAndSmiBits();
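// Illustrative sketch (editorial, not part of this patch): a typical safe
// use is a Smi check, which only inspects the tag bit that this bitcast is
// guaranteed to preserve. Assuming Graph* graph, MachineOperatorBuilder*
// machine and JSGraph* jsgraph are in scope:
//
//   Node* word = graph->NewNode(
//       machine->BitcastTaggedToWordForTagAndSmiBits(), value);
//   Node* tag = graph->NewNode(machine->WordAnd(), word,
//                              jsgraph->IntPtrConstant(kSmiTagMask));
//   Node* is_smi = graph->NewNode(machine->WordEqual(), tag,
//                                 jsgraph->IntPtrConstant(kSmiTag));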
// This operator reinterprets the bits of a tagged MaybeObject pointer as
// word.
@@ -477,6 +483,7 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* F64x2Splat();
const Operator* F64x2Abs();
const Operator* F64x2Neg();
+ const Operator* F64x2Sqrt();
const Operator* F64x2Add();
const Operator* F64x2Sub();
const Operator* F64x2Mul();
@@ -489,6 +496,8 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* F64x2Ne();
const Operator* F64x2Lt();
const Operator* F64x2Le();
+ const Operator* F64x2Qfma();
+ const Operator* F64x2Qfms();
const Operator* F32x4Splat();
const Operator* F32x4ExtractLane(int32_t);
@@ -497,6 +506,7 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* F32x4UConvertI32x4();
const Operator* F32x4Abs();
const Operator* F32x4Neg();
+ const Operator* F32x4Sqrt();
const Operator* F32x4RecipApprox();
const Operator* F32x4RecipSqrtApprox();
const Operator* F32x4Add();
@@ -510,6 +520,8 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* F32x4Ne();
const Operator* F32x4Lt();
const Operator* F32x4Le();
+ const Operator* F32x4Qfma();
+ const Operator* F32x4Qfms();
const Operator* I64x2Splat();
const Operator* I64x2ExtractLane(int32_t);
@@ -632,6 +644,7 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* S128Not();
const Operator* S128Select();
+ const Operator* S8x16Swizzle();
const Operator* S8x16Shuffle(const uint8_t shuffle[16]);
const Operator* S1x2AnyTrue();
diff --git a/deps/v8/src/compiler/memory-lowering.cc b/deps/v8/src/compiler/memory-lowering.cc
new file mode 100644
index 0000000000..1e112e8e82
--- /dev/null
+++ b/deps/v8/src/compiler/memory-lowering.cc
@@ -0,0 +1,551 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/memory-lowering.h"
+
+#include "src/codegen/interface-descriptors.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/node.h"
+#include "src/compiler/simplified-operator.h"
+#include "src/roots/roots-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// An allocation group represents a set of allocations that have been folded
+// together.
+class MemoryLowering::AllocationGroup final : public ZoneObject {
+ public:
+ AllocationGroup(Node* node, AllocationType allocation, Zone* zone);
+ AllocationGroup(Node* node, AllocationType allocation, Node* size,
+ Zone* zone);
+ ~AllocationGroup() = default;
+
+ void Add(Node* object);
+ bool Contains(Node* object) const;
+ bool IsYoungGenerationAllocation() const {
+ return allocation() == AllocationType::kYoung;
+ }
+
+ AllocationType allocation() const { return allocation_; }
+ Node* size() const { return size_; }
+
+ private:
+ ZoneSet<NodeId> node_ids_;
+ AllocationType const allocation_;
+ Node* const size_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(AllocationGroup);
+};
+
+MemoryLowering::MemoryLowering(JSGraph* jsgraph, Zone* zone,
+ PoisoningMitigationLevel poisoning_level,
+ AllocationFolding allocation_folding,
+ WriteBarrierAssertFailedCallback callback,
+ const char* function_debug_name)
+ : jsgraph_(jsgraph),
+ zone_(zone),
+ graph_assembler_(jsgraph, nullptr, nullptr, zone),
+ allocation_folding_(allocation_folding),
+ poisoning_level_(poisoning_level),
+ write_barrier_assert_failed_(callback),
+ function_debug_name_(function_debug_name) {}
+
+Reduction MemoryLowering::Reduce(Node* node) {
+ switch (node->opcode()) {
+ case IrOpcode::kAllocate:
+ // Allocate nodes were purged from the graph in effect-control
+ // linearization.
+ UNREACHABLE();
+ case IrOpcode::kAllocateRaw:
+ return ReduceAllocateRaw(node);
+ case IrOpcode::kLoadFromObject:
+ return ReduceLoadFromObject(node);
+ case IrOpcode::kLoadElement:
+ return ReduceLoadElement(node);
+ case IrOpcode::kLoadField:
+ return ReduceLoadField(node);
+ case IrOpcode::kStoreToObject:
+ return ReduceStoreToObject(node);
+ case IrOpcode::kStoreElement:
+ return ReduceStoreElement(node);
+ case IrOpcode::kStoreField:
+ return ReduceStoreField(node);
+ case IrOpcode::kStore:
+ return ReduceStore(node);
+ default:
+ return NoChange();
+ }
+}
+
+#define __ gasm()->
+
+Reduction MemoryLowering::ReduceAllocateRaw(
+ Node* node, AllocationType allocation_type,
+ AllowLargeObjects allow_large_objects, AllocationState const** state_ptr) {
+ DCHECK_EQ(IrOpcode::kAllocateRaw, node->opcode());
+ DCHECK_IMPLIES(allocation_folding_ == AllocationFolding::kDoAllocationFolding,
+ state_ptr != nullptr);
+ Node* value;
+ Node* size = node->InputAt(0);
+ Node* effect = node->InputAt(1);
+ Node* control = node->InputAt(2);
+
+ gasm()->Reset(effect, control);
+
+ Node* allocate_builtin;
+ if (allocation_type == AllocationType::kYoung) {
+ if (allow_large_objects == AllowLargeObjects::kTrue) {
+ allocate_builtin = __ AllocateInYoungGenerationStubConstant();
+ } else {
+ allocate_builtin = __ AllocateRegularInYoungGenerationStubConstant();
+ }
+ } else {
+ if (allow_large_objects == AllowLargeObjects::kTrue) {
+ allocate_builtin = __ AllocateInOldGenerationStubConstant();
+ } else {
+ allocate_builtin = __ AllocateRegularInOldGenerationStubConstant();
+ }
+ }
+
+ // Determine the top/limit addresses.
+ Node* top_address = __ ExternalConstant(
+ allocation_type == AllocationType::kYoung
+ ? ExternalReference::new_space_allocation_top_address(isolate())
+ : ExternalReference::old_space_allocation_top_address(isolate()));
+ Node* limit_address = __ ExternalConstant(
+ allocation_type == AllocationType::kYoung
+ ? ExternalReference::new_space_allocation_limit_address(isolate())
+ : ExternalReference::old_space_allocation_limit_address(isolate()));
+
+ // Check if we can fold this allocation into a previous allocation represented
+ // by the incoming {state}.
+ IntPtrMatcher m(size);
+ if (m.IsInRange(0, kMaxRegularHeapObjectSize) && FLAG_inline_new &&
+ allocation_folding_ == AllocationFolding::kDoAllocationFolding) {
+ intptr_t const object_size = m.Value();
+ AllocationState const* state = *state_ptr;
+ if (state->size() <= kMaxRegularHeapObjectSize - object_size &&
+ state->group()->allocation() == allocation_type) {
+ // We can fold this Allocate {node} into the allocation {group}
+ // represented by the given {state}. Compute the upper bound for
+ // the new {state}.
+ intptr_t const state_size = state->size() + object_size;
+
+ // Update the reservation check to the actual maximum upper bound.
+ AllocationGroup* const group = state->group();
+ if (machine()->Is64()) {
+ if (OpParameter<int64_t>(group->size()->op()) < state_size) {
+ NodeProperties::ChangeOp(group->size(),
+ common()->Int64Constant(state_size));
+ }
+ } else {
+ if (OpParameter<int32_t>(group->size()->op()) < state_size) {
+ NodeProperties::ChangeOp(
+ group->size(),
+ common()->Int32Constant(static_cast<int32_t>(state_size)));
+ }
+ }
+
+ // Update the allocation top with the new object allocation.
+ // TODO(bmeurer): Defer writing back top as much as possible.
+ Node* top = __ IntAdd(state->top(), size);
+ __ Store(StoreRepresentation(MachineType::PointerRepresentation(),
+ kNoWriteBarrier),
+ top_address, __ IntPtrConstant(0), top);
+
+ // Compute the effective inner allocated address.
+ value = __ BitcastWordToTagged(
+ __ IntAdd(state->top(), __ IntPtrConstant(kHeapObjectTag)));
+ effect = __ ExtractCurrentEffect();
+ control = __ ExtractCurrentControl();
+
+ // Extend the allocation {group}.
+ group->Add(value);
+ *state_ptr =
+ AllocationState::Open(group, state_size, top, effect, zone());
+ } else {
+ auto call_runtime = __ MakeDeferredLabel();
+ auto done = __ MakeLabel(MachineType::PointerRepresentation());
+
+ // Set up a mutable reservation size node; it will be patched as we fold
+ // additional allocations into this new group.
+ Node* size = __ UniqueIntPtrConstant(object_size);
+
+ // Load allocation top and limit.
+ Node* top =
+ __ Load(MachineType::Pointer(), top_address, __ IntPtrConstant(0));
+ Node* limit =
+ __ Load(MachineType::Pointer(), limit_address, __ IntPtrConstant(0));
+
+ // Check if we need to collect garbage before we can start bump pointer
+ // allocation (always done for folded allocations).
+ Node* check = __ UintLessThan(__ IntAdd(top, size), limit);
+
+ __ GotoIfNot(check, &call_runtime);
+ __ Goto(&done, top);
+
+ __ Bind(&call_runtime);
+ {
+ if (!allocate_operator_.is_set()) {
+ auto descriptor = AllocateDescriptor{};
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
+ graph()->zone(), descriptor, descriptor.GetStackParameterCount(),
+ CallDescriptor::kCanUseRoots, Operator::kNoThrow);
+ allocate_operator_.set(common()->Call(call_descriptor));
+ }
+ Node* vfalse = __ BitcastTaggedToWord(
+ __ Call(allocate_operator_.get(), allocate_builtin, size));
+ vfalse = __ IntSub(vfalse, __ IntPtrConstant(kHeapObjectTag));
+ __ Goto(&done, vfalse);
+ }
+
+ __ Bind(&done);
+
+ // Compute the new top and write it back.
+ top = __ IntAdd(done.PhiAt(0), __ IntPtrConstant(object_size));
+ __ Store(StoreRepresentation(MachineType::PointerRepresentation(),
+ kNoWriteBarrier),
+ top_address, __ IntPtrConstant(0), top);
+
+ // Compute the initial object address.
+ value = __ BitcastWordToTagged(
+ __ IntAdd(done.PhiAt(0), __ IntPtrConstant(kHeapObjectTag)));
+ effect = __ ExtractCurrentEffect();
+ control = __ ExtractCurrentControl();
+
+ // Start a new allocation group.
+ AllocationGroup* group =
+ new (zone()) AllocationGroup(value, allocation_type, size, zone());
+ *state_ptr =
+ AllocationState::Open(group, object_size, top, effect, zone());
+ }
+ } else {
+ auto call_runtime = __ MakeDeferredLabel();
+ auto done = __ MakeLabel(MachineRepresentation::kTaggedPointer);
+
+ // Load allocation top and limit.
+ Node* top =
+ __ Load(MachineType::Pointer(), top_address, __ IntPtrConstant(0));
+ Node* limit =
+ __ Load(MachineType::Pointer(), limit_address, __ IntPtrConstant(0));
+
+ // Compute the new top.
+ Node* new_top = __ IntAdd(top, size);
+
+ // Check if we can do bump pointer allocation here.
+ Node* check = __ UintLessThan(new_top, limit);
+ __ GotoIfNot(check, &call_runtime);
+ if (allow_large_objects == AllowLargeObjects::kTrue) {
+ __ GotoIfNot(
+ __ UintLessThan(size, __ IntPtrConstant(kMaxRegularHeapObjectSize)),
+ &call_runtime);
+ }
+ __ Store(StoreRepresentation(MachineType::PointerRepresentation(),
+ kNoWriteBarrier),
+ top_address, __ IntPtrConstant(0), new_top);
+ __ Goto(&done, __ BitcastWordToTagged(
+ __ IntAdd(top, __ IntPtrConstant(kHeapObjectTag))));
+
+ __ Bind(&call_runtime);
+ if (!allocate_operator_.is_set()) {
+ auto descriptor = AllocateDescriptor{};
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
+ graph()->zone(), descriptor, descriptor.GetStackParameterCount(),
+ CallDescriptor::kCanUseRoots, Operator::kNoThrow);
+ allocate_operator_.set(common()->Call(call_descriptor));
+ }
+ __ Goto(&done, __ Call(allocate_operator_.get(), allocate_builtin, size));
+
+ __ Bind(&done);
+ value = done.PhiAt(0);
+ effect = __ ExtractCurrentEffect();
+ control = __ ExtractCurrentControl();
+
+ if (state_ptr) {
+ // Create an unfoldable allocation group.
+ AllocationGroup* group =
+ new (zone()) AllocationGroup(value, allocation_type, zone());
+ *state_ptr = AllocationState::Closed(group, effect, zone());
+ }
+ }
+
+ // Replace all effect uses of {node} with the {effect} and replace
+ // all value uses of {node} with the {value}.
+ for (Edge edge : node->use_edges()) {
+ if (NodeProperties::IsEffectEdge(edge)) {
+ edge.UpdateTo(effect);
+ } else if (NodeProperties::IsValueEdge(edge)) {
+ edge.UpdateTo(value);
+ } else {
+ DCHECK(NodeProperties::IsControlEdge(edge));
+ edge.UpdateTo(control);
+ }
+ }
+
+ // Kill the {node} to make sure we don't leave dangling dead uses.
+ node->Kill();
+
+ return Replace(value);
+}
+
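// Editorial note (illustrative): both branches above implement bump-pointer
// allocation: compare top + size against limit, publish the new top on the
// fast path, and fall back to the Allocate builtin (which may trigger a GC)
// only when the reservation check fails or the object may be large.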
+Reduction MemoryLowering::ReduceLoadFromObject(Node* node) {
+ DCHECK_EQ(IrOpcode::kLoadFromObject, node->opcode());
+ ObjectAccess const& access = ObjectAccessOf(node->op());
+ NodeProperties::ChangeOp(node, machine()->Load(access.machine_type));
+ return Changed(node);
+}
+
+Reduction MemoryLowering::ReduceLoadElement(Node* node) {
+ DCHECK_EQ(IrOpcode::kLoadElement, node->opcode());
+ ElementAccess const& access = ElementAccessOf(node->op());
+ Node* index = node->InputAt(1);
+ node->ReplaceInput(1, ComputeIndex(access, index));
+ MachineType type = access.machine_type;
+ if (NeedsPoisoning(access.load_sensitivity)) {
+ NodeProperties::ChangeOp(node, machine()->PoisonedLoad(type));
+ } else {
+ NodeProperties::ChangeOp(node, machine()->Load(type));
+ }
+ return Changed(node);
+}
+
+Reduction MemoryLowering::ReduceLoadField(Node* node) {
+ DCHECK_EQ(IrOpcode::kLoadField, node->opcode());
+ FieldAccess const& access = FieldAccessOf(node->op());
+ Node* offset = jsgraph()->IntPtrConstant(access.offset - access.tag());
+ node->InsertInput(graph()->zone(), 1, offset);
+ MachineType type = access.machine_type;
+ if (NeedsPoisoning(access.load_sensitivity)) {
+ NodeProperties::ChangeOp(node, machine()->PoisonedLoad(type));
+ } else {
+ NodeProperties::ChangeOp(node, machine()->Load(type));
+ }
+ return Changed(node);
+}
+
+Reduction MemoryLowering::ReduceStoreToObject(Node* node,
+ AllocationState const* state) {
+ DCHECK_EQ(IrOpcode::kStoreToObject, node->opcode());
+ ObjectAccess const& access = ObjectAccessOf(node->op());
+ Node* object = node->InputAt(0);
+ Node* value = node->InputAt(2);
+ WriteBarrierKind write_barrier_kind = ComputeWriteBarrierKind(
+ node, object, value, state, access.write_barrier_kind);
+ NodeProperties::ChangeOp(
+ node, machine()->Store(StoreRepresentation(
+ access.machine_type.representation(), write_barrier_kind)));
+ return Changed(node);
+}
+
+Reduction MemoryLowering::ReduceStoreElement(Node* node,
+ AllocationState const* state) {
+ DCHECK_EQ(IrOpcode::kStoreElement, node->opcode());
+ ElementAccess const& access = ElementAccessOf(node->op());
+ Node* object = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+ node->ReplaceInput(1, ComputeIndex(access, index));
+ WriteBarrierKind write_barrier_kind = ComputeWriteBarrierKind(
+ node, object, value, state, access.write_barrier_kind);
+ NodeProperties::ChangeOp(
+ node, machine()->Store(StoreRepresentation(
+ access.machine_type.representation(), write_barrier_kind)));
+ return Changed(node);
+}
+
+Reduction MemoryLowering::ReduceStoreField(Node* node,
+ AllocationState const* state) {
+ DCHECK_EQ(IrOpcode::kStoreField, node->opcode());
+ FieldAccess const& access = FieldAccessOf(node->op());
+ Node* object = node->InputAt(0);
+ Node* value = node->InputAt(1);
+ WriteBarrierKind write_barrier_kind = ComputeWriteBarrierKind(
+ node, object, value, state, access.write_barrier_kind);
+ Node* offset = jsgraph()->IntPtrConstant(access.offset - access.tag());
+ node->InsertInput(graph()->zone(), 1, offset);
+ NodeProperties::ChangeOp(
+ node, machine()->Store(StoreRepresentation(
+ access.machine_type.representation(), write_barrier_kind)));
+ return Changed(node);
+}
+
+Reduction MemoryLowering::ReduceStore(Node* node,
+ AllocationState const* state) {
+ DCHECK_EQ(IrOpcode::kStore, node->opcode());
+ StoreRepresentation representation = StoreRepresentationOf(node->op());
+ Node* object = node->InputAt(0);
+ Node* value = node->InputAt(2);
+ WriteBarrierKind write_barrier_kind = ComputeWriteBarrierKind(
+ node, object, value, state, representation.write_barrier_kind());
+ if (write_barrier_kind != representation.write_barrier_kind()) {
+ NodeProperties::ChangeOp(
+ node, machine()->Store(StoreRepresentation(
+ representation.representation(), write_barrier_kind)));
+ return Changed(node);
+ }
+ return NoChange();
+}
+
+Node* MemoryLowering::ComputeIndex(ElementAccess const& access, Node* index) {
+ int const element_size_shift =
+ ElementSizeLog2Of(access.machine_type.representation());
+ if (element_size_shift) {
+ index = __ WordShl(index, __ IntPtrConstant(element_size_shift));
+ }
+ int const fixed_offset = access.header_size - access.tag();
+ if (fixed_offset) {
+ index = __ IntAdd(index, __ IntPtrConstant(fixed_offset));
+ }
+ return index;
+}
+
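// Worked example (editorial, not part of this patch): for tagged elements on
// a 64-bit build without pointer compression, the element size shift is 3, a
// FixedArray's header_size is 16 (map plus length) and access.tag() is
// kHeapObjectTag == 1, so an element index i becomes the untagged byte
// offset (i << 3) + 15.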
+#undef __
+
+namespace {
+
+bool ValueNeedsWriteBarrier(Node* value, Isolate* isolate) {
+ while (true) {
+ switch (value->opcode()) {
+ case IrOpcode::kBitcastWordToTaggedSigned:
+ case IrOpcode::kChangeTaggedSignedToCompressedSigned:
+ case IrOpcode::kChangeTaggedToCompressedSigned:
+ return false;
+ case IrOpcode::kChangeTaggedPointerToCompressedPointer:
+ case IrOpcode::kChangeTaggedToCompressed:
+ value = NodeProperties::GetValueInput(value, 0);
+ continue;
+ case IrOpcode::kHeapConstant: {
+ RootIndex root_index;
+ if (isolate->roots_table().IsRootHandle(HeapConstantOf(value->op()),
+ &root_index) &&
+ RootsTable::IsImmortalImmovable(root_index)) {
+ return false;
+ }
+ break;
+ }
+ default:
+ break;
+ }
+ return true;
+ }
+}
+
+} // namespace
+
+Reduction MemoryLowering::ReduceAllocateRaw(Node* node) {
+ DCHECK_EQ(IrOpcode::kAllocateRaw, node->opcode());
+ const AllocateParameters& allocation = AllocateParametersOf(node->op());
+ return ReduceAllocateRaw(node, allocation.allocation_type(),
+ allocation.allow_large_objects(), nullptr);
+}
+
+WriteBarrierKind MemoryLowering::ComputeWriteBarrierKind(
+ Node* node, Node* object, Node* value, AllocationState const* state,
+ WriteBarrierKind write_barrier_kind) {
+ if (state && state->IsYoungGenerationAllocation() &&
+ state->group()->Contains(object)) {
+ write_barrier_kind = kNoWriteBarrier;
+ }
+ if (!ValueNeedsWriteBarrier(value, isolate())) {
+ write_barrier_kind = kNoWriteBarrier;
+ }
+ if (write_barrier_kind == WriteBarrierKind::kAssertNoWriteBarrier) {
+ write_barrier_assert_failed_(node, object, function_debug_name_, zone());
+ }
+ return write_barrier_kind;
+}
+
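// Editorial note (illustrative): the first clause above elides the barrier
// when the store targets an object that the current AllocationState proves
// was just allocated in new space; the second elides it when the stored
// value can never require one (e.g. a Smi or an immortal immovable root).
// Only a kAssertNoWriteBarrier that survives both elisions triggers the
// failure callback.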
+bool MemoryLowering::NeedsPoisoning(LoadSensitivity load_sensitivity) const {
+ // Safe loads do not need poisoning.
+ if (load_sensitivity == LoadSensitivity::kSafe) return false;
+
+ switch (poisoning_level_) {
+ case PoisoningMitigationLevel::kDontPoison:
+ return false;
+ case PoisoningMitigationLevel::kPoisonAll:
+ return true;
+ case PoisoningMitigationLevel::kPoisonCriticalOnly:
+ return load_sensitivity == LoadSensitivity::kCritical;
+ }
+ UNREACHABLE();
+}
+
+MemoryLowering::AllocationGroup::AllocationGroup(Node* node,
+ AllocationType allocation,
+ Zone* zone)
+ : node_ids_(zone), allocation_(allocation), size_(nullptr) {
+ node_ids_.insert(node->id());
+}
+
+MemoryLowering::AllocationGroup::AllocationGroup(Node* node,
+ AllocationType allocation,
+ Node* size, Zone* zone)
+ : node_ids_(zone), allocation_(allocation), size_(size) {
+ node_ids_.insert(node->id());
+}
+
+void MemoryLowering::AllocationGroup::Add(Node* node) {
+ node_ids_.insert(node->id());
+}
+
+bool MemoryLowering::AllocationGroup::Contains(Node* node) const {
+ // Additions should stay within the same allocated object, so it's safe to
+ // ignore them.
+ while (node_ids_.find(node->id()) == node_ids_.end()) {
+ switch (node->opcode()) {
+ case IrOpcode::kBitcastTaggedToWord:
+ case IrOpcode::kBitcastWordToTagged:
+ case IrOpcode::kInt32Add:
+ case IrOpcode::kInt64Add:
+ node = NodeProperties::GetValueInput(node, 0);
+ break;
+ default:
+ return false;
+ }
+ }
+ return true;
+}
+
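// Illustrative sketch (editorial, not part of this patch): the walk lets
// Contains() recognize derived pointers into a tracked allocation. Given an
// {object} already in the group, and graph/machine/jsgraph in scope:
//
//   Node* raw = graph->NewNode(machine->BitcastTaggedToWord(), object);
//   Node* inner = graph->NewNode(machine->Int64Add(), raw,
//                                jsgraph->Int64Constant(8));
//   Node* tagged = graph->NewNode(machine->BitcastWordToTagged(), inner);
//
// Contains(tagged) follows the first value input through the bitcasts and
// the add back to {object} and still answers true.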
+MemoryLowering::AllocationState::AllocationState()
+ : group_(nullptr),
+ size_(std::numeric_limits<int>::max()),
+ top_(nullptr),
+ effect_(nullptr) {}
+
+MemoryLowering::AllocationState::AllocationState(AllocationGroup* group,
+ Node* effect)
+ : group_(group),
+ size_(std::numeric_limits<int>::max()),
+ top_(nullptr),
+ effect_(effect) {}
+
+MemoryLowering::AllocationState::AllocationState(AllocationGroup* group,
+ intptr_t size, Node* top,
+ Node* effect)
+ : group_(group), size_(size), top_(top), effect_(effect) {}
+
+bool MemoryLowering::AllocationState::IsYoungGenerationAllocation() const {
+ return group() && group()->IsYoungGenerationAllocation();
+}
+
+Graph* MemoryLowering::graph() const { return jsgraph()->graph(); }
+
+Isolate* MemoryLowering::isolate() const { return jsgraph()->isolate(); }
+
+CommonOperatorBuilder* MemoryLowering::common() const {
+ return jsgraph()->common();
+}
+
+MachineOperatorBuilder* MemoryLowering::machine() const {
+ return jsgraph()->machine();
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/memory-lowering.h b/deps/v8/src/compiler/memory-lowering.h
new file mode 100644
index 0000000000..a1f1fc1861
--- /dev/null
+++ b/deps/v8/src/compiler/memory-lowering.h
@@ -0,0 +1,136 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_MEMORY_LOWERING_H_
+#define V8_COMPILER_MEMORY_LOWERING_H_
+
+#include "src/compiler/graph-assembler.h"
+#include "src/compiler/graph-reducer.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Forward declarations.
+class CommonOperatorBuilder;
+struct ElementAccess;
+class Graph;
+class JSGraph;
+class MachineOperatorBuilder;
+class Node;
+class Operator;
+
+// Provides operations to lower all simplified memory access and allocation
+// related nodes (i.e. Allocate, LoadField, StoreField and friends) to machine
+// operators.
+class MemoryLowering final : public Reducer {
+ public:
+ enum class AllocationFolding { kDoAllocationFolding, kDontAllocationFolding };
+ class AllocationGroup;
+
+ // An allocation state is propagated on the effect paths through the graph.
+ class AllocationState final : public ZoneObject {
+ public:
+ static AllocationState const* Empty(Zone* zone) {
+ return new (zone) AllocationState();
+ }
+ static AllocationState const* Closed(AllocationGroup* group, Node* effect,
+ Zone* zone) {
+ return new (zone) AllocationState(group, effect);
+ }
+ static AllocationState const* Open(AllocationGroup* group, intptr_t size,
+ Node* top, Node* effect, Zone* zone) {
+ return new (zone) AllocationState(group, size, top, effect);
+ }
+
+ bool IsYoungGenerationAllocation() const;
+
+ AllocationGroup* group() const { return group_; }
+ Node* top() const { return top_; }
+ Node* effect() const { return effect_; }
+ intptr_t size() const { return size_; }
+
+ private:
+ AllocationState();
+ explicit AllocationState(AllocationGroup* group, Node* effect);
+ AllocationState(AllocationGroup* group, intptr_t size, Node* top,
+ Node* effect);
+
+ AllocationGroup* const group_;
+ // The upper bound of the combined allocated object size on the current path
+ // (max int if allocation folding is impossible on this path).
+ intptr_t const size_;
+ Node* const top_;
+ Node* const effect_;
+
+ DISALLOW_COPY_AND_ASSIGN(AllocationState);
+ };
+
+ using WriteBarrierAssertFailedCallback = std::function<void(
+ Node* node, Node* object, const char* name, Zone* temp_zone)>;
+
+ MemoryLowering(
+ JSGraph* jsgraph, Zone* zone, PoisoningMitigationLevel poisoning_level,
+ AllocationFolding allocation_folding =
+ AllocationFolding::kDontAllocationFolding,
+ WriteBarrierAssertFailedCallback callback = [](Node*, Node*, const char*,
+ Zone*) { UNREACHABLE(); },
+ const char* function_debug_name = nullptr);
+ ~MemoryLowering() = default;
+
+ const char* reducer_name() const override { return "MemoryReducer"; }
+
+ // Perform memory lowering reduction on the given Node.
+ Reduction Reduce(Node* node) override;
+
+ // Specific reducers for each op type, so that the MemoryOptimizer can keep
+ // track of the AllocationState.
+ Reduction ReduceAllocateRaw(Node* node, AllocationType allocation_type,
+ AllowLargeObjects allow_large_objects,
+ AllocationState const** state);
+ Reduction ReduceLoadFromObject(Node* node);
+ Reduction ReduceLoadElement(Node* node);
+ Reduction ReduceLoadField(Node* node);
+ Reduction ReduceStoreToObject(Node* node,
+ AllocationState const* state = nullptr);
+ Reduction ReduceStoreElement(Node* node,
+ AllocationState const* state = nullptr);
+ Reduction ReduceStoreField(Node* node,
+ AllocationState const* state = nullptr);
+ Reduction ReduceStore(Node* node, AllocationState const* state = nullptr);
+
+ private:
+ Reduction ReduceAllocateRaw(Node* node);
+ WriteBarrierKind ComputeWriteBarrierKind(Node* node, Node* object,
+ Node* value,
+ AllocationState const* state,
+ WriteBarrierKind);
+ Node* ComputeIndex(ElementAccess const& access, Node* index);
+ bool NeedsPoisoning(LoadSensitivity load_sensitivity) const;
+
+ Graph* graph() const;
+ Isolate* isolate() const;
+ Zone* zone() const { return zone_; }
+ JSGraph* jsgraph() const { return jsgraph_; }
+ CommonOperatorBuilder* common() const;
+ MachineOperatorBuilder* machine() const;
+ GraphAssembler* gasm() { return &graph_assembler_; }
+
+ SetOncePointer<const Operator> allocate_operator_;
+ JSGraph* const jsgraph_;
+ Zone* zone_;
+ GraphAssembler graph_assembler_;
+ AllocationFolding allocation_folding_;
+ PoisoningMitigationLevel poisoning_level_;
+ WriteBarrierAssertFailedCallback write_barrier_assert_failed_;
+ const char* function_debug_name_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryLowering);
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_MEMORY_LOWERING_H_
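// Editorial note (illustrative): with this split, the lowering rules live in
// a plain Reducer that can also run without allocation-state tracking, while
// the MemoryOptimizer below keeps the effect-path walk and threads its
// AllocationState into the state-aware Reduce* overloads.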
diff --git a/deps/v8/src/compiler/memory-optimizer.cc b/deps/v8/src/compiler/memory-optimizer.cc
index 8684f2ce3c..6527dfb287 100644
--- a/deps/v8/src/compiler/memory-optimizer.cc
+++ b/deps/v8/src/compiler/memory-optimizer.cc
@@ -11,90 +11,12 @@
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/node.h"
-#include "src/compiler/simplified-operator.h"
#include "src/roots/roots-inl.h"
namespace v8 {
namespace internal {
namespace compiler {
-MemoryOptimizer::MemoryOptimizer(JSGraph* jsgraph, Zone* zone,
- PoisoningMitigationLevel poisoning_level,
- AllocationFolding allocation_folding,
- const char* function_debug_name,
- TickCounter* tick_counter)
- : jsgraph_(jsgraph),
- empty_state_(AllocationState::Empty(zone)),
- pending_(zone),
- tokens_(zone),
- zone_(zone),
- graph_assembler_(jsgraph, nullptr, nullptr, zone),
- poisoning_level_(poisoning_level),
- allocation_folding_(allocation_folding),
- function_debug_name_(function_debug_name),
- tick_counter_(tick_counter) {}
-
-void MemoryOptimizer::Optimize() {
- EnqueueUses(graph()->start(), empty_state());
- while (!tokens_.empty()) {
- Token const token = tokens_.front();
- tokens_.pop();
- VisitNode(token.node, token.state);
- }
- DCHECK(pending_.empty());
- DCHECK(tokens_.empty());
-}
-
-MemoryOptimizer::AllocationGroup::AllocationGroup(Node* node,
- AllocationType allocation,
- Zone* zone)
- : node_ids_(zone), allocation_(allocation), size_(nullptr) {
- node_ids_.insert(node->id());
-}
-
-MemoryOptimizer::AllocationGroup::AllocationGroup(Node* node,
- AllocationType allocation,
- Node* size, Zone* zone)
- : node_ids_(zone), allocation_(allocation), size_(size) {
- node_ids_.insert(node->id());
-}
-
-void MemoryOptimizer::AllocationGroup::Add(Node* node) {
- node_ids_.insert(node->id());
-}
-
-bool MemoryOptimizer::AllocationGroup::Contains(Node* node) const {
- // Additions should stay within the same allocated object, so it's safe to
- // ignore them.
- while (node_ids_.find(node->id()) == node_ids_.end()) {
- switch (node->opcode()) {
- case IrOpcode::kBitcastTaggedToWord:
- case IrOpcode::kBitcastWordToTagged:
- case IrOpcode::kInt32Add:
- case IrOpcode::kInt64Add:
- node = NodeProperties::GetValueInput(node, 0);
- break;
- default:
- return false;
- }
- }
- return true;
-}
-
-MemoryOptimizer::AllocationState::AllocationState()
- : group_(nullptr), size_(std::numeric_limits<int>::max()), top_(nullptr) {}
-
-MemoryOptimizer::AllocationState::AllocationState(AllocationGroup* group)
- : group_(group), size_(std::numeric_limits<int>::max()), top_(nullptr) {}
-
-MemoryOptimizer::AllocationState::AllocationState(AllocationGroup* group,
- intptr_t size, Node* top)
- : group_(group), size_(size), top_(top) {}
-
-bool MemoryOptimizer::AllocationState::IsYoungGenerationAllocation() const {
- return group() && group()->IsYoungGenerationAllocation();
-}
-
namespace {
bool CanAllocate(const Node* node) {
@@ -221,8 +143,67 @@ Node* EffectPhiForPhi(Node* phi) {
return nullptr;
}
+void WriteBarrierAssertFailed(Node* node, Node* object, const char* name,
+ Zone* temp_zone) {
+ std::stringstream str;
+ str << "MemoryOptimizer could not remove write barrier for node #"
+ << node->id() << "\n";
+ str << " Run mksnapshot with --csa-trap-on-node=" << name << ","
+ << node->id() << " to break in CSA code.\n";
+ Node* object_position = object;
+ if (object_position->opcode() == IrOpcode::kPhi) {
+ object_position = EffectPhiForPhi(object_position);
+ }
+ Node* allocating_node = nullptr;
+ if (object_position && object_position->op()->EffectOutputCount() > 0) {
+ allocating_node = SearchAllocatingNode(node, object_position, temp_zone);
+ }
+ if (allocating_node) {
+ str << "\n There is a potentially allocating node in between:\n";
+ str << " " << *allocating_node << "\n";
+ str << " Run mksnapshot with --csa-trap-on-node=" << name << ","
+ << allocating_node->id() << " to break there.\n";
+ if (allocating_node->opcode() == IrOpcode::kCall) {
+ str << " If this is a never-allocating runtime call, you can add an "
+ "exception to Runtime::MayAllocate.\n";
+ }
+ } else {
+ str << "\n It seems the store happened to something other than a "
+ "direct allocation:\n";
+ str << " " << *object << "\n";
+ str << " Run mksnapshot with --csa-trap-on-node=" << name << ","
+ << object->id() << " to break there.\n";
+ }
+ FATAL("%s", str.str().c_str());
+}
+
} // namespace
+MemoryOptimizer::MemoryOptimizer(
+ JSGraph* jsgraph, Zone* zone, PoisoningMitigationLevel poisoning_level,
+ MemoryLowering::AllocationFolding allocation_folding,
+ const char* function_debug_name, TickCounter* tick_counter)
+ : memory_lowering_(jsgraph, zone, poisoning_level, allocation_folding,
+ WriteBarrierAssertFailed, function_debug_name),
+ jsgraph_(jsgraph),
+ empty_state_(AllocationState::Empty(zone)),
+ pending_(zone),
+ tokens_(zone),
+ zone_(zone),
+ tick_counter_(tick_counter) {}
+
+void MemoryOptimizer::Optimize() {
+ EnqueueUses(graph()->start(), empty_state());
+ while (!tokens_.empty()) {
+ Token const token = tokens_.front();
+ tokens_.pop();
+ VisitNode(token.node, token.state);
+ }
+ DCHECK(pending_.empty());
+ DCHECK(tokens_.empty());
+}
+
void MemoryOptimizer::VisitNode(Node* node, AllocationState const* state) {
tick_counter_->DoTick();
DCHECK(!node->IsDead());
@@ -259,8 +240,6 @@ void MemoryOptimizer::VisitNode(Node* node, AllocationState const* state) {
DCHECK_EQ(0, node->op()->EffectOutputCount());
}
-#define __ gasm()->
-
bool MemoryOptimizer::AllocationTypeNeedsUpdateToOld(Node* const node,
const Edge edge) {
if (COMPRESS_POINTERS_BOOL && IrOpcode::IsCompressOpcode(node->opcode())) {
@@ -293,13 +272,6 @@ bool MemoryOptimizer::AllocationTypeNeedsUpdateToOld(Node* const node,
void MemoryOptimizer::VisitAllocateRaw(Node* node,
AllocationState const* state) {
DCHECK_EQ(IrOpcode::kAllocateRaw, node->opcode());
- Node* value;
- Node* size = node->InputAt(0);
- Node* effect = node->InputAt(1);
- Node* control = node->InputAt(2);
-
- gasm()->Reset(effect, control);
-
const AllocateParameters& allocation = AllocateParametersOf(node->op());
AllocationType allocation_type = allocation.allocation_type();
@@ -310,7 +282,6 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node,
if (allocation_type == AllocationType::kOld) {
for (Edge const edge : node->use_edges()) {
Node* const user = edge.from();
-
if (user->opcode() == IrOpcode::kStoreField && edge.index() == 0) {
Node* child = user->InputAt(1);
// In Pointer Compression we might have a Compress node between an
@@ -339,299 +310,62 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node,
}
}
- Node* allocate_builtin;
- if (allocation_type == AllocationType::kYoung) {
- if (allocation.allow_large_objects() == AllowLargeObjects::kTrue) {
- allocate_builtin = __ AllocateInYoungGenerationStubConstant();
- } else {
- allocate_builtin = __ AllocateRegularInYoungGenerationStubConstant();
- }
- } else {
- if (allocation.allow_large_objects() == AllowLargeObjects::kTrue) {
- allocate_builtin = __ AllocateInOldGenerationStubConstant();
- } else {
- allocate_builtin = __ AllocateRegularInOldGenerationStubConstant();
- }
- }
-
- // Determine the top/limit addresses.
- Node* top_address = __ ExternalConstant(
- allocation_type == AllocationType::kYoung
- ? ExternalReference::new_space_allocation_top_address(isolate())
- : ExternalReference::old_space_allocation_top_address(isolate()));
- Node* limit_address = __ ExternalConstant(
- allocation_type == AllocationType::kYoung
- ? ExternalReference::new_space_allocation_limit_address(isolate())
- : ExternalReference::old_space_allocation_limit_address(isolate()));
-
- // Check if we can fold this allocation into a previous allocation represented
- // by the incoming {state}.
- IntPtrMatcher m(size);
- if (m.IsInRange(0, kMaxRegularHeapObjectSize) && FLAG_inline_new) {
- intptr_t const object_size = m.Value();
- if (allocation_folding_ == AllocationFolding::kDoAllocationFolding &&
- state->size() <= kMaxRegularHeapObjectSize - object_size &&
- state->group()->allocation() == allocation_type) {
- // We can fold this Allocate {node} into the allocation {group}
- // represented by the given {state}. Compute the upper bound for
- // the new {state}.
- intptr_t const state_size = state->size() + object_size;
-
- // Update the reservation check to the actual maximum upper bound.
- AllocationGroup* const group = state->group();
- if (machine()->Is64()) {
- if (OpParameter<int64_t>(group->size()->op()) < state_size) {
- NodeProperties::ChangeOp(group->size(),
- common()->Int64Constant(state_size));
- }
- } else {
- if (OpParameter<int32_t>(group->size()->op()) < state_size) {
- NodeProperties::ChangeOp(
- group->size(),
- common()->Int32Constant(static_cast<int32_t>(state_size)));
- }
- }
-
- // Update the allocation top with the new object allocation.
- // TODO(bmeurer): Defer writing back top as much as possible.
- Node* top = __ IntAdd(state->top(), size);
- __ Store(StoreRepresentation(MachineType::PointerRepresentation(),
- kNoWriteBarrier),
- top_address, __ IntPtrConstant(0), top);
-
- // Compute the effective inner allocated address.
- value = __ BitcastWordToTagged(
- __ IntAdd(state->top(), __ IntPtrConstant(kHeapObjectTag)));
-
- // Extend the allocation {group}.
- group->Add(value);
- state = AllocationState::Open(group, state_size, top, zone());
- } else {
- auto call_runtime = __ MakeDeferredLabel();
- auto done = __ MakeLabel(MachineType::PointerRepresentation());
-
- // Setup a mutable reservation size node; will be patched as we fold
- // additional allocations into this new group.
- Node* size = __ UniqueIntPtrConstant(object_size);
-
- // Load allocation top and limit.
- Node* top =
- __ Load(MachineType::Pointer(), top_address, __ IntPtrConstant(0));
- Node* limit =
- __ Load(MachineType::Pointer(), limit_address, __ IntPtrConstant(0));
-
- // Check if we need to collect garbage before we can start bump pointer
- // allocation (always done for folded allocations).
- Node* check = __ UintLessThan(__ IntAdd(top, size), limit);
-
- __ GotoIfNot(check, &call_runtime);
- __ Goto(&done, top);
-
- __ Bind(&call_runtime);
- {
- if (!allocate_operator_.is_set()) {
- auto descriptor = AllocateDescriptor{};
- auto call_descriptor = Linkage::GetStubCallDescriptor(
- graph()->zone(), descriptor, descriptor.GetStackParameterCount(),
- CallDescriptor::kCanUseRoots, Operator::kNoThrow);
- allocate_operator_.set(common()->Call(call_descriptor));
- }
- Node* vfalse = __ BitcastTaggedToWord(
- __ Call(allocate_operator_.get(), allocate_builtin, size));
- vfalse = __ IntSub(vfalse, __ IntPtrConstant(kHeapObjectTag));
- __ Goto(&done, vfalse);
- }
-
- __ Bind(&done);
-
- // Compute the new top and write it back.
- top = __ IntAdd(done.PhiAt(0), __ IntPtrConstant(object_size));
- __ Store(StoreRepresentation(MachineType::PointerRepresentation(),
- kNoWriteBarrier),
- top_address, __ IntPtrConstant(0), top);
-
- // Compute the initial object address.
- value = __ BitcastWordToTagged(
- __ IntAdd(done.PhiAt(0), __ IntPtrConstant(kHeapObjectTag)));
-
- // Start a new allocation group.
- AllocationGroup* group =
- new (zone()) AllocationGroup(value, allocation_type, size, zone());
- state = AllocationState::Open(group, object_size, top, zone());
- }
- } else {
- auto call_runtime = __ MakeDeferredLabel();
- auto done = __ MakeLabel(MachineRepresentation::kTaggedPointer);
-
- // Load allocation top and limit.
- Node* top =
- __ Load(MachineType::Pointer(), top_address, __ IntPtrConstant(0));
- Node* limit =
- __ Load(MachineType::Pointer(), limit_address, __ IntPtrConstant(0));
-
- // Compute the new top.
- Node* new_top = __ IntAdd(top, size);
-
- // Check if we can do bump pointer allocation here.
- Node* check = __ UintLessThan(new_top, limit);
- __ GotoIfNot(check, &call_runtime);
- if (allocation.allow_large_objects() == AllowLargeObjects::kTrue) {
- __ GotoIfNot(
- __ UintLessThan(size, __ IntPtrConstant(kMaxRegularHeapObjectSize)),
- &call_runtime);
- }
- __ Store(StoreRepresentation(MachineType::PointerRepresentation(),
- kNoWriteBarrier),
- top_address, __ IntPtrConstant(0), new_top);
- __ Goto(&done, __ BitcastWordToTagged(
- __ IntAdd(top, __ IntPtrConstant(kHeapObjectTag))));
-
- __ Bind(&call_runtime);
- if (!allocate_operator_.is_set()) {
- auto descriptor = AllocateDescriptor{};
- auto call_descriptor = Linkage::GetStubCallDescriptor(
- graph()->zone(), descriptor, descriptor.GetStackParameterCount(),
- CallDescriptor::kCanUseRoots, Operator::kNoThrow);
- allocate_operator_.set(common()->Call(call_descriptor));
- }
- __ Goto(&done, __ Call(allocate_operator_.get(), allocate_builtin, size));
-
- __ Bind(&done);
- value = done.PhiAt(0);
-
- // Create an unfoldable allocation group.
- AllocationGroup* group =
- new (zone()) AllocationGroup(value, allocation_type, zone());
- state = AllocationState::Closed(group, zone());
- }
-
- effect = __ ExtractCurrentEffect();
- control = __ ExtractCurrentControl();
-
- // Replace all effect uses of {node} with the {effect}, enqueue the
- // effect uses for further processing, and replace all value uses of
- // {node} with the {value}.
- for (Edge edge : node->use_edges()) {
- if (NodeProperties::IsEffectEdge(edge)) {
- EnqueueUse(edge.from(), edge.index(), state);
- edge.UpdateTo(effect);
- } else if (NodeProperties::IsValueEdge(edge)) {
- edge.UpdateTo(value);
- } else {
- DCHECK(NodeProperties::IsControlEdge(edge));
- edge.UpdateTo(control);
- }
- }
-
- // Kill the {node} to make sure we don't leave dangling dead uses.
- node->Kill();
+ memory_lowering()->ReduceAllocateRaw(
+ node, allocation_type, allocation.allow_large_objects(), &state);
+ EnqueueUses(state->effect(), state);
}
void MemoryOptimizer::VisitLoadFromObject(Node* node,
AllocationState const* state) {
DCHECK_EQ(IrOpcode::kLoadFromObject, node->opcode());
- ObjectAccess const& access = ObjectAccessOf(node->op());
- NodeProperties::ChangeOp(node, machine()->Load(access.machine_type));
+ memory_lowering()->ReduceLoadFromObject(node);
EnqueueUses(node, state);
}
void MemoryOptimizer::VisitStoreToObject(Node* node,
AllocationState const* state) {
DCHECK_EQ(IrOpcode::kStoreToObject, node->opcode());
- ObjectAccess const& access = ObjectAccessOf(node->op());
- Node* object = node->InputAt(0);
- Node* value = node->InputAt(2);
- WriteBarrierKind write_barrier_kind = ComputeWriteBarrierKind(
- node, object, value, state, access.write_barrier_kind);
- NodeProperties::ChangeOp(
- node, machine()->Store(StoreRepresentation(
- access.machine_type.representation(), write_barrier_kind)));
- EnqueueUses(node, state);
-}
-
-#undef __
-
-void MemoryOptimizer::VisitCall(Node* node, AllocationState const* state) {
- DCHECK_EQ(IrOpcode::kCall, node->opcode());
- // If the call can allocate, we start with a fresh state.
- if (!(CallDescriptorOf(node->op())->flags() & CallDescriptor::kNoAllocate)) {
- state = empty_state();
- }
+ memory_lowering()->ReduceStoreToObject(node, state);
EnqueueUses(node, state);
}
void MemoryOptimizer::VisitLoadElement(Node* node,
AllocationState const* state) {
DCHECK_EQ(IrOpcode::kLoadElement, node->opcode());
- ElementAccess const& access = ElementAccessOf(node->op());
- Node* index = node->InputAt(1);
- node->ReplaceInput(1, ComputeIndex(access, index));
- MachineType type = access.machine_type;
- if (NeedsPoisoning(access.load_sensitivity)) {
- NodeProperties::ChangeOp(node, machine()->PoisonedLoad(type));
- } else {
- NodeProperties::ChangeOp(node, machine()->Load(type));
- }
+ memory_lowering()->ReduceLoadElement(node);
EnqueueUses(node, state);
}
void MemoryOptimizer::VisitLoadField(Node* node, AllocationState const* state) {
DCHECK_EQ(IrOpcode::kLoadField, node->opcode());
- FieldAccess const& access = FieldAccessOf(node->op());
- Node* offset = jsgraph()->IntPtrConstant(access.offset - access.tag());
- node->InsertInput(graph()->zone(), 1, offset);
- MachineType type = access.machine_type;
- if (NeedsPoisoning(access.load_sensitivity)) {
- NodeProperties::ChangeOp(node, machine()->PoisonedLoad(type));
- } else {
- NodeProperties::ChangeOp(node, machine()->Load(type));
- }
+ memory_lowering()->ReduceLoadField(node);
EnqueueUses(node, state);
}
void MemoryOptimizer::VisitStoreElement(Node* node,
AllocationState const* state) {
DCHECK_EQ(IrOpcode::kStoreElement, node->opcode());
- ElementAccess const& access = ElementAccessOf(node->op());
- Node* object = node->InputAt(0);
- Node* index = node->InputAt(1);
- Node* value = node->InputAt(2);
- WriteBarrierKind write_barrier_kind = ComputeWriteBarrierKind(
- node, object, value, state, access.write_barrier_kind);
- node->ReplaceInput(1, ComputeIndex(access, index));
- NodeProperties::ChangeOp(
- node, machine()->Store(StoreRepresentation(
- access.machine_type.representation(), write_barrier_kind)));
+ memory_lowering()->ReduceStoreElement(node, state);
EnqueueUses(node, state);
}
void MemoryOptimizer::VisitStoreField(Node* node,
AllocationState const* state) {
DCHECK_EQ(IrOpcode::kStoreField, node->opcode());
- FieldAccess const& access = FieldAccessOf(node->op());
- Node* object = node->InputAt(0);
- Node* value = node->InputAt(1);
- WriteBarrierKind write_barrier_kind = ComputeWriteBarrierKind(
- node, object, value, state, access.write_barrier_kind);
- Node* offset = jsgraph()->IntPtrConstant(access.offset - access.tag());
- node->InsertInput(graph()->zone(), 1, offset);
- NodeProperties::ChangeOp(
- node, machine()->Store(StoreRepresentation(
- access.machine_type.representation(), write_barrier_kind)));
+ memory_lowering()->ReduceStoreField(node, state);
EnqueueUses(node, state);
}
-
void MemoryOptimizer::VisitStore(Node* node, AllocationState const* state) {
DCHECK_EQ(IrOpcode::kStore, node->opcode());
- StoreRepresentation representation = StoreRepresentationOf(node->op());
- Node* object = node->InputAt(0);
- Node* value = node->InputAt(2);
- WriteBarrierKind write_barrier_kind = ComputeWriteBarrierKind(
- node, object, value, state, representation.write_barrier_kind());
- if (write_barrier_kind != representation.write_barrier_kind()) {
- NodeProperties::ChangeOp(
- node, machine()->Store(StoreRepresentation(
- representation.representation(), write_barrier_kind)));
+ memory_lowering()->ReduceStore(node, state);
+ EnqueueUses(node, state);
+}
+
+void MemoryOptimizer::VisitCall(Node* node, AllocationState const* state) {
+ DCHECK_EQ(IrOpcode::kCall, node->opcode());
+ // If the call can allocate, we start with a fresh state.
+ if (!(CallDescriptorOf(node->op())->flags() & CallDescriptor::kNoAllocate)) {
+ state = empty_state();
}
EnqueueUses(node, state);
}
@@ -641,109 +375,12 @@ void MemoryOptimizer::VisitOtherEffect(Node* node,
EnqueueUses(node, state);
}
-Node* MemoryOptimizer::ComputeIndex(ElementAccess const& access, Node* index) {
- int const element_size_shift =
- ElementSizeLog2Of(access.machine_type.representation());
- if (element_size_shift) {
- index = graph()->NewNode(machine()->WordShl(), index,
- jsgraph()->IntPtrConstant(element_size_shift));
- }
- int const fixed_offset = access.header_size - access.tag();
- if (fixed_offset) {
- index = graph()->NewNode(machine()->IntAdd(), index,
- jsgraph()->IntPtrConstant(fixed_offset));
- }
- return index;
-}
-
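// [Editorial sketch, not part of the patch] ComputeIndex, deleted here and
// now living in MemoryLowering, turns an element index into an untagged byte
// offset: shift by the element size, then add the header size minus the
// heap-object tag. A self-contained version of the same arithmetic (the tag
// value of 1 matches V8's kHeapObjectTag but is hard-coded for the sketch):
#include <cstdint>
intptr_t ElementByteOffset(intptr_t index, int element_size_log2,
                           int header_size, int tag = 1) {
  // offset = index * element_size + header_size - tag
  return (index << element_size_log2) + header_size - tag;
}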
-namespace {
-
-bool ValueNeedsWriteBarrier(Node* value, Isolate* isolate) {
- while (true) {
- switch (value->opcode()) {
- case IrOpcode::kBitcastWordToTaggedSigned:
- case IrOpcode::kChangeTaggedSignedToCompressedSigned:
- case IrOpcode::kChangeTaggedToCompressedSigned:
- return false;
- case IrOpcode::kChangeTaggedPointerToCompressedPointer:
- case IrOpcode::kChangeTaggedToCompressed:
- value = NodeProperties::GetValueInput(value, 0);
- continue;
- case IrOpcode::kHeapConstant: {
- RootIndex root_index;
- if (isolate->roots_table().IsRootHandle(HeapConstantOf(value->op()),
- &root_index) &&
- RootsTable::IsImmortalImmovable(root_index)) {
- return false;
- }
- break;
- }
- default:
- break;
- }
- return true;
- }
-}
-
-void WriteBarrierAssertFailed(Node* node, Node* object, const char* name,
- Zone* temp_zone) {
- std::stringstream str;
- str << "MemoryOptimizer could not remove write barrier for node #"
- << node->id() << "\n";
- str << " Run mksnapshot with --csa-trap-on-node=" << name << ","
- << node->id() << " to break in CSA code.\n";
- Node* object_position = object;
- if (object_position->opcode() == IrOpcode::kPhi) {
- object_position = EffectPhiForPhi(object_position);
- }
- Node* allocating_node = nullptr;
- if (object_position && object_position->op()->EffectOutputCount() > 0) {
- allocating_node = SearchAllocatingNode(node, object_position, temp_zone);
- }
- if (allocating_node) {
- str << "\n There is a potentially allocating node in between:\n";
- str << " " << *allocating_node << "\n";
- str << " Run mksnapshot with --csa-trap-on-node=" << name << ","
- << allocating_node->id() << " to break there.\n";
- if (allocating_node->opcode() == IrOpcode::kCall) {
- str << " If this is a never-allocating runtime call, you can add an "
- "exception to Runtime::MayAllocate.\n";
- }
- } else {
- str << "\n It seems the store happened to something different than a "
- "direct "
- "allocation:\n";
- str << " " << *object << "\n";
- str << " Run mksnapshot with --csa-trap-on-node=" << name << ","
- << object->id() << " to break there.\n";
- }
- FATAL("%s", str.str().c_str());
-}
-
-} // namespace
-
-WriteBarrierKind MemoryOptimizer::ComputeWriteBarrierKind(
- Node* node, Node* object, Node* value, AllocationState const* state,
- WriteBarrierKind write_barrier_kind) {
- if (state->IsYoungGenerationAllocation() &&
- state->group()->Contains(object)) {
- write_barrier_kind = kNoWriteBarrier;
- }
- if (!ValueNeedsWriteBarrier(value, isolate())) {
- write_barrier_kind = kNoWriteBarrier;
- }
- if (write_barrier_kind == WriteBarrierKind::kAssertNoWriteBarrier) {
- WriteBarrierAssertFailed(node, object, function_debug_name_, zone());
- }
- return write_barrier_kind;
-}
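// [Editorial sketch, not part of the patch] The elimination rules deleted
// above (now in MemoryLowering) boil down to: a store needs no write barrier
// if the target object belongs to a young-generation allocation group still
// tracked on the current effect path, or if the stored value can never be a
// movable heap pointer (a Smi or an immortal immovable root). Compactly:
enum class BarrierSketch { kNone, kFull };
BarrierSketch ComputeBarrierSketch(bool object_in_tracked_young_group,
                                   bool value_needs_barrier) {
  if (object_in_tracked_young_group || !value_needs_barrier)
    return BarrierSketch::kNone;
  return BarrierSketch::kFull;
}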
-
MemoryOptimizer::AllocationState const* MemoryOptimizer::MergeStates(
AllocationStates const& states) {
// Check if all states are the same; or at least if all allocation
// states belong to the same allocation group.
AllocationState const* state = states.front();
- AllocationGroup* group = state->group();
+ MemoryLowering::AllocationGroup* group = state->group();
for (size_t i = 1; i < states.size(); ++i) {
if (states[i] != state) state = nullptr;
if (states[i]->group() != group) group = nullptr;
@@ -755,7 +392,7 @@ MemoryOptimizer::AllocationState const* MemoryOptimizer::MergeStates(
// TODO(bmeurer): We could potentially just create a Phi here to merge
// the various tops; but we need to pay special attention not to create
// an unschedulable graph.
- state = AllocationState::Closed(group, zone());
+ state = AllocationState::Closed(group, nullptr, zone());
} else {
// The states are from different allocation groups.
state = empty_state();
@@ -830,31 +467,6 @@ void MemoryOptimizer::EnqueueUse(Node* node, int index,
Graph* MemoryOptimizer::graph() const { return jsgraph()->graph(); }
-Isolate* MemoryOptimizer::isolate() const { return jsgraph()->isolate(); }
-
-CommonOperatorBuilder* MemoryOptimizer::common() const {
- return jsgraph()->common();
-}
-
-MachineOperatorBuilder* MemoryOptimizer::machine() const {
- return jsgraph()->machine();
-}
-
-bool MemoryOptimizer::NeedsPoisoning(LoadSensitivity load_sensitivity) const {
- // Safe loads do not need poisoning.
- if (load_sensitivity == LoadSensitivity::kSafe) return false;
-
- switch (poisoning_level_) {
- case PoisoningMitigationLevel::kDontPoison:
- return false;
- case PoisoningMitigationLevel::kPoisonAll:
- return true;
- case PoisoningMitigationLevel::kPoisonCriticalOnly:
- return load_sensitivity == LoadSensitivity::kCritical;
- }
- UNREACHABLE();
-}
-
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/memory-optimizer.h b/deps/v8/src/compiler/memory-optimizer.h
index a663bf07ed..0e0fc5684c 100644
--- a/deps/v8/src/compiler/memory-optimizer.h
+++ b/deps/v8/src/compiler/memory-optimizer.h
@@ -5,7 +5,7 @@
#ifndef V8_COMPILER_MEMORY_OPTIMIZER_H_
#define V8_COMPILER_MEMORY_OPTIMIZER_H_
-#include "src/compiler/graph-assembler.h"
+#include "src/compiler/memory-lowering.h"
#include "src/zone/zone-containers.h"
namespace v8 {
@@ -15,95 +15,29 @@ class TickCounter;
namespace compiler {
-// Forward declarations.
-class CommonOperatorBuilder;
-struct ElementAccess;
-class Graph;
class JSGraph;
-class MachineOperatorBuilder;
-class Node;
-class Operator;
+class Graph;
// NodeIds are identifying numbers for nodes that can be used to index auxiliary
// out-of-line data associated with each node.
using NodeId = uint32_t;
-// Lowers all simplified memory access and allocation related nodes (i.e.
-// Allocate, LoadField, StoreField and friends) to machine operators.
// Performs allocation folding and store write barrier elimination
-// implicitly.
+// implicitly, while lowering all simplified memory access and
+// allocation-related nodes (i.e. Allocate, LoadField, StoreField and friends)
+// to machine operators.
class MemoryOptimizer final {
public:
- enum class AllocationFolding { kDoAllocationFolding, kDontAllocationFolding };
-
MemoryOptimizer(JSGraph* jsgraph, Zone* zone,
PoisoningMitigationLevel poisoning_level,
- AllocationFolding allocation_folding,
+ MemoryLowering::AllocationFolding allocation_folding,
const char* function_debug_name, TickCounter* tick_counter);
~MemoryOptimizer() = default;
void Optimize();
private:
- // An allocation group represents a set of allocations that have been folded
- // together.
- class AllocationGroup final : public ZoneObject {
- public:
- AllocationGroup(Node* node, AllocationType allocation, Zone* zone);
- AllocationGroup(Node* node, AllocationType allocation, Node* size,
- Zone* zone);
- ~AllocationGroup() = default;
-
- void Add(Node* object);
- bool Contains(Node* object) const;
- bool IsYoungGenerationAllocation() const {
- return allocation() == AllocationType::kYoung;
- }
-
- AllocationType allocation() const { return allocation_; }
- Node* size() const { return size_; }
-
- private:
- ZoneSet<NodeId> node_ids_;
- AllocationType const allocation_;
- Node* const size_;
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(AllocationGroup);
- };
-
- // An allocation state is propagated on the effect paths through the graph.
- class AllocationState final : public ZoneObject {
- public:
- static AllocationState const* Empty(Zone* zone) {
- return new (zone) AllocationState();
- }
- static AllocationState const* Closed(AllocationGroup* group, Zone* zone) {
- return new (zone) AllocationState(group);
- }
- static AllocationState const* Open(AllocationGroup* group, intptr_t size,
- Node* top, Zone* zone) {
- return new (zone) AllocationState(group, size, top);
- }
-
- bool IsYoungGenerationAllocation() const;
-
- AllocationGroup* group() const { return group_; }
- Node* top() const { return top_; }
- intptr_t size() const { return size_; }
-
- private:
- AllocationState();
- explicit AllocationState(AllocationGroup* group);
- AllocationState(AllocationGroup* group, intptr_t size, Node* top);
-
- AllocationGroup* const group_;
- // The upper bound of the combined allocated object size on the current path
- // (max int if allocation folding is impossible on this path).
- intptr_t const size_;
- Node* const top_;
-
- DISALLOW_COPY_AND_ASSIGN(AllocationState);
- };
+ using AllocationState = MemoryLowering::AllocationState;
// An array of allocation states used to collect states on merges.
using AllocationStates = ZoneVector<AllocationState const*>;
@@ -127,44 +61,29 @@ class MemoryOptimizer final {
void VisitStore(Node*, AllocationState const*);
void VisitOtherEffect(Node*, AllocationState const*);
- Node* ComputeIndex(ElementAccess const&, Node*);
- WriteBarrierKind ComputeWriteBarrierKind(Node* node, Node* object,
- Node* value,
- AllocationState const* state,
- WriteBarrierKind);
-
AllocationState const* MergeStates(AllocationStates const& states);
void EnqueueMerge(Node*, int, AllocationState const*);
void EnqueueUses(Node*, AllocationState const*);
void EnqueueUse(Node*, int, AllocationState const*);
- bool NeedsPoisoning(LoadSensitivity load_sensitivity) const;
-
// Returns true if the AllocationType of the current AllocateRaw node that we
// are visiting needs to be updated to kOld, due to propagation of tenuring
// from outer to inner allocations.
bool AllocationTypeNeedsUpdateToOld(Node* const user, const Edge edge);
AllocationState const* empty_state() const { return empty_state_; }
+ MemoryLowering* memory_lowering() { return &memory_lowering_; }
Graph* graph() const;
- Isolate* isolate() const;
JSGraph* jsgraph() const { return jsgraph_; }
- CommonOperatorBuilder* common() const;
- MachineOperatorBuilder* machine() const;
Zone* zone() const { return zone_; }
- GraphAssembler* gasm() { return &graph_assembler_; }
- SetOncePointer<const Operator> allocate_operator_;
- JSGraph* const jsgraph_;
+ MemoryLowering memory_lowering_;
+ JSGraph* jsgraph_;
AllocationState const* const empty_state_;
ZoneMap<NodeId, AllocationStates> pending_;
ZoneQueue<Token> tokens_;
Zone* const zone_;
- GraphAssembler graph_assembler_;
- PoisoningMitigationLevel poisoning_level_;
- AllocationFolding allocation_folding_;
- const char* function_debug_name_;
TickCounter* const tick_counter_;
DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryOptimizer);
diff --git a/deps/v8/src/compiler/node-matchers.h b/deps/v8/src/compiler/node-matchers.h
index 20698f4cd6..82bc179519 100644
--- a/deps/v8/src/compiler/node-matchers.h
+++ b/deps/v8/src/compiler/node-matchers.h
@@ -187,10 +187,11 @@ using Float64Matcher = FloatMatcher<double, IrOpcode::kFloat64Constant>;
using NumberMatcher = FloatMatcher<double, IrOpcode::kNumberConstant>;
// A pattern matcher for heap object constants.
-struct HeapObjectMatcher final
- : public ValueMatcher<Handle<HeapObject>, IrOpcode::kHeapConstant> {
- explicit HeapObjectMatcher(Node* node)
- : ValueMatcher<Handle<HeapObject>, IrOpcode::kHeapConstant>(node) {}
+template <IrOpcode::Value kHeapConstantOpcode>
+struct HeapObjectMatcherImpl final
+ : public ValueMatcher<Handle<HeapObject>, kHeapConstantOpcode> {
+ explicit HeapObjectMatcherImpl(Node* node)
+ : ValueMatcher<Handle<HeapObject>, kHeapConstantOpcode>(node) {}
bool Is(Handle<HeapObject> const& value) const {
return this->HasValue() && this->Value().address() == value.address();
@@ -201,6 +202,9 @@ struct HeapObjectMatcher final
}
};
+using HeapObjectMatcher = HeapObjectMatcherImpl<IrOpcode::kHeapConstant>;
+using CompressedHeapObjectMatcher =
+ HeapObjectMatcherImpl<IrOpcode::kCompressedHeapConstant>;
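// [Editorial sketch, not part of the patch] The matcher is now parameterized
// over the constant opcode so one implementation serves both plain and
// compressed heap constants. The pattern in isolation:
enum class OpSketch { kHeapConstant, kCompressedHeapConstant };
template <OpSketch kOpcode>
struct ConstantMatcherSketch {
  static constexpr OpSketch opcode = kOpcode;
};
using PlainSketch = ConstantMatcherSketch<OpSketch::kHeapConstant>;
using CompressedSketch =
    ConstantMatcherSketch<OpSketch::kCompressedHeapConstant>;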
// A pattern matcher for external reference constants.
struct ExternalReferenceMatcher final
@@ -295,6 +299,8 @@ using Float64BinopMatcher = BinopMatcher<Float64Matcher, Float64Matcher>;
using NumberBinopMatcher = BinopMatcher<NumberMatcher, NumberMatcher>;
using HeapObjectBinopMatcher =
BinopMatcher<HeapObjectMatcher, HeapObjectMatcher>;
+using CompressedHeapObjectBinopMatcher =
+ BinopMatcher<CompressedHeapObjectMatcher, CompressedHeapObjectMatcher>;
template <class BinopMatcher, IrOpcode::Value kMulOpcode,
IrOpcode::Value kShiftOpcode>
diff --git a/deps/v8/src/compiler/node.h b/deps/v8/src/compiler/node.h
index 76ea4bb1a9..b4ff5f7185 100644
--- a/deps/v8/src/compiler/node.h
+++ b/deps/v8/src/compiler/node.h
@@ -149,7 +149,7 @@ class V8_EXPORT_PRIVATE Node final {
Uses uses() { return Uses(this); }
- // Returns true if {owner} is the user of {this} node.
+ // Returns true if {owner} is the only user of {this} node.
bool OwnedBy(Node* owner) const {
return first_use_ && first_use_->from() == owner && !first_use_->next;
}
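// [Editorial sketch, not part of the patch] The comment fix above is worth
// spelling out: OwnedBy is a sole-user test, not a membership test. With an
// intrusive singly linked use list, "only user" means the list has exactly
// one entry and that entry points back at {owner}:
struct UseSketch {
  const void* from;
  UseSketch* next;
};
bool OwnedBySketch(const UseSketch* first_use, const void* owner) {
  return first_use != nullptr && first_use->from == owner &&
         first_use->next == nullptr;
}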
diff --git a/deps/v8/src/compiler/opcodes.h b/deps/v8/src/compiler/opcodes.h
index fe45d9276a..76c6bfec2f 100644
--- a/deps/v8/src/compiler/opcodes.h
+++ b/deps/v8/src/compiler/opcodes.h
@@ -156,7 +156,8 @@
V(JSCreateObject) \
V(JSCreatePromise) \
V(JSCreateStringIterator) \
- V(JSCreateTypedArray)
+ V(JSCreateTypedArray) \
+ V(JSGetTemplateObject)
#define JS_OBJECT_OP_LIST(V) \
JS_CREATE_OP_LIST(V) \
@@ -425,11 +426,14 @@
V(LoadFieldByIndex) \
V(LoadField) \
V(LoadElement) \
+ V(LoadMessage) \
V(LoadTypedElement) \
V(LoadFromObject) \
V(LoadDataViewElement) \
+ V(LoadStackArgument) \
V(StoreField) \
V(StoreElement) \
+ V(StoreMessage) \
V(StoreTypedElement) \
V(StoreToObject) \
V(StoreDataViewElement) \
@@ -669,9 +673,10 @@
V(Word64Ctz) \
V(Word64ReverseBits) \
V(Word64ReverseBytes) \
+ V(Simd128ReverseBytes) \
V(Int64AbsWithOverflow) \
V(BitcastTaggedToWord) \
- V(BitcastTaggedSignedToWord) \
+ V(BitcastTaggedToWordForTagAndSmiBits) \
V(BitcastWordToTagged) \
V(BitcastWordToTaggedSigned) \
V(BitcastWord32ToCompressedSigned) \
@@ -749,6 +754,7 @@
V(F64x2ReplaceLane) \
V(F64x2Abs) \
V(F64x2Neg) \
+ V(F64x2Sqrt) \
V(F64x2Add) \
V(F64x2Sub) \
V(F64x2Mul) \
@@ -759,6 +765,8 @@
V(F64x2Ne) \
V(F64x2Lt) \
V(F64x2Le) \
+ V(F64x2Qfma) \
+ V(F64x2Qfms) \
V(F32x4Splat) \
V(F32x4ExtractLane) \
V(F32x4ReplaceLane) \
@@ -766,6 +774,7 @@
V(F32x4UConvertI32x4) \
V(F32x4Abs) \
V(F32x4Neg) \
+ V(F32x4Sqrt) \
V(F32x4RecipApprox) \
V(F32x4RecipSqrtApprox) \
V(F32x4Add) \
@@ -781,6 +790,8 @@
V(F32x4Le) \
V(F32x4Gt) \
V(F32x4Ge) \
+ V(F32x4Qfma) \
+ V(F32x4Qfms) \
V(I64x2Splat) \
V(I64x2ExtractLane) \
V(I64x2ReplaceLane) \
@@ -905,6 +916,7 @@
V(S128Or) \
V(S128Xor) \
V(S128Select) \
+ V(S8x16Swizzle) \
V(S8x16Shuffle) \
V(S1x2AnyTrue) \
V(S1x2AllTrue) \
diff --git a/deps/v8/src/compiler/operator-properties.cc b/deps/v8/src/compiler/operator-properties.cc
index 1fcc12291d..731a6c8496 100644
--- a/deps/v8/src/compiler/operator-properties.cc
+++ b/deps/v8/src/compiler/operator-properties.cc
@@ -41,6 +41,7 @@ bool OperatorProperties::NeedsExactContext(const Operator* op) {
case IrOpcode::kJSCreateEmptyLiteralObject:
case IrOpcode::kJSCreateArrayFromIterable:
case IrOpcode::kJSCreateLiteralRegExp:
+ case IrOpcode::kJSGetTemplateObject:
case IrOpcode::kJSForInEnumerate:
case IrOpcode::kJSForInNext:
case IrOpcode::kJSForInPrepare:
diff --git a/deps/v8/src/compiler/pipeline.cc b/deps/v8/src/compiler/pipeline.cc
index 8b2f424789..b9648d9195 100644
--- a/deps/v8/src/compiler/pipeline.cc
+++ b/deps/v8/src/compiler/pipeline.cc
@@ -9,7 +9,6 @@
#include <memory>
#include <sstream>
-#include "src/base/adapters.h"
#include "src/base/optional.h"
#include "src/base/platform/elapsed-timer.h"
#include "src/codegen/assembler-inl.h"
@@ -97,6 +96,35 @@ namespace v8 {
namespace internal {
namespace compiler {
+static constexpr char kCodegenZoneName[] = "codegen-zone";
+static constexpr char kGraphZoneName[] = "graph-zone";
+static constexpr char kInstructionZoneName[] = "instruction-zone";
+static constexpr char kMachineGraphVerifierZoneName[] =
+ "machine-graph-verifier-zone";
+static constexpr char kPipelineCompilationJobZoneName[] =
+ "pipeline-compilation-job-zone";
+static constexpr char kRegisterAllocationZoneName[] =
+ "register-allocation-zone";
+static constexpr char kRegisterAllocatorVerifierZoneName[] =
+ "register-allocator-verifier-zone";
+namespace {
+
+Maybe<OuterContext> GetModuleContext(Handle<JSFunction> closure) {
+ Context current = closure->context();
+ size_t distance = 0;
+ while (!current.IsNativeContext()) {
+ if (current.IsModuleContext()) {
+ return Just(
+ OuterContext(handle(current, current.GetIsolate()), distance));
+ }
+ current = current.previous();
+ distance++;
+ }
+ return Nothing<OuterContext>();
+}
+
+} // anonymous namespace
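// [Editorial sketch, not part of the patch] GetModuleContext, hoisted here
// from further down the file, walks the context chain outward until it hits
// the native context and reports the first module context together with its
// distance. The same walk with std::optional standing in for V8's Maybe:
#include <cstddef>
#include <optional>
struct ContextSketch {
  bool is_native_context;
  bool is_module_context;
  const ContextSketch* previous;
};
std::optional<size_t> ModuleContextDistance(const ContextSketch* current) {
  size_t distance = 0;
  while (!current->is_native_context) {
    if (current->is_module_context) return distance;
    current = current->previous;
    ++distance;
  }
  return std::nullopt;  // no module context below the native context
}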
+
class PipelineData {
public:
// For main entry point.
@@ -113,15 +141,16 @@ class PipelineData {
roots_relative_addressing_enabled_(
!isolate->serializer_enabled() &&
!isolate->IsGeneratingEmbeddedBuiltins()),
- graph_zone_scope_(zone_stats_, ZONE_NAME),
+ graph_zone_scope_(zone_stats_, kGraphZoneName),
graph_zone_(graph_zone_scope_.zone()),
- instruction_zone_scope_(zone_stats_, ZONE_NAME),
+ instruction_zone_scope_(zone_stats_, kInstructionZoneName),
instruction_zone_(instruction_zone_scope_.zone()),
- codegen_zone_scope_(zone_stats_, ZONE_NAME),
+ codegen_zone_scope_(zone_stats_, kCodegenZoneName),
codegen_zone_(codegen_zone_scope_.zone()),
broker_(new JSHeapBroker(isolate_, info_->zone(),
info_->trace_heap_broker_enabled())),
- register_allocation_zone_scope_(zone_stats_, ZONE_NAME),
+ register_allocation_zone_scope_(zone_stats_,
+ kRegisterAllocationZoneName),
register_allocation_zone_(register_allocation_zone_scope_.zone()),
assembler_options_(AssemblerOptions::Default(isolate)) {
PhaseScope scope(pipeline_statistics, "V8.TFInitPipelineData");
@@ -158,7 +187,7 @@ class PipelineData {
may_have_unverifiable_graph_(false),
zone_stats_(zone_stats),
pipeline_statistics_(pipeline_statistics),
- graph_zone_scope_(zone_stats_, ZONE_NAME),
+ graph_zone_scope_(zone_stats_, kGraphZoneName),
graph_zone_(graph_zone_scope_.zone()),
graph_(mcgraph->graph()),
source_positions_(source_positions),
@@ -166,11 +195,12 @@ class PipelineData {
machine_(mcgraph->machine()),
common_(mcgraph->common()),
mcgraph_(mcgraph),
- instruction_zone_scope_(zone_stats_, ZONE_NAME),
+ instruction_zone_scope_(zone_stats_, kInstructionZoneName),
instruction_zone_(instruction_zone_scope_.zone()),
- codegen_zone_scope_(zone_stats_, ZONE_NAME),
+ codegen_zone_scope_(zone_stats_, kCodegenZoneName),
codegen_zone_(codegen_zone_scope_.zone()),
- register_allocation_zone_scope_(zone_stats_, ZONE_NAME),
+ register_allocation_zone_scope_(zone_stats_,
+ kRegisterAllocationZoneName),
register_allocation_zone_(register_allocation_zone_scope_.zone()),
assembler_options_(assembler_options) {}
@@ -185,17 +215,18 @@ class PipelineData {
info_(info),
debug_name_(info_->GetDebugName()),
zone_stats_(zone_stats),
- graph_zone_scope_(zone_stats_, ZONE_NAME),
+ graph_zone_scope_(zone_stats_, kGraphZoneName),
graph_zone_(graph_zone_scope_.zone()),
graph_(graph),
source_positions_(source_positions),
node_origins_(node_origins),
schedule_(schedule),
- instruction_zone_scope_(zone_stats_, ZONE_NAME),
+ instruction_zone_scope_(zone_stats_, kInstructionZoneName),
instruction_zone_(instruction_zone_scope_.zone()),
- codegen_zone_scope_(zone_stats_, ZONE_NAME),
+ codegen_zone_scope_(zone_stats_, kCodegenZoneName),
codegen_zone_(codegen_zone_scope_.zone()),
- register_allocation_zone_scope_(zone_stats_, ZONE_NAME),
+ register_allocation_zone_scope_(zone_stats_,
+ kRegisterAllocationZoneName),
register_allocation_zone_(register_allocation_zone_scope_.zone()),
jump_optimization_info_(jump_opt),
assembler_options_(assembler_options) {
@@ -218,13 +249,14 @@ class PipelineData {
info_(info),
debug_name_(info_->GetDebugName()),
zone_stats_(zone_stats),
- graph_zone_scope_(zone_stats_, ZONE_NAME),
- instruction_zone_scope_(zone_stats_, ZONE_NAME),
+ graph_zone_scope_(zone_stats_, kGraphZoneName),
+ instruction_zone_scope_(zone_stats_, kInstructionZoneName),
instruction_zone_(sequence->zone()),
sequence_(sequence),
- codegen_zone_scope_(zone_stats_, ZONE_NAME),
+ codegen_zone_scope_(zone_stats_, kCodegenZoneName),
codegen_zone_(codegen_zone_scope_.zone()),
- register_allocation_zone_scope_(zone_stats_, ZONE_NAME),
+ register_allocation_zone_scope_(zone_stats_,
+ kRegisterAllocationZoneName),
register_allocation_zone_(register_allocation_zone_scope_.zone()),
assembler_options_(AssemblerOptions::Default(isolate)) {}
@@ -323,6 +355,20 @@ class PipelineData {
return assembler_options_;
}
+ void ChooseSpecializationContext() {
+ if (info()->is_function_context_specializing()) {
+ DCHECK(info()->has_context());
+ specialization_context_ =
+ Just(OuterContext(handle(info()->context(), isolate()), 0));
+ } else {
+ specialization_context_ = GetModuleContext(info()->closure());
+ }
+ }
+
+ Maybe<OuterContext> specialization_context() const {
+ return specialization_context_;
+ }
+
size_t* address_of_max_unoptimized_frame_height() {
return &max_unoptimized_frame_height_;
}
@@ -531,6 +577,7 @@ class PipelineData {
JumpOptimizationInfo* jump_optimization_info_ = nullptr;
AssemblerOptions assembler_options_;
+ Maybe<OuterContext> specialization_context_ = Nothing<OuterContext>();
// The maximal combined height of all inlined frames in their unoptimized
// state. Calculated during instruction selection, applied during code
@@ -548,12 +595,19 @@ class PipelineImpl final {
template <typename Phase, typename... Args>
void Run(Args&&... args);
- // Step A. Run the graph creation and initial optimization passes.
+ // Step A.1. Serialize the data needed for the compilation front-end.
+ void Serialize();
+
+ // Step A.2. Run the graph creation and initial optimization passes.
bool CreateGraph();
- // B. Run the concurrent optimization passes.
+ // Step B. Run the concurrent optimization passes.
bool OptimizeGraph(Linkage* linkage);
+ // Alternative step B. Run minimal concurrent optimization passes for
+ // mid-tier.
+ bool OptimizeGraphForMidTier(Linkage* linkage);
+
// Substep B.1. Produce a scheduled graph.
void ComputeScheduledGraph();
@@ -642,8 +696,6 @@ void PrintInlinedFunctionInfo(
// compilation. For inlined functions print source position of their inlining.
void PrintParticipatingSource(OptimizedCompilationInfo* info,
Isolate* isolate) {
- AllowDeferredHandleDereference allow_deference_for_print_code;
-
SourceIdAssigner id_assigner(info->inlined_functions().size());
PrintFunctionSource(info, isolate, -1, info->shared_info());
const auto& inlined = info->inlined_functions();
@@ -662,7 +714,6 @@ void PrintCode(Isolate* isolate, Handle<Code> code,
}
#ifdef ENABLE_DISASSEMBLER
- AllowDeferredHandleDereference allow_deference_for_print_code;
bool print_code =
FLAG_print_code ||
(info->IsOptimizing() && FLAG_print_opt_code &&
@@ -703,7 +754,7 @@ void PrintCode(Isolate* isolate, Handle<Code> code,
Handle<SharedFunctionInfo> shared = info->shared_info();
os << "source_position = " << shared->StartPosition() << "\n";
}
- code->Disassemble(debug_name.get(), os);
+ code->Disassemble(debug_name.get(), os, isolate);
os << "--- End code ---\n";
}
#endif // ENABLE_DISASSEMBLER
@@ -800,8 +851,10 @@ class PipelineRunScope {
public:
PipelineRunScope(PipelineData* data, const char* phase_name)
: phase_scope_(data->pipeline_statistics(), phase_name),
- zone_scope_(data->zone_stats(), ZONE_NAME),
- origin_scope_(data->node_origins(), phase_name) {}
+ zone_scope_(data->zone_stats(), phase_name),
+ origin_scope_(data->node_origins(), phase_name) {
+ DCHECK_NOT_NULL(phase_name);
+ }
Zone* zone() { return zone_scope_.zone(); }
@@ -886,7 +939,7 @@ class PipelineCompilationJob final : public OptimizedCompilationJob {
PipelineCompilationJob(Isolate* isolate,
Handle<SharedFunctionInfo> shared_info,
Handle<JSFunction> function);
- ~PipelineCompilationJob();
+ ~PipelineCompilationJob() final;
protected:
Status PrepareJobImpl(Isolate* isolate) final;
@@ -915,7 +968,8 @@ PipelineCompilationJob::PipelineCompilationJob(
// we pass it to the CompilationJob constructor, but it is not
// dereferenced there.
: OptimizedCompilationJob(&compilation_info_, "TurboFan"),
- zone_(function->GetIsolate()->allocator(), ZONE_NAME),
+ zone_(function->GetIsolate()->allocator(),
+ kPipelineCompilationJobZoneName),
zone_stats_(function->GetIsolate()->allocator()),
compilation_info_(&zone_, function->GetIsolate(), shared_info, function),
pipeline_statistics_(CreatePipelineStatistics(
@@ -976,9 +1030,16 @@ PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl(
compilation_info()->MarkAsAllocationFoldingEnabled();
}
+ // Determine whether to specialize the code for the function's context.
+ // We can't do this in the case of OSR, because we want to cache the
+ // generated code on the native context keyed on SharedFunctionInfo.
+ // TODO(mythria): Check if it is better to key the OSR cache on JSFunction and
+ // allow context specialization for OSR code.
if (compilation_info()->closure()->raw_feedback_cell().map() ==
- ReadOnlyRoots(isolate).one_closure_cell_map()) {
+ ReadOnlyRoots(isolate).one_closure_cell_map() &&
+ !compilation_info()->is_osr()) {
compilation_info()->MarkAsFunctionContextSpecializing();
+ data_.ChooseSpecializationContext();
}
if (compilation_info()->is_source_positions_enabled()) {
@@ -999,9 +1060,13 @@ PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl(
// assembly.
Deoptimizer::EnsureCodeForDeoptimizationEntries(isolate);
- if (!pipeline_.CreateGraph()) {
- CHECK(!isolate->has_pending_exception());
- return AbortOptimization(BailoutReason::kGraphBuildingFailed);
+ pipeline_.Serialize();
+
+ if (!FLAG_concurrent_inlining) {
+ if (!pipeline_.CreateGraph()) {
+ CHECK(!isolate->has_pending_exception());
+ return AbortOptimization(BailoutReason::kGraphBuildingFailed);
+ }
}
return SUCCEEDED;
@@ -1012,7 +1077,21 @@ PipelineCompilationJob::Status PipelineCompilationJob::ExecuteJobImpl() {
TRACE_DISABLED_BY_DEFAULT("v8.compile"), "v8.optimizingCompile.execute",
this, TRACE_EVENT_FLAG_FLOW_IN | TRACE_EVENT_FLAG_FLOW_OUT, "function",
compilation_info()->shared_info()->TraceIDRef());
- if (!pipeline_.OptimizeGraph(linkage_)) return FAILED;
+
+ if (FLAG_concurrent_inlining) {
+ if (!pipeline_.CreateGraph()) {
+ return AbortOptimization(BailoutReason::kGraphBuildingFailed);
+ }
+ }
+
+ bool success;
+ if (FLAG_turboprop) {
+ success = pipeline_.OptimizeGraphForMidTier(linkage_);
+ } else {
+ success = pipeline_.OptimizeGraph(linkage_);
+ }
+ if (!success) return FAILED;
+
pipeline_.AssembleCode(linkage_);
return SUCCEEDED;
}
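// [Editorial sketch, not part of the patch] Net effect of the two hunks
// above: with --concurrent-inlining, graph creation moves from the
// main-thread prepare step to the background execute step, and --turboprop
// routes execution through the new mid-tier pipeline. The control flow, as
// inferred from this diff:
#include <functional>
struct JobSplitSketch {
  bool concurrent_inlining = false;
  bool turboprop = false;
  std::function<void()> serialize, create_graph;
  std::function<bool()> optimize, optimize_mid_tier;
  void PrepareOnMainThread() {
    serialize();
    if (!concurrent_inlining) create_graph();
  }
  bool ExecuteOnBackgroundThread() {
    if (concurrent_inlining) create_graph();
    return turboprop ? optimize_mid_tier() : optimize();
  }
};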
@@ -1091,8 +1170,6 @@ class WasmHeapStubCompilationJob final : public OptimizedCompilationJob {
pipeline_(&data_),
wasm_engine_(wasm_engine) {}
- ~WasmHeapStubCompilationJob() = default;
-
protected:
Status PrepareJobImpl(Isolate* isolate) final;
Status ExecuteJobImpl() final;
@@ -1119,7 +1196,7 @@ Pipeline::NewWasmHeapStubCompilationJob(
CallDescriptor* call_descriptor, std::unique_ptr<Zone> zone, Graph* graph,
Code::Kind kind, std::unique_ptr<char[]> debug_name,
const AssemblerOptions& options, SourcePositionTable* source_positions) {
- return base::make_unique<WasmHeapStubCompilationJob>(
+ return std::make_unique<WasmHeapStubCompilationJob>(
isolate, wasm_engine, call_descriptor, std::move(zone), graph, kind,
std::move(debug_name), options, source_positions);
}
@@ -1175,7 +1252,7 @@ CompilationJob::Status WasmHeapStubCompilationJob::FinalizeJobImpl(
if (FLAG_print_opt_code) {
CodeTracer::Scope tracing_scope(isolate->GetCodeTracer());
OFStream os(tracing_scope.file());
- code->Disassemble(compilation_info()->GetDebugName().get(), os);
+ code->Disassemble(compilation_info()->GetDebugName().get(), os, isolate);
}
#endif
return SUCCEEDED;
@@ -1212,38 +1289,10 @@ struct GraphBuilderPhase {
}
};
-namespace {
-
-Maybe<OuterContext> GetModuleContext(Handle<JSFunction> closure) {
- Context current = closure->context();
- size_t distance = 0;
- while (!current.IsNativeContext()) {
- if (current.IsModuleContext()) {
- return Just(
- OuterContext(handle(current, current.GetIsolate()), distance));
- }
- current = current.previous();
- distance++;
- }
- return Nothing<OuterContext>();
-}
-
-Maybe<OuterContext> ChooseSpecializationContext(
- Isolate* isolate, OptimizedCompilationInfo* info) {
- if (info->is_function_context_specializing()) {
- DCHECK(info->has_context());
- return Just(OuterContext(handle(info->context(), isolate), 0));
- }
- return GetModuleContext(info->closure());
-}
-
-} // anonymous namespace
-
struct InliningPhase {
static const char* phase_name() { return "V8.TFInlining"; }
void Run(PipelineData* data, Zone* temp_zone) {
- Isolate* isolate = data->isolate();
OptimizedCompilationInfo* info = data->info();
GraphReducer graph_reducer(temp_zone, data->graph(), &info->tick_counter(),
data->jsgraph()->Dead());
@@ -1260,7 +1309,7 @@ struct InliningPhase {
data->dependencies());
JSContextSpecialization context_specialization(
&graph_reducer, data->jsgraph(), data->broker(),
- ChooseSpecializationContext(isolate, data->info()),
+ data->specialization_context(),
data->info()->is_function_context_specializing()
? data->info()->closure()
: MaybeHandle<JSFunction>());
@@ -1389,9 +1438,13 @@ struct SerializationPhase {
flags |=
SerializerForBackgroundCompilationFlag::kAnalyzeEnvironmentLiveness;
}
- RunSerializerForBackgroundCompilation(data->broker(), data->dependencies(),
- temp_zone, data->info()->closure(),
- flags, data->info()->osr_offset());
+ RunSerializerForBackgroundCompilation(
+ data->zone_stats(), data->broker(), data->dependencies(),
+ data->info()->closure(), flags, data->info()->osr_offset());
+ if (data->specialization_context().IsJust()) {
+ ContextRef(data->broker(),
+ data->specialization_context().FromJust().context);
+ }
}
};
@@ -1682,8 +1735,8 @@ struct MemoryOptimizationPhase {
MemoryOptimizer optimizer(
data->jsgraph(), temp_zone, data->info()->GetPoisoningMitigationLevel(),
data->info()->is_allocation_folding_enabled()
- ? MemoryOptimizer::AllocationFolding::kDoAllocationFolding
- : MemoryOptimizer::AllocationFolding::kDontAllocationFolding,
+ ? MemoryLowering::AllocationFolding::kDoAllocationFolding
+ : MemoryLowering::AllocationFolding::kDontAllocationFolding,
data->debug_name(), &data->info()->tick_counter());
optimizer.Optimize();
}
@@ -1705,13 +1758,15 @@ struct LateOptimizationPhase {
CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
data->broker(), data->common(),
data->machine(), temp_zone);
- SelectLowering select_lowering(data->jsgraph()->graph(),
- data->jsgraph()->common());
-#ifdef V8_COMPRESS_POINTERS
+ SelectLowering select_lowering(data->jsgraph(), temp_zone);
+ // TODO(v8:7703, solanes): go back to using #if guards once
+ // FLAG_turbo_decompression_elimination gets removed.
DecompressionElimination decompression_elimination(
&graph_reducer, data->graph(), data->machine(), data->common());
- AddReducer(data, &graph_reducer, &decompression_elimination);
-#endif
+ if (COMPRESS_POINTERS_BOOL && FLAG_turbo_decompression_elimination) {
+ AddReducer(data, &graph_reducer, &decompression_elimination);
+ }
+ USE(decompression_elimination);
AddReducer(data, &graph_reducer, &branch_condition_elimination);
AddReducer(data, &graph_reducer, &dead_code_elimination);
AddReducer(data, &graph_reducer, &machine_reducer);
@@ -1738,6 +1793,23 @@ struct MachineOperatorOptimizationPhase {
}
};
+struct MidTierMachineLoweringPhase {
+ static const char* phase_name() { return "V8.TFMidTierMachineLoweringPhase"; }
+
+ void Run(PipelineData* data, Zone* temp_zone) {
+ GraphReducer graph_reducer(temp_zone, data->graph(),
+ &data->info()->tick_counter(),
+ data->jsgraph()->Dead());
+ SelectLowering select_lowering(data->jsgraph(), temp_zone);
+ MemoryLowering memory_lowering(data->jsgraph(), temp_zone,
+ data->info()->GetPoisoningMitigationLevel());
+
+ AddReducer(data, &graph_reducer, &memory_lowering);
+ AddReducer(data, &graph_reducer, &select_lowering);
+ graph_reducer.ReduceGraph();
+ }
+};
+
struct CsaEarlyOptimizationPhase {
static const char* phase_name() { return "V8.CSAEarlyOptimization"; }
@@ -1779,11 +1851,14 @@ struct CsaOptimizationPhase {
CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
data->broker(), data->common(),
data->machine(), temp_zone);
-#ifdef V8_COMPRESS_POINTERS
+ // TODO(v8:7703, solanes): go back to using #if guards once
+ // FLAG_turbo_decompression_elimination gets removed.
DecompressionElimination decompression_elimination(
&graph_reducer, data->graph(), data->machine(), data->common());
- AddReducer(data, &graph_reducer, &decompression_elimination);
-#endif
+ if (COMPRESS_POINTERS_BOOL && FLAG_turbo_decompression_elimination) {
+ AddReducer(data, &graph_reducer, &decompression_elimination);
+ }
+ USE(decompression_elimination);
AddReducer(data, &graph_reducer, &branch_condition_elimination);
AddReducer(data, &graph_reducer, &dead_code_elimination);
AddReducer(data, &graph_reducer, &machine_reducer);
@@ -2077,7 +2152,7 @@ struct JumpThreadingPhase {
void Run(PipelineData* data, Zone* temp_zone, bool frame_at_start) {
ZoneVector<RpoNumber> result(temp_zone);
- if (JumpThreading::ComputeForwarding(temp_zone, result, data->sequence(),
+ if (JumpThreading::ComputeForwarding(temp_zone, &result, data->sequence(),
frame_at_start)) {
JumpThreading::ApplyForwarding(temp_zone, result, data->sequence());
}
@@ -2102,7 +2177,7 @@ struct FinalizeCodePhase {
struct PrintGraphPhase {
- static const char* phase_name() { return nullptr; }
+ static const char* phase_name() { return "V8.TFPrintGraph"; }
void Run(PipelineData* data, Zone* temp_zone, const char* phase) {
OptimizedCompilationInfo* info = data->info();
@@ -2143,7 +2218,7 @@ struct PrintGraphPhase {
struct VerifyGraphPhase {
- static const char* phase_name() { return nullptr; }
+ static const char* phase_name() { return "V8.TFVerifyGraph"; }
void Run(PipelineData* data, Zone* temp_zone, const bool untyped,
bool values_only = false) {
@@ -2176,10 +2251,10 @@ void PipelineImpl::RunPrintAndVerify(const char* phase, bool untyped) {
}
}
-bool PipelineImpl::CreateGraph() {
+void PipelineImpl::Serialize() {
PipelineData* data = this->data_;
- data->BeginPhaseKind("V8.TFGraphCreation");
+ data->BeginPhaseKind("V8.TFBrokerInitAndSerialization");
if (info()->trace_turbo_json_enabled() ||
info()->trace_turbo_graph_enabled()) {
@@ -2203,15 +2278,19 @@ bool PipelineImpl::CreateGraph() {
if (FLAG_concurrent_inlining) {
Run<HeapBrokerInitializationPhase>();
Run<SerializationPhase>();
+ data->broker()->StopSerializing();
}
+ data->EndPhaseKind();
+}
+
+bool PipelineImpl::CreateGraph() {
+ PipelineData* data = this->data_;
+
+ data->BeginPhaseKind("V8.TFGraphCreation");
Run<GraphBuilderPhase>();
RunPrintAndVerify(GraphBuilderPhase::phase_name(), true);
- if (FLAG_concurrent_inlining) {
- Run<CopyMetadataForConcurrentCompilePhase>();
- }
-
// Perform function context specialization and inlining (if enabled).
Run<InliningPhase>();
RunPrintAndVerify(InliningPhase::phase_name(), true);
@@ -2222,12 +2301,13 @@ bool PipelineImpl::CreateGraph() {
// Determine the Typer operation flags.
{
- if (is_sloppy(info()->shared_info()->language_mode()) &&
- info()->shared_info()->IsUserJavaScript()) {
+ SharedFunctionInfoRef shared_info(data->broker(), info()->shared_info());
+ if (is_sloppy(shared_info.language_mode()) &&
+ shared_info.IsUserJavaScript()) {
// Sloppy mode functions always have an Object for this.
data->AddTyperFlag(Typer::kThisIsReceiver);
}
- if (IsClassConstructor(info()->shared_info()->kind())) {
+ if (IsClassConstructor(shared_info.kind())) {
// Class constructors cannot be [[Call]]ed.
data->AddTyperFlag(Typer::kNewTargetIsReceiver);
}
@@ -2235,12 +2315,7 @@ bool PipelineImpl::CreateGraph() {
// Run the type-sensitive lowerings and optimizations on the graph.
{
- if (FLAG_concurrent_inlining) {
- // TODO(neis): Remove CopyMetadataForConcurrentCompilePhase call once
- // brokerization of JSNativeContextSpecialization is complete.
- Run<CopyMetadataForConcurrentCompilePhase>();
- data->broker()->StopSerializing();
- } else {
+ if (!FLAG_concurrent_inlining) {
Run<HeapBrokerInitializationPhase>();
Run<CopyMetadataForConcurrentCompilePhase>();
data->broker()->StopSerializing();
@@ -2359,6 +2434,70 @@ bool PipelineImpl::OptimizeGraph(Linkage* linkage) {
return SelectInstructions(linkage);
}
+bool PipelineImpl::OptimizeGraphForMidTier(Linkage* linkage) {
+ PipelineData* data = this->data_;
+
+ data->BeginPhaseKind("V8.TFLowering");
+
+ // Type the graph and keep the Typer running such that new nodes get
+ // automatically typed when they are created.
+ Run<TyperPhase>(data->CreateTyper());
+ RunPrintAndVerify(TyperPhase::phase_name());
+ Run<TypedLoweringPhase>();
+ RunPrintAndVerify(TypedLoweringPhase::phase_name());
+
+  // TODO(9684): Consider rolling this into the preceding phase or not creating
+ // LoopExit nodes at all.
+ Run<LoopExitEliminationPhase>();
+ RunPrintAndVerify(LoopExitEliminationPhase::phase_name(), true);
+
+ data->DeleteTyper();
+
+ if (FLAG_assert_types) {
+ Run<TypeAssertionsPhase>();
+ RunPrintAndVerify(TypeAssertionsPhase::phase_name());
+ }
+
+  // Perform simplified lowering. This has to run without the Typer decorator,
+  // because we cannot compute meaningful types anyway, and the computed types
+  // might even conflict with the representation/truncation logic.
+ Run<SimplifiedLoweringPhase>();
+ RunPrintAndVerify(SimplifiedLoweringPhase::phase_name(), true);
+
+ // From now on it is invalid to look at types on the nodes, because the types
+ // on the nodes might not make sense after representation selection due to the
+  // way we handle truncations; if we wanted to look at types afterwards, we'd
+ // essentially need to re-type (large portions of) the graph.
+
+ // In order to catch bugs related to type access after this point, we now
+ // remove the types from the nodes (currently only in Debug builds).
+#ifdef DEBUG
+ Run<UntyperPhase>();
+ RunPrintAndVerify(UntyperPhase::phase_name(), true);
+#endif
+
+ // Run generic lowering pass.
+ Run<GenericLoweringPhase>();
+ RunPrintAndVerify(GenericLoweringPhase::phase_name(), true);
+
+ data->BeginPhaseKind("V8.TFBlockBuilding");
+
+ Run<EffectControlLinearizationPhase>();
+ RunPrintAndVerify(EffectControlLinearizationPhase::phase_name(), true);
+
+ Run<MidTierMachineLoweringPhase>();
+ RunPrintAndVerify(MidTierMachineLoweringPhase::phase_name(), true);
+
+ data->source_positions()->RemoveDecorator();
+ if (data->info()->trace_turbo_json_enabled()) {
+ data->node_origins()->RemoveDecorator();
+ }
+
+ ComputeScheduledGraph();
+
+ return SelectInstructions(linkage);
+}
+
MaybeHandle<Code> Pipeline::GenerateCodeForCodeStub(
Isolate* isolate, CallDescriptor* call_descriptor, Graph* graph,
SourcePositionTable* source_positions, Code::Kind kind,
@@ -2571,6 +2710,7 @@ MaybeHandle<Code> Pipeline::GenerateCodeForTesting(
Linkage linkage(Linkage::ComputeIncoming(data.instruction_zone(), info));
Deoptimizer::EnsureCodeForDeoptimizationEntries(isolate);
+ pipeline.Serialize();
if (!pipeline.CreateGraph()) return MaybeHandle<Code>();
if (!pipeline.OptimizeGraph(&linkage)) return MaybeHandle<Code>();
pipeline.AssembleCode(&linkage);
@@ -2628,7 +2768,7 @@ std::unique_ptr<OptimizedCompilationJob> Pipeline::NewCompilationJob(
Isolate* isolate, Handle<JSFunction> function, bool has_script) {
Handle<SharedFunctionInfo> shared =
handle(function->shared(), function->GetIsolate());
- return base::make_unique<PipelineCompilationJob>(isolate, shared, function);
+ return std::make_unique<PipelineCompilationJob>(isolate, shared, function);
}
// static
@@ -2709,7 +2849,7 @@ void Pipeline::GenerateCodeForWasmFunction(
if (!pipeline.SelectInstructions(&linkage)) return;
pipeline.AssembleCode(&linkage, instruction_buffer->CreateView());
- auto result = base::make_unique<wasm::WasmCompilationResult>();
+ auto result = std::make_unique<wasm::WasmCompilationResult>();
CodeGenerator* code_generator = pipeline.code_generator();
code_generator->tasm()->GetCode(
nullptr, &result->code_desc, code_generator->safepoint_table_builder(),
@@ -2818,7 +2958,7 @@ bool PipelineImpl::SelectInstructions(Linkage* linkage) {
<< "--- End of " << data->debug_name() << " generated by TurboFan\n"
<< "--------------------------------------------------\n";
}
- Zone temp_zone(data->allocator(), ZONE_NAME);
+ Zone temp_zone(data->allocator(), kMachineGraphVerifierZoneName);
MachineGraphVerifier::Run(
data->graph(), data->schedule(), linkage,
data->info()->IsNotOptimizedFunctionOrWasmFunction(),
@@ -2993,6 +3133,7 @@ void PipelineImpl::AssembleCode(Linkage* linkage,
MaybeHandle<Code> PipelineImpl::FinalizeCode(bool retire_broker) {
PipelineData* data = this->data_;
+ data->BeginPhaseKind("V8.TFFinalizeCode");
if (data->broker() && retire_broker) {
data->broker()->Retire();
}
@@ -3007,7 +3148,7 @@ MaybeHandle<Code> PipelineImpl::FinalizeCode(bool retire_broker) {
if (data->profiler_data()) {
#ifdef ENABLE_DISASSEMBLER
std::ostringstream os;
- code->Disassemble(nullptr, os);
+ code->Disassemble(nullptr, os, isolate());
data->profiler_data()->SetCode(&os);
#endif // ENABLE_DISASSEMBLER
}
@@ -3023,7 +3164,7 @@ MaybeHandle<Code> PipelineImpl::FinalizeCode(bool retire_broker) {
<< "\"data\":\"";
#ifdef ENABLE_DISASSEMBLER
std::stringstream disassembly_stream;
- code->Disassemble(nullptr, disassembly_stream);
+ code->Disassemble(nullptr, disassembly_stream, isolate());
std::string disassembly_string(disassembly_stream.str());
for (const auto& c : disassembly_string) {
json_of << AsEscapedUC16ForJSON(c);
@@ -3043,6 +3184,7 @@ MaybeHandle<Code> PipelineImpl::FinalizeCode(bool retire_broker) {
<< "Finished compiling method " << info()->GetDebugName().get()
<< " using TurboFan" << std::endl;
}
+ data->EndPhaseKind();
return code;
}
@@ -3100,7 +3242,8 @@ void PipelineImpl::AllocateRegisters(const RegisterConfiguration* config,
std::unique_ptr<Zone> verifier_zone;
RegisterAllocatorVerifier* verifier = nullptr;
if (run_verifier) {
- verifier_zone.reset(new Zone(data->allocator(), ZONE_NAME));
+ verifier_zone.reset(
+ new Zone(data->allocator(), kRegisterAllocatorVerifierZoneName));
verifier = new (verifier_zone.get()) RegisterAllocatorVerifier(
verifier_zone.get(), config, data->sequence());
}
diff --git a/deps/v8/src/compiler/pipeline.h b/deps/v8/src/compiler/pipeline.h
index 3707bfb06e..42f31472a9 100644
--- a/deps/v8/src/compiler/pipeline.h
+++ b/deps/v8/src/compiler/pipeline.h
@@ -5,6 +5,8 @@
#ifndef V8_COMPILER_PIPELINE_H_
#define V8_COMPILER_PIPELINE_H_
+#include <memory>
+
// Clients of this interface shouldn't depend on lots of compiler internals.
// Do not include anything from src/compiler here!
#include "src/common/globals.h"
diff --git a/deps/v8/src/compiler/processed-feedback.h b/deps/v8/src/compiler/processed-feedback.h
index 17829863de..1d1ee538d8 100644
--- a/deps/v8/src/compiler/processed-feedback.h
+++ b/deps/v8/src/compiler/processed-feedback.h
@@ -18,7 +18,10 @@ class ElementAccessFeedback;
class ForInFeedback;
class GlobalAccessFeedback;
class InstanceOfFeedback;
+class LiteralFeedback;
class NamedAccessFeedback;
+class RegExpLiteralFeedback;
+class TemplateObjectFeedback;
class ProcessedFeedback : public ZoneObject {
public:
@@ -31,7 +34,10 @@ class ProcessedFeedback : public ZoneObject {
kForIn,
kGlobalAccess,
kInstanceOf,
+ kLiteral,
kNamedAccess,
+ kRegExpLiteral,
+ kTemplateObject,
};
Kind kind() const { return kind_; }
@@ -46,6 +52,9 @@ class ProcessedFeedback : public ZoneObject {
GlobalAccessFeedback const& AsGlobalAccess() const;
InstanceOfFeedback const& AsInstanceOf() const;
NamedAccessFeedback const& AsNamedAccess() const;
+ LiteralFeedback const& AsLiteral() const;
+ RegExpLiteralFeedback const& AsRegExpLiteral() const;
+ TemplateObjectFeedback const& AsTemplateObject() const;
protected:
ProcessedFeedback(Kind kind, FeedbackSlotKind slot_kind);
@@ -187,7 +196,9 @@ class SingleValueFeedback : public ProcessedFeedback {
(K == kBinaryOperation && slot_kind == FeedbackSlotKind::kBinaryOp) ||
(K == kCompareOperation && slot_kind == FeedbackSlotKind::kCompareOp) ||
(K == kForIn && slot_kind == FeedbackSlotKind::kForIn) ||
- (K == kInstanceOf && slot_kind == FeedbackSlotKind::kInstanceOf));
+ (K == kInstanceOf && slot_kind == FeedbackSlotKind::kInstanceOf) ||
+ ((K == kLiteral || K == kRegExpLiteral || K == kTemplateObject) &&
+ slot_kind == FeedbackSlotKind::kLiteral));
}
T value() const { return value_; }
@@ -202,6 +213,24 @@ class InstanceOfFeedback
using SingleValueFeedback::SingleValueFeedback;
};
+class LiteralFeedback
+ : public SingleValueFeedback<AllocationSiteRef,
+ ProcessedFeedback::kLiteral> {
+ using SingleValueFeedback::SingleValueFeedback;
+};
+
+class RegExpLiteralFeedback
+ : public SingleValueFeedback<JSRegExpRef,
+ ProcessedFeedback::kRegExpLiteral> {
+ using SingleValueFeedback::SingleValueFeedback;
+};
+
+class TemplateObjectFeedback
+ : public SingleValueFeedback<JSArrayRef,
+ ProcessedFeedback::kTemplateObject> {
+ using SingleValueFeedback::SingleValueFeedback;
+};
+
class BinaryOperationFeedback
: public SingleValueFeedback<BinaryOperationHint,
ProcessedFeedback::kBinaryOperation> {
diff --git a/deps/v8/src/compiler/raw-machine-assembler.cc b/deps/v8/src/compiler/raw-machine-assembler.cc
index e399b9c4f6..c709729081 100644
--- a/deps/v8/src/compiler/raw-machine-assembler.cc
+++ b/deps/v8/src/compiler/raw-machine-assembler.cc
@@ -690,15 +690,14 @@ Node* RawMachineAssembler::CallNWithFrameState(CallDescriptor* call_descriptor,
return AddNode(common()->Call(call_descriptor), input_count, inputs);
}
-Node* RawMachineAssembler::TailCallN(CallDescriptor* call_descriptor,
- int input_count, Node* const* inputs) {
+void RawMachineAssembler::TailCallN(CallDescriptor* call_descriptor,
+ int input_count, Node* const* inputs) {
// +1 is for target.
DCHECK_EQ(input_count, call_descriptor->ParameterCount() + 1);
Node* tail_call =
MakeNode(common()->TailCall(call_descriptor), input_count, inputs);
schedule()->AddTailCall(CurrentBlock(), tail_call);
current_block_ = nullptr;
- return tail_call;
}
namespace {
diff --git a/deps/v8/src/compiler/raw-machine-assembler.h b/deps/v8/src/compiler/raw-machine-assembler.h
index 46940df44f..cbbb719d54 100644
--- a/deps/v8/src/compiler/raw-machine-assembler.h
+++ b/deps/v8/src/compiler/raw-machine-assembler.h
@@ -131,7 +131,7 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
std::pair<MachineType, const Operator*> InsertDecompressionIfNeeded(
MachineType type) {
const Operator* decompress_op = nullptr;
- if (COMPRESS_POINTERS_BOOL) {
+ if (COMPRESS_POINTERS_BOOL && FLAG_turbo_decompression_elimination) {
switch (type.representation()) {
case MachineRepresentation::kTaggedPointer:
type = MachineType::CompressedPointer();
@@ -188,7 +188,7 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
std::pair<MachineRepresentation, Node*> InsertCompressionIfNeeded(
MachineRepresentation rep, Node* value) {
- if (COMPRESS_POINTERS_BOOL) {
+ if (COMPRESS_POINTERS_BOOL && FLAG_turbo_decompression_elimination) {
switch (rep) {
case MachineRepresentation::kTaggedPointer:
rep = MachineRepresentation::kCompressedPointer;
@@ -237,7 +237,7 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
object, value);
}
void OptimizedStoreMap(Node* object, Node* value) {
- if (COMPRESS_POINTERS_BOOL) {
+ if (COMPRESS_POINTERS_BOOL && FLAG_turbo_decompression_elimination) {
DCHECK(AccessBuilder::ForMap().machine_type.IsCompressedPointer());
value =
AddNode(machine()->ChangeTaggedPointerToCompressedPointer(), value);
@@ -736,8 +736,8 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
Node* BitcastTaggedToWord(Node* a) {
return AddNode(machine()->BitcastTaggedToWord(), a);
}
- Node* BitcastTaggedSignedToWord(Node* a) {
- return AddNode(machine()->BitcastTaggedSignedToWord(), a);
+ Node* BitcastTaggedToWordForTagAndSmiBits(Node* a) {
+ return AddNode(machine()->BitcastTaggedToWordForTagAndSmiBits(), a);
}
Node* BitcastMaybeObjectToWord(Node* a) {
return AddNode(machine()->BitcastMaybeObjectToWord(), a);
@@ -965,8 +965,8 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
// Tail call a given call descriptor and the given arguments.
// The call target is passed as part of the {inputs} array.
- Node* TailCallN(CallDescriptor* call_descriptor, int input_count,
- Node* const* inputs);
+ void TailCallN(CallDescriptor* call_descriptor, int input_count,
+ Node* const* inputs);
// Type representing C function argument with type info.
using CFunctionArg = std::pair<MachineType, Node*>;
diff --git a/deps/v8/src/compiler/representation-change.cc b/deps/v8/src/compiler/representation-change.cc
index fd0cbabe66..ca1b1e221f 100644
--- a/deps/v8/src/compiler/representation-change.cc
+++ b/deps/v8/src/compiler/representation-change.cc
@@ -1272,8 +1272,13 @@ Node* RepresentationChanger::GetBitRepresentationFor(
}
}
} else if (output_rep == MachineRepresentation::kTaggedSigned) {
- node = jsgraph()->graph()->NewNode(machine()->WordEqual(), node,
- jsgraph()->IntPtrConstant(0));
+ if (COMPRESS_POINTERS_BOOL) {
+ node = jsgraph()->graph()->NewNode(machine()->Word32Equal(), node,
+ jsgraph()->Int32Constant(0));
+ } else {
+ node = jsgraph()->graph()->NewNode(machine()->WordEqual(), node,
+ jsgraph()->IntPtrConstant(0));
+ }
return jsgraph()->graph()->NewNode(machine()->Word32Equal(), node,
jsgraph()->Int32Constant(0));
} else if (output_rep == MachineRepresentation::kCompressed) {
@@ -1546,14 +1551,17 @@ const Operator* RepresentationChanger::TaggedSignedOperatorFor(
IrOpcode::Value opcode) {
switch (opcode) {
case IrOpcode::kSpeculativeNumberLessThan:
- return machine()->Is32() ? machine()->Int32LessThan()
- : machine()->Int64LessThan();
+ return (COMPRESS_POINTERS_BOOL || machine()->Is32())
+ ? machine()->Int32LessThan()
+ : machine()->Int64LessThan();
case IrOpcode::kSpeculativeNumberLessThanOrEqual:
- return machine()->Is32() ? machine()->Int32LessThanOrEqual()
- : machine()->Int64LessThanOrEqual();
+ return (COMPRESS_POINTERS_BOOL || machine()->Is32())
+ ? machine()->Int32LessThanOrEqual()
+ : machine()->Int64LessThanOrEqual();
case IrOpcode::kSpeculativeNumberEqual:
- return machine()->Is32() ? machine()->Word32Equal()
- : machine()->Word64Equal();
+ return (COMPRESS_POINTERS_BOOL || machine()->Is32())
+ ? machine()->Word32Equal()
+ : machine()->Word64Equal();
default:
UNREACHABLE();
}
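// [Editorial sketch, not part of the patch] Rationale for the change above:
// with pointer compression, a tagged Smi lives entirely in the low 32 bits,
// so Smi-to-Smi comparisons can use 32-bit machine operations even on 64-bit
// targets. Assuming V8's Smi encoding (payload shifted left by one, tag bit
// zero), the tagged bits order exactly like the payloads:
#include <cstdint>
bool SmiLess32(uint32_t lhs_tagged, uint32_t rhs_tagged) {
  return static_cast<int32_t>(lhs_tagged) < static_cast<int32_t>(rhs_tagged);
}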
diff --git a/deps/v8/src/compiler/scheduler.cc b/deps/v8/src/compiler/scheduler.cc
index bf23e436f6..2999cbfcd6 100644
--- a/deps/v8/src/compiler/scheduler.cc
+++ b/deps/v8/src/compiler/scheduler.cc
@@ -6,7 +6,7 @@
#include <iomanip>
-#include "src/base/adapters.h"
+#include "src/base/iterator.h"
#include "src/codegen/tick-counter.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/control-equivalence.h"
diff --git a/deps/v8/src/compiler/select-lowering.cc b/deps/v8/src/compiler/select-lowering.cc
index 4d5bb99053..290306a966 100644
--- a/deps/v8/src/compiler/select-lowering.cc
+++ b/deps/v8/src/compiler/select-lowering.cc
@@ -14,29 +14,39 @@ namespace v8 {
namespace internal {
namespace compiler {
-SelectLowering::SelectLowering(Graph* graph, CommonOperatorBuilder* common)
- : common_(common), graph_(graph) {}
+SelectLowering::SelectLowering(JSGraph* jsgraph, Zone* zone)
+ : graph_assembler_(jsgraph, nullptr, nullptr, zone),
+ start_(jsgraph->graph()->start()) {}
SelectLowering::~SelectLowering() = default;
-
Reduction SelectLowering::Reduce(Node* node) {
if (node->opcode() != IrOpcode::kSelect) return NoChange();
+ return Changed(LowerSelect(node));
+}
+
+#define __ gasm()->
+
+Node* SelectLowering::LowerSelect(Node* node) {
SelectParameters const p = SelectParametersOf(node->op());
- Node* cond = node->InputAt(0);
- Node* vthen = node->InputAt(1);
- Node* velse = node->InputAt(2);
-
- // Create a diamond and a phi.
- Diamond d(graph(), common(), cond, p.hint());
- node->ReplaceInput(0, vthen);
- node->ReplaceInput(1, velse);
- node->ReplaceInput(2, d.merge);
- NodeProperties::ChangeOp(node, common()->Phi(p.representation(), 2));
- return Changed(node);
+ Node* condition = node->InputAt(0);
+ Node* vtrue = node->InputAt(1);
+ Node* vfalse = node->InputAt(2);
+
+ gasm()->Reset(start(), start());
+
+ auto done = __ MakeLabel(p.representation());
+
+ __ GotoIf(condition, &done, vtrue);
+ __ Goto(&done, vfalse);
+ __ Bind(&done);
+
+ return done.PhiAt(0);
}
+#undef __
+
} // namespace compiler
} // namespace internal
} // namespace v8
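// [Editorial sketch, not part of the patch] The rewritten SelectLowering
// emits the same diamond as the old Diamond-and-Phi construction, just via
// GraphAssembler labels. Semantically the emitted subgraph computes:
int SelectSketch(bool condition, int vtrue, int vfalse) {
  // GotoIf(condition, &done, vtrue); Goto(&done, vfalse); Bind(&done);
  // The phi bound at {done} picks the value for the taken edge:
  return condition ? vtrue : vfalse;
}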
diff --git a/deps/v8/src/compiler/select-lowering.h b/deps/v8/src/compiler/select-lowering.h
index d8c12d4d54..53890a7898 100644
--- a/deps/v8/src/compiler/select-lowering.h
+++ b/deps/v8/src/compiler/select-lowering.h
@@ -5,33 +5,31 @@
#ifndef V8_COMPILER_SELECT_LOWERING_H_
#define V8_COMPILER_SELECT_LOWERING_H_
+#include "src/compiler/graph-assembler.h"
#include "src/compiler/graph-reducer.h"
namespace v8 {
namespace internal {
namespace compiler {
-// Forward declarations.
-class CommonOperatorBuilder;
-class Graph;
-
-
// Lowers Select nodes to diamonds.
class SelectLowering final : public Reducer {
public:
- SelectLowering(Graph* graph, CommonOperatorBuilder* common);
+ SelectLowering(JSGraph* jsgraph, Zone* zone);
~SelectLowering() override;
const char* reducer_name() const override { return "SelectLowering"; }
Reduction Reduce(Node* node) override;
+ Node* LowerSelect(Node* node);
+
private:
- CommonOperatorBuilder* common() const { return common_; }
- Graph* graph() const { return graph_; }
+ GraphAssembler* gasm() { return &graph_assembler_; }
+ Node* start() { return start_; }
- CommonOperatorBuilder* common_;
- Graph* graph_;
+ GraphAssembler graph_assembler_;
+ Node* start_;
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/serializer-for-background-compilation.cc b/deps/v8/src/compiler/serializer-for-background-compilation.cc
index 20d405b775..0391e8742d 100644
--- a/deps/v8/src/compiler/serializer-for-background-compilation.cc
+++ b/deps/v8/src/compiler/serializer-for-background-compilation.cc
@@ -10,7 +10,9 @@
#include "src/compiler/access-info.h"
#include "src/compiler/bytecode-analysis.h"
#include "src/compiler/compilation-dependencies.h"
+#include "src/compiler/functional-list.h"
#include "src/compiler/js-heap-broker.h"
+#include "src/compiler/zone-stats.h"
#include "src/handles/handles-inl.h"
#include "src/ic/call-optimization.h"
#include "src/interpreter/bytecode-array-iterator.h"
@@ -41,7 +43,6 @@ namespace compiler {
V(CallRuntime) \
V(CloneObject) \
V(CreateArrayFromIterable) \
- V(CreateEmptyArrayLiteral) \
V(CreateEmptyObjectLiteral) \
V(CreateMappedArguments) \
V(CreateRestParameter) \
@@ -160,6 +161,7 @@ namespace compiler {
V(CreateBlockContext) \
V(CreateCatchContext) \
V(CreateClosure) \
+ V(CreateEmptyArrayLiteral) \
V(CreateEvalContext) \
V(CreateFunctionContext) \
V(CreateObjectLiteral) \
@@ -230,13 +232,41 @@ namespace compiler {
UNCONDITIONAL_JUMPS_LIST(V) \
UNREACHABLE_BYTECODE_LIST(V)
-template <typename T>
-struct HandleComparator {
- bool operator()(const Handle<T>& lhs, const Handle<T>& rhs) const {
- return lhs.address() < rhs.address();
+template <typename T, typename EqualTo>
+class FunctionalSet {
+ public:
+ void Add(T const& elem, Zone* zone) {
+ for (auto const& l : data_) {
+ if (equal_to(l, elem)) return;
+ }
+ data_.PushFront(elem, zone);
+ }
+
+ bool Includes(FunctionalSet<T, EqualTo> const& other) const {
+ return std::all_of(other.begin(), other.end(), [&](T const& other_elem) {
+ return std::any_of(this->begin(), this->end(), [&](T const& this_elem) {
+ return equal_to(this_elem, other_elem);
+ });
+ });
}
+
+ bool IsEmpty() const { return data_.begin() == data_.end(); }
+
+ void Clear() { data_.Clear(); }
+
+ using iterator = typename FunctionalList<T>::iterator;
+
+ iterator begin() const { return data_.begin(); }
+ iterator end() const { return data_.end(); }
+
+ private:
+ static EqualTo equal_to;
+ FunctionalList<T> data_;
};
+template <typename T, typename EqualTo>
+EqualTo FunctionalSet<T, EqualTo>::equal_to;
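+
+// Usage sketch (illustrative): FunctionalSet is a persistent set layered on
+// FunctionalList, so copies share structure and Add is linear in the size:
+//   FunctionalSet<Handle<Map>, Handle<Map>::equal_to> maps;
+//   maps.Add(map, zone);                  // no-op if an equal element exists
+//   bool subset = maps.Includes(other);   // pairwise EqualTo comparison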
+
struct VirtualContext {
unsigned int distance;
Handle<Context> context;
@@ -245,21 +275,22 @@ struct VirtualContext {
: distance(distance_in), context(context_in) {
CHECK_GT(distance, 0);
}
- bool operator<(const VirtualContext& other) const {
- return HandleComparator<Context>()(context, other.context) &&
- distance < other.distance;
+ bool operator==(const VirtualContext& other) const {
+ return context.equals(other.context) && distance == other.distance;
}
};
class FunctionBlueprint;
-using ConstantsSet = ZoneSet<Handle<Object>, HandleComparator<Object>>;
-using VirtualContextsSet = ZoneSet<VirtualContext>;
-using MapsSet = ZoneSet<Handle<Map>, HandleComparator<Map>>;
-using BlueprintsSet = ZoneSet<FunctionBlueprint>;
+using ConstantsSet = FunctionalSet<Handle<Object>, Handle<Object>::equal_to>;
+using VirtualContextsSet =
+ FunctionalSet<VirtualContext, std::equal_to<VirtualContext>>;
+using MapsSet = FunctionalSet<Handle<Map>, Handle<Map>::equal_to>;
+using BlueprintsSet =
+ FunctionalSet<FunctionBlueprint, std::equal_to<FunctionBlueprint>>;
class Hints {
public:
- explicit Hints(Zone* zone);
+ Hints() = default;
static Hints SingleConstant(Handle<Object> constant, Zone* zone);
@@ -268,12 +299,13 @@ class Hints {
const BlueprintsSet& function_blueprints() const;
const VirtualContextsSet& virtual_contexts() const;
- void AddConstant(Handle<Object> constant);
- void AddMap(Handle<Map> map);
- void AddFunctionBlueprint(FunctionBlueprint function_blueprint);
- void AddVirtualContext(VirtualContext virtual_context);
+ void AddConstant(Handle<Object> constant, Zone* zone);
+ void AddMap(Handle<Map> map, Zone* zone);
+ void AddFunctionBlueprint(FunctionBlueprint function_blueprint, Zone* zone);
+ void AddVirtualContext(VirtualContext virtual_context, Zone* zone);
- void Add(const Hints& other);
+ void Add(const Hints& other, Zone* zone);
+ void AddFromChildSerializer(const Hints& other, Zone* zone);
void Clear();
bool IsEmpty() const;
@@ -292,6 +324,8 @@ class Hints {
using HintsVector = ZoneVector<Hints>;
+// A FunctionBlueprint is a SharedFunctionInfo and a FeedbackVector, plus
+// Hints about the context in which a closure will be created from them.
class FunctionBlueprint {
public:
FunctionBlueprint(Handle<JSFunction> function, Isolate* isolate, Zone* zone);
@@ -304,13 +338,23 @@ class FunctionBlueprint {
Handle<FeedbackVector> feedback_vector() const { return feedback_vector_; }
const Hints& context_hints() const { return context_hints_; }
- bool operator<(const FunctionBlueprint& other) const {
- // A feedback vector is never used for more than one SFI, so it can
- // be used for strict ordering of blueprints.
+ bool operator==(const FunctionBlueprint& other) const {
+ // A feedback vector is never used for more than one SFI. Moreover, we can
+    // never have two blueprints with an identical feedback vector (and SFI)
+    // but different hints, because:
+ // (1) A blueprint originates either (i) from the data associated with a
+ // CreateClosure bytecode, in which case two different CreateClosure
+ // bytecodes never have the same feedback vector, or (ii) from a
+ // JSFunction, in which case the hints are determined by the closure.
+ // (2) We never extend a blueprint's hints after construction.
+ //
+ // It is therefore sufficient to look at the feedback vector in order to
+ // decide equality.
DCHECK_IMPLIES(feedback_vector_.equals(other.feedback_vector_),
shared_.equals(other.shared_));
- return HandleComparator<FeedbackVector>()(feedback_vector_,
- other.feedback_vector_);
+ SLOW_DCHECK(!feedback_vector_.equals(other.feedback_vector_) ||
+ context_hints_.Equals(other.context_hints_));
+ return feedback_vector_.equals(other.feedback_vector_);
}
private:
@@ -319,6 +363,8 @@ class FunctionBlueprint {
Hints context_hints_;
};
+// A CompilationSubject is a FunctionBlueprint, optionally with a matching
+// closure.
class CompilationSubject {
public:
explicit CompilationSubject(FunctionBlueprint blueprint)
@@ -336,24 +382,65 @@ class CompilationSubject {
MaybeHandle<JSFunction> closure_;
};
+// A Callee is either a JSFunction (which may not have a feedback vector) or a
+// FunctionBlueprint. Note that this is different from CompilationSubject, which
+// always has a FunctionBlueprint.
+class Callee {
+ public:
+ explicit Callee(Handle<JSFunction> jsfunction) : jsfunction_(jsfunction) {}
+ explicit Callee(FunctionBlueprint const& blueprint) : blueprint_(blueprint) {}
+
+ Handle<SharedFunctionInfo> shared(Isolate* isolate) const {
+ return blueprint_.has_value()
+ ? blueprint_->shared()
+ : handle(jsfunction_.ToHandleChecked()->shared(), isolate);
+ }
+
+  bool HasFeedbackVector() const {
+    return blueprint_.has_value() ||
+           jsfunction_.ToHandleChecked()->has_feedback_vector();
+  }
+
+ CompilationSubject ToCompilationSubject(Isolate* isolate, Zone* zone) const {
+ CHECK(HasFeedbackVector());
+ return blueprint_.has_value()
+ ? CompilationSubject(*blueprint_)
+ : CompilationSubject(jsfunction_.ToHandleChecked(), isolate,
+ zone);
+ }
+
+ private:
+ MaybeHandle<JSFunction> const jsfunction_;
+ base::Optional<FunctionBlueprint> const blueprint_;
+};
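+
+// Note: ProcessSFIForCallOrConstruct accepts both forms and only builds a
+// CompilationSubject (and hence recurses into a child serializer) when
+// HasFeedbackVector() holds.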
+
+// If a list of arguments (hints) is shorter than the function's parameter
+// count, this enum expresses what we know about the missing arguments.
+enum MissingArgumentsPolicy {
+ kMissingArgumentsAreUndefined, // ... as in the JS undefined value
+ kMissingArgumentsAreUnknown,
+};
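+
+// Sketch: for function f(a, b) {}, the plain call f(1) lets the serializer
+// pad b with the undefined-value constant (kMissingArgumentsAreUndefined),
+// whereas the spread call f(...xs) reveals nothing about the tail, so those
+// hints stay empty (kMissingArgumentsAreUnknown).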
+
 // The SerializerForBackgroundCompilation makes sure that the relevant function
 // data (such as bytecode, SharedFunctionInfo, and FeedbackVector) used by
 // later optimizations in the compiler is copied to the heap broker.
class SerializerForBackgroundCompilation {
public:
SerializerForBackgroundCompilation(
- JSHeapBroker* broker, CompilationDependencies* dependencies, Zone* zone,
- Handle<JSFunction> closure, SerializerForBackgroundCompilationFlags flags,
- BailoutId osr_offset);
+ ZoneStats* zone_stats, JSHeapBroker* broker,
+ CompilationDependencies* dependencies, Handle<JSFunction> closure,
+ SerializerForBackgroundCompilationFlags flags, BailoutId osr_offset);
Hints Run(); // NOTE: Returns empty for an already-serialized function.
class Environment;
private:
SerializerForBackgroundCompilation(
- JSHeapBroker* broker, CompilationDependencies* dependencies, Zone* zone,
- CompilationSubject function, base::Optional<Hints> new_target,
- const HintsVector& arguments,
+ ZoneStats* zone_stats, JSHeapBroker* broker,
+ CompilationDependencies* dependencies, CompilationSubject function,
+ base::Optional<Hints> new_target, const HintsVector& arguments,
+ MissingArgumentsPolicy padding,
SerializerForBackgroundCompilationFlags flags);
bool BailoutOnUninitialized(ProcessedFeedback const& feedback);
@@ -365,36 +452,39 @@ class SerializerForBackgroundCompilation {
SUPPORTED_BYTECODE_LIST(DECLARE_VISIT_BYTECODE)
#undef DECLARE_VISIT_BYTECODE
- // Returns whether the callee with the given SFI should be processed further,
- // i.e. whether it's inlineable.
- bool ProcessSFIForCallOrConstruct(Handle<SharedFunctionInfo> shared,
+ void ProcessSFIForCallOrConstruct(Callee const& callee,
+ base::Optional<Hints> new_target,
const HintsVector& arguments,
- SpeculationMode speculation_mode);
- // Returns whether {function} should be serialized for compilation.
- bool ProcessCalleeForCallOrConstruct(Handle<JSFunction> function,
+ SpeculationMode speculation_mode,
+ MissingArgumentsPolicy padding);
+ void ProcessCalleeForCallOrConstruct(Handle<Object> callee,
+ base::Optional<Hints> new_target,
const HintsVector& arguments,
- SpeculationMode speculation_mode);
+ SpeculationMode speculation_mode,
+ MissingArgumentsPolicy padding);
void ProcessCallOrConstruct(Hints callee, base::Optional<Hints> new_target,
const HintsVector& arguments, FeedbackSlot slot,
- bool with_spread = false);
- void ProcessCallVarArgs(ConvertReceiverMode receiver_mode,
- Hints const& callee, interpreter::Register first_reg,
- int reg_count, FeedbackSlot slot,
- bool with_spread = false);
+ MissingArgumentsPolicy padding);
+ void ProcessCallVarArgs(
+ ConvertReceiverMode receiver_mode, Hints const& callee,
+ interpreter::Register first_reg, int reg_count, FeedbackSlot slot,
+ MissingArgumentsPolicy padding = kMissingArgumentsAreUndefined);
void ProcessApiCall(Handle<SharedFunctionInfo> target,
const HintsVector& arguments);
void ProcessReceiverMapForApiCall(FunctionTemplateInfoRef target,
Handle<Map> receiver);
void ProcessBuiltinCall(Handle<SharedFunctionInfo> target,
+ base::Optional<Hints> new_target,
const HintsVector& arguments,
- SpeculationMode speculation_mode);
+ SpeculationMode speculation_mode,
+ MissingArgumentsPolicy padding);
void ProcessJump(interpreter::BytecodeArrayIterator* iterator);
void ProcessKeyedPropertyAccess(Hints const& receiver, Hints const& key,
FeedbackSlot slot, AccessMode access_mode,
bool honor_bailout_on_uninitialized);
- void ProcessNamedPropertyAccess(Hints receiver, NameRef const& name,
+ void ProcessNamedPropertyAccess(Hints const& receiver, NameRef const& name,
FeedbackSlot slot, AccessMode access_mode);
void ProcessNamedAccess(Hints receiver, NamedAccessFeedback const& feedback,
AccessMode access_mode, Hints* new_accumulator_hints);
@@ -411,7 +501,6 @@ class SerializerForBackgroundCompilation {
void ProcessHintsForHasInPrototypeChain(Hints const& instance_hints);
void ProcessHintsForRegExpTest(Hints const& regexp_hints);
PropertyAccessInfo ProcessMapForRegExpTest(MapRef map);
- void ProcessHintsForFunctionCall(Hints const& target_hints);
void ProcessHintsForFunctionBind(Hints const& receiver_hints);
void ProcessHintsForObjectGetPrototype(Hints const& object_hints);
void ProcessConstantForOrdinaryHasInstance(HeapObjectRef const& constructor,
@@ -456,7 +545,8 @@ class SerializerForBackgroundCompilation {
Hints RunChildSerializer(CompilationSubject function,
base::Optional<Hints> new_target,
- const HintsVector& arguments, bool with_spread);
+ const HintsVector& arguments,
+ MissingArgumentsPolicy padding);
// When (forward-)branching bytecodes are encountered, e.g. a conditional
// jump, we call ContributeToJumpTargetEnvironment to "remember" the current
@@ -475,14 +565,14 @@ class SerializerForBackgroundCompilation {
JSHeapBroker* broker() const { return broker_; }
CompilationDependencies* dependencies() const { return dependencies_; }
- Zone* zone() const { return zone_; }
+ Zone* zone() { return zone_scope_.zone(); }
Environment* environment() const { return environment_; }
SerializerForBackgroundCompilationFlags flags() const { return flags_; }
BailoutId osr_offset() const { return osr_offset_; }
JSHeapBroker* const broker_;
CompilationDependencies* const dependencies_;
- Zone* const zone_;
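+  // The serializer owns its zone via a ZoneStats::Scope, so hints allocated
+  // during a (child) run die with the serializer; surviving information is
+  // deep-copied out via Hints::AddFromChildSerializer.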
+ ZoneStats::Scope zone_scope_;
Environment* const environment_;
ZoneUnorderedMap<int, Environment*> jump_target_environments_;
SerializerForBackgroundCompilationFlags const flags_;
@@ -490,11 +580,11 @@ class SerializerForBackgroundCompilation {
};
void RunSerializerForBackgroundCompilation(
- JSHeapBroker* broker, CompilationDependencies* dependencies, Zone* zone,
- Handle<JSFunction> closure, SerializerForBackgroundCompilationFlags flags,
- BailoutId osr_offset) {
- SerializerForBackgroundCompilation serializer(broker, dependencies, zone,
- closure, flags, osr_offset);
+ ZoneStats* zone_stats, JSHeapBroker* broker,
+ CompilationDependencies* dependencies, Handle<JSFunction> closure,
+ SerializerForBackgroundCompilationFlags flags, BailoutId osr_offset) {
+ SerializerForBackgroundCompilation serializer(
+ zone_stats, broker, dependencies, closure, flags, osr_offset);
serializer.Run();
}
@@ -505,14 +595,19 @@ FunctionBlueprint::FunctionBlueprint(Handle<SharedFunctionInfo> shared,
const Hints& context_hints)
: shared_(shared),
feedback_vector_(feedback_vector),
- context_hints_(context_hints) {}
+ context_hints_(context_hints) {
+ // The checked invariant rules out recursion and thus avoids complexity.
+ CHECK(context_hints_.function_blueprints().IsEmpty());
+}
FunctionBlueprint::FunctionBlueprint(Handle<JSFunction> function,
Isolate* isolate, Zone* zone)
: shared_(handle(function->shared(), isolate)),
- feedback_vector_(handle(function->feedback_vector(), isolate)),
- context_hints_(zone) {
- context_hints_.AddConstant(handle(function->context(), isolate));
+ feedback_vector_(function->feedback_vector(), isolate),
+ context_hints_() {
+ context_hints_.AddConstant(handle(function->context(), isolate), zone);
+ // The checked invariant rules out recursion and thus avoids complexity.
+ CHECK(context_hints_.function_blueprints().IsEmpty());
}
CompilationSubject::CompilationSubject(Handle<JSFunction> closure,
@@ -521,25 +616,11 @@ CompilationSubject::CompilationSubject(Handle<JSFunction> closure,
CHECK(closure->has_feedback_vector());
}
-Hints::Hints(Zone* zone)
- : virtual_contexts_(zone),
- constants_(zone),
- maps_(zone),
- function_blueprints_(zone) {}
-
#ifdef ENABLE_SLOW_DCHECKS
-namespace {
-template <typename K, typename Compare>
-bool SetIncludes(ZoneSet<K, Compare> const& lhs,
- ZoneSet<K, Compare> const& rhs) {
- return std::all_of(rhs.cbegin(), rhs.cend(),
- [&](K const& x) { return lhs.find(x) != lhs.cend(); });
-}
-} // namespace
bool Hints::Includes(Hints const& other) const {
- return SetIncludes(constants(), other.constants()) &&
- SetIncludes(function_blueprints(), other.function_blueprints()) &&
- SetIncludes(maps(), other.maps());
+ return constants().Includes(other.constants()) &&
+ function_blueprints().Includes(other.function_blueprints()) &&
+ maps().Includes(other.maps());
}
bool Hints::Equals(Hints const& other) const {
return this->Includes(other) && other.Includes(*this);
@@ -547,8 +628,8 @@ bool Hints::Equals(Hints const& other) const {
#endif
Hints Hints::SingleConstant(Handle<Object> constant, Zone* zone) {
- Hints result(zone);
- result.AddConstant(constant);
+ Hints result;
+ result.AddConstant(constant, zone);
return result;
}
@@ -564,30 +645,49 @@ const VirtualContextsSet& Hints::virtual_contexts() const {
return virtual_contexts_;
}
-void Hints::AddVirtualContext(VirtualContext virtual_context) {
- virtual_contexts_.insert(virtual_context);
+void Hints::AddVirtualContext(VirtualContext virtual_context, Zone* zone) {
+ virtual_contexts_.Add(virtual_context, zone);
}
-void Hints::AddConstant(Handle<Object> constant) {
- constants_.insert(constant);
+void Hints::AddConstant(Handle<Object> constant, Zone* zone) {
+ constants_.Add(constant, zone);
}
-void Hints::AddMap(Handle<Map> map) { maps_.insert(map); }
+void Hints::AddMap(Handle<Map> map, Zone* zone) { maps_.Add(map, zone); }
+
+void Hints::AddFunctionBlueprint(FunctionBlueprint function_blueprint,
+ Zone* zone) {
+ function_blueprints_.Add(function_blueprint, zone);
+}
-void Hints::AddFunctionBlueprint(FunctionBlueprint function_blueprint) {
- function_blueprints_.insert(function_blueprint);
+void Hints::Add(const Hints& other, Zone* zone) {
+ for (auto x : other.constants()) AddConstant(x, zone);
+ for (auto x : other.maps()) AddMap(x, zone);
+ for (auto x : other.function_blueprints()) AddFunctionBlueprint(x, zone);
+ for (auto x : other.virtual_contexts()) AddVirtualContext(x, zone);
}
-void Hints::Add(const Hints& other) {
- for (auto x : other.constants()) AddConstant(x);
- for (auto x : other.maps()) AddMap(x);
- for (auto x : other.function_blueprints()) AddFunctionBlueprint(x);
- for (auto x : other.virtual_contexts()) AddVirtualContext(x);
+void Hints::AddFromChildSerializer(const Hints& other, Zone* zone) {
+ for (auto x : other.constants()) AddConstant(x, zone);
+ for (auto x : other.maps()) AddMap(x, zone);
+ for (auto x : other.virtual_contexts()) AddVirtualContext(x, zone);
+
+  // Adding hints from a child serializer run means copying data out of a
+  // zone that is about to be destroyed. FunctionBlueprints carry
+  // zone-allocated data, so we must make a deep copy to eliminate any
+  // traces of the dying zone.
+ for (auto x : other.function_blueprints()) {
+ Hints new_blueprint_hints;
+ new_blueprint_hints.AddFromChildSerializer(x.context_hints(), zone);
+ FunctionBlueprint new_blueprint(x.shared(), x.feedback_vector(),
+ new_blueprint_hints);
+ AddFunctionBlueprint(new_blueprint, zone);
+ }
}
bool Hints::IsEmpty() const {
- return constants().empty() && maps().empty() &&
- function_blueprints().empty() && virtual_contexts().empty();
+ return constants().IsEmpty() && maps().IsEmpty() &&
+ function_blueprints().IsEmpty() && virtual_contexts().IsEmpty();
}
std::ostream& operator<<(std::ostream& out,
@@ -625,10 +725,10 @@ std::ostream& operator<<(std::ostream& out, const Hints& hints) {
}
void Hints::Clear() {
- virtual_contexts_.clear();
- constants_.clear();
- maps_.clear();
- function_blueprints_.clear();
+ virtual_contexts_.Clear();
+ constants_.Clear();
+ maps_.Clear();
+ function_blueprints_.Clear();
DCHECK(IsEmpty());
}
@@ -636,7 +736,8 @@ class SerializerForBackgroundCompilation::Environment : public ZoneObject {
public:
Environment(Zone* zone, CompilationSubject function);
Environment(Zone* zone, Isolate* isolate, CompilationSubject function,
- base::Optional<Hints> new_target, const HintsVector& arguments);
+ base::Optional<Hints> new_target, const HintsVector& arguments,
+ MissingArgumentsPolicy padding);
bool IsDead() const { return ephemeral_hints_.empty(); }
@@ -648,7 +749,7 @@ class SerializerForBackgroundCompilation::Environment : public ZoneObject {
void Revive() {
DCHECK(IsDead());
- ephemeral_hints_.resize(ephemeral_hints_size(), Hints(zone()));
+ ephemeral_hints_.resize(ephemeral_hints_size(), Hints());
DCHECK(!IsDead());
}
@@ -691,7 +792,6 @@ class SerializerForBackgroundCompilation::Environment : public ZoneObject {
int RegisterToLocalIndex(interpreter::Register reg) const;
- Zone* zone() const { return zone_; }
int parameter_count() const { return parameter_count_; }
int register_count() const { return register_count_; }
@@ -722,24 +822,25 @@ SerializerForBackgroundCompilation::Environment::Environment(
parameter_count_(
function_.shared()->GetBytecodeArray().parameter_count()),
register_count_(function_.shared()->GetBytecodeArray().register_count()),
- closure_hints_(zone),
- current_context_hints_(zone),
- return_value_hints_(zone),
- ephemeral_hints_(ephemeral_hints_size(), Hints(zone), zone) {
+ closure_hints_(),
+ current_context_hints_(),
+ return_value_hints_(),
+ ephemeral_hints_(ephemeral_hints_size(), Hints(), zone) {
Handle<JSFunction> closure;
if (function.closure().ToHandle(&closure)) {
- closure_hints_.AddConstant(closure);
+ closure_hints_.AddConstant(closure, zone);
} else {
- closure_hints_.AddFunctionBlueprint(function.blueprint());
+ closure_hints_.AddFunctionBlueprint(function.blueprint(), zone);
}
// Consume blueprint context hint information.
- current_context_hints().Add(function.blueprint().context_hints());
+ current_context_hints().Add(function.blueprint().context_hints(), zone);
}
SerializerForBackgroundCompilation::Environment::Environment(
Zone* zone, Isolate* isolate, CompilationSubject function,
- base::Optional<Hints> new_target, const HintsVector& arguments)
+ base::Optional<Hints> new_target, const HintsVector& arguments,
+ MissingArgumentsPolicy padding)
: Environment(zone, function) {
// Copy the hints for the actually passed arguments, at most up to
// the parameter_count.
@@ -748,11 +849,14 @@ SerializerForBackgroundCompilation::Environment::Environment(
ephemeral_hints_[i] = arguments[i];
}
- // Pad the rest with "undefined".
- Hints undefined_hint =
- Hints::SingleConstant(isolate->factory()->undefined_value(), zone);
- for (size_t i = arguments.size(); i < param_count; ++i) {
- ephemeral_hints_[i] = undefined_hint;
+ if (padding == kMissingArgumentsAreUndefined) {
+ Hints undefined_hint =
+ Hints::SingleConstant(isolate->factory()->undefined_value(), zone);
+ for (size_t i = arguments.size(); i < param_count; ++i) {
+ ephemeral_hints_[i] = undefined_hint;
+ }
+ } else {
+ DCHECK_EQ(padding, kMissingArgumentsAreUnknown);
}
interpreter::Register new_target_reg =
@@ -762,7 +866,7 @@ SerializerForBackgroundCompilation::Environment::Environment(
if (new_target_reg.is_valid()) {
DCHECK(register_hints(new_target_reg).IsEmpty());
if (new_target.has_value()) {
- register_hints(new_target_reg).Add(*new_target);
+ register_hints(new_target_reg).Add(*new_target, zone);
}
}
}
@@ -785,10 +889,10 @@ void SerializerForBackgroundCompilation::Environment::Merge(
CHECK_EQ(ephemeral_hints_.size(), other->ephemeral_hints_.size());
for (size_t i = 0; i < ephemeral_hints_.size(); ++i) {
- ephemeral_hints_[i].Add(other->ephemeral_hints_[i]);
+ ephemeral_hints_[i].Add(other->ephemeral_hints_[i], zone_);
}
- return_value_hints_.Add(other->return_value_hints_);
+ return_value_hints_.Add(other->return_value_hints_, zone_);
}
std::ostream& operator<<(
@@ -845,30 +949,33 @@ int SerializerForBackgroundCompilation::Environment::RegisterToLocalIndex(
}
SerializerForBackgroundCompilation::SerializerForBackgroundCompilation(
- JSHeapBroker* broker, CompilationDependencies* dependencies, Zone* zone,
- Handle<JSFunction> closure, SerializerForBackgroundCompilationFlags flags,
- BailoutId osr_offset)
+ ZoneStats* zone_stats, JSHeapBroker* broker,
+ CompilationDependencies* dependencies, Handle<JSFunction> closure,
+ SerializerForBackgroundCompilationFlags flags, BailoutId osr_offset)
: broker_(broker),
dependencies_(dependencies),
- zone_(zone),
- environment_(new (zone) Environment(
- zone, CompilationSubject(closure, broker_->isolate(), zone))),
- jump_target_environments_(zone),
+ zone_scope_(zone_stats, ZONE_NAME),
+ environment_(new (zone()) Environment(
+ zone(), CompilationSubject(closure, broker_->isolate(), zone()))),
+ jump_target_environments_(zone()),
flags_(flags),
osr_offset_(osr_offset) {
JSFunctionRef(broker, closure).Serialize();
}
SerializerForBackgroundCompilation::SerializerForBackgroundCompilation(
- JSHeapBroker* broker, CompilationDependencies* dependencies, Zone* zone,
- CompilationSubject function, base::Optional<Hints> new_target,
- const HintsVector& arguments, SerializerForBackgroundCompilationFlags flags)
+ ZoneStats* zone_stats, JSHeapBroker* broker,
+ CompilationDependencies* dependencies, CompilationSubject function,
+ base::Optional<Hints> new_target, const HintsVector& arguments,
+ MissingArgumentsPolicy padding,
+ SerializerForBackgroundCompilationFlags flags)
: broker_(broker),
dependencies_(dependencies),
- zone_(zone),
- environment_(new (zone) Environment(zone, broker_->isolate(), function,
- new_target, arguments)),
- jump_target_environments_(zone),
+ zone_scope_(zone_stats, ZONE_NAME),
+ environment_(new (zone())
+ Environment(zone(), broker_->isolate(), function,
+ new_target, arguments, padding)),
+ jump_target_environments_(zone()),
flags_(flags),
osr_offset_(BailoutId::None()) {
TraceScope tracer(
@@ -902,13 +1009,15 @@ bool SerializerForBackgroundCompilation::BailoutOnUninitialized(
Hints SerializerForBackgroundCompilation::Run() {
TraceScope tracer(broker(), this, "SerializerForBackgroundCompilation::Run");
+ TRACE_BROKER_MEMORY(broker(), "[serializer start] Broker zone usage: "
+ << broker()->zone()->allocation_size());
SharedFunctionInfoRef shared(broker(), environment()->function().shared());
FeedbackVectorRef feedback_vector_ref(broker(), feedback_vector());
if (shared.IsSerializedForCompilation(feedback_vector_ref)) {
TRACE_BROKER(broker(), "Already ran serializer for SharedFunctionInfo "
<< Brief(*shared.object())
<< ", bailing out.\n");
- return Hints(zone());
+ return Hints();
}
shared.SetSerializedForCompilation(feedback_vector_ref);
@@ -923,6 +1032,9 @@ Hints SerializerForBackgroundCompilation::Run() {
feedback_vector_ref.Serialize();
TraverseBytecode();
+
+ TRACE_BROKER_MEMORY(broker(), "[serializer end] Broker zone usage: "
+ << broker()->zone()->allocation_size());
return environment()->return_value_hints();
}
@@ -1036,12 +1148,19 @@ void SerializerForBackgroundCompilation::TraverseBytecode() {
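+// GetIterator fuses the @@iterator property load with the subsequent call
+// (roughly obj[Symbol.iterator]()), so both feedback slots get processed:
+// the load slot (operand 1) and the call slot (operand 2), the latter with
+// the receiver as the sole argument hint.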
void SerializerForBackgroundCompilation::VisitGetIterator(
BytecodeArrayIterator* iterator) {
- AccessMode mode = AccessMode::kLoad;
Hints const& receiver =
environment()->register_hints(iterator->GetRegisterOperand(0));
Handle<Name> name = broker()->isolate()->factory()->iterator_symbol();
- FeedbackSlot slot = iterator->GetSlotOperand(1);
- ProcessNamedPropertyAccess(receiver, NameRef(broker(), name), slot, mode);
+ FeedbackSlot load_slot = iterator->GetSlotOperand(1);
+ ProcessNamedPropertyAccess(receiver, NameRef(broker(), name), load_slot,
+ AccessMode::kLoad);
+ if (environment()->IsDead()) return;
+
+ const Hints& callee = Hints();
+ FeedbackSlot call_slot = iterator->GetSlotOperand(2);
+ HintsVector parameters({receiver}, zone());
+ ProcessCallOrConstruct(callee, base::nullopt, parameters, call_slot,
+ kMissingArgumentsAreUndefined);
}
void SerializerForBackgroundCompilation::VisitGetSuperConstructor(
@@ -1057,72 +1176,74 @@ void SerializerForBackgroundCompilation::VisitGetSuperConstructor(
map.SerializePrototype();
ObjectRef proto = map.prototype();
if (proto.IsHeapObject() && proto.AsHeapObject().map().is_constructor()) {
- environment()->register_hints(dst).AddConstant(proto.object());
+ environment()->register_hints(dst).AddConstant(proto.object(), zone());
}
}
}
void SerializerForBackgroundCompilation::VisitGetTemplateObject(
BytecodeArrayIterator* iterator) {
- ObjectRef description(
+ TemplateObjectDescriptionRef description(
broker(), iterator->GetConstantForIndexOperand(0, broker()->isolate()));
FeedbackSlot slot = iterator->GetSlotOperand(1);
- FeedbackVectorRef feedback_vector_ref(broker(), feedback_vector());
+ FeedbackSource source(feedback_vector(), slot);
SharedFunctionInfoRef shared(broker(), environment()->function().shared());
- JSArrayRef template_object =
- shared.GetTemplateObject(description, feedback_vector_ref, slot,
- SerializationPolicy::kSerializeIfNeeded);
+ JSArrayRef template_object = shared.GetTemplateObject(
+ description, source, SerializationPolicy::kSerializeIfNeeded);
environment()->accumulator_hints().Clear();
- environment()->accumulator_hints().AddConstant(template_object.object());
+ environment()->accumulator_hints().AddConstant(template_object.object(),
+ zone());
}
void SerializerForBackgroundCompilation::VisitLdaTrue(
BytecodeArrayIterator* iterator) {
environment()->accumulator_hints().Clear();
environment()->accumulator_hints().AddConstant(
- broker()->isolate()->factory()->true_value());
+ broker()->isolate()->factory()->true_value(), zone());
}
void SerializerForBackgroundCompilation::VisitLdaFalse(
BytecodeArrayIterator* iterator) {
environment()->accumulator_hints().Clear();
environment()->accumulator_hints().AddConstant(
- broker()->isolate()->factory()->false_value());
+ broker()->isolate()->factory()->false_value(), zone());
}
void SerializerForBackgroundCompilation::VisitLdaTheHole(
BytecodeArrayIterator* iterator) {
environment()->accumulator_hints().Clear();
environment()->accumulator_hints().AddConstant(
- broker()->isolate()->factory()->the_hole_value());
+ broker()->isolate()->factory()->the_hole_value(), zone());
}
void SerializerForBackgroundCompilation::VisitLdaUndefined(
BytecodeArrayIterator* iterator) {
environment()->accumulator_hints().Clear();
environment()->accumulator_hints().AddConstant(
- broker()->isolate()->factory()->undefined_value());
+ broker()->isolate()->factory()->undefined_value(), zone());
}
void SerializerForBackgroundCompilation::VisitLdaNull(
BytecodeArrayIterator* iterator) {
environment()->accumulator_hints().Clear();
environment()->accumulator_hints().AddConstant(
- broker()->isolate()->factory()->null_value());
+ broker()->isolate()->factory()->null_value(), zone());
}
void SerializerForBackgroundCompilation::VisitLdaZero(
BytecodeArrayIterator* iterator) {
environment()->accumulator_hints().Clear();
environment()->accumulator_hints().AddConstant(
- handle(Smi::FromInt(0), broker()->isolate()));
+ handle(Smi::FromInt(0), broker()->isolate()), zone());
}
void SerializerForBackgroundCompilation::VisitLdaSmi(
BytecodeArrayIterator* iterator) {
environment()->accumulator_hints().Clear();
- environment()->accumulator_hints().AddConstant(handle(
- Smi::FromInt(iterator->GetImmediateOperand(0)), broker()->isolate()));
+ environment()->accumulator_hints().AddConstant(
+ handle(Smi::FromInt(iterator->GetImmediateOperand(0)),
+ broker()->isolate()),
+ zone());
}
void SerializerForBackgroundCompilation::VisitInvokeIntrinsic(
@@ -1215,7 +1336,7 @@ void SerializerForBackgroundCompilation::VisitLdaConstant(
ObjectRef object(
broker(), iterator->GetConstantForIndexOperand(0, broker()->isolate()));
environment()->accumulator_hints().Clear();
- environment()->accumulator_hints().AddConstant(object.object());
+ environment()->accumulator_hints().AddConstant(object.object(), zone());
}
void SerializerForBackgroundCompilation::VisitPushContext(
@@ -1225,12 +1346,12 @@ void SerializerForBackgroundCompilation::VisitPushContext(
Hints& saved_context_hints =
environment()->register_hints(iterator->GetRegisterOperand(0));
saved_context_hints.Clear();
- saved_context_hints.Add(current_context_hints);
+ saved_context_hints.Add(current_context_hints, zone());
// New context is in the accumulator. Put those hints into the current context
// register hints.
current_context_hints.Clear();
- current_context_hints.Add(environment()->accumulator_hints());
+ current_context_hints.Add(environment()->accumulator_hints(), zone());
}
void SerializerForBackgroundCompilation::VisitPopContext(
@@ -1239,7 +1360,7 @@ void SerializerForBackgroundCompilation::VisitPopContext(
Hints& new_context_hints =
environment()->register_hints(iterator->GetRegisterOperand(0));
environment()->current_context_hints().Clear();
- environment()->current_context_hints().Add(new_context_hints);
+ environment()->current_context_hints().Add(new_context_hints, zone());
}
void SerializerForBackgroundCompilation::ProcessImmutableLoad(
@@ -1251,7 +1372,7 @@ void SerializerForBackgroundCompilation::ProcessImmutableLoad(
// If requested, record the object as a hint for the result value.
if (result_hints != nullptr && slot_value.has_value()) {
- result_hints->AddConstant(slot_value.value().object());
+ result_hints->AddConstant(slot_value.value().object(), zone());
}
}
@@ -1294,11 +1415,11 @@ void SerializerForBackgroundCompilation::VisitLdaContextSlot(
environment()->register_hints(iterator->GetRegisterOperand(0));
const int slot = iterator->GetIndexOperand(1);
const int depth = iterator->GetUnsignedImmediateOperand(2);
- Hints new_accumulator_hints(zone());
+ Hints new_accumulator_hints;
ProcessContextAccess(context_hints, slot, depth, kIgnoreSlot,
&new_accumulator_hints);
environment()->accumulator_hints().Clear();
- environment()->accumulator_hints().Add(new_accumulator_hints);
+ environment()->accumulator_hints().Add(new_accumulator_hints, zone());
}
void SerializerForBackgroundCompilation::VisitLdaCurrentContextSlot(
@@ -1306,11 +1427,11 @@ void SerializerForBackgroundCompilation::VisitLdaCurrentContextSlot(
const int slot = iterator->GetIndexOperand(0);
const int depth = 0;
Hints const& context_hints = environment()->current_context_hints();
- Hints new_accumulator_hints(zone());
+ Hints new_accumulator_hints;
ProcessContextAccess(context_hints, slot, depth, kIgnoreSlot,
&new_accumulator_hints);
environment()->accumulator_hints().Clear();
- environment()->accumulator_hints().Add(new_accumulator_hints);
+ environment()->accumulator_hints().Add(new_accumulator_hints, zone());
}
void SerializerForBackgroundCompilation::VisitLdaImmutableContextSlot(
@@ -1319,11 +1440,11 @@ void SerializerForBackgroundCompilation::VisitLdaImmutableContextSlot(
const int depth = iterator->GetUnsignedImmediateOperand(2);
Hints const& context_hints =
environment()->register_hints(iterator->GetRegisterOperand(0));
- Hints new_accumulator_hints(zone());
+ Hints new_accumulator_hints;
ProcessContextAccess(context_hints, slot, depth, kSerializeSlot,
&new_accumulator_hints);
environment()->accumulator_hints().Clear();
- environment()->accumulator_hints().Add(new_accumulator_hints);
+ environment()->accumulator_hints().Add(new_accumulator_hints, zone());
}
void SerializerForBackgroundCompilation::VisitLdaImmutableCurrentContextSlot(
@@ -1331,11 +1452,11 @@ void SerializerForBackgroundCompilation::VisitLdaImmutableCurrentContextSlot(
const int slot = iterator->GetIndexOperand(0);
const int depth = 0;
Hints const& context_hints = environment()->current_context_hints();
- Hints new_accumulator_hints(zone());
+ Hints new_accumulator_hints;
ProcessContextAccess(context_hints, slot, depth, kSerializeSlot,
&new_accumulator_hints);
environment()->accumulator_hints().Clear();
- environment()->accumulator_hints().Add(new_accumulator_hints);
+ environment()->accumulator_hints().Add(new_accumulator_hints, zone());
}
void SerializerForBackgroundCompilation::ProcessModuleVariableAccess(
@@ -1344,7 +1465,7 @@ void SerializerForBackgroundCompilation::ProcessModuleVariableAccess(
const int depth = iterator->GetUnsignedImmediateOperand(1);
Hints const& context_hints = environment()->current_context_hints();
- Hints result_hints(zone());
+ Hints result_hints;
ProcessContextAccess(context_hints, slot, depth, kSerializeSlot,
&result_hints);
for (Handle<Object> constant : result_hints.constants()) {
@@ -1392,14 +1513,15 @@ void SerializerForBackgroundCompilation::VisitLdar(
BytecodeArrayIterator* iterator) {
environment()->accumulator_hints().Clear();
environment()->accumulator_hints().Add(
- environment()->register_hints(iterator->GetRegisterOperand(0)));
+ environment()->register_hints(iterator->GetRegisterOperand(0)), zone());
}
void SerializerForBackgroundCompilation::VisitStar(
BytecodeArrayIterator* iterator) {
interpreter::Register reg = iterator->GetRegisterOperand(0);
environment()->register_hints(reg).Clear();
- environment()->register_hints(reg).Add(environment()->accumulator_hints());
+ environment()->register_hints(reg).Add(environment()->accumulator_hints(),
+ zone());
}
void SerializerForBackgroundCompilation::VisitMov(
@@ -1407,7 +1529,8 @@ void SerializerForBackgroundCompilation::VisitMov(
interpreter::Register src = iterator->GetRegisterOperand(0);
interpreter::Register dst = iterator->GetRegisterOperand(1);
environment()->register_hints(dst).Clear();
- environment()->register_hints(dst).Add(environment()->register_hints(src));
+ environment()->register_hints(dst).Add(environment()->register_hints(src),
+ zone());
}
void SerializerForBackgroundCompilation::VisitCreateRegExpLiteral(
@@ -1415,6 +1538,9 @@ void SerializerForBackgroundCompilation::VisitCreateRegExpLiteral(
Handle<String> constant_pattern = Handle<String>::cast(
iterator->GetConstantForIndexOperand(0, broker()->isolate()));
StringRef description(broker(), constant_pattern);
+ FeedbackSlot slot = iterator->GetSlotOperand(1);
+ FeedbackSource source(feedback_vector(), slot);
+ broker()->ProcessFeedbackForRegExpLiteral(source);
environment()->accumulator_hints().Clear();
}
@@ -1425,6 +1551,17 @@ void SerializerForBackgroundCompilation::VisitCreateArrayLiteral(
iterator->GetConstantForIndexOperand(0, broker()->isolate()));
ArrayBoilerplateDescriptionRef description(broker(),
array_boilerplate_description);
+ FeedbackSlot slot = iterator->GetSlotOperand(1);
+ FeedbackSource source(feedback_vector(), slot);
+ broker()->ProcessFeedbackForArrayOrObjectLiteral(source);
+ environment()->accumulator_hints().Clear();
+}
+
+void SerializerForBackgroundCompilation::VisitCreateEmptyArrayLiteral(
+ BytecodeArrayIterator* iterator) {
+ FeedbackSlot slot = iterator->GetSlotOperand(0);
+ FeedbackSource source(feedback_vector(), slot);
+ broker()->ProcessFeedbackForArrayOrObjectLiteral(source);
environment()->accumulator_hints().Clear();
}
@@ -1434,6 +1571,9 @@ void SerializerForBackgroundCompilation::VisitCreateObjectLiteral(
Handle<ObjectBoilerplateDescription>::cast(
iterator->GetConstantForIndexOperand(0, broker()->isolate()));
ObjectBoilerplateDescriptionRef description(broker(), constant_properties);
+ FeedbackSlot slot = iterator->GetSlotOperand(1);
+ FeedbackSource source(feedback_vector(), slot);
+ broker()->ProcessFeedbackForArrayOrObjectLiteral(source);
environment()->accumulator_hints().Clear();
}
@@ -1490,7 +1630,8 @@ void SerializerForBackgroundCompilation::ProcessCreateContext(
for (auto x : current_context_hints.constants()) {
if (x->IsContext()) {
Handle<Context> as_context(Handle<Context>::cast(x));
- accumulator_hints.AddVirtualContext(VirtualContext(1, as_context));
+ accumulator_hints.AddVirtualContext(VirtualContext(1, as_context),
+ zone());
}
}
@@ -1498,7 +1639,7 @@ void SerializerForBackgroundCompilation::ProcessCreateContext(
// it of distance {existing distance} + 1.
for (auto x : current_context_hints.virtual_contexts()) {
accumulator_hints.AddVirtualContext(
- VirtualContext(x.distance + 1, x.context));
+ VirtualContext(x.distance + 1, x.context), zone());
}
}
@@ -1518,7 +1659,7 @@ void SerializerForBackgroundCompilation::VisitCreateClosure(
FunctionBlueprint blueprint(shared,
Handle<FeedbackVector>::cast(cell_value),
environment()->current_context_hints());
- environment()->accumulator_hints().AddFunctionBlueprint(blueprint);
+ environment()->accumulator_hints().AddFunctionBlueprint(blueprint, zone());
}
}
@@ -1542,7 +1683,8 @@ void SerializerForBackgroundCompilation::VisitCallUndefinedReceiver0(
Hints receiver = Hints::SingleConstant(
broker()->isolate()->factory()->undefined_value(), zone());
HintsVector parameters({receiver}, zone());
- ProcessCallOrConstruct(callee, base::nullopt, parameters, slot);
+ ProcessCallOrConstruct(callee, base::nullopt, parameters, slot,
+ kMissingArgumentsAreUndefined);
}
void SerializerForBackgroundCompilation::VisitCallUndefinedReceiver1(
@@ -1556,7 +1698,8 @@ void SerializerForBackgroundCompilation::VisitCallUndefinedReceiver1(
Hints receiver = Hints::SingleConstant(
broker()->isolate()->factory()->undefined_value(), zone());
HintsVector parameters({receiver, arg0}, zone());
- ProcessCallOrConstruct(callee, base::nullopt, parameters, slot);
+ ProcessCallOrConstruct(callee, base::nullopt, parameters, slot,
+ kMissingArgumentsAreUndefined);
}
void SerializerForBackgroundCompilation::VisitCallUndefinedReceiver2(
@@ -1572,7 +1715,8 @@ void SerializerForBackgroundCompilation::VisitCallUndefinedReceiver2(
Hints receiver = Hints::SingleConstant(
broker()->isolate()->factory()->undefined_value(), zone());
HintsVector parameters({receiver, arg0, arg1}, zone());
- ProcessCallOrConstruct(callee, base::nullopt, parameters, slot);
+ ProcessCallOrConstruct(callee, base::nullopt, parameters, slot,
+ kMissingArgumentsAreUndefined);
}
void SerializerForBackgroundCompilation::VisitCallAnyReceiver(
@@ -1616,7 +1760,8 @@ void SerializerForBackgroundCompilation::VisitCallProperty0(
FeedbackSlot slot = iterator->GetSlotOperand(2);
HintsVector parameters({receiver}, zone());
- ProcessCallOrConstruct(callee, base::nullopt, parameters, slot);
+ ProcessCallOrConstruct(callee, base::nullopt, parameters, slot,
+ kMissingArgumentsAreUndefined);
}
void SerializerForBackgroundCompilation::VisitCallProperty1(
@@ -1630,7 +1775,8 @@ void SerializerForBackgroundCompilation::VisitCallProperty1(
FeedbackSlot slot = iterator->GetSlotOperand(3);
HintsVector parameters({receiver, arg0}, zone());
- ProcessCallOrConstruct(callee, base::nullopt, parameters, slot);
+ ProcessCallOrConstruct(callee, base::nullopt, parameters, slot,
+ kMissingArgumentsAreUndefined);
}
void SerializerForBackgroundCompilation::VisitCallProperty2(
@@ -1646,7 +1792,8 @@ void SerializerForBackgroundCompilation::VisitCallProperty2(
FeedbackSlot slot = iterator->GetSlotOperand(4);
HintsVector parameters({receiver, arg0, arg1}, zone());
- ProcessCallOrConstruct(callee, base::nullopt, parameters, slot);
+ ProcessCallOrConstruct(callee, base::nullopt, parameters, slot,
+ kMissingArgumentsAreUndefined);
}
void SerializerForBackgroundCompilation::VisitCallWithSpread(
@@ -1657,7 +1804,7 @@ void SerializerForBackgroundCompilation::VisitCallWithSpread(
int reg_count = static_cast<int>(iterator->GetRegisterCountOperand(2));
FeedbackSlot slot = iterator->GetSlotOperand(3);
ProcessCallVarArgs(ConvertReceiverMode::kAny, callee, first_reg, reg_count,
- slot, true);
+ slot, kMissingArgumentsAreUnknown);
}
void SerializerForBackgroundCompilation::VisitCallJSRuntime(
@@ -1677,61 +1824,45 @@ void SerializerForBackgroundCompilation::VisitCallJSRuntime(
Hints SerializerForBackgroundCompilation::RunChildSerializer(
CompilationSubject function, base::Optional<Hints> new_target,
- const HintsVector& arguments, bool with_spread) {
- if (with_spread) {
- DCHECK_LT(0, arguments.size());
- // Pad the missing arguments in case we were called with spread operator.
- // Drop the last actually passed argument, which contains the spread.
- // We don't know what the spread element produces. Therefore we pretend
- // that the function is called with the maximal number of parameters and
- // that we have no information about the parameters that were not
- // explicitly provided.
- HintsVector padded = arguments;
- padded.pop_back(); // Remove the spread element.
- // Fill the rest with empty hints.
- padded.resize(
- function.blueprint().shared()->GetBytecodeArray().parameter_count(),
- Hints(zone()));
- return RunChildSerializer(function, new_target, padded, false);
- }
-
+ const HintsVector& arguments, MissingArgumentsPolicy padding) {
SerializerForBackgroundCompilation child_serializer(
- broker(), dependencies(), zone(), function, new_target, arguments,
- flags());
- return child_serializer.Run();
-}
-
-bool SerializerForBackgroundCompilation::ProcessSFIForCallOrConstruct(
- Handle<SharedFunctionInfo> shared, const HintsVector& arguments,
- SpeculationMode speculation_mode) {
+ zone_scope_.zone_stats(), broker(), dependencies(), function, new_target,
+ arguments, padding, flags());
+ // The Hints returned by the call to Run are allocated in the zone
+  // created by the child serializer. Adding those hints to a Hints
+  // object created in our zone preserves the information.
+ Hints hints;
+ hints.AddFromChildSerializer(child_serializer.Run(), zone());
+ return hints;
+}
+
+void SerializerForBackgroundCompilation::ProcessSFIForCallOrConstruct(
+ Callee const& callee, base::Optional<Hints> new_target,
+ const HintsVector& arguments, SpeculationMode speculation_mode,
+ MissingArgumentsPolicy padding) {
+ Handle<SharedFunctionInfo> shared = callee.shared(broker()->isolate());
if (shared->IsApiFunction()) {
ProcessApiCall(shared, arguments);
DCHECK(!shared->IsInlineable());
} else if (shared->HasBuiltinId()) {
- ProcessBuiltinCall(shared, arguments, speculation_mode);
+ ProcessBuiltinCall(shared, new_target, arguments, speculation_mode,
+ padding);
DCHECK(!shared->IsInlineable());
+ } else if (shared->IsInlineable() && callee.HasFeedbackVector()) {
+ CompilationSubject subject =
+ callee.ToCompilationSubject(broker()->isolate(), zone());
+ environment()->accumulator_hints().Add(
+ RunChildSerializer(subject, new_target, arguments, padding), zone());
}
- return shared->IsInlineable();
-}
-
-bool SerializerForBackgroundCompilation::ProcessCalleeForCallOrConstruct(
- Handle<JSFunction> function, const HintsVector& arguments,
- SpeculationMode speculation_mode) {
- JSFunctionRef(broker(), function).Serialize();
-
- Handle<SharedFunctionInfo> shared(function->shared(), broker()->isolate());
-
- return ProcessSFIForCallOrConstruct(shared, arguments, speculation_mode) &&
- function->has_feedback_vector();
}
namespace {
-// Returns the innermost bound target, if it's a JSFunction and inserts
-// all bound arguments and {original_arguments} into {expanded_arguments}
-// in the appropriate order.
-MaybeHandle<JSFunction> UnrollBoundFunction(
- JSBoundFunctionRef const& bound_function, JSHeapBroker* broker,
- const HintsVector& original_arguments, HintsVector* expanded_arguments) {
+// Returns the innermost bound target and inserts all bound arguments and
+// {original_arguments} into {expanded_arguments} in the appropriate order.
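+// Sketch: for g = f.bind(r, 1, 2), unrolling yields the innermost target f,
+// with the bound argument hints ordered as the eventual call would receive
+// them (innermost first) ahead of {original_arguments}.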
+JSReceiverRef UnrollBoundFunction(JSBoundFunctionRef const& bound_function,
+ JSHeapBroker* broker,
+ const HintsVector& original_arguments,
+ HintsVector* expanded_arguments) {
DCHECK(expanded_arguments->empty());
JSReceiverRef target = bound_function.AsJSReceiver();
@@ -1750,8 +1881,6 @@ MaybeHandle<JSFunction> UnrollBoundFunction(
reversed_bound_arguments.push_back(arg);
}
- if (!target.IsJSFunction()) return MaybeHandle<JSFunction>();
-
expanded_arguments->insert(expanded_arguments->end(),
reversed_bound_arguments.rbegin(),
reversed_bound_arguments.rend());
@@ -1759,13 +1888,38 @@ MaybeHandle<JSFunction> UnrollBoundFunction(
original_arguments.begin(),
original_arguments.end());
- return target.AsJSFunction().object();
+ return target;
}
} // namespace
+void SerializerForBackgroundCompilation::ProcessCalleeForCallOrConstruct(
+ Handle<Object> callee, base::Optional<Hints> new_target,
+ const HintsVector& arguments, SpeculationMode speculation_mode,
+ MissingArgumentsPolicy padding) {
+ const HintsVector* actual_arguments = &arguments;
+ HintsVector expanded_arguments(zone());
+ if (callee->IsJSBoundFunction()) {
+ JSBoundFunctionRef bound_function(broker(),
+ Handle<JSBoundFunction>::cast(callee));
+ bound_function.Serialize();
+ callee = UnrollBoundFunction(bound_function, broker(), arguments,
+ &expanded_arguments)
+ .object();
+ actual_arguments = &expanded_arguments;
+ }
+ if (!callee->IsJSFunction()) return;
+
+ JSFunctionRef function(broker(), Handle<JSFunction>::cast(callee));
+ function.Serialize();
+ Callee new_callee(function.object());
+ ProcessSFIForCallOrConstruct(new_callee, new_target, *actual_arguments,
+ speculation_mode, padding);
+}
+
void SerializerForBackgroundCompilation::ProcessCallOrConstruct(
Hints callee, base::Optional<Hints> new_target,
- const HintsVector& arguments, FeedbackSlot slot, bool with_spread) {
+ const HintsVector& arguments, FeedbackSlot slot,
+ MissingArgumentsPolicy padding) {
SpeculationMode speculation_mode = SpeculationMode::kDisallowSpeculation;
if (!slot.IsInvalid()) {
FeedbackSource source(feedback_vector(), slot);
@@ -1782,11 +1936,11 @@ void SerializerForBackgroundCompilation::ProcessCallOrConstruct(
// site, and it may make sense to add the Array JSFunction constant.
if (new_target.has_value()) {
// Construct; feedback is new_target, which often is also the callee.
- new_target->AddConstant(target->object());
- callee.AddConstant(target->object());
+ new_target->AddConstant(target->object(), zone());
+ callee.AddConstant(target->object(), zone());
} else {
// Call; target is callee.
- callee.AddConstant(target->object());
+ callee.AddConstant(target->object(), zone());
}
}
}
@@ -1795,50 +1949,22 @@ void SerializerForBackgroundCompilation::ProcessCallOrConstruct(
environment()->accumulator_hints().Clear();
// For JSCallReducer::ReduceJSCall and JSCallReducer::ReduceJSConstruct.
- for (auto hint : callee.constants()) {
- const HintsVector* actual_arguments = &arguments;
- Handle<JSFunction> function;
- HintsVector expanded_arguments(zone());
- if (hint->IsJSBoundFunction()) {
- JSBoundFunctionRef bound_function(broker(),
- Handle<JSBoundFunction>::cast(hint));
- bound_function.Serialize();
-
- MaybeHandle<JSFunction> maybe_function = UnrollBoundFunction(
- bound_function, broker(), arguments, &expanded_arguments);
- if (maybe_function.is_null()) continue;
- function = maybe_function.ToHandleChecked();
- actual_arguments = &expanded_arguments;
- } else if (hint->IsJSFunction()) {
- function = Handle<JSFunction>::cast(hint);
- } else {
- continue;
- }
-
- if (ProcessCalleeForCallOrConstruct(function, *actual_arguments,
- speculation_mode)) {
- environment()->accumulator_hints().Add(RunChildSerializer(
- CompilationSubject(function, broker()->isolate(), zone()), new_target,
- *actual_arguments, with_spread));
- }
+ for (auto constant : callee.constants()) {
+ ProcessCalleeForCallOrConstruct(constant, new_target, arguments,
+ speculation_mode, padding);
}
// For JSCallReducer::ReduceJSCall and JSCallReducer::ReduceJSConstruct.
for (auto hint : callee.function_blueprints()) {
- Handle<SharedFunctionInfo> shared = hint.shared();
- if (!ProcessSFIForCallOrConstruct(shared, arguments, speculation_mode)) {
- continue;
- }
-
- environment()->accumulator_hints().Add(RunChildSerializer(
- CompilationSubject(hint), new_target, arguments, with_spread));
+ ProcessSFIForCallOrConstruct(Callee(hint), new_target, arguments,
+ speculation_mode, padding);
}
}
void SerializerForBackgroundCompilation::ProcessCallVarArgs(
ConvertReceiverMode receiver_mode, Hints const& callee,
interpreter::Register first_reg, int reg_count, FeedbackSlot slot,
- bool with_spread) {
+ MissingArgumentsPolicy padding) {
HintsVector arguments(zone());
// The receiver is either given in the first register or it is implicitly
// the {undefined} value.
@@ -1848,7 +1974,7 @@ void SerializerForBackgroundCompilation::ProcessCallVarArgs(
}
environment()->ExportRegisterHints(first_reg, reg_count, &arguments);
- ProcessCallOrConstruct(callee, base::nullopt, arguments, slot);
+ ProcessCallOrConstruct(callee, base::nullopt, arguments, slot, padding);
}
void SerializerForBackgroundCompilation::ProcessApiCall(
@@ -1866,17 +1992,17 @@ void SerializerForBackgroundCompilation::ProcessApiCall(
FunctionTemplateInfoRef target_template_info(
broker(), handle(target->function_data(), broker()->isolate()));
if (!target_template_info.has_call_code()) return;
-
target_template_info.SerializeCallCode();
SharedFunctionInfoRef target_ref(broker(), target);
target_ref.SerializeFunctionTemplateInfo();
if (target_template_info.accept_any_receiver() &&
- target_template_info.is_signature_undefined())
+ target_template_info.is_signature_undefined()) {
return;
+ }
- CHECK_GE(arguments.size(), 1);
+ if (arguments.empty()) return;
Hints const& receiver_hints = arguments[0];
for (auto hint : receiver_hints.constants()) {
if (hint->IsUndefined()) {
@@ -1920,8 +2046,9 @@ void SerializerForBackgroundCompilation::ProcessHintsForObjectCreate(
}
void SerializerForBackgroundCompilation::ProcessBuiltinCall(
- Handle<SharedFunctionInfo> target, const HintsVector& arguments,
- SpeculationMode speculation_mode) {
+ Handle<SharedFunctionInfo> target, base::Optional<Hints> new_target,
+ const HintsVector& arguments, SpeculationMode speculation_mode,
+ MissingArgumentsPolicy padding) {
DCHECK(target->HasBuiltinId());
const int builtin_id = target->builtin_id();
const char* name = Builtins::name(builtin_id);
@@ -1963,20 +2090,31 @@ void SerializerForBackgroundCompilation::ProcessBuiltinCall(
case Builtins::kPromiseResolveTrampoline:
// For JSCallReducer::ReducePromiseInternalResolve and
// JSNativeContextSpecialization::ReduceJSResolvePromise.
- if (arguments.size() >= 2) {
- Hints const& resolution_hints = arguments[1];
+ if (arguments.size() >= 1) {
+ Hints const& resolution_hints =
+ arguments.size() >= 2
+ ? arguments[1]
+ : Hints::SingleConstant(
+ broker()->isolate()->factory()->undefined_value(),
+ zone());
ProcessHintsForPromiseResolve(resolution_hints);
}
break;
case Builtins::kPromiseInternalResolve:
// For JSCallReducer::ReducePromiseInternalResolve and
// JSNativeContextSpecialization::ReduceJSResolvePromise.
- if (arguments.size() >= 3) {
- Hints const& resolution_hints = arguments[2];
+ if (arguments.size() >= 2) {
+ Hints const& resolution_hints =
+ arguments.size() >= 3
+ ? arguments[2]
+ : Hints::SingleConstant(
+ broker()->isolate()->factory()->undefined_value(),
+ zone());
ProcessHintsForPromiseResolve(resolution_hints);
}
break;
case Builtins::kRegExpPrototypeTest:
+ case Builtins::kRegExpPrototypeTestFast:
// For JSCallReducer::ReduceRegExpPrototypeTest.
if (arguments.size() >= 1 &&
speculation_mode != SpeculationMode::kDisallowSpeculation) {
@@ -1990,35 +2128,105 @@ void SerializerForBackgroundCompilation::ProcessBuiltinCall(
case Builtins::kArrayPrototypeFind:
case Builtins::kArrayPrototypeFindIndex:
case Builtins::kArrayMap:
+ case Builtins::kArraySome:
+ if (arguments.size() >= 2 &&
+ speculation_mode != SpeculationMode::kDisallowSpeculation) {
+ Hints const& callback = arguments[1];
+ // "Call(callbackfn, T, « kValue, k, O »)"
+ HintsVector new_arguments(zone());
+ new_arguments.push_back(
+ arguments.size() < 3
+ ? Hints::SingleConstant(
+ broker()->isolate()->factory()->undefined_value(), zone())
+ : arguments[2]); // T
+ new_arguments.push_back(Hints()); // kValue
+ new_arguments.push_back(Hints()); // k
+ new_arguments.push_back(arguments[0]); // O
+ for (auto constant : callback.constants()) {
+ ProcessCalleeForCallOrConstruct(constant, base::nullopt,
+ new_arguments,
+ SpeculationMode::kDisallowSpeculation,
+ kMissingArgumentsAreUndefined);
+ }
+ }
+ break;
case Builtins::kArrayReduce:
case Builtins::kArrayReduceRight:
- case Builtins::kArraySome:
if (arguments.size() >= 2 &&
speculation_mode != SpeculationMode::kDisallowSpeculation) {
- Hints const& callback_hints = arguments[1];
- ProcessHintsForFunctionCall(callback_hints);
+ Hints const& callback = arguments[1];
+ // "Call(callbackfn, undefined, « accumulator, kValue, k, O »)"
+ HintsVector new_arguments(zone());
+ new_arguments.push_back(Hints::SingleConstant(
+ broker()->isolate()->factory()->undefined_value(), zone()));
+ new_arguments.push_back(Hints()); // accumulator
+ new_arguments.push_back(Hints()); // kValue
+ new_arguments.push_back(Hints()); // k
+ new_arguments.push_back(arguments[0]); // O
+ for (auto constant : callback.constants()) {
+ ProcessCalleeForCallOrConstruct(constant, base::nullopt,
+ new_arguments,
+ SpeculationMode::kDisallowSpeculation,
+ kMissingArgumentsAreUndefined);
+ }
}
break;
+ // TODO(neis): At least for Array* we should look at blueprints too.
+ // TODO(neis): Might need something like a FunctionBlueprint but for
+ // creating bound functions rather than creating closures.
case Builtins::kFunctionPrototypeApply:
- case Builtins::kFunctionPrototypeCall:
+ if (arguments.size() >= 1) {
+ // Drop hints for all arguments except the user-given receiver.
+ Hints new_receiver =
+ arguments.size() >= 2
+ ? arguments[1]
+ : Hints::SingleConstant(
+ broker()->isolate()->factory()->undefined_value(),
+ zone());
+ HintsVector new_arguments({new_receiver}, zone());
+ for (auto constant : arguments[0].constants()) {
+ ProcessCalleeForCallOrConstruct(constant, base::nullopt,
+ new_arguments,
+ SpeculationMode::kDisallowSpeculation,
+ kMissingArgumentsAreUnknown);
+ }
+ }
+ break;
case Builtins::kPromiseConstructor:
- // TODO(mslekova): Since the reducer for all these introduce a
- // JSCall/JSConstruct that will again get optimized by the JSCallReducer,
- // we basically might have to do all the serialization that we do for that
- // here as well. The only difference is that the new JSCall/JSConstruct
- // has speculation disabled, causing the JSCallReducer to do much less
- // work. To account for that, ProcessCallOrConstruct should have a way of
- // taking the speculation mode as an argument rather than getting that
- // from the feedback. (Also applies to Reflect.apply and
- // Reflect.construct.)
if (arguments.size() >= 1) {
- ProcessHintsForFunctionCall(arguments[0]);
+ // "Call(executor, undefined, « resolvingFunctions.[[Resolve]],
+ // resolvingFunctions.[[Reject]] »)"
+ HintsVector new_arguments(
+ {Hints::SingleConstant(
+ broker()->isolate()->factory()->undefined_value(), zone())},
+ zone());
+ for (auto constant : arguments[0].constants()) {
+ ProcessCalleeForCallOrConstruct(constant, base::nullopt,
+ new_arguments,
+ SpeculationMode::kDisallowSpeculation,
+ kMissingArgumentsAreUnknown);
+ }
+ }
+ break;
+ case Builtins::kFunctionPrototypeCall:
+ if (arguments.size() >= 1) {
+ HintsVector new_arguments(arguments.begin() + 1, arguments.end(),
+ zone());
+ for (auto constant : arguments[0].constants()) {
+ ProcessCalleeForCallOrConstruct(
+ constant, base::nullopt, new_arguments,
+ SpeculationMode::kDisallowSpeculation, padding);
+ }
}
break;
case Builtins::kReflectApply:
case Builtins::kReflectConstruct:
if (arguments.size() >= 2) {
- ProcessHintsForFunctionCall(arguments[1]);
+ for (auto constant : arguments[1].constants()) {
+ if (constant->IsJSFunction()) {
+ JSFunctionRef(broker(), constant).Serialize();
+ }
+ }
}
break;
case Builtins::kObjectPrototypeIsPrototypeOf:
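Each Array-builtin case above follows the same recipe: build a fresh argument-hints vector shaped like the spec's Call(...) and feed every constant callee through ProcessCalleeForCallOrConstruct with speculation disabled. A minimal self-contained sketch of the reshaping step for the forEach/map shape, using a hypothetical string-based Hints stand-in rather than V8's actual class:

```cpp
#include <iostream>
#include <string>
#include <vector>

// Hypothetical stand-in for the serializer's Hints; the real class tracks
// constants, maps, and more. Here a hint is just a label, "" = unknown.
using Hints = std::string;

// Reshape arguments for "Call(callbackfn, T, « kValue, k, O »)", mirroring
// the handling above: a missing T defaults to undefined, kValue and k are
// unknown, and O is the original receiver.
std::vector<Hints> ReshapeForArrayCallback(const std::vector<Hints>& args) {
  std::vector<Hints> new_args;
  new_args.push_back(args.size() < 3 ? "undefined" : args[2]);  // T
  new_args.push_back("");                                       // kValue
  new_args.push_back("");                                       // k
  new_args.push_back(args[0]);                                  // O
  return new_args;
}

int main() {
  for (const Hints& h : ReshapeForArrayCallback({"receiver", "callback"})) {
    std::cout << (h.empty() ? "<unknown>" : h) << "\n";
  }
}
```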
@@ -2181,13 +2389,6 @@ void SerializerForBackgroundCompilation::ProcessHintsForRegExpTest(
}
}
-void SerializerForBackgroundCompilation::ProcessHintsForFunctionCall(
- Hints const& target_hints) {
- for (auto constant : target_hints.constants()) {
- if (constant->IsJSFunction()) JSFunctionRef(broker(), constant).Serialize();
- }
-}
-
namespace {
void ProcessMapForFunctionBind(MapRef map) {
map.SerializePrototype();
@@ -2195,8 +2396,9 @@ void ProcessMapForFunctionBind(MapRef map) {
JSFunction::kNameDescriptorIndex) +
1;
if (map.NumberOfOwnDescriptors() >= min_nof_descriptors) {
- map.SerializeOwnDescriptor(JSFunction::kLengthDescriptorIndex);
- map.SerializeOwnDescriptor(JSFunction::kNameDescriptorIndex);
+ map.SerializeOwnDescriptor(
+ InternalIndex(JSFunction::kLengthDescriptorIndex));
+ map.SerializeOwnDescriptor(InternalIndex(JSFunction::kNameDescriptorIndex));
}
}
} // namespace
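The hunk above wraps raw descriptor indices in InternalIndex. As an illustration of the idea (not V8's actual InternalIndex interface), a typed index wrapper rejects accidental raw-integer call sites at compile time:

```cpp
#include <cassert>

// Minimal typed-index wrapper in the spirit of InternalIndex: the explicit
// constructor stops raw ints and other index kinds from being passed where
// a descriptor index is expected.
class DescriptorIndex {
 public:
  explicit constexpr DescriptorIndex(int value) : value_(value) {}
  constexpr int value() const { return value_; }

 private:
  int value_;
};

void SerializeOwnDescriptor(DescriptorIndex index) {
  assert(index.value() >= 0);
}

int main() {
  SerializeOwnDescriptor(DescriptorIndex(0));
  // SerializeOwnDescriptor(0);  // would not compile: no implicit conversion
}
```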
@@ -2261,7 +2463,8 @@ void SerializerForBackgroundCompilation::ProcessJump(
void SerializerForBackgroundCompilation::VisitReturn(
BytecodeArrayIterator* iterator) {
- environment()->return_value_hints().Add(environment()->accumulator_hints());
+ environment()->return_value_hints().Add(environment()->accumulator_hints(),
+ zone());
environment()->ClearEphemeralHints();
}
@@ -2301,7 +2504,8 @@ void SerializerForBackgroundCompilation::VisitConstruct(
HintsVector arguments(zone());
environment()->ExportRegisterHints(first_reg, reg_count, &arguments);
- ProcessCallOrConstruct(callee, new_target, arguments, slot);
+ ProcessCallOrConstruct(callee, new_target, arguments, slot,
+ kMissingArgumentsAreUndefined);
}
void SerializerForBackgroundCompilation::VisitConstructWithSpread(
@@ -2315,8 +2519,10 @@ void SerializerForBackgroundCompilation::VisitConstructWithSpread(
HintsVector arguments(zone());
environment()->ExportRegisterHints(first_reg, reg_count, &arguments);
-
- ProcessCallOrConstruct(callee, new_target, arguments, slot, true);
+ DCHECK(!arguments.empty());
+ arguments.pop_back(); // Remove the spread element.
+ ProcessCallOrConstruct(callee, new_target, arguments, slot,
+ kMissingArgumentsAreUnknown);
}
void SerializerForBackgroundCompilation::ProcessGlobalAccess(FeedbackSlot slot,
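VisitConstructWithSpread above now drops the spread operand from the hint vector and treats trailing arguments as unknown, since the spread's contents cannot be modeled statically. A toy version of that step, reusing the illustrative Hints alias from earlier:

```cpp
#include <string>
#include <vector>

using Hints = std::string;  // illustrative stand-in, as before

// Hypothetical mirror of VisitConstructWithSpread: the spread element cannot
// be modeled statically, so it is removed from the hint vector and the callee
// is processed as if the trailing arguments were unknown.
std::vector<Hints> DropSpreadElement(std::vector<Hints> args) {
  // The bytecode guarantees at least the spread operand is present.
  args.pop_back();
  return args;
}
```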
@@ -2333,7 +2539,7 @@ void SerializerForBackgroundCompilation::ProcessGlobalAccess(FeedbackSlot slot,
base::Optional<ObjectRef> value =
feedback.AsGlobalAccess().GetConstantHint();
if (value.has_value()) {
- environment()->accumulator_hints().AddConstant(value->object());
+ environment()->accumulator_hints().AddConstant(value->object(), zone());
}
} else {
DCHECK(feedback.IsInsufficient());
@@ -2480,9 +2686,16 @@ SerializerForBackgroundCompilation::ProcessMapForNamedPropertyAccess(
receiver_map.SerializeRootMap();
// For JSNativeContextSpecialization::ReduceNamedAccess.
- if (receiver_map.IsMapOfTargetGlobalProxy()) {
- broker()->target_native_context().global_proxy_object().GetPropertyCell(
+ JSGlobalProxyRef global_proxy =
+ broker()->target_native_context().global_proxy_object();
+ JSGlobalObjectRef global_object =
+ broker()->target_native_context().global_object();
+ if (receiver_map.equals(global_proxy.map())) {
+ base::Optional<PropertyCellRef> cell = global_object.GetPropertyCell(
name, SerializationPolicy::kSerializeIfNeeded);
+ if (access_mode == AccessMode::kLoad && cell.has_value()) {
+ new_accumulator_hints->AddConstant(cell->value().object(), zone());
+ }
}
PropertyAccessInfo access_info = broker()->GetPropertyAccessInfo(
@@ -2515,6 +2728,10 @@ SerializerForBackgroundCompilation::ProcessMapForNamedPropertyAccess(
FunctionTemplateInfoRef fti(broker(), access_info.constant());
if (fti.has_call_code()) fti.SerializeCallCode();
}
+ } else if (access_info.IsModuleExport()) {
+ // For JSNativeContextSpecialization::BuildPropertyLoad
+ DCHECK(!access_info.constant().is_null());
+ CellRef(broker(), access_info.constant());
}
// For PropertyAccessBuilder::TryBuildLoadConstantDataField
@@ -2535,7 +2752,7 @@ SerializerForBackgroundCompilation::ProcessMapForNamedPropertyAccess(
access_info.field_representation(), access_info.field_index(),
SerializationPolicy::kSerializeIfNeeded));
if (constant.has_value()) {
- new_accumulator_hints->AddConstant(constant->object());
+ new_accumulator_hints->AddConstant(constant->object(), zone());
}
}
}
@@ -2565,7 +2782,7 @@ void SerializerForBackgroundCompilation::ProcessKeyedPropertyAccess(
return;
}
- Hints new_accumulator_hints(zone());
+ Hints new_accumulator_hints;
switch (feedback.kind()) {
case ProcessedFeedback::kElementAccess:
ProcessElementAccess(receiver, key, feedback.AsElementAccess(),
@@ -2583,14 +2800,14 @@ void SerializerForBackgroundCompilation::ProcessKeyedPropertyAccess(
if (access_mode == AccessMode::kLoad) {
environment()->accumulator_hints().Clear();
- environment()->accumulator_hints().Add(new_accumulator_hints);
+ environment()->accumulator_hints().Add(new_accumulator_hints, zone());
} else {
DCHECK(new_accumulator_hints.IsEmpty());
}
}
void SerializerForBackgroundCompilation::ProcessNamedPropertyAccess(
- Hints receiver, NameRef const& name, FeedbackSlot slot,
+ Hints const& receiver, NameRef const& name, FeedbackSlot slot,
AccessMode access_mode) {
if (slot.IsInvalid() || feedback_vector().is_null()) return;
FeedbackSource source(feedback_vector(), slot);
@@ -2598,12 +2815,13 @@ void SerializerForBackgroundCompilation::ProcessNamedPropertyAccess(
broker()->ProcessFeedbackForPropertyAccess(source, access_mode, name);
if (BailoutOnUninitialized(feedback)) return;
- Hints new_accumulator_hints(zone());
+ Hints new_accumulator_hints;
switch (feedback.kind()) {
case ProcessedFeedback::kNamedAccess:
DCHECK(name.equals(feedback.AsNamedAccess().name()));
ProcessNamedAccess(receiver, feedback.AsNamedAccess(), access_mode,
&new_accumulator_hints);
+ // TODO(neis): Propagate feedback maps to receiver hints.
break;
case ProcessedFeedback::kInsufficient:
break;
@@ -2613,7 +2831,7 @@ void SerializerForBackgroundCompilation::ProcessNamedPropertyAccess(
if (access_mode == AccessMode::kLoad) {
environment()->accumulator_hints().Clear();
- environment()->accumulator_hints().Add(new_accumulator_hints);
+ environment()->accumulator_hints().Add(new_accumulator_hints, zone());
} else {
DCHECK(new_accumulator_hints.IsEmpty());
}
@@ -2622,7 +2840,7 @@ void SerializerForBackgroundCompilation::ProcessNamedPropertyAccess(
void SerializerForBackgroundCompilation::ProcessNamedAccess(
Hints receiver, NamedAccessFeedback const& feedback, AccessMode access_mode,
Hints* new_accumulator_hints) {
- for (Handle<Map> map : feedback.AsNamedAccess().maps()) {
+ for (Handle<Map> map : feedback.maps()) {
MapRef map_ref(broker(), map);
ProcessMapForNamedPropertyAccess(map_ref, feedback.name(), access_mode,
base::nullopt, new_accumulator_hints);
@@ -2635,8 +2853,6 @@ void SerializerForBackgroundCompilation::ProcessNamedAccess(
base::nullopt, new_accumulator_hints);
}
- JSGlobalProxyRef global_proxy =
- broker()->target_native_context().global_proxy_object();
for (Handle<Object> hint : receiver.constants()) {
ObjectRef object(broker(), hint);
if (access_mode == AccessMode::kLoad && object.IsJSObject()) {
@@ -2645,13 +2861,6 @@ void SerializerForBackgroundCompilation::ProcessNamedAccess(
object.AsJSObject(),
new_accumulator_hints);
}
- // For JSNativeContextSpecialization::ReduceNamedAccessFromNexus.
- if (object.equals(global_proxy)) {
- // TODO(neis): Record accumulator hint? Also for string.length and maybe
- // more.
- global_proxy.GetPropertyCell(feedback.name(),
- SerializationPolicy::kSerializeIfNeeded);
- }
// For JSNativeContextSpecialization::ReduceJSLoadNamed.
if (access_mode == AccessMode::kLoad && object.IsJSFunction() &&
feedback.name().equals(ObjectRef(
@@ -2659,9 +2868,12 @@ void SerializerForBackgroundCompilation::ProcessNamedAccess(
JSFunctionRef function = object.AsJSFunction();
function.Serialize();
if (new_accumulator_hints != nullptr && function.has_prototype()) {
- new_accumulator_hints->AddConstant(function.prototype().object());
+ new_accumulator_hints->AddConstant(function.prototype().object(),
+ zone());
}
}
+ // TODO(neis): Also record accumulator hint for string.length and maybe
+ // more?
}
}
@@ -2841,7 +3053,7 @@ void SerializerForBackgroundCompilation::VisitTestInstanceOf(
environment()->register_hints(iterator->GetRegisterOperand(0));
Hints rhs = environment()->accumulator_hints();
FeedbackSlot slot = iterator->GetSlotOperand(1);
- Hints new_accumulator_hints(zone());
+ Hints new_accumulator_hints;
if (slot.IsInvalid() || feedback_vector().is_null()) return;
FeedbackSource source(feedback_vector(), slot);
@@ -2853,7 +3065,7 @@ void SerializerForBackgroundCompilation::VisitTestInstanceOf(
InstanceOfFeedback const& rhs_feedback = feedback.AsInstanceOf();
if (rhs_feedback.value().has_value()) {
Handle<JSObject> constructor = rhs_feedback.value()->object();
- rhs.AddConstant(constructor);
+ rhs.AddConstant(constructor, zone());
}
}
@@ -2865,7 +3077,7 @@ void SerializerForBackgroundCompilation::VisitTestInstanceOf(
if (walk_prototypes) ProcessHintsForHasInPrototypeChain(lhs);
environment()->accumulator_hints().Clear();
- environment()->accumulator_hints().Add(new_accumulator_hints);
+ environment()->accumulator_hints().Add(new_accumulator_hints, zone());
}
void SerializerForBackgroundCompilation::VisitToNumeric(
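The recurring change from `Hints new_accumulator_hints(zone());` to a default-constructed `Hints`, with the zone threaded through `Add`/`AddConstant` instead, makes empty hint sets allocation-free. A hedged sketch of that lazy pattern, using a hypothetical Arena in place of v8::internal::Zone:

```cpp
#include <deque>
#include <vector>

// Hypothetical bump arena standing in for v8::internal::Zone. A deque keeps
// pointers to existing elements stable across push_back.
class Arena {
 public:
  std::vector<int>* NewVector() {
    storage_.emplace_back();
    return &storage_.back();
  }

 private:
  std::deque<std::vector<int>> storage_;
};

// Default-constructible hints that stay allocation-free until the first Add:
// passing the zone at the use site lets empty hints avoid touching it.
class LazyHints {
 public:
  bool IsEmpty() const { return data_ == nullptr || data_->empty(); }
  void Add(int constant, Arena* zone) {
    if (data_ == nullptr) data_ = zone->NewVector();
    data_->push_back(constant);
  }

 private:
  std::vector<int>* data_ = nullptr;
};

int main() {
  Arena zone;
  LazyHints hints;       // no allocation yet
  hints.Add(42, &zone);  // allocates on first use
  return hints.IsEmpty() ? 1 : 0;
}
```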
diff --git a/deps/v8/src/compiler/serializer-for-background-compilation.h b/deps/v8/src/compiler/serializer-for-background-compilation.h
index 881ed61a55..8f7883eeba 100644
--- a/deps/v8/src/compiler/serializer-for-background-compilation.h
+++ b/deps/v8/src/compiler/serializer-for-background-compilation.h
@@ -17,6 +17,7 @@ namespace compiler {
class CompilationDependencies;
class JSHeapBroker;
+class ZoneStats;
enum class SerializerForBackgroundCompilationFlag : uint8_t {
kBailoutOnUninitialized = 1 << 0,
@@ -27,9 +28,9 @@ using SerializerForBackgroundCompilationFlags =
base::Flags<SerializerForBackgroundCompilationFlag>;
void RunSerializerForBackgroundCompilation(
- JSHeapBroker* broker, CompilationDependencies* dependencies, Zone* zone,
- Handle<JSFunction> closure, SerializerForBackgroundCompilationFlags flags,
- BailoutId osr_offset);
+ ZoneStats* zone_stats, JSHeapBroker* broker,
+ CompilationDependencies* dependencies, Handle<JSFunction> closure,
+ SerializerForBackgroundCompilationFlags flags, BailoutId osr_offset);
} // namespace compiler
} // namespace internal
diff --git a/deps/v8/src/compiler/simd-scalar-lowering.cc b/deps/v8/src/compiler/simd-scalar-lowering.cc
index 783f3bcc11..2781cc248f 100644
--- a/deps/v8/src/compiler/simd-scalar-lowering.cc
+++ b/deps/v8/src/compiler/simd-scalar-lowering.cc
@@ -132,6 +132,7 @@ void SimdScalarLowering::LowerGraph() {
V(F32x4UConvertI32x4) \
V(F32x4Abs) \
V(F32x4Neg) \
+ V(F32x4Sqrt) \
V(F32x4RecipApprox) \
V(F32x4RecipSqrtApprox) \
V(F32x4Add) \
@@ -210,6 +211,7 @@ void SimdScalarLowering::LowerGraph() {
V(I8x16LeS) \
V(I8x16LtU) \
V(I8x16LeU) \
+ V(S8x16Swizzle) \
V(S8x16Shuffle)
MachineType SimdScalarLowering::MachineTypeFrom(SimdType simdType) {
@@ -940,6 +942,28 @@ void SimdScalarLowering::LowerNode(Node* node) {
}
break;
}
+ case IrOpcode::kSimd128ReverseBytes: {
+ DCHECK_EQ(1, node->InputCount());
+ bool is_float = ReplacementType(node->InputAt(0)) == SimdType::kFloat32x4;
+ replacements_[node->id()].type =
+ is_float ? SimdType::kFloat32x4 : SimdType::kInt32x4;
+ Node** rep = GetReplacementsWithType(
+ node->InputAt(0),
+ is_float ? SimdType::kFloat32x4 : SimdType::kInt32x4);
+ Node* rep_node[kNumLanes32];
+ for (int i = 0; i < kNumLanes32; ++i) {
+ Node* temp = is_float ? graph()->NewNode(
+ machine()->BitcastFloat32ToInt32(), rep[i])
+ : rep[i];
+ temp = graph()->NewNode(machine()->Word32ReverseBytes(), temp);
+ rep_node[kNumLanes32 - 1 - i] =
+ is_float
+ ? graph()->NewNode(machine()->BitcastInt32ToFloat32(), temp)
+ : temp;
+ }
+ ReplaceNode(node, rep_node, kNumLanes32);
+ break;
+ }
case IrOpcode::kLoad:
case IrOpcode::kUnalignedLoad:
case IrOpcode::kProtectedLoad: {
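The kSimd128ReverseBytes lowering above splits the vector into four 32-bit lanes, byte-swaps each, and stores the result into the mirrored lane. A scalar model of that equivalence (illustrative, assuming little-endian lane numbering):

```cpp
#include <array>
#include <cstdint>

// Byte-swap one 32-bit lane.
static uint32_t Bswap32(uint32_t v) {
  return (v >> 24) | ((v >> 8) & 0x0000FF00u) | ((v << 8) & 0x00FF0000u) |
         (v << 24);
}

// Reversing all 16 bytes of a 128-bit vector equals byte-swapping each
// 32-bit lane and writing the result into the mirrored lane position,
// matching rep_node[kNumLanes32 - 1 - i] above.
std::array<uint32_t, 4> Simd128ReverseBytes(std::array<uint32_t, 4> lanes) {
  std::array<uint32_t, 4> out{};
  for (int i = 0; i < 4; ++i) out[3 - i] = Bswap32(lanes[i]);
  return out;
}
```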
@@ -1219,6 +1243,7 @@ void SimdScalarLowering::LowerNode(Node* node) {
}
F32X4_UNOP_CASE(Abs)
F32X4_UNOP_CASE(Neg)
+ F32X4_UNOP_CASE(Sqrt)
#undef F32X4_UNOP_CASE
case IrOpcode::kF32x4RecipApprox:
case IrOpcode::kF32x4RecipSqrtApprox: {
@@ -1368,6 +1393,45 @@ void SimdScalarLowering::LowerNode(Node* node) {
ReplaceNode(node, rep_node, num_lanes);
break;
}
+ case IrOpcode::kS8x16Swizzle: {
+ DCHECK_EQ(2, node->InputCount());
+ Node** rep_left = GetReplacementsWithType(node->InputAt(0), rep_type);
+ Node** indices = GetReplacementsWithType(node->InputAt(1), rep_type);
+ Node** rep_nodes = zone()->NewArray<Node*>(num_lanes);
+ Node* stack_slot = graph()->NewNode(
+ machine()->StackSlot(MachineRepresentation::kSimd128));
+
+ // Push all num_lanes values into stack slot.
+ const Operator* store_op = machine()->Store(
+ StoreRepresentation(MachineRepresentation::kWord8, kNoWriteBarrier));
+ Node* effect_input = graph()->start();
+ for (int i = num_lanes - 1; i >= 0; i--) {
+ // We want all the stores to happen before any of the loads below, so
+ // connect them via an effect edge from store i-1 to store i.
+ Node* store =
+ graph()->NewNode(store_op, stack_slot, mcgraph_->Int32Constant(i),
+ rep_left[i], effect_input, graph()->start());
+ effect_input = store;
+ }
+
+ for (int i = num_lanes - 1; i >= 0; i--) {
+ // Only select a lane when the index is < num_lanes; otherwise write 0 to
+ // the lane. Use a Uint32 comparison to take care of negative indices.
+ Diamond d(graph(), common(),
+ graph()->NewNode(machine()->Uint32LessThan(), indices[i],
+ mcgraph_->Int32Constant(num_lanes)));
+
+ Node* load =
+ graph()->NewNode(machine()->Load(LoadRepresentation::Uint8()),
+ stack_slot, indices[i], effect_input, d.if_true);
+
+ rep_nodes[i] = d.Phi(MachineRepresentation::kWord8, load,
+ mcgraph_->Int32Constant(0));
+ }
+
+ ReplaceNode(node, rep_nodes, num_lanes);
+ break;
+ }
case IrOpcode::kS8x16Shuffle: {
DCHECK_EQ(2, node->InputCount());
const uint8_t* shuffle = S8x16ShuffleOf(node->op());
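The kS8x16Swizzle lowering spills the source lanes to a stack slot and guards each load with an unsigned bounds check so that negative indices select zero. The same semantics as a self-contained scalar function:

```cpp
#include <array>
#include <cstdint>

// Scalar model of S8x16Swizzle: each output byte selects src[index] when the
// index is in range, else 0. Comparing as unsigned makes negative indices
// fall into the "write 0" branch, mirroring the Uint32LessThan check above.
std::array<uint8_t, 16> S8x16Swizzle(const std::array<uint8_t, 16>& src,
                                     const std::array<int8_t, 16>& idx) {
  std::array<uint8_t, 16> out{};
  for (int i = 0; i < 16; ++i) {
    uint32_t j = static_cast<uint32_t>(static_cast<int32_t>(idx[i]));
    out[i] = (j < 16) ? src[j] : 0;
  }
  return out;
}
```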
diff --git a/deps/v8/src/compiler/simplified-lowering.cc b/deps/v8/src/compiler/simplified-lowering.cc
index 1ca7bfe707..fadc9bf6d9 100644
--- a/deps/v8/src/compiler/simplified-lowering.cc
+++ b/deps/v8/src/compiler/simplified-lowering.cc
@@ -1197,7 +1197,7 @@ class RepresentationSelector {
// TODO(nicohartmann): Remove, once the deoptimizer can rematerialize
// truncated BigInts.
if (TypeOf(input).Is(Type::BigInt())) {
- ProcessInput(node, i, UseInfo::AnyTagged());
+ ConvertInput(node, i, UseInfo::AnyTagged());
}
(*types)[i] =
@@ -1220,11 +1220,22 @@ class RepresentationSelector {
// Accumulator is a special flower - we need to remember its type in
// a singleton typed-state-values node (as if it was a singleton
// state-values node).
+ Node* accumulator = node->InputAt(2);
if (propagate()) {
- EnqueueInput(node, 2, UseInfo::Any());
+ // TODO(nicohartmann): Remove, once the deoptimizer can rematerialize
+ // truncated BigInts.
+ if (TypeOf(accumulator).Is(Type::BigInt())) {
+ EnqueueInput(node, 2, UseInfo::AnyTagged());
+ } else {
+ EnqueueInput(node, 2, UseInfo::Any());
+ }
} else if (lower()) {
+ // TODO(nicohartmann): Remove, once the deoptimizer can rematerialize
+ // truncated BigInts.
+ if (TypeOf(accumulator).Is(Type::BigInt())) {
+ ConvertInput(node, 2, UseInfo::AnyTagged());
+ }
Zone* zone = jsgraph_->zone();
- Node* accumulator = node->InputAt(2);
if (accumulator == jsgraph_->OptimizedOutConstant()) {
node->ReplaceInput(2, jsgraph_->SingleDeadTypedStateValues());
} else {
@@ -1237,7 +1248,7 @@ class RepresentationSelector {
node->ReplaceInput(
2, jsgraph_->graph()->NewNode(jsgraph_->common()->TypedStateValues(
types, SparseInputMask::Dense()),
- accumulator));
+ node->InputAt(2)));
}
}
@@ -2667,7 +2678,11 @@ class RepresentationSelector {
case IrOpcode::kReferenceEqual: {
VisitBinop(node, UseInfo::AnyTagged(), MachineRepresentation::kBit);
if (lower()) {
- NodeProperties::ChangeOp(node, lowering->machine()->WordEqual());
+ if (COMPRESS_POINTERS_BOOL) {
+ NodeProperties::ChangeOp(node, lowering->machine()->Word32Equal());
+ } else {
+ NodeProperties::ChangeOp(node, lowering->machine()->WordEqual());
+ }
}
return;
}
@@ -2894,6 +2909,18 @@ class RepresentationSelector {
SetOutput(node, MachineRepresentation::kTaggedPointer);
return;
}
+ case IrOpcode::kLoadMessage: {
+ if (truncation.IsUnused()) return VisitUnused(node);
+ VisitUnop(node, UseInfo::Word(), MachineRepresentation::kTagged);
+ return;
+ }
+ case IrOpcode::kStoreMessage: {
+ ProcessInput(node, 0, UseInfo::Word());
+ ProcessInput(node, 1, UseInfo::AnyTagged());
+ ProcessRemainingInputs(node, 2);
+ SetOutput(node, MachineRepresentation::kNone);
+ return;
+ }
case IrOpcode::kLoadFieldByIndex: {
if (truncation.IsUnused()) return VisitUnused(node);
VisitBinop(node, UseInfo::AnyTagged(), UseInfo::TruncatingWord32(),
@@ -2945,6 +2972,11 @@ class RepresentationSelector {
access.machine_type.representation());
return;
}
+ case IrOpcode::kLoadStackArgument: {
+ if (truncation.IsUnused()) return VisitUnused(node);
+ VisitBinop(node, UseInfo::Word(), MachineRepresentation::kTagged);
+ return;
+ }
case IrOpcode::kStoreElement: {
ElementAccess access = ElementAccessOf(node->op());
Node* value_node = node->InputAt(2);
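The ReferenceEqual change above picks Word32Equal under pointer compression. A hedged model of why that is sound: with compression, a tagged value is a 32-bit offset into the heap cage, so identity reduces to a 32-bit compare (illustrative only, not V8's actual representation code):

```cpp
#include <cstdint>

// Stand-in for COMPRESS_POINTERS_BOOL.
constexpr bool kCompressPointers = true;

// Reference identity on tagged values: a 32-bit compare suffices when
// pointers are compressed, otherwise a full word compare is needed.
bool ReferenceEqual(uintptr_t a, uintptr_t b) {
  if (kCompressPointers) {
    return static_cast<uint32_t>(a) == static_cast<uint32_t>(b);
  }
  return a == b;
}
```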
diff --git a/deps/v8/src/compiler/simplified-operator-reducer.cc b/deps/v8/src/compiler/simplified-operator-reducer.cc
index 885a86286e..0f293d2b38 100644
--- a/deps/v8/src/compiler/simplified-operator-reducer.cc
+++ b/deps/v8/src/compiler/simplified-operator-reducer.cc
@@ -155,23 +155,6 @@ Reduction SimplifiedOperatorReducer::Reduce(Node* node) {
Node* new_node = graph()->NewNode(
simplified()->ChangeInt31ToCompressedSigned(), m.InputAt(0));
return Replace(new_node);
- } else if (m.IsCheckedInt32ToTaggedSigned()) {
- // Create a new checked node that outputs CompressedSigned values, with
- // an explicit decompression after it.
- Node* new_checked = graph()->CloneNode(m.node());
- NodeProperties::ChangeOp(
- new_checked, simplified()->CheckedInt32ToCompressedSigned(
- CheckParametersOf(m.node()->op()).feedback()));
- Node* new_decompression = graph()->NewNode(
- machine()->ChangeCompressedSignedToTaggedSigned(), new_checked);
-
- // For all uses of the old checked node, instead insert the new "checked
- // + decompression". Also, update control and effect.
- ReplaceWithValue(m.node(), new_decompression, new_checked, new_checked);
-
- // In the current node, we can skip the decompression since we are going
- // to have a Decompression + Compression combo.
- return Replace(new_checked);
}
break;
}
diff --git a/deps/v8/src/compiler/simplified-operator.cc b/deps/v8/src/compiler/simplified-operator.cc
index 6b86a95e01..63d24274ec 100644
--- a/deps/v8/src/compiler/simplified-operator.cc
+++ b/deps/v8/src/compiler/simplified-operator.cc
@@ -1149,6 +1149,17 @@ struct SimplifiedOperatorGlobalCache final {
};
LoadFieldByIndexOperator kLoadFieldByIndex;
+ struct LoadStackArgumentOperator final : public Operator {
+ LoadStackArgumentOperator()
+ : Operator( // --
+ IrOpcode::kLoadStackArgument, // opcode
+ Operator::kNoDeopt | Operator::kNoThrow |
+ Operator::kNoWrite, // flags
+ "LoadStackArgument", // name
+ 2, 1, 1, 1, 1, 0) {} // counts
+ };
+ LoadStackArgumentOperator kLoadStackArgument;
+
#define SPECULATIVE_NUMBER_BINOP(Name) \
template <NumberOperationHint kHint> \
struct Name##Operator final : public Operator1<NumberOperationHint> { \
@@ -1754,6 +1765,24 @@ SPECULATIVE_NUMBER_BINOP_LIST(SPECULATIVE_NUMBER_BINOP)
ACCESS_OP_LIST(ACCESS)
#undef ACCESS
+const Operator* SimplifiedOperatorBuilder::LoadMessage() {
+ return new (zone())
+ Operator(IrOpcode::kLoadMessage,
+ Operator::kNoDeopt | Operator::kNoThrow | Operator::kNoWrite,
+ "LoadMessage", 1, 1, 1, 1, 1, 0);
+}
+
+const Operator* SimplifiedOperatorBuilder::StoreMessage() {
+ return new (zone())
+ Operator(IrOpcode::kStoreMessage,
+ Operator::kNoDeopt | Operator::kNoThrow | Operator::kNoRead,
+ "StoreMessage", 2, 1, 1, 0, 1, 0);
+}
+
+const Operator* SimplifiedOperatorBuilder::LoadStackArgument() {
+ return &cache_.kLoadStackArgument;
+}
+
const Operator* SimplifiedOperatorBuilder::TransitionAndStoreElement(
Handle<Map> double_map, Handle<Map> fast_map) {
TransitionAndStoreElementParameters parameters(double_map, fast_map);
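The integer counts passed to the Operator constructors above are easy to misread. Assuming they follow V8's usual (value, effect, control) input/output ordering (an assumption worth checking against operator.h), they decode as in this sketch:

```cpp
// Hypothetical breakdown of the trailing count arguments used above.
struct OperatorCounts {
  int value_in, effect_in, control_in;
  int value_out, effect_out, control_out;
};

// "LoadMessage", 1, 1, 1, 1, 1, 0: one value input, threaded through effect
// and control, producing one value and one effect.
constexpr OperatorCounts kLoadMessageCounts{1, 1, 1, 1, 1, 0};

// "StoreMessage", 2, 1, 1, 0, 1, 0: two value inputs (address, value) and
// no value output.
constexpr OperatorCounts kStoreMessageCounts{2, 1, 1, 0, 1, 0};
```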
diff --git a/deps/v8/src/compiler/simplified-operator.h b/deps/v8/src/compiler/simplified-operator.h
index 58e9bfdffb..a1438cdce0 100644
--- a/deps/v8/src/compiler/simplified-operator.h
+++ b/deps/v8/src/compiler/simplified-operator.h
@@ -876,6 +876,9 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
Type type, AllocationType allocation = AllocationType::kYoung,
AllowLargeObjects allow_large_objects = AllowLargeObjects::kFalse);
+ const Operator* LoadMessage();
+ const Operator* StoreMessage();
+
const Operator* LoadFieldByIndex();
const Operator* LoadField(FieldAccess const&);
const Operator* StoreField(FieldAccess const&);
@@ -883,6 +886,9 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
// load-element [base + index]
const Operator* LoadElement(ElementAccess const&);
+ // load-stack-argument [base + index]
+ const Operator* LoadStackArgument();
+
// store-element [base + index], value
const Operator* StoreElement(ElementAccess const&);
diff --git a/deps/v8/src/compiler/store-store-elimination.cc b/deps/v8/src/compiler/store-store-elimination.cc
index bd53fb895f..08accd61c5 100644
--- a/deps/v8/src/compiler/store-store-elimination.cc
+++ b/deps/v8/src/compiler/store-store-elimination.cc
@@ -2,14 +2,16 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include <iterator>
-
#include "src/compiler/store-store-elimination.h"
#include "src/codegen/tick-counter.h"
#include "src/compiler/all-nodes.h"
+#include "src/compiler/common-operator.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/node-properties.h"
+#include "src/compiler/persistent-map.h"
+#include "src/compiler/simplified-operator.h"
+#include "src/zone/zone-containers.h"
namespace v8 {
namespace internal {
@@ -41,7 +43,199 @@ namespace compiler {
#define DCHECK_EXTRA(condition, fmt, ...) ((void)0)
#endif
-void StoreStoreElimination::RedundantStoreFinder::Find() {
+namespace {
+
+using StoreOffset = uint32_t;
+
+struct UnobservableStore {
+ NodeId id_;
+ StoreOffset offset_;
+
+ bool operator==(const UnobservableStore other) const {
+ return (id_ == other.id_) && (offset_ == other.offset_);
+ }
+
+ bool operator<(const UnobservableStore other) const {
+ return (id_ < other.id_) || (id_ == other.id_ && offset_ < other.offset_);
+ }
+};
+
+size_t hash_value(const UnobservableStore& p) {
+ return base::hash_combine(p.id_, p.offset_);
+}
+
+// Instances of UnobservablesSet are immutable. They represent either a set of
+// UnobservableStores, or the "unvisited empty set".
+//
+// We apply some sharing to save memory. The class UnobservablesSet is only a
+// pointer wide, and a copy does not use any heap (or temp_zone) memory. Most
+// changes to an UnobservablesSet might allocate in the temp_zone.
+//
+// The size of an instance should be the size of a pointer, plus additional
+// space in the zone in the case of non-unvisited UnobservablesSets. Copying
+// an UnobservablesSet allocates no memory.
+class UnobservablesSet final {
+ private:
+ using KeyT = UnobservableStore;
+ using ValueT = bool; // Emulates set semantics in the map.
+
+ // The PersistentMap uses a special value to signify 'not present'. We use
+ // a boolean value to emulate set semantics.
+ static constexpr ValueT kNotPresent = false;
+ static constexpr ValueT kPresent = true;
+
+ public:
+ using SetT = PersistentMap<KeyT, ValueT>;
+
+ // Creates a new UnobservablesSet, with the null set.
+ static UnobservablesSet Unvisited() { return UnobservablesSet(); }
+
+ // Create a new empty UnobservablesSet. This allocates in the zone, and
+ // can probably be optimized to use a global singleton.
+ static UnobservablesSet VisitedEmpty(Zone* zone);
+ UnobservablesSet(const UnobservablesSet& other) V8_NOEXCEPT = default;
+
+ // Computes the intersection of two UnobservablesSets. If one of the sets is
+ // empty, will return empty.
+ UnobservablesSet Intersect(const UnobservablesSet& other,
+ const UnobservablesSet& empty, Zone* zone) const;
+
+ // Returns a set that is the current one plus the observation obs passed as
+ // a parameter. If said obs is already in the set, we don't have to create a
+ // new one.
+ UnobservablesSet Add(UnobservableStore obs, Zone* zone) const;
+
+ // Returns a set that is the current one, except for all of the
+ // observations with offset off. This is done by creating a new set and
+ // copying all observations with different offsets.
+ // This can probably be done better if the observations are stored first by
+ // offset and then by node.
+ // We remove all nodes with offset off since different nodes may alias one
+ // another, and we currently don't have the means to know if two nodes are
+ // definitely the same value.
+ UnobservablesSet RemoveSameOffset(StoreOffset off, Zone* zone) const;
+
+ const SetT* set() const { return set_; }
+
+ bool IsUnvisited() const { return set_ == nullptr; }
+ bool IsEmpty() const {
+ return set_ == nullptr || set_->begin() == set_->end();
+ }
+ bool Contains(UnobservableStore obs) const {
+ return set_ != nullptr && set_->Get(obs) != kNotPresent;
+ }
+
+ bool operator==(const UnobservablesSet& other) const {
+ if (IsUnvisited() || other.IsUnvisited()) {
+ return IsEmpty() && other.IsEmpty();
+ } else {
+ // Both pointers guaranteed not to be nullptrs.
+ return *set() == *(other.set());
+ }
+ }
+
+ bool operator!=(const UnobservablesSet& other) const {
+ return !(*this == other);
+ }
+
+ private:
+ UnobservablesSet() = default;
+ explicit UnobservablesSet(const SetT* set) : set_(set) {}
+
+ static SetT* NewSet(Zone* zone) {
+ return new (zone->New(sizeof(UnobservablesSet::SetT)))
+ UnobservablesSet::SetT(zone, kNotPresent);
+ }
+
+ static void SetAdd(SetT* set, const KeyT& key) { set->Set(key, kPresent); }
+ static void SetErase(SetT* set, const KeyT& key) {
+ set->Set(key, kNotPresent);
+ }
+
+ const SetT* set_ = nullptr;
+};
+
+class RedundantStoreFinder final {
+ public:
+ // Note that we initialize unobservable_ with
+ // js_graph->graph()->NodeCount() empty sets.
+ RedundantStoreFinder(JSGraph* js_graph, TickCounter* tick_counter,
+ Zone* temp_zone)
+ : jsgraph_(js_graph),
+ tick_counter_(tick_counter),
+ temp_zone_(temp_zone),
+ revisit_(temp_zone),
+ in_revisit_(js_graph->graph()->NodeCount(), temp_zone),
+ unobservable_(js_graph->graph()->NodeCount(),
+ UnobservablesSet::Unvisited(), temp_zone),
+ to_remove_(temp_zone),
+ unobservables_visited_empty_(
+ UnobservablesSet::VisitedEmpty(temp_zone)) {}
+
+ // Crawls from the end of the graph to the beginning, with the objective of
+ // finding redundant stores.
+ void Find();
+
+ // Provides const-correct access to the final list of redundant stores
+ // that are replaced on the graph.
+ const ZoneSet<Node*>& to_remove_const() { return to_remove_; }
+
+ private:
+ // Assumption: All effectful nodes are reachable from End via a sequence of
+ // control, then a sequence of effect edges.
+ // Visit goes through the control chain, visiting effectful nodes that it
+ // encounters.
+ void Visit(Node* node);
+
+ // Marks effect inputs for visiting, if we are able to update this path of
+ // the graph.
+ void VisitEffectfulNode(Node* node);
+
+ // Compute the intersection of the UnobservablesSets of all effect uses and
+ // return it.
+ // The result UnobservablesSet will never be null.
+ UnobservablesSet RecomputeUseIntersection(Node* node);
+
+ // Recomputes the unobservables-set for a node, and marks superfluous
+ // nodes for removal.
+ UnobservablesSet RecomputeSet(Node* node, const UnobservablesSet& uses);
+
+ // Returns true if node's opcode cannot observe StoreFields.
+ static bool CannotObserveStoreField(Node* node);
+
+ void MarkForRevisit(Node* node);
+ bool HasBeenVisited(Node* node);
+
+ // To safely cast an offset from a FieldAccess, which has a potentially
+ // wider range (namely int).
+ StoreOffset ToOffset(const FieldAccess& access) {
+ DCHECK_GE(access.offset, 0);
+ return static_cast<StoreOffset>(access.offset);
+ }
+
+ JSGraph* jsgraph() const { return jsgraph_; }
+ Isolate* isolate() { return jsgraph()->isolate(); }
+ Zone* temp_zone() const { return temp_zone_; }
+ UnobservablesSet& unobservable_for_id(NodeId id) {
+ DCHECK_LT(id, unobservable_.size());
+ return unobservable_[id];
+ }
+ ZoneSet<Node*>& to_remove() { return to_remove_; }
+
+ JSGraph* const jsgraph_;
+ TickCounter* const tick_counter_;
+ Zone* const temp_zone_;
+
+ ZoneStack<Node*> revisit_;
+ ZoneVector<bool> in_revisit_;
+
+ // Maps node IDs to UnobservableNodeSets.
+ ZoneVector<UnobservablesSet> unobservable_;
+ ZoneSet<Node*> to_remove_;
+ const UnobservablesSet unobservables_visited_empty_;
+};
+
+void RedundantStoreFinder::Find() {
Visit(jsgraph()->graph()->end());
while (!revisit_.empty()) {
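UnobservablesSet above emulates a set on top of PersistentMap by mapping present keys to kPresent and resetting erased keys to the map's default value. The same trick on an ordinary map, as a minimal stand-in:

```cpp
#include <map>

// A map whose default value means "absent" behaves like a set; this is how
// UnobservablesSet layers set semantics on PersistentMap<Key, bool>.
template <typename K>
class MapBackedSet {
 public:
  void Add(const K& k) { map_[k] = true; }
  void Erase(const K& k) { map_[k] = false; }  // reset to the default value
  bool Contains(const K& k) const {
    auto it = map_.find(k);
    return it != map_.end() && it->second;
  }

 private:
  std::map<K, bool> map_;  // PersistentMap stand-in
};
```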
@@ -65,7 +259,7 @@ void StoreStoreElimination::RedundantStoreFinder::Find() {
#endif
}
-void StoreStoreElimination::RedundantStoreFinder::MarkForRevisit(Node* node) {
+void RedundantStoreFinder::MarkForRevisit(Node* node) {
DCHECK_LT(node->id(), in_revisit_.size());
if (!in_revisit_[node->id()]) {
revisit_.push(node);
@@ -73,32 +267,12 @@ void StoreStoreElimination::RedundantStoreFinder::MarkForRevisit(Node* node) {
}
}
-bool StoreStoreElimination::RedundantStoreFinder::HasBeenVisited(Node* node) {
+bool RedundantStoreFinder::HasBeenVisited(Node* node) {
return !unobservable_for_id(node->id()).IsUnvisited();
}
-void StoreStoreElimination::Run(JSGraph* js_graph, TickCounter* tick_counter,
- Zone* temp_zone) {
- // Find superfluous nodes
- RedundantStoreFinder finder(js_graph, tick_counter, temp_zone);
- finder.Find();
-
- // Remove superfluous nodes
- for (Node* node : finder.to_remove_const()) {
- if (FLAG_trace_store_elimination) {
- PrintF("StoreStoreElimination::Run: Eliminating node #%d:%s\n",
- node->id(), node->op()->mnemonic());
- }
- Node* previous_effect = NodeProperties::GetEffectInput(node);
- NodeProperties::ReplaceUses(node, nullptr, previous_effect, nullptr,
- nullptr);
- node->Kill();
- }
-}
-
-StoreStoreElimination::UnobservablesSet
-StoreStoreElimination::RedundantStoreFinder::RecomputeSet(
- Node* node, const StoreStoreElimination::UnobservablesSet& uses) {
+UnobservablesSet RedundantStoreFinder::RecomputeSet(
+ Node* node, const UnobservablesSet& uses) {
switch (node->op()->opcode()) {
case IrOpcode::kStoreField: {
Node* stored_to = node->InputAt(0);
@@ -150,8 +324,7 @@ StoreStoreElimination::RedundantStoreFinder::RecomputeSet(
UNREACHABLE();
}
-bool StoreStoreElimination::RedundantStoreFinder::CannotObserveStoreField(
- Node* node) {
+bool RedundantStoreFinder::CannotObserveStoreField(Node* node) {
IrOpcode::Value opcode = node->opcode();
return opcode == IrOpcode::kLoadElement || opcode == IrOpcode::kLoad ||
opcode == IrOpcode::kStore || opcode == IrOpcode::kEffectPhi ||
@@ -159,7 +332,7 @@ bool StoreStoreElimination::RedundantStoreFinder::CannotObserveStoreField(
opcode == IrOpcode::kUnsafePointerAdd || opcode == IrOpcode::kRetain;
}
-void StoreStoreElimination::RedundantStoreFinder::Visit(Node* node) {
+void RedundantStoreFinder::Visit(Node* node) {
if (!HasBeenVisited(node)) {
for (int i = 0; i < node->op()->ControlInputCount(); i++) {
Node* control_input = NodeProperties::GetControlInput(node, i);
@@ -180,19 +353,15 @@ void StoreStoreElimination::RedundantStoreFinder::Visit(Node* node) {
}
}
-void StoreStoreElimination::RedundantStoreFinder::VisitEffectfulNode(
- Node* node) {
+void RedundantStoreFinder::VisitEffectfulNode(Node* node) {
if (HasBeenVisited(node)) {
TRACE("- Revisiting: #%d:%s", node->id(), node->op()->mnemonic());
}
- StoreStoreElimination::UnobservablesSet after_set =
- RecomputeUseIntersection(node);
- StoreStoreElimination::UnobservablesSet before_set =
- RecomputeSet(node, after_set);
+ UnobservablesSet after_set = RecomputeUseIntersection(node);
+ UnobservablesSet before_set = RecomputeSet(node, after_set);
DCHECK(!before_set.IsUnvisited());
- StoreStoreElimination::UnobservablesSet stores_for_node =
- unobservable_for_id(node->id());
+ UnobservablesSet stores_for_node = unobservable_for_id(node->id());
bool cur_set_changed =
stores_for_node.IsUnvisited() || stores_for_node != before_set;
if (!cur_set_changed) {
@@ -212,9 +381,7 @@ void StoreStoreElimination::RedundantStoreFinder::VisitEffectfulNode(
}
}
-StoreStoreElimination::UnobservablesSet
-StoreStoreElimination::RedundantStoreFinder::RecomputeUseIntersection(
- Node* node) {
+UnobservablesSet RedundantStoreFinder::RecomputeUseIntersection(Node* node) {
// There were no effect uses. Break early.
if (node->op()->EffectOutputCount() == 0) {
IrOpcode::Value opcode = node->opcode();
@@ -236,8 +403,7 @@ StoreStoreElimination::RedundantStoreFinder::RecomputeUseIntersection(
// {first} == false indicates that cur_set is the intersection of at least one
// thing.
bool first = true;
- StoreStoreElimination::UnobservablesSet cur_set =
- StoreStoreElimination::UnobservablesSet::Unvisited(); // irrelevant
+ UnobservablesSet cur_set = UnobservablesSet::Unvisited(); // irrelevant
for (Edge edge : node->use_edges()) {
if (!NodeProperties::IsEffectEdge(edge)) {
continue;
@@ -245,8 +411,7 @@ StoreStoreElimination::RedundantStoreFinder::RecomputeUseIntersection(
// Intersect with the new use node.
Node* use = edge.from();
- StoreStoreElimination::UnobservablesSet new_set =
- unobservable_for_id(use->id());
+ UnobservablesSet new_set = unobservable_for_id(use->id());
if (first) {
first = false;
cur_set = new_set;
@@ -268,72 +433,70 @@ StoreStoreElimination::RedundantStoreFinder::RecomputeUseIntersection(
return cur_set;
}
-StoreStoreElimination::UnobservablesSet::UnobservablesSet() : set_(nullptr) {}
-
-StoreStoreElimination::UnobservablesSet
-StoreStoreElimination::UnobservablesSet::VisitedEmpty(Zone* zone) {
- ZoneSet<UnobservableStore>* empty_set =
- new (zone->New(sizeof(ZoneSet<UnobservableStore>)))
- ZoneSet<UnobservableStore>(zone);
- return StoreStoreElimination::UnobservablesSet(empty_set);
+UnobservablesSet UnobservablesSet::VisitedEmpty(Zone* zone) {
+ return UnobservablesSet(NewSet(zone));
}
-StoreStoreElimination::UnobservablesSet
-StoreStoreElimination::UnobservablesSet::Intersect(
- const StoreStoreElimination::UnobservablesSet& other,
- const StoreStoreElimination::UnobservablesSet& empty, Zone* zone) const {
- if (IsEmpty() || other.IsEmpty()) {
- return empty;
- } else {
- ZoneSet<UnobservableStore>* intersection =
- new (zone->New(sizeof(ZoneSet<UnobservableStore>)))
- ZoneSet<UnobservableStore>(zone);
- // Put the intersection of set() and other.set() in intersection.
- set_intersection(set()->begin(), set()->end(), other.set()->begin(),
- other.set()->end(),
- std::inserter(*intersection, intersection->end()));
-
- return StoreStoreElimination::UnobservablesSet(intersection);
+UnobservablesSet UnobservablesSet::Intersect(const UnobservablesSet& other,
+ const UnobservablesSet& empty,
+ Zone* zone) const {
+ if (IsEmpty() || other.IsEmpty()) return empty;
+
+ UnobservablesSet::SetT* intersection = NewSet(zone);
+ for (const auto& triple : set()->Zip(*other.set())) {
+ if (std::get<1>(triple) && std::get<2>(triple)) {
+ intersection->Set(std::get<0>(triple), kPresent);
+ }
}
+
+ return UnobservablesSet(intersection);
}
-StoreStoreElimination::UnobservablesSet
-StoreStoreElimination::UnobservablesSet::Add(UnobservableStore obs,
- Zone* zone) const {
- bool found = set()->find(obs) != set()->end();
- if (found) {
- return *this;
- } else {
- // Make a new empty set.
- ZoneSet<UnobservableStore>* new_set =
- new (zone->New(sizeof(ZoneSet<UnobservableStore>)))
- ZoneSet<UnobservableStore>(zone);
- // Copy the old elements over.
- *new_set = *set();
- // Add the new element.
- bool inserted = new_set->insert(obs).second;
- DCHECK(inserted);
- USE(inserted); // silence warning about unused variable
-
- return StoreStoreElimination::UnobservablesSet(new_set);
+UnobservablesSet UnobservablesSet::Add(UnobservableStore obs,
+ Zone* zone) const {
+ if (set()->Get(obs) != kNotPresent) return *this;
+
+ UnobservablesSet::SetT* new_set = NewSet(zone);
+ *new_set = *set();
+ SetAdd(new_set, obs);
+
+ return UnobservablesSet(new_set);
+}
+
+UnobservablesSet UnobservablesSet::RemoveSameOffset(StoreOffset offset,
+ Zone* zone) const {
+ UnobservablesSet::SetT* new_set = NewSet(zone);
+ *new_set = *set();
+
+ // Remove elements with the given offset.
+ for (const auto& entry : *new_set) {
+ const UnobservableStore& obs = entry.first;
+ if (obs.offset_ == offset) SetErase(new_set, obs);
}
+
+ return UnobservablesSet(new_set);
}
-StoreStoreElimination::UnobservablesSet
-StoreStoreElimination::UnobservablesSet::RemoveSameOffset(StoreOffset offset,
- Zone* zone) const {
- // Make a new empty set.
- ZoneSet<UnobservableStore>* new_set =
- new (zone->New(sizeof(ZoneSet<UnobservableStore>)))
- ZoneSet<UnobservableStore>(zone);
- // Copy all elements over that have a different offset.
- for (auto obs : *set()) {
- if (obs.offset_ != offset) {
- new_set->insert(obs);
+} // namespace
+
+// static
+void StoreStoreElimination::Run(JSGraph* js_graph, TickCounter* tick_counter,
+ Zone* temp_zone) {
+ // Find superfluous nodes
+ RedundantStoreFinder finder(js_graph, tick_counter, temp_zone);
+ finder.Find();
+
+ // Remove superfluous nodes
+ for (Node* node : finder.to_remove_const()) {
+ if (FLAG_trace_store_elimination) {
+ PrintF("StoreStoreElimination::Run: Eliminating node #%d:%s\n",
+ node->id(), node->op()->mnemonic());
}
+ Node* previous_effect = NodeProperties::GetEffectInput(node);
+ NodeProperties::ReplaceUses(node, nullptr, previous_effect, nullptr,
+ nullptr);
+ node->Kill();
}
-
- return StoreStoreElimination::UnobservablesSet(new_set);
}
#undef TRACE
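For reference, the pattern this pass targets is loosely analogous to the following C++: a field store followed by another store to the same offset, with nothing in between that could observe the first value:

```cpp
struct Obj {
  int field;
};

// The first store is unobservable: nothing between the two stores can read
// Obj::field, so the pass may delete it. A load in between would "observe"
// the first store and keep it alive.
void TwoStores(Obj* o) {
  o->field = 1;  // redundant, removed by store-store elimination
  o->field = 2;
}
```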
diff --git a/deps/v8/src/compiler/store-store-elimination.h b/deps/v8/src/compiler/store-store-elimination.h
index 7704938fc0..0813adb1f0 100644
--- a/deps/v8/src/compiler/store-store-elimination.h
+++ b/deps/v8/src/compiler/store-store-elimination.h
@@ -5,18 +5,18 @@
#ifndef V8_COMPILER_STORE_STORE_ELIMINATION_H_
#define V8_COMPILER_STORE_STORE_ELIMINATION_H_
-#include "src/compiler/common-operator.h"
-#include "src/compiler/js-graph.h"
-#include "src/compiler/simplified-operator.h"
-#include "src/zone/zone-containers.h"
+#include "src/common/globals.h"
namespace v8 {
namespace internal {
class TickCounter;
+class Zone;
namespace compiler {
+class JSGraph;
+
// Store-store elimination.
//
// The aim of this optimization is to detect the following pattern in the
@@ -44,176 +44,10 @@ namespace compiler {
//
// This implementation needs all dead nodes removed from the graph, and the
// graph should be trimmed.
-class StoreStoreElimination final {
+class StoreStoreElimination final : public AllStatic {
public:
static void Run(JSGraph* js_graph, TickCounter* tick_counter,
Zone* temp_zone);
-
- private:
- using StoreOffset = uint32_t;
-
- struct UnobservableStore {
- NodeId id_;
- StoreOffset offset_;
-
- bool operator==(const UnobservableStore other) const {
- return (id_ == other.id_) && (offset_ == other.offset_);
- }
-
- bool operator<(const UnobservableStore other) const {
- return (id_ < other.id_) || (id_ == other.id_ && offset_ < other.offset_);
- }
- };
-
- // Instances of UnobservablesSet are immutable. They represent either a set of
- // UnobservableStores, or the "unvisited empty set".
- //
- // We apply some sharing to save memory. The class UnobservablesSet is only a
- // pointer wide, and a copy does not use any heap (or temp_zone) memory. Most
- // changes to an UnobservablesSet might allocate in the temp_zone.
- //
- // The size of an instance should be the size of a pointer, plus additional
- // space in the zone in the case of non-unvisited UnobservablesSets. Copying
- // an UnobservablesSet allocates no memory.
- class UnobservablesSet final {
- public:
- // Creates a new UnobservablesSet, with the null set.
- static UnobservablesSet Unvisited() { return UnobservablesSet(); }
-
- // Create a new empty UnobservablesSet. This allocates in the zone, and
- // can probably be optimized to use a global singleton.
- static UnobservablesSet VisitedEmpty(Zone* zone);
- UnobservablesSet(const UnobservablesSet& other) V8_NOEXCEPT = default;
-
- // Computes the intersection of two UnobservablesSets. If one of the sets is
- // empty, will return empty.
- UnobservablesSet Intersect(const UnobservablesSet& other,
- const UnobservablesSet& empty, Zone* zone) const;
-
- // Returns a set that it is the current one, plus the observation obs passed
- // as parameter. If said obs it's already in the set, we don't have to
- // create a new one.
- UnobservablesSet Add(UnobservableStore obs, Zone* zone) const;
-
- // Returns a set that it is the current one, except for all of the
- // observations with offset off. This is done by creating a new set and
- // copying all observations with different offsets.
- // This can probably be done better if the observations are stored first by
- // offset and then by node.
- // We are removing all nodes with offset off since different nodes may
- // alias one another, and we currently we don't have the means to know if
- // two nodes are definitely the same value.
- UnobservablesSet RemoveSameOffset(StoreOffset off, Zone* zone) const;
-
- const ZoneSet<UnobservableStore>* set() const { return set_; }
-
- bool IsUnvisited() const { return set_ == nullptr; }
- bool IsEmpty() const { return set_ == nullptr || set_->empty(); }
- bool Contains(UnobservableStore obs) const {
- return set_ != nullptr && (set_->find(obs) != set_->end());
- }
-
- bool operator==(const UnobservablesSet& other) const {
- if (IsUnvisited() || other.IsUnvisited()) {
- return IsEmpty() && other.IsEmpty();
- } else {
- // Both pointers guaranteed not to be nullptrs.
- return *set() == *(other.set());
- }
- }
-
- bool operator!=(const UnobservablesSet& other) const {
- return !(*this == other);
- }
-
- private:
- UnobservablesSet();
- explicit UnobservablesSet(const ZoneSet<UnobservableStore>* set)
- : set_(set) {}
- const ZoneSet<UnobservableStore>* set_;
- };
-
- class RedundantStoreFinder final {
- public:
- // Note that we Initialize unobservable_ with js_graph->graph->NodeCount()
- // amount of empty sets.
- RedundantStoreFinder(JSGraph* js_graph, TickCounter* tick_counter,
- Zone* temp_zone)
- : jsgraph_(js_graph),
- tick_counter_(tick_counter),
- temp_zone_(temp_zone),
- revisit_(temp_zone),
- in_revisit_(js_graph->graph()->NodeCount(), temp_zone),
- unobservable_(js_graph->graph()->NodeCount(),
- StoreStoreElimination::UnobservablesSet::Unvisited(),
- temp_zone),
- to_remove_(temp_zone),
- unobservables_visited_empty_(
- StoreStoreElimination::UnobservablesSet::VisitedEmpty(
- temp_zone)) {}
-
- // Crawls from the end of the graph to the beginning, with the objective of
- // finding redundant stores.
- void Find();
-
- // This method is used for const correctness to go through the final list of
- // redundant stores that are replaced on the graph.
- const ZoneSet<Node*>& to_remove_const() { return to_remove_; }
-
- private:
- // Assumption: All effectful nodes are reachable from End via a sequence of
- // control, then a sequence of effect edges.
- // Visit goes through the control chain, visiting effectful nodes that it
- // encounters.
- void Visit(Node* node);
-
- // Marks effect inputs for visiting, if we are able to update this path of
- // the graph.
- void VisitEffectfulNode(Node* node);
-
- // Compute the intersection of the UnobservablesSets of all effect uses and
- // return it.
- // The result UnobservablesSet will never be null.
- UnobservablesSet RecomputeUseIntersection(Node* node);
-
- // Recompute unobservables-set for a node. Will also mark superfluous nodes
- // as to be removed.
- UnobservablesSet RecomputeSet(Node* node, const UnobservablesSet& uses);
-
- // Returns true if node's opcode cannot observe StoreFields.
- static bool CannotObserveStoreField(Node* node);
-
- void MarkForRevisit(Node* node);
- bool HasBeenVisited(Node* node);
-
- // To safely cast an offset from a FieldAccess, which has a potentially
- // wider range (namely int).
- StoreOffset ToOffset(const FieldAccess& access) {
- DCHECK_GE(access.offset, 0);
- return static_cast<StoreOffset>(access.offset);
- }
-
- JSGraph* jsgraph() const { return jsgraph_; }
- Isolate* isolate() { return jsgraph()->isolate(); }
- Zone* temp_zone() const { return temp_zone_; }
- UnobservablesSet& unobservable_for_id(NodeId id) {
- DCHECK_LT(id, unobservable_.size());
- return unobservable_[id];
- }
- ZoneSet<Node*>& to_remove() { return to_remove_; }
-
- JSGraph* const jsgraph_;
- TickCounter* const tick_counter_;
- Zone* const temp_zone_;
-
- ZoneStack<Node*> revisit_;
- ZoneVector<bool> in_revisit_;
-
- // Maps node IDs to UnobservableNodeSets.
- ZoneVector<UnobservablesSet> unobservable_;
- ZoneSet<Node*> to_remove_;
- const UnobservablesSet unobservables_visited_empty_;
- };
};
} // namespace compiler
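Making StoreStoreElimination derive from AllStatic documents that the class is only a holder for its static Run function. A hedged sketch of the idiom (V8's actual AllStatic base may differ in detail):

```cpp
// Deleting construction turns the class into a pure namespace for its
// static entry point.
class AllStaticLike {
 public:
  AllStaticLike() = delete;
};

class StoreStoreEliminationLike final : public AllStaticLike {
 public:
  static void Run() { /* find and remove redundant stores */ }
};
```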
diff --git a/deps/v8/src/compiler/typer.cc b/deps/v8/src/compiler/typer.cc
index 6ba1b39431..e5ee0aa733 100644
--- a/deps/v8/src/compiler/typer.cc
+++ b/deps/v8/src/compiler/typer.cc
@@ -1339,6 +1339,10 @@ Type Typer::Visitor::TypeJSCreateLiteralRegExp(Node* node) {
return Type::OtherObject();
}
+Type Typer::Visitor::TypeJSGetTemplateObject(Node* node) {
+ return Type::Array();
+}
+
Type Typer::Visitor::TypeJSLoadProperty(Node* node) {
return Type::NonInternal();
}
@@ -2192,10 +2196,16 @@ Type Typer::Visitor::TypeLoadField(Node* node) {
return FieldAccessOf(node->op()).type;
}
+Type Typer::Visitor::TypeLoadMessage(Node* node) { return Type::Any(); }
+
Type Typer::Visitor::TypeLoadElement(Node* node) {
return ElementAccessOf(node->op()).type;
}
+Type Typer::Visitor::TypeLoadStackArgument(Node* node) {
+ return Type::NonInternal();
+}
+
Type Typer::Visitor::TypeLoadFromObject(Node* node) { UNREACHABLE(); }
Type Typer::Visitor::TypeLoadTypedElement(Node* node) {
@@ -2222,6 +2232,8 @@ Type Typer::Visitor::TypeLoadDataViewElement(Node* node) {
Type Typer::Visitor::TypeStoreField(Node* node) { UNREACHABLE(); }
+Type Typer::Visitor::TypeStoreMessage(Node* node) { UNREACHABLE(); }
+
Type Typer::Visitor::TypeStoreElement(Node* node) { UNREACHABLE(); }
Type Typer::Visitor::TypeStoreToObject(Node* node) { UNREACHABLE(); }
diff --git a/deps/v8/src/compiler/types.cc b/deps/v8/src/compiler/types.cc
index 018c54c3d5..caa086bbd3 100644
--- a/deps/v8/src/compiler/types.cc
+++ b/deps/v8/src/compiler/types.cc
@@ -183,7 +183,7 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) {
case HEAP_NUMBER_TYPE:
return kNumber;
case JS_OBJECT_TYPE:
- case JS_ARGUMENTS_TYPE:
+ case JS_ARGUMENTS_OBJECT_TYPE:
case JS_ERROR_TYPE:
case JS_GLOBAL_OBJECT_TYPE:
case JS_GLOBAL_PROXY_TYPE:
@@ -207,16 +207,16 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) {
case JS_MESSAGE_OBJECT_TYPE:
case JS_DATE_TYPE:
#ifdef V8_INTL_SUPPORT
- case JS_INTL_V8_BREAK_ITERATOR_TYPE:
- case JS_INTL_COLLATOR_TYPE:
- case JS_INTL_DATE_TIME_FORMAT_TYPE:
- case JS_INTL_LIST_FORMAT_TYPE:
- case JS_INTL_LOCALE_TYPE:
- case JS_INTL_NUMBER_FORMAT_TYPE:
- case JS_INTL_PLURAL_RULES_TYPE:
- case JS_INTL_RELATIVE_TIME_FORMAT_TYPE:
- case JS_INTL_SEGMENT_ITERATOR_TYPE:
- case JS_INTL_SEGMENTER_TYPE:
+ case JS_V8_BREAK_ITERATOR_TYPE:
+ case JS_COLLATOR_TYPE:
+ case JS_DATE_TIME_FORMAT_TYPE:
+ case JS_LIST_FORMAT_TYPE:
+ case JS_LOCALE_TYPE:
+ case JS_NUMBER_FORMAT_TYPE:
+ case JS_PLURAL_RULES_TYPE:
+ case JS_RELATIVE_TIME_FORMAT_TYPE:
+ case JS_SEGMENT_ITERATOR_TYPE:
+ case JS_SEGMENTER_TYPE:
#endif // V8_INTL_SUPPORT
case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
case JS_GENERATOR_OBJECT_TYPE:
@@ -225,8 +225,8 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) {
case JS_MODULE_NAMESPACE_TYPE:
case JS_ARRAY_BUFFER_TYPE:
case JS_ARRAY_ITERATOR_TYPE:
- case JS_REGEXP_TYPE: // TODO(rossberg): there should be a RegExp type.
- case JS_REGEXP_STRING_ITERATOR_TYPE:
+ case JS_REG_EXP_TYPE:
+ case JS_REG_EXP_STRING_ITERATOR_TYPE:
case JS_TYPED_ARRAY_TYPE:
case JS_DATA_VIEW_TYPE:
case JS_SET_TYPE:
@@ -244,12 +244,12 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) {
case JS_WEAK_REF_TYPE:
case JS_WEAK_SET_TYPE:
case JS_PROMISE_TYPE:
- case WASM_EXCEPTION_TYPE:
- case WASM_GLOBAL_TYPE:
- case WASM_INSTANCE_TYPE:
- case WASM_MEMORY_TYPE:
- case WASM_MODULE_TYPE:
- case WASM_TABLE_TYPE:
+ case WASM_EXCEPTION_OBJECT_TYPE:
+ case WASM_GLOBAL_OBJECT_TYPE:
+ case WASM_INSTANCE_OBJECT_TYPE:
+ case WASM_MEMORY_OBJECT_TYPE:
+ case WASM_MODULE_OBJECT_TYPE:
+ case WASM_TABLE_OBJECT_TYPE:
case WEAK_CELL_TYPE:
DCHECK(!map.is_callable());
DCHECK(!map.is_undetectable());
@@ -365,7 +365,7 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) {
case PROMISE_REJECT_REACTION_JOB_TASK_TYPE:
case PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE:
#define MAKE_TORQUE_CLASS_TYPE(V) case V:
- TORQUE_DEFINED_INSTANCE_TYPES(MAKE_TORQUE_CLASS_TYPE)
+ TORQUE_INTERNAL_INSTANCE_TYPES(MAKE_TORQUE_CLASS_TYPE)
#undef MAKE_TORQUE_CLASS_TYPE
UNREACHABLE();
}
diff --git a/deps/v8/src/compiler/verifier.cc b/deps/v8/src/compiler/verifier.cc
index 608d6ffee6..d7fdd4269e 100644
--- a/deps/v8/src/compiler/verifier.cc
+++ b/deps/v8/src/compiler/verifier.cc
@@ -732,6 +732,10 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
// Type is OtherObject.
CheckTypeIs(node, Type::OtherObject());
break;
+ case IrOpcode::kJSGetTemplateObject:
+ // Type is Array
+ CheckTypeIs(node, Type::Array());
+ break;
case IrOpcode::kJSLoadProperty:
// Type can be anything.
CheckTypeIs(node, Type::Any());
@@ -1594,12 +1598,14 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
CheckTypeIs(node, Type::NonInternal());
break;
case IrOpcode::kLoadField:
+ case IrOpcode::kLoadMessage:
// Object -> fieldtype
// TODO(rossberg): activate once machine ops are typed.
// CheckValueInputIs(node, 0, Type::Object());
// CheckTypeIs(node, FieldAccessOf(node->op()).type));
break;
case IrOpcode::kLoadElement:
+ case IrOpcode::kLoadStackArgument:
// Object -> elementtype
// TODO(rossberg): activate once machine ops are typed.
// CheckValueInputIs(node, 0, Type::Object());
@@ -1613,6 +1619,7 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
case IrOpcode::kLoadDataViewElement:
break;
case IrOpcode::kStoreField:
+ case IrOpcode::kStoreMessage:
// (Object, fieldtype) -> _|_
// TODO(rossberg): activate once machine ops are typed.
// CheckValueInputIs(node, 0, Type::Object());
@@ -1700,6 +1707,7 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
case IrOpcode::kWord64Ctz:
case IrOpcode::kWord64ReverseBits:
case IrOpcode::kWord64ReverseBytes:
+ case IrOpcode::kSimd128ReverseBytes:
case IrOpcode::kInt64AbsWithOverflow:
case IrOpcode::kWord64Equal:
case IrOpcode::kInt32Add:
@@ -1801,7 +1809,7 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
case IrOpcode::kBitcastInt32ToFloat32:
case IrOpcode::kBitcastInt64ToFloat64:
case IrOpcode::kBitcastTaggedToWord:
- case IrOpcode::kBitcastTaggedSignedToWord:
+ case IrOpcode::kBitcastTaggedToWordForTagAndSmiBits:
case IrOpcode::kBitcastWordToTagged:
case IrOpcode::kBitcastWordToTaggedSigned:
case IrOpcode::kBitcastWord32ToCompressedSigned:
diff --git a/deps/v8/src/compiler/wasm-compiler.cc b/deps/v8/src/compiler/wasm-compiler.cc
index 28f9943e59..ddc97ce503 100644
--- a/deps/v8/src/compiler/wasm-compiler.cc
+++ b/deps/v8/src/compiler/wasm-compiler.cc
@@ -176,8 +176,6 @@ WasmGraphBuilder::WasmGraphBuilder(
: zone_(zone),
mcgraph_(mcgraph),
env_(env),
- cur_buffer_(def_buffer_),
- cur_bufsize_(kDefaultBufferSize),
has_simd_(ContainsSimd(sig)),
untrusted_code_mitigations_(FLAG_untrusted_code_mitigations),
sig_(sig),
@@ -255,24 +253,19 @@ Node* WasmGraphBuilder::Merge(unsigned count, Node** controls) {
return graph()->NewNode(mcgraph()->common()->Merge(count), count, controls);
}
-Node* WasmGraphBuilder::Phi(wasm::ValueType type, unsigned count, Node** vals,
- Node* control) {
- DCHECK(IrOpcode::IsMergeOpcode(control->opcode()));
- Vector<Node*> buf = Realloc(vals, count, count + 1);
- buf[count] = control;
+Node* WasmGraphBuilder::Phi(wasm::ValueType type, unsigned count,
+ Node** vals_and_control) {
+ DCHECK(IrOpcode::IsMergeOpcode(vals_and_control[count]->opcode()));
return graph()->NewNode(
mcgraph()->common()->Phi(wasm::ValueTypes::MachineRepresentationFor(type),
count),
- count + 1, buf.begin());
+ count + 1, vals_and_control);
}
-Node* WasmGraphBuilder::EffectPhi(unsigned count, Node** effects,
- Node* control) {
- DCHECK(IrOpcode::IsMergeOpcode(control->opcode()));
- Vector<Node*> buf = Realloc(effects, count, count + 1);
- buf[count] = control;
+Node* WasmGraphBuilder::EffectPhi(unsigned count, Node** effects_and_control) {
+ DCHECK(IrOpcode::IsMergeOpcode(effects_and_control[count]->opcode()));
return graph()->NewNode(mcgraph()->common()->EffectPhi(count), count + 1,
- buf.begin());
+ effects_and_control);
}
Node* WasmGraphBuilder::RefNull() {
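The Phi/EffectPhi signature change above moves buffer management to the caller: inputs arrive as one `count + 1` array whose last slot is the control node, so the old Realloc step disappears. A hypothetical illustration of the convention:

```cpp
#include <vector>

struct Node;  // opaque graph node, as in the compiler

// Callers allocate count + 1 slots up front and append the control node,
// instead of having Phi()/EffectPhi() reallocate to make room for it.
std::vector<Node*> MakeValsAndControl(const std::vector<Node*>& vals,
                                      Node* control) {
  std::vector<Node*> buf = vals;
  buf.push_back(control);  // vals_and_control[count] == control
  return buf;
}
```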
@@ -1114,6 +1107,10 @@ Node* WasmGraphBuilder::ZeroCheck64(wasm::TrapReason reason, Node* node,
}
Node* WasmGraphBuilder::Switch(unsigned count, Node* key) {
+ // The instruction selector will use {kArchTableSwitch} for large switches,
+ // which has limited input count, see {InstructionSelector::EmitTableSwitch}.
+ DCHECK_LE(count, Instruction::kMaxInputCount - 2); // value_range + 2
+ DCHECK_LE(count, wasm::kV8MaxWasmFunctionBrTableSize + 1); // plus IfDefault
return graph()->NewNode(mcgraph()->common()->Switch(count), key, Control());
}
@@ -1266,27 +1263,9 @@ Node* WasmGraphBuilder::BuildChangeEndiannessStore(
case 8:
result = graph()->NewNode(m->Word64ReverseBytes(), value);
break;
- case 16: {
- Node* byte_reversed_lanes[4];
- for (int lane = 0; lane < 4; lane++) {
- byte_reversed_lanes[lane] = graph()->NewNode(
- m->Word32ReverseBytes(),
- graph()->NewNode(mcgraph()->machine()->I32x4ExtractLane(lane),
- value));
- }
-
- // This is making a copy of the value.
- result =
- graph()->NewNode(mcgraph()->machine()->S128And(), value, value);
-
- for (int lane = 0; lane < 4; lane++) {
- result =
- graph()->NewNode(mcgraph()->machine()->I32x4ReplaceLane(3 - lane),
- result, byte_reversed_lanes[lane]);
- }
-
+ case 16:
+ result = graph()->NewNode(m->Simd128ReverseBytes(), value);
break;
- }
default:
UNREACHABLE();
break;
@@ -1405,27 +1384,9 @@ Node* WasmGraphBuilder::BuildChangeEndiannessLoad(Node* node,
case 8:
result = graph()->NewNode(m->Word64ReverseBytes(), value);
break;
- case 16: {
- Node* byte_reversed_lanes[4];
- for (int lane = 0; lane < 4; lane++) {
- byte_reversed_lanes[lane] = graph()->NewNode(
- m->Word32ReverseBytes(),
- graph()->NewNode(mcgraph()->machine()->I32x4ExtractLane(lane),
- value));
- }
-
- // This is making a copy of the value.
- result =
- graph()->NewNode(mcgraph()->machine()->S128And(), value, value);
-
- for (int lane = 0; lane < 4; lane++) {
- result =
- graph()->NewNode(mcgraph()->machine()->I32x4ReplaceLane(3 - lane),
- result, byte_reversed_lanes[lane]);
- }
-
+ case 16:
+ result = graph()->NewNode(m->Simd128ReverseBytes(), value);
break;
- }
default:
UNREACHABLE();
}
@@ -2295,13 +2256,14 @@ Node* WasmGraphBuilder::GetExceptionTag(Node* except_obj) {
return BuildCallToRuntime(Runtime::kWasmExceptionGetTag, &except_obj, 1);
}
-Vector<Node*> WasmGraphBuilder::GetExceptionValues(
- Node* except_obj, const wasm::WasmException* exception) {
+Node* WasmGraphBuilder::GetExceptionValues(Node* except_obj,
+ const wasm::WasmException* exception,
+ Vector<Node*> values) {
Node* values_array =
BuildCallToRuntime(Runtime::kWasmExceptionGetValues, &except_obj, 1);
uint32_t index = 0;
const wasm::WasmExceptionSig* sig = exception->sig;
- Vector<Node*> values = Buffer(sig->parameter_count());
+ DCHECK_EQ(sig->parameter_count(), values.size());
for (size_t i = 0; i < sig->parameter_count(); ++i) {
Node* value;
switch (sig->GetParam(i)) {
@@ -2347,7 +2309,7 @@ Vector<Node*> WasmGraphBuilder::GetExceptionValues(
values[i] = value;
}
DCHECK_EQ(index, WasmExceptionPackage::GetEncodedSize(exception));
- return values;
+ return values_array;
}
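// Editorial sketch (not part of the diff): the calling convention this
// refactoring adopts, reduced to a self-contained example. The caller owns
// the output storage and passes a view of it; the callee fills the slots and
// returns its auxiliary result instead of a slice of a shared scratch
// buffer. {Span} and {DecodeInto} are hypothetical names.
#include <array>
#include <cassert>
#include <cstddef>
template <typename T>
struct Span {
  T* data;
  size_t len;
  T& operator[](size_t i) { return data[i]; }
  size_t size() const { return len; }
};
int DecodeInto(Span<int> out) {
  assert(out.size() == 3);  // mirrors DCHECK_EQ(sig->parameter_count(), size)
  for (size_t i = 0; i < out.size(); ++i) out[i] = static_cast<int>(i);
  return 42;  // auxiliary result, cf. {values_array} above
}
int main() {
  std::array<int, 3> storage{};  // cf. a caller-side base::SmallVector
  int aux = DecodeInto({storage.data(), storage.size()});
  return (aux == 42 && storage[2] == 2) ? 0 : 1;
}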
Node* WasmGraphBuilder::BuildI32DivS(Node* left, Node* right,
@@ -2682,7 +2644,8 @@ Node* WasmGraphBuilder::BuildCCall(MachineSignature* sig, Node* function,
return SetEffect(graph()->NewNode(op, arraysize(call_args), call_args));
}
-Node* WasmGraphBuilder::BuildCallNode(wasm::FunctionSig* sig, Node** args,
+Node* WasmGraphBuilder::BuildCallNode(wasm::FunctionSig* sig,
+ Vector<Node*> args,
wasm::WasmCodePosition position,
Node* instance_node, const Operator* op) {
if (instance_node == nullptr) {
@@ -2695,25 +2658,28 @@ Node* WasmGraphBuilder::BuildCallNode(wasm::FunctionSig* sig, Node** args,
const size_t count = 1 + params + extra;
// Reallocate the buffer to make space for extra inputs.
- args = Realloc(args, 1 + params, count).begin();
+ base::SmallVector<Node*, 16 + extra> inputs(count);
+ DCHECK_EQ(1 + params, args.size());
// Make room for the instance_node parameter at index 1, just after code.
- memmove(&args[2], &args[1], params * sizeof(Node*));
- args[1] = instance_node;
+ inputs[0] = args[0]; // code
+ inputs[1] = instance_node;
+ if (params > 0) memcpy(&inputs[2], &args[1], params * sizeof(Node*));
// Add effect and control inputs.
- args[params + 2] = Effect();
- args[params + 3] = Control();
+ inputs[params + 2] = Effect();
+ inputs[params + 3] = Control();
- Node* call = SetEffect(graph()->NewNode(op, static_cast<int>(count), args));
+ Node* call =
+ SetEffect(graph()->NewNode(op, static_cast<int>(count), inputs.begin()));
DCHECK(position == wasm::kNoCodePosition || position > 0);
if (position > 0) SetSourcePosition(call, position);
return call;
}
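// Editorial sketch (not part of the diff): the input layout assembled above,
// as a standalone function over plain pointers. For P wasm parameters the
// call node's inputs are
//   [0]=target, [1]=instance, [2..P+1]=params, [P+2]=effect, [P+3]=control.
#include <cstddef>
#include <cstring>
#include <vector>
struct Node;
std::vector<Node*> LayOutCallInputs(Node* target, Node* instance,
                                    Node* const* params, size_t param_count,
                                    Node* effect, Node* control) {
  std::vector<Node*> inputs(param_count + 4);
  inputs[0] = target;
  inputs[1] = instance;
  if (param_count > 0)
    std::memcpy(&inputs[2], params, param_count * sizeof(Node*));
  inputs[param_count + 2] = effect;
  inputs[param_count + 3] = control;
  return inputs;
}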
-Node* WasmGraphBuilder::BuildWasmCall(wasm::FunctionSig* sig, Node** args,
- Node*** rets,
+Node* WasmGraphBuilder::BuildWasmCall(wasm::FunctionSig* sig,
+ Vector<Node*> args, Vector<Node*> rets,
wasm::WasmCodePosition position,
Node* instance_node,
UseRetpoline use_retpoline) {
@@ -2725,21 +2691,22 @@ Node* WasmGraphBuilder::BuildWasmCall(wasm::FunctionSig* sig, Node** args,
size_t ret_count = sig->return_count();
if (ret_count == 0) return call; // No return value.
- *rets = Buffer(ret_count).begin();
+ DCHECK_EQ(ret_count, rets.size());
if (ret_count == 1) {
// Only a single return value.
- (*rets)[0] = call;
+ rets[0] = call;
} else {
// Create projections for all return values.
for (size_t i = 0; i < ret_count; i++) {
- (*rets)[i] = graph()->NewNode(mcgraph()->common()->Projection(i), call,
- graph()->start());
+ rets[i] = graph()->NewNode(mcgraph()->common()->Projection(i), call,
+ graph()->start());
}
}
return call;
}
-Node* WasmGraphBuilder::BuildWasmReturnCall(wasm::FunctionSig* sig, Node** args,
+Node* WasmGraphBuilder::BuildWasmReturnCall(wasm::FunctionSig* sig,
+ Vector<Node*> args,
wasm::WasmCodePosition position,
Node* instance_node,
UseRetpoline use_retpoline) {
@@ -2753,8 +2720,8 @@ Node* WasmGraphBuilder::BuildWasmReturnCall(wasm::FunctionSig* sig, Node** args,
return call;
}
-Node* WasmGraphBuilder::BuildImportCall(wasm::FunctionSig* sig, Node** args,
- Node*** rets,
+Node* WasmGraphBuilder::BuildImportCall(wasm::FunctionSig* sig,
+ Vector<Node*> args, Vector<Node*> rets,
wasm::WasmCodePosition position,
int func_index,
IsReturnCall continuation) {
@@ -2779,13 +2746,13 @@ Node* WasmGraphBuilder::BuildImportCall(wasm::FunctionSig* sig, Node** args,
case kCallContinues:
return BuildWasmCall(sig, args, rets, position, ref_node, use_retpoline);
case kReturnCall:
- DCHECK_NULL(rets);
+ DCHECK(rets.empty());
return BuildWasmReturnCall(sig, args, position, ref_node, use_retpoline);
}
}
-Node* WasmGraphBuilder::BuildImportCall(wasm::FunctionSig* sig, Node** args,
- Node*** rets,
+Node* WasmGraphBuilder::BuildImportCall(wasm::FunctionSig* sig,
+ Vector<Node*> args, Vector<Node*> rets,
wasm::WasmCodePosition position,
Node* func_index,
IsReturnCall continuation) {
@@ -2829,12 +2796,13 @@ Node* WasmGraphBuilder::BuildImportCall(wasm::FunctionSig* sig, Node** args,
case kCallContinues:
return BuildWasmCall(sig, args, rets, position, ref_node, use_retpoline);
case kReturnCall:
- DCHECK_NULL(rets);
+ DCHECK(rets.empty());
return BuildWasmReturnCall(sig, args, position, ref_node, use_retpoline);
}
}
-Node* WasmGraphBuilder::CallDirect(uint32_t index, Node** args, Node*** rets,
+Node* WasmGraphBuilder::CallDirect(uint32_t index, Vector<Node*> args,
+ Vector<Node*> rets,
wasm::WasmCodePosition position) {
DCHECK_NULL(args[0]);
wasm::FunctionSig* sig = env_->module->functions[index].sig;
@@ -2853,7 +2821,7 @@ Node* WasmGraphBuilder::CallDirect(uint32_t index, Node** args, Node*** rets,
}
Node* WasmGraphBuilder::CallIndirect(uint32_t table_index, uint32_t sig_index,
- Node** args, Node*** rets,
+ Vector<Node*> args, Vector<Node*> rets,
wasm::WasmCodePosition position) {
return BuildIndirectCall(table_index, sig_index, args, rets, position,
kCallContinues);
@@ -2902,8 +2870,9 @@ void WasmGraphBuilder::LoadIndirectFunctionTable(uint32_t table_index,
}
Node* WasmGraphBuilder::BuildIndirectCall(uint32_t table_index,
- uint32_t sig_index, Node** args,
- Node*** rets,
+ uint32_t sig_index,
+ Vector<Node*> args,
+ Vector<Node*> rets,
wasm::WasmCodePosition position,
IsReturnCall continuation) {
DCHECK_NOT_NULL(args[0]);
@@ -2993,14 +2962,14 @@ Node* WasmGraphBuilder::BuildIndirectCall(uint32_t table_index,
}
}
-Node* WasmGraphBuilder::ReturnCall(uint32_t index, Node** args,
+Node* WasmGraphBuilder::ReturnCall(uint32_t index, Vector<Node*> args,
wasm::WasmCodePosition position) {
DCHECK_NULL(args[0]);
wasm::FunctionSig* sig = env_->module->functions[index].sig;
if (env_ && index < env_->module->num_imported_functions) {
// Return Call to an imported function.
- return BuildImportCall(sig, args, nullptr, position, index, kReturnCall);
+ return BuildImportCall(sig, args, {}, position, index, kReturnCall);
}
// A direct tail call to a wasm function defined in this module.
@@ -3013,9 +2982,10 @@ Node* WasmGraphBuilder::ReturnCall(uint32_t index, Node** args,
}
Node* WasmGraphBuilder::ReturnCallIndirect(uint32_t table_index,
- uint32_t sig_index, Node** args,
+ uint32_t sig_index,
+ Vector<Node*> args,
wasm::WasmCodePosition position) {
- return BuildIndirectCall(table_index, sig_index, args, nullptr, position,
+ return BuildIndirectCall(table_index, sig_index, args, {}, position,
kReturnCall);
}
@@ -3062,6 +3032,14 @@ bool CanCover(Node* value, IrOpcode::Value opcode) {
return true;
}
+Node* WasmGraphBuilder::BuildTruncateIntPtrToInt32(Node* value) {
+ if (mcgraph()->machine()->Is64()) {
+ value =
+ graph()->NewNode(mcgraph()->machine()->TruncateInt64ToInt32(), value);
+ }
+ return value;
+}
+
Node* WasmGraphBuilder::BuildChangeInt32ToIntPtr(Node* value) {
if (mcgraph()->machine()->Is64()) {
value = graph()->NewNode(mcgraph()->machine()->ChangeInt32ToInt64(), value);
@@ -3070,12 +3048,20 @@ Node* WasmGraphBuilder::BuildChangeInt32ToIntPtr(Node* value) {
}
Node* WasmGraphBuilder::BuildChangeInt32ToSmi(Node* value) {
+ if (COMPRESS_POINTERS_BOOL) {
+ return graph()->NewNode(mcgraph()->machine()->Word32Shl(), value,
+ BuildSmiShiftBitsConstant32());
+ }
value = BuildChangeInt32ToIntPtr(value);
return graph()->NewNode(mcgraph()->machine()->WordShl(), value,
BuildSmiShiftBitsConstant());
}
Node* WasmGraphBuilder::BuildChangeUint31ToSmi(Node* value) {
+ if (COMPRESS_POINTERS_BOOL) {
+ return graph()->NewNode(mcgraph()->machine()->Word32Shl(), value,
+ BuildSmiShiftBitsConstant32());
+ }
return graph()->NewNode(mcgraph()->machine()->WordShl(),
Uint32ToUintptr(value), BuildSmiShiftBitsConstant());
}
@@ -3084,16 +3070,32 @@ Node* WasmGraphBuilder::BuildSmiShiftBitsConstant() {
return mcgraph()->IntPtrConstant(kSmiShiftSize + kSmiTagSize);
}
+Node* WasmGraphBuilder::BuildSmiShiftBitsConstant32() {
+ return mcgraph()->Int32Constant(kSmiShiftSize + kSmiTagSize);
+}
+
Node* WasmGraphBuilder::BuildChangeSmiToInt32(Node* value) {
- value = graph()->NewNode(mcgraph()->machine()->WordSar(), value,
- BuildSmiShiftBitsConstant());
- if (mcgraph()->machine()->Is64()) {
+ if (COMPRESS_POINTERS_BOOL) {
value =
graph()->NewNode(mcgraph()->machine()->TruncateInt64ToInt32(), value);
+ value = graph()->NewNode(mcgraph()->machine()->Word32Sar(), value,
+ BuildSmiShiftBitsConstant32());
+ } else {
+ value = BuildChangeSmiToIntPtr(value);
+ value = BuildTruncateIntPtrToInt32(value);
}
return value;
}
+Node* WasmGraphBuilder::BuildChangeSmiToIntPtr(Node* value) {
+ if (COMPRESS_POINTERS_BOOL) {
+ value = BuildChangeSmiToInt32(value);
+ return BuildChangeInt32ToIntPtr(value);
+ }
+ return graph()->NewNode(mcgraph()->machine()->WordSar(), value,
+ BuildSmiShiftBitsConstant());
+}
+
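// Editorial sketch (not part of the diff): the tagging arithmetic these
// builders emit, written out in plain C++. The shift amounts assume the
// usual V8 layout -- kSmiTagSize == 1, with kSmiShiftSize == 0 under pointer
// compression and 31 on full 64-bit pointers -- and an arithmetic right
// shift; treat both as assumptions.
#include <cstdint>
int32_t TagSmi32(int32_t v) {  // Word32Shl path (pointer compression)
  return static_cast<int32_t>(static_cast<uint32_t>(v) << 1);
}
int32_t UntagSmi32(int32_t smi) { return smi >> 1; }  // Word32Sar path
int64_t TagSmi64(int32_t v) {  // WordShl path (full 64-bit pointers)
  return static_cast<int64_t>(static_cast<uint64_t>(v) << 32);
}
int64_t UntagSmi64(int64_t smi) { return smi >> 32; }  // WordSar path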
Node* WasmGraphBuilder::BuildConvertUint32ToSmiWithSaturation(Node* value,
uint32_t maxval) {
DCHECK(Smi::IsValid(maxval));
@@ -3181,14 +3183,16 @@ Node* WasmGraphBuilder::CreateOrMergeIntoPhi(MachineRepresentation rep,
if (IsPhiWithMerge(tnode, merge)) {
AppendToPhi(tnode, fnode);
} else if (tnode != fnode) {
+ // Note that it is not safe to use {Buffer} here since this method is used
+ // via {CheckForException} while the {Buffer} is in use by another method.
uint32_t count = merge->InputCount();
// + 1 for the merge node.
- Vector<Node*> vals = Buffer(count + 1);
- for (uint32_t j = 0; j < count - 1; j++) vals[j] = tnode;
- vals[count - 1] = fnode;
- vals[count] = merge;
- return graph()->NewNode(mcgraph()->common()->Phi(rep, count), count + 1,
- vals.begin());
+ base::SmallVector<Node*, 9> inputs(count + 1);
+ for (uint32_t j = 0; j < count - 1; j++) inputs[j] = tnode;
+ inputs[count - 1] = fnode;
+ inputs[count] = merge;
+ tnode = graph()->NewNode(mcgraph()->common()->Phi(rep, count), count + 1,
+ inputs.begin());
}
return tnode;
}
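// Editorial sketch (not part of the diff): the hazard the comment above
// guards against, reduced to a few lines. If a helper reachable from inside
// the loop also hands out the shared scratch buffer, it silently clobbers
// the caller's slice -- hence the switch to a local base::SmallVector.
#include <cstdio>
static int scratch[4];  // the shared "Buffer"
int* GetScratch() { return scratch; }
void HelperThatAlsoUsesBuffer() { GetScratch()[0] = 99; }
int main() {
  int* mine = GetScratch();
  mine[0] = 1;
  HelperThatAlsoUsesBuffer();    // cf. the {CheckForException} path
  std::printf("%d\n", mine[0]);  // prints 99: the slice was clobbered
  return 0;
}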
@@ -3198,13 +3202,18 @@ Node* WasmGraphBuilder::CreateOrMergeIntoEffectPhi(Node* merge, Node* tnode,
if (IsPhiWithMerge(tnode, merge)) {
AppendToPhi(tnode, fnode);
} else if (tnode != fnode) {
+ // Note that it is not safe to use {Buffer} here since this method is used
+ // via {CheckForException} while the {Buffer} is in use by another method.
uint32_t count = merge->InputCount();
- Vector<Node*> effects = Buffer(count);
+ // + 1 for the merge node.
+ base::SmallVector<Node*, 9> inputs(count + 1);
for (uint32_t j = 0; j < count - 1; j++) {
- effects[j] = tnode;
+ inputs[j] = tnode;
}
- effects[count - 1] = fnode;
- tnode = EffectPhi(count, effects.begin(), merge);
+ inputs[count - 1] = fnode;
+ inputs[count] = merge;
+ tnode = graph()->NewNode(mcgraph()->common()->EffectPhi(count), count + 1,
+ inputs.begin());
}
return tnode;
}
@@ -3310,10 +3319,7 @@ Node* WasmGraphBuilder::CurrentMemoryPages() {
Node* result =
graph()->NewNode(mcgraph()->machine()->WordShr(), mem_size,
mcgraph()->Int32Constant(wasm::kWasmPageSizeLog2));
- if (mcgraph()->machine()->Is64()) {
- result =
- graph()->NewNode(mcgraph()->machine()->TruncateInt64ToInt32(), result);
- }
+ result = BuildTruncateIntPtrToInt32(result);
return result;
}
@@ -3365,7 +3371,7 @@ Node* WasmGraphBuilder::BuildCallToRuntime(Runtime::FunctionId f,
parameter_count, effect_, Control());
}
-Node* WasmGraphBuilder::GetGlobal(uint32_t index) {
+Node* WasmGraphBuilder::GlobalGet(uint32_t index) {
const wasm::WasmGlobal& global = env_->module->globals[index];
if (wasm::ValueTypes::IsReferenceType(global.type)) {
if (global.mutability && global.imported) {
@@ -3395,7 +3401,7 @@ Node* WasmGraphBuilder::GetGlobal(uint32_t index) {
return result;
}
-Node* WasmGraphBuilder::SetGlobal(uint32_t index, Node* val) {
+Node* WasmGraphBuilder::GlobalSet(uint32_t index, Node* val) {
const wasm::WasmGlobal& global = env_->module->globals[index];
if (wasm::ValueTypes::IsReferenceType(global.type)) {
if (global.mutability && global.imported) {
@@ -4008,6 +4014,8 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) {
return graph()->NewNode(mcgraph()->machine()->F64x2Abs(), inputs[0]);
case wasm::kExprF64x2Neg:
return graph()->NewNode(mcgraph()->machine()->F64x2Neg(), inputs[0]);
+ case wasm::kExprF64x2Sqrt:
+ return graph()->NewNode(mcgraph()->machine()->F64x2Sqrt(), inputs[0]);
case wasm::kExprF64x2Add:
return graph()->NewNode(mcgraph()->machine()->F64x2Add(), inputs[0],
inputs[1]);
@@ -4044,6 +4052,12 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) {
case wasm::kExprF64x2Ge:
return graph()->NewNode(mcgraph()->machine()->F64x2Le(), inputs[1],
inputs[0]);
+ case wasm::kExprF64x2Qfma:
+ return graph()->NewNode(mcgraph()->machine()->F64x2Qfma(), inputs[0],
+ inputs[1], inputs[2]);
+ case wasm::kExprF64x2Qfms:
+ return graph()->NewNode(mcgraph()->machine()->F64x2Qfms(), inputs[0],
+ inputs[1], inputs[2]);
case wasm::kExprF32x4Splat:
return graph()->NewNode(mcgraph()->machine()->F32x4Splat(), inputs[0]);
case wasm::kExprF32x4SConvertI32x4:
@@ -4056,6 +4070,8 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) {
return graph()->NewNode(mcgraph()->machine()->F32x4Abs(), inputs[0]);
case wasm::kExprF32x4Neg:
return graph()->NewNode(mcgraph()->machine()->F32x4Neg(), inputs[0]);
+ case wasm::kExprF32x4Sqrt:
+ return graph()->NewNode(mcgraph()->machine()->F32x4Sqrt(), inputs[0]);
case wasm::kExprF32x4RecipApprox:
return graph()->NewNode(mcgraph()->machine()->F32x4RecipApprox(),
inputs[0]);
@@ -4101,6 +4117,12 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) {
case wasm::kExprF32x4Ge:
return graph()->NewNode(mcgraph()->machine()->F32x4Le(), inputs[1],
inputs[0]);
+ case wasm::kExprF32x4Qfma:
+ return graph()->NewNode(mcgraph()->machine()->F32x4Qfma(), inputs[0],
+ inputs[1], inputs[2]);
+ case wasm::kExprF32x4Qfms:
+ return graph()->NewNode(mcgraph()->machine()->F32x4Qfms(), inputs[0],
+ inputs[1], inputs[2]);
case wasm::kExprI64x2Splat:
return graph()->NewNode(mcgraph()->machine()->I64x2Splat(), inputs[0]);
case wasm::kExprI64x2Neg:
@@ -4459,6 +4481,9 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) {
return graph()->NewNode(mcgraph()->machine()->S1x16AnyTrue(), inputs[0]);
case wasm::kExprS1x16AllTrue:
return graph()->NewNode(mcgraph()->machine()->S1x16AllTrue(), inputs[0]);
+ case wasm::kExprS8x16Swizzle:
+ return graph()->NewNode(mcgraph()->machine()->S8x16Swizzle(), inputs[0],
+ inputs[1]);
default:
FATAL_UNSUPPORTED_OPCODE(opcode);
}
@@ -4492,13 +4517,23 @@ Node* WasmGraphBuilder::SimdLaneOp(wasm::WasmOpcode opcode, uint8_t lane,
case wasm::kExprI32x4ReplaceLane:
return graph()->NewNode(mcgraph()->machine()->I32x4ReplaceLane(lane),
inputs[0], inputs[1]);
- case wasm::kExprI16x8ExtractLane:
+ case wasm::kExprI16x8ExtractLaneS:
+ return graph()->NewNode(
+ mcgraph()->machine()->SignExtendWord16ToInt32(),
+ graph()->NewNode(mcgraph()->machine()->I16x8ExtractLane(lane),
+ inputs[0]));
+ case wasm::kExprI16x8ExtractLaneU:
return graph()->NewNode(mcgraph()->machine()->I16x8ExtractLane(lane),
inputs[0]);
case wasm::kExprI16x8ReplaceLane:
return graph()->NewNode(mcgraph()->machine()->I16x8ReplaceLane(lane),
inputs[0], inputs[1]);
- case wasm::kExprI8x16ExtractLane:
+ case wasm::kExprI8x16ExtractLaneS:
+ return graph()->NewNode(
+ mcgraph()->machine()->SignExtendWord8ToInt32(),
+ graph()->NewNode(mcgraph()->machine()->I8x16ExtractLane(lane),
+ inputs[0]));
+ case wasm::kExprI8x16ExtractLaneU:
return graph()->NewNode(mcgraph()->machine()->I8x16ExtractLane(lane),
inputs[0]);
case wasm::kExprI8x16ReplaceLane:
@@ -5076,7 +5111,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
CallDescriptor* GetI64ToBigIntCallDescriptor() {
if (!lowering_special_case_) {
- lowering_special_case_ = base::make_unique<Int64LoweringSpecialCase>();
+ lowering_special_case_ = std::make_unique<Int64LoweringSpecialCase>();
}
if (lowering_special_case_->i64_to_bigint_call_descriptor) {
@@ -5112,7 +5147,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
CallDescriptor* GetBigIntToI64CallDescriptor() {
if (!lowering_special_case_) {
- lowering_special_case_ = base::make_unique<Int64LoweringSpecialCase>();
+ lowering_special_case_ = std::make_unique<Int64LoweringSpecialCase>();
}
if (lowering_special_case_->bigint_to_i64_call_descriptor) {
@@ -5613,7 +5648,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
Node* function_index_smi = LOAD_RAW(
function_data,
WasmExportedFunctionData::kFunctionIndexOffset - kHeapObjectTag,
- MachineType::TypeCompressedTagged());
+ MachineType::TypeCompressedTaggedSigned());
Node* function_index = BuildChangeSmiToInt32(function_index_smi);
return function_index;
}
@@ -5622,13 +5657,30 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
Node* jump_table_offset_smi = LOAD_RAW(
function_data,
WasmExportedFunctionData::kJumpTableOffsetOffset - kHeapObjectTag,
- MachineType::TypeCompressedTagged());
- Node* jump_table_offset = BuildChangeSmiToInt32(jump_table_offset_smi);
+ MachineType::TypeCompressedTaggedSigned());
+ Node* jump_table_offset = BuildChangeSmiToIntPtr(jump_table_offset_smi);
return jump_table_offset;
}
+ Node* BuildMultiReturnFixedArrayFromIterable(const wasm::FunctionSig* sig,
+ Node* iterable, Node* context) {
+ Node* iterable_to_fixed_array =
+ BuildLoadBuiltinFromIsolateRoot(Builtins::kIterableToFixedArrayForWasm);
+ IterableToFixedArrayForWasmDescriptor interface_descriptor;
+ Node* length = BuildChangeUint31ToSmi(
+ Uint32Constant(static_cast<uint32_t>(sig->return_count())));
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
+ mcgraph()->zone(), interface_descriptor,
+ interface_descriptor.GetStackParameterCount(), CallDescriptor::kNoFlags,
+ Operator::kNoProperties, StubCallMode::kCallCodeObject);
+ return SetEffect(graph()->NewNode(
+ mcgraph()->common()->Call(call_descriptor), iterable_to_fixed_array,
+ iterable, length, context, Effect(), Control()));
+ }
+
void BuildJSToWasmWrapper(bool is_import) {
const int wasm_count = static_cast<int>(sig_->parameter_count());
+ const int rets_count = static_cast<int>(sig_->return_count());
// Build the start and the JS parameter nodes.
SetEffect(SetControl(Start(wasm_count + 5)));
@@ -5662,8 +5714,8 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
}
const int args_count = wasm_count + 1; // +1 for wasm_code.
- Vector<Node*> args = Buffer(args_count);
- Node** rets;
+ base::SmallVector<Node*, 16> args(args_count);
+ base::SmallVector<Node*, 1> rets(rets_count);
// Convert JS parameters to wasm numbers.
for (int i = 0; i < wasm_count; ++i) {
@@ -5680,8 +5732,8 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
// Load function index from {WasmExportedFunctionData}.
Node* function_index =
BuildLoadFunctionIndexFromExportedFunctionData(function_data);
- BuildImportCall(sig_, args.begin(), &rets, wasm::kNoCodePosition,
- function_index, kCallContinues);
+ BuildImportCall(sig_, VectorOf(args), VectorOf(rets),
+ wasm::kNoCodePosition, function_index, kCallContinues);
} else {
// Call to a wasm function defined in this module.
// The call target is the jump table slot for that function.
@@ -5693,8 +5745,8 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
mcgraph()->machine()->IntAdd(), jump_table_start, jump_table_offset);
args[0] = jump_table_slot;
- BuildWasmCall(sig_, args.begin(), &rets, wasm::kNoCodePosition, nullptr,
- kNoRetpoline);
+ BuildWasmCall(sig_, VectorOf(args), VectorOf(rets), wasm::kNoCodePosition,
+ nullptr, kNoRetpoline);
}
// Clear the ThreadInWasm flag.
@@ -5765,7 +5817,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
sloppy_receiver = false;
V8_FALLTHROUGH; // fallthru
case WasmImportCallKind::kJSFunctionArityMatchSloppy: {
- Vector<Node*> args = Buffer(wasm_count + 7);
+ base::SmallVector<Node*, 16> args(wasm_count + 7);
int pos = 0;
Node* function_context =
LOAD_RAW(callable_node,
@@ -5785,7 +5837,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
graph()->zone(), false, wasm_count + 1, CallDescriptor::kNoFlags);
// Convert wasm numbers to JS values.
- pos = AddArgumentNodes(args, pos, wasm_count, sig_);
+ pos = AddArgumentNodes(VectorOf(args), pos, wasm_count, sig_);
args[pos++] = undefined_node; // new target
args[pos++] = mcgraph()->Int32Constant(wasm_count); // argument count
@@ -5805,7 +5857,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
sloppy_receiver = false;
V8_FALLTHROUGH; // fallthru
case WasmImportCallKind::kJSFunctionArityMismatchSloppy: {
- Vector<Node*> args = Buffer(wasm_count + 9);
+ base::SmallVector<Node*, 16> args(wasm_count + 9);
int pos = 0;
Node* function_context =
LOAD_RAW(callable_node,
@@ -5852,7 +5904,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
flags, Operator::kNoProperties);
// Convert wasm numbers to JS values.
- pos = AddArgumentNodes(args, pos, wasm_count, sig_);
+ pos = AddArgumentNodes(VectorOf(args), pos, wasm_count, sig_);
args[pos++] = function_context;
args[pos++] = Effect();
args[pos++] = Control();
@@ -5866,7 +5918,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
// === General case of unknown callable ==================================
// =======================================================================
case WasmImportCallKind::kUseCallBuiltin: {
- Vector<Node*> args = Buffer(wasm_count + 7);
+ base::SmallVector<Node*, 16> args(wasm_count + 7);
int pos = 0;
args[pos++] =
BuildLoadBuiltinFromIsolateRoot(Builtins::kCall_ReceiverIsAny);
@@ -5879,7 +5931,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
CallDescriptor::kNoFlags, Operator::kNoProperties);
// Convert wasm numbers to JS values.
- pos = AddArgumentNodes(args, pos, wasm_count, sig_);
+ pos = AddArgumentNodes(VectorOf(args), pos, wasm_count, sig_);
      // The native_context is sufficient here, because all kinds of callables
// which depend on the context provide their own context. The context
@@ -5903,15 +5955,24 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
SetEffect(call);
SetSourcePosition(call, 0);
- // Convert the return value back.
- Node* val = sig_->return_count() == 0
- ? mcgraph()->Int32Constant(0)
- : FromJS(call, native_context, sig_->GetReturn());
-
- // Set the ThreadInWasm flag again.
- BuildModifyThreadInWasmFlag(true);
-
- Return(val);
+ // Convert the return value(s) back.
+ if (sig_->return_count() <= 1) {
+ Node* val = sig_->return_count() == 0
+ ? mcgraph()->Int32Constant(0)
+ : FromJS(call, native_context, sig_->GetReturn());
+ BuildModifyThreadInWasmFlag(true);
+ Return(val);
+ } else {
+ Node* fixed_array =
+ BuildMultiReturnFixedArrayFromIterable(sig_, call, native_context);
+ base::SmallVector<Node*, 8> wasm_values(sig_->return_count());
+ for (unsigned i = 0; i < sig_->return_count(); ++i) {
+ wasm_values[i] = FromJS(LOAD_FIXED_ARRAY_SLOT_ANY(fixed_array, i),
+ native_context, sig_->GetReturn(i));
+ }
+ BuildModifyThreadInWasmFlag(true);
+ Return(VectorOf(wasm_values));
+ }
if (ContainsInt64(sig_)) LowerInt64(kCalledFromWasm);
return true;
@@ -6006,7 +6067,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
if (return_count == 0) {
Return(Int32Constant(0));
} else {
- Vector<Node*> returns = Buffer(return_count);
+ base::SmallVector<Node*, 8> returns(return_count);
offset = 0;
for (size_t i = 0; i < return_count; ++i) {
wasm::ValueType type = sig_->GetReturn(i);
@@ -6016,7 +6077,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
returns[i] = val;
offset += wasm::ValueTypes::ElementSizeInBytes(type);
}
- Return(returns);
+ Return(VectorOf(returns));
}
if (ContainsInt64(sig_)) LowerInt64(kCalledFromWasm);
@@ -6078,7 +6139,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
if (return_count == 0) {
Return(Int32Constant(0));
} else {
- Vector<Node*> returns = Buffer(return_count);
+ base::SmallVector<Node*, 8> returns(return_count);
offset = 0;
for (size_t i = 0; i < return_count; ++i) {
wasm::ValueType type = sig_->GetReturn(i);
@@ -6088,7 +6149,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
returns[i] = val;
offset += wasm::ValueTypes::ElementSizeInBytes(type);
}
- Return(returns);
+ Return(VectorOf(returns));
}
if (ContainsInt64(sig_)) LowerInt64(kCalledFromWasm);
@@ -6130,10 +6191,10 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
wasm::ObjectAccess::ToTagged(WasmJSFunctionData::kCallableOffset));
// Call the underlying closure.
- Vector<Node*> args = Buffer(wasm_count + 7);
+ base::SmallVector<Node*, 16> args(wasm_count + 7);
int pos = 0;
- args[pos++] = graph()->NewNode(mcgraph()->common()->HeapConstant(
- BUILTIN_CODE(isolate, Call_ReceiverIsAny)));
+ args[pos++] =
+ BuildLoadBuiltinFromIsolateRoot(Builtins::kCall_ReceiverIsAny);
args[pos++] = callable;
args[pos++] = mcgraph()->Int32Constant(wasm_count); // argument count
args[pos++] = BuildLoadUndefinedValueFromInstance(); // receiver
@@ -6158,14 +6219,30 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
Node* call = SetEffect(graph()->NewNode(
mcgraph()->common()->Call(call_descriptor), pos, args.begin()));
- // TODO(wasm): Extend this to support multi-return.
- DCHECK_LE(sig_->return_count(), 1);
-
// Convert return JS values to wasm numbers and back to JS values.
- Node* jsval =
- sig_->return_count() == 0
- ? BuildLoadUndefinedValueFromInstance()
- : ToJS(FromJS(call, context, sig_->GetReturn()), sig_->GetReturn());
+ Node* jsval;
+ if (sig_->return_count() == 0) {
+ jsval = BuildLoadUndefinedValueFromInstance();
+ } else if (sig_->return_count() == 1) {
+ jsval = ToJS(FromJS(call, context, sig_->GetReturn()), sig_->GetReturn());
+ } else {
+ Node* fixed_array =
+ BuildMultiReturnFixedArrayFromIterable(sig_, call, context);
+ int32_t return_count = static_cast<int32_t>(sig_->return_count());
+ Node* size =
+ graph()->NewNode(mcgraph()->common()->NumberConstant(return_count));
+ Node* result_fixed_array =
+ BuildCallToRuntime(Runtime::kWasmNewMultiReturnFixedArray, &size, 1);
+ for (unsigned i = 0; i < sig_->return_count(); ++i) {
+ const auto& type = sig_->GetReturn(i);
+ Node* elem = LOAD_FIXED_ARRAY_SLOT_ANY(fixed_array, i);
+ Node* cast = ToJS(FromJS(elem, context, type), type);
+ STORE_FIXED_ARRAY_SLOT_ANY(result_fixed_array, i, cast);
+ }
+ jsval = BuildCallToRuntimeWithContext(Runtime::kWasmNewMultiReturnJSArray,
+ context, &result_fixed_array, 1,
+ effect_, Control());
+ }
Return(jsval);
}
@@ -6184,7 +6261,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
kNoWriteBarrier);
int wasm_arg_count = static_cast<int>(sig_->parameter_count());
- Vector<Node*> args = Buffer(wasm_arg_count + 4);
+ base::SmallVector<Node*, 16> args(wasm_arg_count + 4);
int pos = 0;
args[pos++] = code_entry;
@@ -6222,14 +6299,12 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
pos = 0;
offset = 0;
for (wasm::ValueType type : sig_->returns()) {
- StoreRepresentation store_rep(
- wasm::ValueTypes::MachineRepresentationFor(type), kNoWriteBarrier);
Node* value = sig_->return_count() == 1
? call
: graph()->NewNode(mcgraph()->common()->Projection(pos),
call, Control());
- SetEffect(graph()->NewNode(mcgraph()->machine()->Store(store_rep),
- arg_buffer, Int32Constant(offset), value,
+ SetEffect(graph()->NewNode(GetSafeStoreOperator(offset, type), arg_buffer,
+ Int32Constant(offset), value,
Effect(), Control()));
offset += wasm::ValueTypes::ElementSizeInBytes(type);
pos++;
@@ -6287,7 +6362,7 @@ std::unique_ptr<OptimizedCompilationJob> NewJSToWasmCompilationJob(
// Create the Graph.
//----------------------------------------------------------------------------
std::unique_ptr<Zone> zone =
- base::make_unique<Zone>(wasm_engine->allocator(), ZONE_NAME);
+ std::make_unique<Zone>(wasm_engine->allocator(), ZONE_NAME);
Graph* graph = new (zone.get()) Graph(zone.get());
CommonOperatorBuilder common(zone.get());
MachineOperatorBuilder machine(
@@ -6702,7 +6777,7 @@ wasm::WasmCompilationResult CompileWasmInterpreterEntry(
MaybeHandle<Code> CompileJSToJSWrapper(Isolate* isolate,
wasm::FunctionSig* sig) {
std::unique_ptr<Zone> zone =
- base::make_unique<Zone>(isolate->allocator(), ZONE_NAME);
+ std::make_unique<Zone>(isolate->allocator(), ZONE_NAME);
Graph* graph = new (zone.get()) Graph(zone.get());
CommonOperatorBuilder common(zone.get());
MachineOperatorBuilder machine(
@@ -6749,7 +6824,7 @@ MaybeHandle<Code> CompileJSToJSWrapper(Isolate* isolate,
MaybeHandle<Code> CompileCWasmEntry(Isolate* isolate, wasm::FunctionSig* sig) {
std::unique_ptr<Zone> zone =
- base::make_unique<Zone>(isolate->allocator(), ZONE_NAME);
+ std::make_unique<Zone>(isolate->allocator(), ZONE_NAME);
Graph* graph = new (zone.get()) Graph(zone.get());
CommonOperatorBuilder common(zone.get());
MachineOperatorBuilder machine(
@@ -6916,6 +6991,7 @@ wasm::WasmCompilationResult ExecuteTurbofanWasmCompilation(
counters->wasm_compile_function_peak_memory_bytes()->AddSample(
static_cast<int>(mcgraph->graph()->zone()->allocation_size()));
auto result = info.ReleaseWasmCompilationResult();
+ CHECK_NOT_NULL(result); // Compilation expected to succeed.
DCHECK_EQ(wasm::ExecutionTier::kTurbofan, result->result_tier);
return std::move(*result);
}
diff --git a/deps/v8/src/compiler/wasm-compiler.h b/deps/v8/src/compiler/wasm-compiler.h
index dd86ea1499..de0ca58c23 100644
--- a/deps/v8/src/compiler/wasm-compiler.h
+++ b/deps/v8/src/compiler/wasm-compiler.h
@@ -179,16 +179,6 @@ class WasmGraphBuilder {
wasm::CompilationEnv* env, Zone* zone, MachineGraph* mcgraph,
wasm::FunctionSig* sig, compiler::SourcePositionTable* spt = nullptr);
- Vector<Node*> Buffer(size_t count) {
- if (count > cur_bufsize_) {
- size_t new_size = count + cur_bufsize_ + 5;
- cur_buffer_ =
- reinterpret_cast<Node**>(zone_->New(new_size * sizeof(Node*)));
- cur_bufsize_ = new_size;
- }
- return {cur_buffer_, count};
- }
-
//-----------------------------------------------------------------------
// Operations independent of {control} or {effect}.
//-----------------------------------------------------------------------
@@ -199,11 +189,11 @@ class WasmGraphBuilder {
Node* TerminateLoop(Node* effect, Node* control);
Node* TerminateThrow(Node* effect, Node* control);
Node* Merge(unsigned count, Node** controls);
- Node* Phi(wasm::ValueType type, unsigned count, Node** vals, Node* control);
+ Node* Phi(wasm::ValueType type, unsigned count, Node** vals_and_control);
Node* CreateOrMergeIntoPhi(MachineRepresentation rep, Node* merge,
Node* tnode, Node* fnode);
Node* CreateOrMergeIntoEffectPhi(Node* merge, Node* tnode, Node* fnode);
- Node* EffectPhi(unsigned count, Node** effects, Node* control);
+ Node* EffectPhi(unsigned count, Node** effects_and_control);
Node* RefNull();
Node* RefFunc(uint32_t function_index);
Node* Uint32Constant(uint32_t value);
@@ -223,8 +213,9 @@ class WasmGraphBuilder {
Node* ExceptionTagEqual(Node* caught_tag, Node* expected_tag);
Node* LoadExceptionTagFromTable(uint32_t exception_index);
Node* GetExceptionTag(Node* except_obj);
- Vector<Node*> GetExceptionValues(Node* except_obj,
- const wasm::WasmException* exception);
+ Node* GetExceptionValues(Node* except_obj,
+ const wasm::WasmException* exception,
+ Vector<Node*> values_out);
bool IsPhiWithMerge(Node* phi, Node* merge);
bool ThrowsException(Node* node, Node** if_success, Node** if_exception);
void AppendToMerge(Node* merge, Node* from);
@@ -275,20 +266,21 @@ class WasmGraphBuilder {
}
Node* Unreachable(wasm::WasmCodePosition position);
- Node* CallDirect(uint32_t index, Node** args, Node*** rets,
+ Node* CallDirect(uint32_t index, Vector<Node*> args, Vector<Node*> rets,
wasm::WasmCodePosition position);
- Node* CallIndirect(uint32_t table_index, uint32_t sig_index, Node** args,
- Node*** rets, wasm::WasmCodePosition position);
+ Node* CallIndirect(uint32_t table_index, uint32_t sig_index,
+ Vector<Node*> args, Vector<Node*> rets,
+ wasm::WasmCodePosition position);
- Node* ReturnCall(uint32_t index, Node** args,
+ Node* ReturnCall(uint32_t index, Vector<Node*> args,
wasm::WasmCodePosition position);
Node* ReturnCallIndirect(uint32_t table_index, uint32_t sig_index,
- Node** args, wasm::WasmCodePosition position);
+ Vector<Node*> args, wasm::WasmCodePosition position);
Node* Invert(Node* node);
- Node* GetGlobal(uint32_t index);
- Node* SetGlobal(uint32_t index, Node* val);
+ Node* GlobalGet(uint32_t index);
+ Node* GlobalSet(uint32_t index, Node* val);
Node* TableGet(uint32_t table_index, Node* index,
wasm::WasmCodePosition position);
Node* TableSet(uint32_t table_index, Node* index, Node* val,
@@ -427,8 +419,6 @@ class WasmGraphBuilder {
void RemoveBytecodePositionDecorator();
protected:
- static const int kDefaultBufferSize = 16;
-
Zone* const zone_;
MachineGraph* const mcgraph_;
wasm::CompilationEnv* const env_;
@@ -444,9 +434,6 @@ class WasmGraphBuilder {
SetOncePointer<Node> isolate_root_node_;
SetOncePointer<const Operator> stack_check_call_operator_;
- Node** cur_buffer_;
- size_t cur_bufsize_;
- Node* def_buffer_[kDefaultBufferSize];
bool has_simd_ = false;
bool needs_stack_check_ = false;
const bool untrusted_code_mitigations_ = true;
@@ -496,28 +483,29 @@ class WasmGraphBuilder {
template <typename... Args>
Node* BuildCCall(MachineSignature* sig, Node* function, Args... args);
- Node* BuildCallNode(wasm::FunctionSig* sig, Node** args,
+ Node* BuildCallNode(wasm::FunctionSig* sig, Vector<Node*> args,
wasm::WasmCodePosition position, Node* instance_node,
const Operator* op);
// Helper function for {BuildIndirectCall}.
void LoadIndirectFunctionTable(uint32_t table_index, Node** ift_size,
Node** ift_sig_ids, Node** ift_targets,
Node** ift_instances);
- Node* BuildIndirectCall(uint32_t table_index, uint32_t sig_index, Node** args,
- Node*** rets, wasm::WasmCodePosition position,
+ Node* BuildIndirectCall(uint32_t table_index, uint32_t sig_index,
+ Vector<Node*> args, Vector<Node*> rets,
+ wasm::WasmCodePosition position,
IsReturnCall continuation);
- Node* BuildWasmCall(wasm::FunctionSig* sig, Node** args, Node*** rets,
- wasm::WasmCodePosition position, Node* instance_node,
- UseRetpoline use_retpoline);
- Node* BuildWasmReturnCall(wasm::FunctionSig* sig, Node** args,
+ Node* BuildWasmCall(wasm::FunctionSig* sig, Vector<Node*> args,
+ Vector<Node*> rets, wasm::WasmCodePosition position,
+ Node* instance_node, UseRetpoline use_retpoline);
+ Node* BuildWasmReturnCall(wasm::FunctionSig* sig, Vector<Node*> args,
wasm::WasmCodePosition position,
Node* instance_node, UseRetpoline use_retpoline);
- Node* BuildImportCall(wasm::FunctionSig* sig, Node** args, Node*** rets,
- wasm::WasmCodePosition position, int func_index,
- IsReturnCall continuation);
- Node* BuildImportCall(wasm::FunctionSig* sig, Node** args, Node*** rets,
- wasm::WasmCodePosition position, Node* func_index,
- IsReturnCall continuation);
+ Node* BuildImportCall(wasm::FunctionSig* sig, Vector<Node*> args,
+ Vector<Node*> rets, wasm::WasmCodePosition position,
+ int func_index, IsReturnCall continuation);
+ Node* BuildImportCall(wasm::FunctionSig* sig, Vector<Node*> args,
+ Vector<Node*> rets, wasm::WasmCodePosition position,
+ Node* func_index, IsReturnCall continuation);
Node* BuildF32CopySign(Node* left, Node* right);
Node* BuildF64CopySign(Node* left, Node* right);
@@ -574,11 +562,14 @@ class WasmGraphBuilder {
MachineType result_type, wasm::TrapReason trap_zero,
wasm::WasmCodePosition position);
+ Node* BuildTruncateIntPtrToInt32(Node* value);
Node* BuildChangeInt32ToIntPtr(Node* value);
Node* BuildChangeInt32ToSmi(Node* value);
Node* BuildChangeUint31ToSmi(Node* value);
Node* BuildSmiShiftBitsConstant();
+ Node* BuildSmiShiftBitsConstant32();
Node* BuildChangeSmiToInt32(Node* value);
+ Node* BuildChangeSmiToIntPtr(Node* value);
// generates {index > max ? Smi(max) : Smi(index)}
Node* BuildConvertUint32ToSmiWithSaturation(Node* index, uint32_t maxval);
@@ -599,15 +590,8 @@ class WasmGraphBuilder {
Node* BuildDecodeException32BitValue(Node* values_array, uint32_t* index);
Node* BuildDecodeException64BitValue(Node* values_array, uint32_t* index);
- Vector<Node*> Realloc(Node* const* buffer, size_t old_count,
- size_t new_count) {
- DCHECK_GE(new_count, old_count); // Only support growing.
- Vector<Node*> buf = Buffer(new_count);
- if (buf.begin() != buffer) {
- memcpy(buf.begin(), buffer, old_count * sizeof(Node*));
- }
- return buf;
- }
+ Node* BuildMultiReturnFixedArrayFromIterable(const wasm::FunctionSig* sig,
+ Node* iterable, Node* context);
//-----------------------------------------------------------------------
// Operations involving the CEntry, a dependency we want to remove
diff --git a/deps/v8/src/compiler/zone-stats.h b/deps/v8/src/compiler/zone-stats.h
index 63d58eb99f..68036f116d 100644
--- a/deps/v8/src/compiler/zone-stats.h
+++ b/deps/v8/src/compiler/zone-stats.h
@@ -33,6 +33,8 @@ class V8_EXPORT_PRIVATE ZoneStats final {
zone_ = nullptr;
}
+ ZoneStats* zone_stats() const { return zone_stats_; }
+
private:
const char* zone_name_;
ZoneStats* const zone_stats_;
diff --git a/deps/v8/src/d8/OWNERS b/deps/v8/src/d8/OWNERS
index ff3b6d7372..0f3e3d8e5c 100644
--- a/deps/v8/src/d8/OWNERS
+++ b/deps/v8/src/d8/OWNERS
@@ -1,5 +1,5 @@
binji@chromium.org
bmeurer@chromium.org
-clemensh@chromium.org
+clemensb@chromium.org
verwaest@chromium.org
yangguo@chromium.org
diff --git a/deps/v8/src/d8/d8-platforms.cc b/deps/v8/src/d8/d8-platforms.cc
index 42ce14f4f7..8faf893c80 100644
--- a/deps/v8/src/d8/d8-platforms.cc
+++ b/deps/v8/src/d8/d8-platforms.cc
@@ -11,7 +11,6 @@
#include "src/base/platform/mutex.h"
#include "src/base/platform/platform.h"
#include "src/base/platform/time.h"
-#include "src/base/template-utils.h"
#include "src/base/utils/random-number-generator.h"
#include "src/d8/d8-platforms.h"
@@ -94,7 +93,7 @@ class PredictablePlatform : public Platform {
std::unique_ptr<Platform> MakePredictablePlatform(
std::unique_ptr<Platform> platform) {
- return base::make_unique<PredictablePlatform>(std::move(platform));
+ return std::make_unique<PredictablePlatform>(std::move(platform));
}
class DelayedTasksPlatform : public Platform {
@@ -284,14 +283,14 @@ class DelayedTasksPlatform : public Platform {
}
std::unique_ptr<Task> MakeDelayedTask(std::unique_ptr<Task> task) {
- return base::make_unique<DelayedTask>(std::move(task),
- GetRandomDelayInMilliseconds());
+ return std::make_unique<DelayedTask>(std::move(task),
+ GetRandomDelayInMilliseconds());
}
std::unique_ptr<IdleTask> MakeDelayedIdleTask(
std::unique_ptr<IdleTask> task) {
- return base::make_unique<DelayedIdleTask>(std::move(task),
- GetRandomDelayInMilliseconds());
+ return std::make_unique<DelayedIdleTask>(std::move(task),
+ GetRandomDelayInMilliseconds());
}
DISALLOW_COPY_AND_ASSIGN(DelayedTasksPlatform);
@@ -300,10 +299,10 @@ class DelayedTasksPlatform : public Platform {
std::unique_ptr<Platform> MakeDelayedTasksPlatform(
std::unique_ptr<Platform> platform, int64_t random_seed) {
if (random_seed) {
- return base::make_unique<DelayedTasksPlatform>(std::move(platform),
- random_seed);
+ return std::make_unique<DelayedTasksPlatform>(std::move(platform),
+ random_seed);
}
- return base::make_unique<DelayedTasksPlatform>(std::move(platform));
+ return std::make_unique<DelayedTasksPlatform>(std::move(platform));
}
} // namespace v8
diff --git a/deps/v8/src/d8/d8.cc b/deps/v8/src/d8/d8.cc
index 13a35b0cd3..33f2b70b14 100644
--- a/deps/v8/src/d8/d8.cc
+++ b/deps/v8/src/d8/d8.cc
@@ -71,6 +71,11 @@
#define CHECK(condition) assert(condition)
#endif
+#define TRACE_BS(...) \
+ do { \
+ if (i::FLAG_trace_backing_store) PrintF(__VA_ARGS__); \
+ } while (false)
+
namespace v8 {
namespace {
@@ -213,12 +218,20 @@ static Local<Value> Throw(Isolate* isolate, const char* message) {
.ToLocalChecked());
}
-static Local<Value> GetValue(v8::Isolate* isolate, Local<Context> context,
- Local<v8::Object> object, const char* property) {
+static MaybeLocal<Value> TryGetValue(v8::Isolate* isolate,
+ Local<Context> context,
+ Local<v8::Object> object,
+ const char* property) {
Local<String> v8_str =
String::NewFromUtf8(isolate, property, NewStringType::kNormal)
- .ToLocalChecked();
- return object->Get(context, v8_str).ToLocalChecked();
+ .FromMaybe(Local<String>());
+ if (v8_str.IsEmpty()) return Local<Value>();
+ return object->Get(context, v8_str);
+}
+
+static Local<Value> GetValue(v8::Isolate* isolate, Local<Context> context,
+ Local<v8::Object> object, const char* property) {
+ return TryGetValue(isolate, context, object, property).ToLocalChecked();
}
Worker* GetWorkerFromInternalField(Isolate* isolate, Local<Object> object) {
@@ -333,7 +346,6 @@ Global<Function> Shell::stringify_function_;
base::LazyMutex Shell::workers_mutex_;
bool Shell::allow_new_workers_ = true;
std::unordered_set<std::shared_ptr<Worker>> Shell::running_workers_;
-std::vector<ExternalizedContents> Shell::externalized_contents_;
std::atomic<bool> Shell::script_executed_{false};
base::LazyMutex Shell::isolate_status_lock_;
std::map<v8::Isolate*, bool> Shell::isolate_status_;
@@ -377,7 +389,7 @@ class BackgroundCompileThread : public base::Thread {
BackgroundCompileThread(Isolate* isolate, Local<String> source)
: base::Thread(GetThreadOptions("BackgroundCompileThread")),
source_(source),
- streamed_source_(base::make_unique<DummySourceStream>(source, isolate),
+ streamed_source_(std::make_unique<DummySourceStream>(source, isolate),
v8::ScriptCompiler::StreamedSource::UTF8),
task_(v8::ScriptCompiler::StartStreamingScript(isolate,
&streamed_source_)) {}
@@ -740,8 +752,60 @@ struct DynamicImportData {
Global<Promise::Resolver> resolver;
};
+struct ModuleResolutionData {
+ ModuleResolutionData(Isolate* isolate_, Local<Value> module_namespace_,
+ Local<Promise::Resolver> resolver_)
+ : isolate(isolate_) {
+ module_namespace.Reset(isolate, module_namespace_);
+ resolver.Reset(isolate, resolver_);
+ }
+
+ Isolate* isolate;
+ Global<Value> module_namespace;
+ Global<Promise::Resolver> resolver;
+};
+
} // namespace
+void Shell::ModuleResolutionSuccessCallback(
+ const FunctionCallbackInfo<Value>& info) {
+ std::unique_ptr<ModuleResolutionData> module_resolution_data(
+ static_cast<ModuleResolutionData*>(
+ info.Data().As<v8::External>()->Value()));
+ Isolate* isolate(module_resolution_data->isolate);
+ HandleScope handle_scope(isolate);
+
+ Local<Promise::Resolver> resolver(
+ module_resolution_data->resolver.Get(isolate));
+ Local<Value> module_namespace(
+ module_resolution_data->module_namespace.Get(isolate));
+
+ PerIsolateData* data = PerIsolateData::Get(isolate);
+ Local<Context> realm = data->realms_[data->realm_current_].Get(isolate);
+ Context::Scope context_scope(realm);
+
+ resolver->Resolve(realm, module_namespace).ToChecked();
+}
+
+void Shell::ModuleResolutionFailureCallback(
+ const FunctionCallbackInfo<Value>& info) {
+ std::unique_ptr<ModuleResolutionData> module_resolution_data(
+ static_cast<ModuleResolutionData*>(
+ info.Data().As<v8::External>()->Value()));
+ Isolate* isolate(module_resolution_data->isolate);
+ HandleScope handle_scope(isolate);
+
+ Local<Promise::Resolver> resolver(
+ module_resolution_data->resolver.Get(isolate));
+
+ PerIsolateData* data = PerIsolateData::Get(isolate);
+ Local<Context> realm = data->realms_[data->realm_current_].Get(isolate);
+ Context::Scope context_scope(realm);
+
+ DCHECK_EQ(info.Length(), 1);
+ resolver->Reject(realm, info[0]).ToChecked();
+}
+
MaybeLocal<Promise> Shell::HostImportModuleDynamically(
Local<Context> context, Local<ScriptOrModule> referrer,
Local<String> specifier) {
@@ -829,19 +893,44 @@ void Shell::DoHostImportModuleDynamically(void* import_data) {
if (root_module->InstantiateModule(realm, ResolveModuleCallback)
.FromMaybe(false)) {
maybe_result = root_module->Evaluate(realm);
+ CHECK_IMPLIES(i::FLAG_harmony_top_level_await, !maybe_result.IsEmpty());
EmptyMessageQueues(isolate);
}
- Local<Value> module;
- if (!maybe_result.ToLocal(&module)) {
+ Local<Value> result;
+ if (!maybe_result.ToLocal(&result)) {
DCHECK(try_catch.HasCaught());
resolver->Reject(realm, try_catch.Exception()).ToChecked();
return;
}
- DCHECK(!try_catch.HasCaught());
Local<Value> module_namespace = root_module->GetModuleNamespace();
- resolver->Resolve(realm, module_namespace).ToChecked();
+ if (i::FLAG_harmony_top_level_await) {
+ Local<Promise> result_promise(Local<Promise>::Cast(result));
+ if (result_promise->State() == Promise::kRejected) {
+ resolver->Reject(realm, result_promise->Result()).ToChecked();
+ return;
+ }
+
+    // Set up callbacks and chain them to the result promise.
+ // ModuleResolutionData will be deleted by the callbacks.
+ auto module_resolution_data =
+ new ModuleResolutionData(isolate, module_namespace, resolver);
+ Local<v8::External> edata = External::New(isolate, module_resolution_data);
+ Local<Function> callback_success;
+ CHECK(Function::New(realm, ModuleResolutionSuccessCallback, edata)
+ .ToLocal(&callback_success));
+ Local<Function> callback_failure;
+ CHECK(Function::New(realm, ModuleResolutionFailureCallback, edata)
+ .ToLocal(&callback_failure));
+ result_promise->Then(realm, callback_success, callback_failure)
+ .ToLocalChecked();
+ } else {
+    // TODO(joshualitt): Clean up exception handling after introducing a
+    // new API for evaluating async modules.
+ DCHECK(!try_catch.HasCaught());
+ resolver->Resolve(realm, module_namespace).ToChecked();
+ }
}
bool Shell::ExecuteModule(Isolate* isolate, const char* file_name) {
@@ -857,7 +946,6 @@ bool Shell::ExecuteModule(Isolate* isolate, const char* file_name) {
try_catch.SetVerbose(true);
Local<Module> root_module;
- MaybeLocal<Value> maybe_exception;
if (!FetchModuleTree(realm, absolute_path).ToLocal(&root_module)) {
CHECK(try_catch.HasCaught());
@@ -869,6 +957,7 @@ bool Shell::ExecuteModule(Isolate* isolate, const char* file_name) {
if (root_module->InstantiateModule(realm, ResolveModuleCallback)
.FromMaybe(false)) {
maybe_result = root_module->Evaluate(realm);
+ CHECK_IMPLIES(i::FLAG_harmony_top_level_await, !maybe_result.IsEmpty());
EmptyMessageQueues(isolate);
}
Local<Value> result;
@@ -878,6 +967,30 @@ bool Shell::ExecuteModule(Isolate* isolate, const char* file_name) {
ReportException(isolate, &try_catch);
return false;
}
+ if (i::FLAG_harmony_top_level_await) {
+    // Loop until module execution finishes.
+    // TODO(joshualitt): This is a bit wonky. "Real" engines would not be
+    // able to just busy-loop waiting for execution to finish.
+ Local<Promise> result_promise(Local<Promise>::Cast(result));
+ while (result_promise->State() == Promise::kPending) {
+ isolate->RunMicrotasks();
+ }
+
+ if (result_promise->State() == Promise::kRejected) {
+ // If the exception has been caught by the promise pipeline, we rethrow
+ // here in order to ReportException.
+ // TODO(joshualitt): Clean this up after we create a new API for the case
+ // where TLA is enabled.
+ if (!try_catch.HasCaught()) {
+ isolate->ThrowException(result_promise->Result());
+ } else {
+ DCHECK_EQ(try_catch.Exception(), result_promise->Result());
+ }
+ ReportException(isolate, &try_catch);
+ return false;
+ }
+ }
+
DCHECK(!try_catch.HasCaught());
return true;
}
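// Editorial sketch (not part of the diff): the wait loop above as a reusable
// helper against the public API. A real embedder would integrate with its
// task runner instead of spinning, as the TODO notes.
#include <v8.h>  // assumed include path
void PumpUntilSettled(v8::Isolate* isolate, v8::Local<v8::Promise> promise) {
  while (promise->State() == v8::Promise::kPending) {
    isolate->RunMicrotasks();  // the same call d8 makes here
  }
}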
@@ -984,6 +1097,27 @@ void Shell::PerformanceNow(const v8::FunctionCallbackInfo<v8::Value>& args) {
}
}
+// performance.measureMemory() implements the JavaScript Memory API proposal.
+// See https://github.com/ulan/javascript-agent-memory/blob/master/explainer.md.
+void Shell::PerformanceMeasureMemory(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ v8::MeasureMemoryMode mode = v8::MeasureMemoryMode::kSummary;
+ v8::Isolate* isolate = args.GetIsolate();
+ Local<Context> context = isolate->GetCurrentContext();
+ if (args.Length() >= 1 && args[0]->IsObject()) {
+ Local<Object> object = args[0].As<Object>();
+ Local<Value> value = TryGetValue(isolate, context, object, "detailed")
+ .FromMaybe(Local<Value>());
+ if (!value.IsEmpty() && value->IsBoolean() &&
+ value->BooleanValue(isolate)) {
+ mode = v8::MeasureMemoryMode::kDetailed;
+ }
+ }
+ v8::MaybeLocal<v8::Promise> result =
+ args.GetIsolate()->MeasureMemory(context, mode);
+ args.GetReturnValue().Set(result.FromMaybe(v8::Local<v8::Promise>()));
+}
+
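// Editorial sketch (not part of the diff): the same entry point invoked
// directly by an embedder, using only the API the function above relies on.
#include <v8.h>  // assumed include path
v8::MaybeLocal<v8::Promise> MeasureSummary(v8::Isolate* isolate,
                                           v8::Local<v8::Context> context) {
  return isolate->MeasureMemory(context, v8::MeasureMemoryMode::kSummary);
}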
// Realm.current() returns the index of the currently active realm.
void Shell::RealmCurrent(const v8::FunctionCallbackInfo<v8::Value>& args) {
Isolate* isolate = args.GetIsolate();
@@ -1820,6 +1954,10 @@ Local<ObjectTemplate> Shell::CreateGlobalTemplate(Isolate* isolate) {
String::NewFromUtf8(isolate, "now", NewStringType::kNormal)
.ToLocalChecked(),
FunctionTemplate::New(isolate, PerformanceNow));
+ performance_template->Set(
+ String::NewFromUtf8(isolate, "measureMemory", NewStringType::kNormal)
+ .ToLocalChecked(),
+ FunctionTemplate::New(isolate, PerformanceMeasureMemory));
global_template->Set(
String::NewFromUtf8(isolate, "performance", NewStringType::kNormal)
.ToLocalChecked(),
@@ -2364,6 +2502,33 @@ class InspectorClient : public v8_inspector::V8InspectorClient {
context_.Reset(isolate_, context);
}
+ void runMessageLoopOnPause(int contextGroupId) override {
+ v8::Isolate::AllowJavascriptExecutionScope allow_script(isolate_);
+ v8::HandleScope handle_scope(isolate_);
+ Local<String> callback_name =
+ v8::String::NewFromUtf8(isolate_, "handleInspectorMessage",
+ v8::NewStringType::kNormal)
+ .ToLocalChecked();
+ Local<Context> context = context_.Get(isolate_);
+ Local<Value> callback =
+ context->Global()->Get(context, callback_name).ToLocalChecked();
+ if (!callback->IsFunction()) return;
+
+ v8::TryCatch try_catch(isolate_);
+ is_paused = true;
+
+ while (is_paused) {
+ USE(Local<Function>::Cast(callback)->Call(context, Undefined(isolate_), 0,
+ {}));
+ if (try_catch.HasCaught()) {
+ Shell::ReportException(isolate_, &try_catch);
+ is_paused = false;
+ }
+ }
+ }
+
+ void quitMessageLoopOnPause() override { is_paused = false; }
+
private:
static v8_inspector::V8InspectorSession* GetSession(Local<Context> context) {
InspectorClient* inspector_client = static_cast<InspectorClient*>(
@@ -2402,6 +2567,7 @@ class InspectorClient : public v8_inspector::V8InspectorClient {
std::unique_ptr<v8_inspector::V8Inspector> inspector_;
std::unique_ptr<v8_inspector::V8InspectorSession> session_;
std::unique_ptr<v8_inspector::V8Inspector::Channel> channel_;
+ bool is_paused = false;
Global<Context> context_;
Isolate* isolate_;
};
@@ -2548,12 +2714,6 @@ void SourceGroup::JoinThread() {
thread_->Join();
}
-ExternalizedContents::~ExternalizedContents() {
- if (data_ != nullptr) {
- deleter_(data_, length_, deleter_data_);
- }
-}
-
void SerializationDataQueue::Enqueue(std::unique_ptr<SerializationData> data) {
base::MutexGuard lock_guard(&mutex_);
data_.push_back(std::move(data));
@@ -2841,9 +3001,6 @@ bool Shell::SetOptions(int argc, char* argv[]) {
options.icu_locale = argv[i] + 13;
argv[i] = nullptr;
#ifdef V8_USE_EXTERNAL_STARTUP_DATA
- } else if (strncmp(argv[i], "--natives_blob=", 15) == 0) {
- options.natives_blob = argv[i] + 15;
- argv[i] = nullptr;
} else if (strncmp(argv[i], "--snapshot_blob=", 16) == 0) {
options.snapshot_blob = argv[i] + 16;
argv[i] = nullptr;
@@ -3133,11 +3290,10 @@ class Serializer : public ValueSerializer::Delegate {
std::unique_ptr<SerializationData> Release() { return std::move(data_); }
- void AppendExternalizedContentsTo(std::vector<ExternalizedContents>* to) {
- to->insert(to->end(),
- std::make_move_iterator(externalized_contents_.begin()),
- std::make_move_iterator(externalized_contents_.end()));
- externalized_contents_.clear();
+ void AppendBackingStoresTo(std::vector<std::shared_ptr<BackingStore>>* to) {
+ to->insert(to->end(), std::make_move_iterator(backing_stores_.begin()),
+ std::make_move_iterator(backing_stores_.end()));
+ backing_stores_.clear();
}
protected:
@@ -3157,8 +3313,8 @@ class Serializer : public ValueSerializer::Delegate {
size_t index = shared_array_buffers_.size();
shared_array_buffers_.emplace_back(isolate_, shared_array_buffer);
- data_->shared_array_buffer_contents_.push_back(
- MaybeExternalize(shared_array_buffer));
+ data_->sab_backing_stores_.push_back(
+ shared_array_buffer->GetBackingStore());
return Just<uint32_t>(static_cast<uint32_t>(index));
}
@@ -3173,7 +3329,7 @@ class Serializer : public ValueSerializer::Delegate {
size_t index = wasm_modules_.size();
wasm_modules_.emplace_back(isolate_, module);
- data_->transferrable_modules_.push_back(module->GetTransferrableModule());
+ data_->compiled_wasm_modules_.push_back(module->GetCompiledModule());
return Just<uint32_t>(static_cast<uint32_t>(index));
}
@@ -3229,17 +3385,6 @@ class Serializer : public ValueSerializer::Delegate {
}
}
- template <typename T>
- typename T::Contents MaybeExternalize(Local<T> array_buffer) {
- if (array_buffer->IsExternal()) {
- return array_buffer->GetContents();
- } else {
- typename T::Contents contents = array_buffer->Externalize();
- externalized_contents_.emplace_back(contents);
- return contents;
- }
- }
-
Maybe<bool> FinalizeTransfer() {
for (const auto& global_array_buffer : array_buffers_) {
Local<ArrayBuffer> array_buffer =
@@ -3249,9 +3394,12 @@ class Serializer : public ValueSerializer::Delegate {
return Nothing<bool>();
}
- ArrayBuffer::Contents contents = MaybeExternalize(array_buffer);
+ auto backing_store = array_buffer->GetBackingStore();
+ if (!array_buffer->IsExternal()) {
+ array_buffer->Externalize(backing_store);
+ }
+ data_->backing_stores_.push_back(std::move(backing_store));
array_buffer->Detach();
- data_->array_buffer_contents_.push_back(contents);
}
return Just(true);
@@ -3263,7 +3411,7 @@ class Serializer : public ValueSerializer::Delegate {
std::vector<Global<ArrayBuffer>> array_buffers_;
std::vector<Global<SharedArrayBuffer>> shared_array_buffers_;
std::vector<Global<WasmModuleObject>> wasm_modules_;
- std::vector<ExternalizedContents> externalized_contents_;
+ std::vector<std::shared_ptr<v8::BackingStore>> backing_stores_;
size_t current_memory_usage_;
DISALLOW_COPY_AND_ASSIGN(Serializer);
@@ -3285,9 +3433,9 @@ class Deserializer : public ValueDeserializer::Delegate {
}
uint32_t index = 0;
- for (const auto& contents : data_->array_buffer_contents()) {
+ for (const auto& backing_store : data_->backing_stores()) {
Local<ArrayBuffer> array_buffer =
- ArrayBuffer::New(isolate_, contents.Data(), contents.ByteLength());
+ ArrayBuffer::New(isolate_, std::move(backing_store));
deserializer_.TransferArrayBuffer(index++, array_buffer);
}
@@ -3297,11 +3445,9 @@ class Deserializer : public ValueDeserializer::Delegate {
MaybeLocal<SharedArrayBuffer> GetSharedArrayBufferFromId(
Isolate* isolate, uint32_t clone_id) override {
DCHECK_NOT_NULL(data_);
- if (clone_id < data_->shared_array_buffer_contents().size()) {
- const SharedArrayBuffer::Contents contents =
- data_->shared_array_buffer_contents().at(clone_id);
- return SharedArrayBuffer::New(isolate_, contents.Data(),
- contents.ByteLength());
+ if (clone_id < data_->sab_backing_stores().size()) {
+ return SharedArrayBuffer::New(
+ isolate_, std::move(data_->sab_backing_stores().at(clone_id)));
}
return MaybeLocal<SharedArrayBuffer>();
}
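// Editorial sketch (not part of the diff): the BackingStore hand-off in
// isolation, using the same public API as the Serializer/Deserializer above.
// The shared_ptr keeps the memory alive while it travels between workers.
#include <memory>
#include <v8.h>  // assumed include path
std::shared_ptr<v8::BackingStore> TakeStore(
    v8::Local<v8::SharedArrayBuffer> sab) {
  return sab->GetBackingStore();  // refcounted view of the shared memory
}
v8::Local<v8::SharedArrayBuffer> Revive(
    v8::Isolate* isolate, std::shared_ptr<v8::BackingStore> store) {
  return v8::SharedArrayBuffer::New(isolate, std::move(store));
}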
@@ -3309,11 +3455,9 @@ class Deserializer : public ValueDeserializer::Delegate {
MaybeLocal<WasmModuleObject> GetWasmModuleFromId(
Isolate* isolate, uint32_t transfer_id) override {
DCHECK_NOT_NULL(data_);
- if (transfer_id < data_->transferrable_modules().size()) {
- return WasmModuleObject::FromTransferrableModule(
- isolate_, data_->transferrable_modules().at(transfer_id));
- }
- return MaybeLocal<WasmModuleObject>();
+ if (transfer_id >= data_->compiled_wasm_modules().size()) return {};
+ return WasmModuleObject::FromCompiledModule(
+ isolate_, data_->compiled_wasm_modules().at(transfer_id));
}
private:
@@ -3333,9 +3477,6 @@ std::unique_ptr<SerializationData> Shell::SerializeValue(
if (serializer.WriteValue(context, value, transfer).To(&ok)) {
data = serializer.Release();
}
- // Append externalized contents even when WriteValue fails.
- base::MutexGuard lock_guard(workers_mutex_.Pointer());
- serializer.AppendExternalizedContentsTo(&externalized_contents_);
return data;
}
@@ -3377,7 +3518,6 @@ void Shell::WaitForRunningWorkers() {
base::MutexGuard lock_guard(workers_mutex_.Pointer());
DCHECK(running_workers_.empty());
allow_new_workers_ = true;
- externalized_contents_.clear();
}
int Shell::Main(int argc, char* argv[]) {
@@ -3402,7 +3542,7 @@ int Shell::Main(int argc, char* argv[]) {
std::unique_ptr<platform::tracing::TracingController> tracing;
std::ofstream trace_file;
if (options.trace_enabled && !i::FLAG_verify_predictable) {
- tracing = base::make_unique<platform::tracing::TracingController>();
+ tracing = std::make_unique<platform::tracing::TracingController>();
trace_file.open(options.trace_path ? options.trace_path : "v8_trace.json");
DCHECK(trace_file.good());
@@ -3447,9 +3587,8 @@ int Shell::Main(int argc, char* argv[]) {
}
v8::V8::InitializePlatform(g_platform.get());
v8::V8::Initialize();
- if (options.natives_blob || options.snapshot_blob) {
- v8::V8::InitializeExternalStartupData(options.natives_blob,
- options.snapshot_blob);
+ if (options.snapshot_blob) {
+ v8::V8::InitializeExternalStartupDataFromFile(options.snapshot_blob);
} else {
v8::V8::InitializeExternalStartupData(argv[0]);
}
@@ -3626,3 +3765,4 @@ int main(int argc, char* argv[]) { return v8::Shell::Main(argc, argv); }
#undef CHECK
#undef DCHECK
+#undef TRACE_BS
diff --git a/deps/v8/src/d8/d8.h b/deps/v8/src/d8/d8.h
index 04fc5f5d34..458bad858a 100644
--- a/deps/v8/src/d8/d8.h
+++ b/deps/v8/src/d8/d8.h
@@ -111,70 +111,20 @@ class SourceGroup {
int end_offset_;
};
-// The backing store of an ArrayBuffer or SharedArrayBuffer, after
-// Externalize() has been called on it.
-class ExternalizedContents {
- public:
- explicit ExternalizedContents(const ArrayBuffer::Contents& contents)
- : data_(contents.Data()),
- length_(contents.ByteLength()),
- deleter_(contents.Deleter()),
- deleter_data_(contents.DeleterData()) {}
- explicit ExternalizedContents(const SharedArrayBuffer::Contents& contents)
- : data_(contents.Data()),
- length_(contents.ByteLength()),
- deleter_(contents.Deleter()),
- deleter_data_(contents.DeleterData()) {}
- ExternalizedContents(ExternalizedContents&& other) V8_NOEXCEPT
- : data_(other.data_),
- length_(other.length_),
- deleter_(other.deleter_),
- deleter_data_(other.deleter_data_) {
- other.data_ = nullptr;
- other.length_ = 0;
- other.deleter_ = nullptr;
- other.deleter_data_ = nullptr;
- }
- ExternalizedContents& operator=(ExternalizedContents&& other) V8_NOEXCEPT {
- if (this != &other) {
- data_ = other.data_;
- length_ = other.length_;
- deleter_ = other.deleter_;
- deleter_data_ = other.deleter_data_;
- other.data_ = nullptr;
- other.length_ = 0;
- other.deleter_ = nullptr;
- other.deleter_data_ = nullptr;
- }
- return *this;
- }
- ~ExternalizedContents();
-
- private:
- void* data_;
- size_t length_;
- ArrayBuffer::Contents::DeleterCallback deleter_;
- void* deleter_data_;
-
- DISALLOW_COPY_AND_ASSIGN(ExternalizedContents);
-};
-
class SerializationData {
public:
SerializationData() : size_(0) {}
uint8_t* data() { return data_.get(); }
size_t size() { return size_; }
- const std::vector<ArrayBuffer::Contents>& array_buffer_contents() {
- return array_buffer_contents_;
+ const std::vector<std::shared_ptr<v8::BackingStore>>& backing_stores() {
+ return backing_stores_;
}
- const std::vector<SharedArrayBuffer::Contents>&
- shared_array_buffer_contents() {
- return shared_array_buffer_contents_;
+ const std::vector<std::shared_ptr<v8::BackingStore>>& sab_backing_stores() {
+ return sab_backing_stores_;
}
- const std::vector<WasmModuleObject::TransferrableModule>&
- transferrable_modules() {
- return transferrable_modules_;
+ const std::vector<CompiledWasmModule>& compiled_wasm_modules() {
+ return compiled_wasm_modules_;
}
private:
@@ -184,9 +134,9 @@ class SerializationData {
std::unique_ptr<uint8_t, DataDeleter> data_;
size_t size_;
- std::vector<ArrayBuffer::Contents> array_buffer_contents_;
- std::vector<SharedArrayBuffer::Contents> shared_array_buffer_contents_;
- std::vector<WasmModuleObject::TransferrableModule> transferrable_modules_;
+ std::vector<std::shared_ptr<v8::BackingStore>> backing_stores_;
+ std::vector<std::shared_ptr<v8::BackingStore>> sab_backing_stores_;
+ std::vector<CompiledWasmModule> compiled_wasm_modules_;
private:
friend class Serializer;
@@ -334,7 +284,6 @@ class ShellOptions {
SourceGroup* isolate_sources = nullptr;
const char* icu_data_file = nullptr;
const char* icu_locale = nullptr;
- const char* natives_blob = nullptr;
const char* snapshot_blob = nullptr;
bool trace_enabled = false;
const char* trace_path = nullptr;
@@ -389,6 +338,8 @@ class Shell : public i::AllStatic {
static void MapCounters(v8::Isolate* isolate, const char* name);
static void PerformanceNow(const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void PerformanceMeasureMemory(
+ const v8::FunctionCallbackInfo<v8::Value>& args);
static void RealmCurrent(const v8::FunctionCallbackInfo<v8::Value>& args);
static void RealmOwner(const v8::FunctionCallbackInfo<v8::Value>& args);
@@ -473,6 +424,10 @@ class Shell : public i::AllStatic {
static MaybeLocal<Promise> HostImportModuleDynamically(
Local<Context> context, Local<ScriptOrModule> referrer,
Local<String> specifier);
+ static void ModuleResolutionSuccessCallback(
+ const v8::FunctionCallbackInfo<v8::Value>& info);
+ static void ModuleResolutionFailureCallback(
+ const v8::FunctionCallbackInfo<v8::Value>& info);
static void HostInitializeImportMetaObject(Local<Context> context,
Local<Module> module,
Local<Object> meta);
@@ -519,7 +474,6 @@ class Shell : public i::AllStatic {
static base::LazyMutex workers_mutex_; // Guards the following members.
static bool allow_new_workers_;
static std::unordered_set<std::shared_ptr<Worker>> running_workers_;
- static std::vector<ExternalizedContents> externalized_contents_;
// Multiple isolates may update this flag concurrently.
static std::atomic<bool> script_executed_;
diff --git a/deps/v8/src/debug/debug-coverage.cc b/deps/v8/src/debug/debug-coverage.cc
index cb466ab6ab..5f368683f2 100644
--- a/deps/v8/src/debug/debug-coverage.cc
+++ b/deps/v8/src/debug/debug-coverage.cc
@@ -577,11 +577,15 @@ struct SharedFunctionInfoAndCount {
// Sort by:
// - start, ascending.
// - end, descending.
- // - count, ascending.
+ // - info.is_toplevel() first.
+ // - count, descending.
bool operator<(const SharedFunctionInfoAndCount& that) const {
if (this->start != that.start) return this->start < that.start;
if (this->end != that.end) return this->end > that.end;
- return this->count < that.count;
+ if (this->info.is_toplevel() != that.info.is_toplevel()) {
+ return this->info.is_toplevel();
+ }
+ return this->count > that.count;
}
SharedFunctionInfo info;
@@ -653,12 +657,30 @@ std::unique_ptr<Coverage> Coverage::Collect(
// Find the correct outer function based on start position.
//
- // This is not robust when considering two functions with identical source
- // ranges. In this case, it is unclear which function is the inner / outer
- // function. Above, we ensure that such functions are sorted in ascending
- // `count` order, so at least our `parent_is_covered` optimization below
- // should be fine.
- // TODO(jgruber): Consider removing the optimization.
+ // This is, in general, not robust when considering two functions with
+ // identical source ranges; then the notion of inner and outer is unclear.
+ // Identical source ranges arise when the source range of the top-most
+ // entity (e.g. a function) in the script is identical to the whole script,
+ // e.g. <script>function foo() {}</script>. The script has its own shared
+ // function info, which has the same source range as the SFI for `foo`.
+ // Node.js creates an additional wrapper for scripts (again with identical
+ // source range) and those wrappers will have a call count of zero even if
+ // the wrapped script was executed (see v8:9212). We mitigate this issue
+ // by sorting top-level SFIs first among SFIs with the same source range,
+ // which ensures they are processed first. If a top-level SFI has
+ // a non-zero call count, it gets recorded due to `function_is_relevant`
+ // below (e.g. script wrappers), while top-level SFIs with zero call count
+ // do not get reported (this ensures node's extra wrappers do not get
+ // reported). If two SFIs with identical source ranges get reported, we
+ // report them in decreasing order of call count, as in all known cases
+ // this corresponds to the nesting order. In the case of the script tag
+ // example above, we report the zero call count of `foo` last. As it turns
+ // out, embedders started to rely on functions being reported in nesting
+ // order.
+ // TODO(jgruber): Investigate whether it is possible to remove node's
+ // extra top-level wrapper script, or change its source range, or ensure
+ // that it follows the invariant that nesting order is descending count
+ // order for SFIs with identical source ranges.
while (!nesting.empty() && functions->at(nesting.back()).end <= start) {
nesting.pop_back();
}
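
To make the new ordering concrete, here is a self-contained restatement of the comparator with simplified types; this is a sketch, not the V8 struct, and `is_toplevel` stands in for `info.is_toplevel()`:

    #include <algorithm>
    #include <vector>

    struct Entry {
      int start, end, count;
      bool is_toplevel;  // stands in for info.is_toplevel()
      bool operator<(const Entry& that) const {
        if (start != that.start) return start < that.start;       // ascending
        if (end != that.end) return end > that.end;               // descending
        if (is_toplevel != that.is_toplevel) return is_toplevel;  // top-level first
        return count > that.count;                                // descending
      }
    };

    void SortForCoverage(std::vector<Entry>* entries) {
      // With identical source ranges, top-level SFIs sort first and remaining
      // ties break by decreasing call count, matching nesting order in the
      // known cases described in the comment above.
      std::sort(entries->begin(), entries->end());
    }
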
diff --git a/deps/v8/src/debug/debug-coverage.h b/deps/v8/src/debug/debug-coverage.h
index 9c1f0bcc2c..81b178181a 100644
--- a/deps/v8/src/debug/debug-coverage.h
+++ b/deps/v8/src/debug/debug-coverage.h
@@ -5,6 +5,7 @@
#ifndef V8_DEBUG_DEBUG_COVERAGE_H_
#define V8_DEBUG_DEBUG_COVERAGE_H_
+#include <memory>
#include <vector>
#include "src/debug/debug-interface.h"
diff --git a/deps/v8/src/debug/debug-evaluate.cc b/deps/v8/src/debug/debug-evaluate.cc
index 203885143f..3568860e7d 100644
--- a/deps/v8/src/debug/debug-evaluate.cc
+++ b/deps/v8/src/debug/debug-evaluate.cc
@@ -23,9 +23,13 @@ namespace internal {
MaybeHandle<Object> DebugEvaluate::Global(Isolate* isolate,
Handle<String> source,
- bool throw_on_side_effect) {
+ debug::EvaluateGlobalMode mode) {
// Disable breaks in side-effect free mode.
- DisableBreak disable_break_scope(isolate->debug(), throw_on_side_effect);
+ DisableBreak disable_break_scope(
+ isolate->debug(),
+ mode == debug::EvaluateGlobalMode::kDisableBreaks ||
+ mode ==
+ debug::EvaluateGlobalMode::kDisableBreaksAndThrowOnSideEffect);
Handle<Context> context = isolate->native_context();
ScriptOriginOptions origin_options(false, true);
@@ -42,11 +46,15 @@ MaybeHandle<Object> DebugEvaluate::Global(Isolate* isolate,
Handle<JSFunction> fun =
isolate->factory()->NewFunctionFromSharedFunctionInfo(shared_info,
context);
- if (throw_on_side_effect) isolate->debug()->StartSideEffectCheckMode();
+ if (mode == debug::EvaluateGlobalMode::kDisableBreaksAndThrowOnSideEffect) {
+ isolate->debug()->StartSideEffectCheckMode();
+ }
MaybeHandle<Object> result = Execution::Call(
isolate, fun, Handle<JSObject>(context->global_proxy(), isolate), 0,
nullptr);
- if (throw_on_side_effect) isolate->debug()->StopSideEffectCheckMode();
+ if (mode == debug::EvaluateGlobalMode::kDisableBreaksAndThrowOnSideEffect) {
+ isolate->debug()->StopSideEffectCheckMode();
+ }
return result;
}
@@ -174,31 +182,31 @@ DebugEvaluate::ContextBuilder::ContextBuilder(Isolate* isolate,
// - To make stack-allocated variables visible, we materialize them and
// use a debug-evaluate context to wrap both the materialized object and
// the original context.
- // - We use the original context chain from the function context to the
- // native context.
+ // - We also wrap all contexts on the chain between the original context
+ // and the function context.
// - Between the function scope and the native context, we only resolve
- // variable names that the current function already uses. Only for these
- // names we can be sure that they will be correctly resolved. For the
- // rest, we only resolve to with, script, and native contexts. We use a
- // whitelist to implement that.
+ // variable names that are guaranteed to not be shadowed by stack-allocated
+ // variables. Contexts between the function context and the original
+ // context have a blacklist attached to implement that.
// Context::Lookup has special handling for debug-evaluate contexts:
// - Look up in the materialized stack variables.
+ // - Check the blacklist to find out whether to abort further lookup.
// - Look up in the original context.
- // - Check the whitelist to find out whether to skip contexts during lookup.
- for (; scope_iterator_.InInnerScope(); scope_iterator_.Next()) {
+ for (; !scope_iterator_.Done(); scope_iterator_.Next()) {
ScopeIterator::ScopeType scope_type = scope_iterator_.Type();
if (scope_type == ScopeIterator::ScopeTypeScript) break;
ContextChainElement context_chain_element;
- if (scope_type == ScopeIterator::ScopeTypeLocal ||
- scope_iterator_.DeclaresLocals(ScopeIterator::Mode::STACK)) {
+ if (scope_iterator_.InInnerScope() &&
+ (scope_type == ScopeIterator::ScopeTypeLocal ||
+ scope_iterator_.DeclaresLocals(ScopeIterator::Mode::STACK))) {
context_chain_element.materialized_object =
scope_iterator_.ScopeObject(ScopeIterator::Mode::STACK);
}
if (scope_iterator_.HasContext()) {
context_chain_element.wrapped_context = scope_iterator_.CurrentContext();
}
- if (scope_type == ScopeIterator::ScopeTypeLocal) {
- context_chain_element.whitelist = scope_iterator_.GetNonLocals();
+ if (!scope_iterator_.InInnerScope()) {
+ context_chain_element.blacklist = scope_iterator_.GetLocals();
}
context_chain_.push_back(context_chain_element);
}
@@ -214,7 +222,7 @@ DebugEvaluate::ContextBuilder::ContextBuilder(Isolate* isolate,
scope_info->SetIsDebugEvaluateScope();
evaluation_context_ = factory->NewDebugEvaluateContext(
evaluation_context_, scope_info, element.materialized_object,
- element.wrapped_context, element.whitelist);
+ element.wrapped_context, element.blacklist);
}
}
diff --git a/deps/v8/src/debug/debug-evaluate.h b/deps/v8/src/debug/debug-evaluate.h
index 7819892050..b04bd76e22 100644
--- a/deps/v8/src/debug/debug-evaluate.h
+++ b/deps/v8/src/debug/debug-evaluate.h
@@ -24,7 +24,7 @@ class FrameInspector;
class DebugEvaluate : public AllStatic {
public:
static MaybeHandle<Object> Global(Isolate* isolate, Handle<String> source,
- bool throw_on_side_effect);
+ debug::EvaluateGlobalMode mode);
// Evaluate a piece of JavaScript in the context of a stack frame for
// debugging. Things that need special attention are:
@@ -83,7 +83,7 @@ class DebugEvaluate : public AllStatic {
struct ContextChainElement {
Handle<Context> wrapped_context;
Handle<JSObject> materialized_object;
- Handle<StringSet> whitelist;
+ Handle<StringSet> blacklist;
};
Handle<Context> evaluation_context_;
diff --git a/deps/v8/src/debug/debug-frames.cc b/deps/v8/src/debug/debug-frames.cc
index 78c4c323fc..19178d34ce 100644
--- a/deps/v8/src/debug/debug-frames.cc
+++ b/deps/v8/src/debug/debug-frames.cc
@@ -70,7 +70,7 @@ int FrameInspector::GetParametersCount() {
Handle<Object> FrameInspector::GetParameter(int index) {
if (is_optimized_) return deoptimized_frame_->GetParameter(index);
- // TODO(clemensh): Handle wasm_interpreted_frame_.
+ // TODO(clemensb): Handle wasm_interpreted_frame_.
return handle(frame_->GetParameter(index), isolate_);
}
@@ -93,8 +93,10 @@ bool FrameInspector::ParameterIsShadowedByContextLocal(
VariableMode mode;
InitializationFlag init_flag;
MaybeAssignedFlag maybe_assigned_flag;
+ IsStaticFlag is_static_flag;
return ScopeInfo::ContextSlotIndex(*info, *parameter_name, &mode, &init_flag,
- &maybe_assigned_flag) != -1;
+ &maybe_assigned_flag,
+ &is_static_flag) != -1;
}
RedirectActiveFunctions::RedirectActiveFunctions(SharedFunctionInfo shared,
diff --git a/deps/v8/src/debug/debug-frames.h b/deps/v8/src/debug/debug-frames.h
index 274d10030a..78248614e2 100644
--- a/deps/v8/src/debug/debug-frames.h
+++ b/deps/v8/src/debug/debug-frames.h
@@ -5,6 +5,8 @@
#ifndef V8_DEBUG_DEBUG_FRAMES_H_
#define V8_DEBUG_DEBUG_FRAMES_H_
+#include <memory>
+
#include "src/deoptimizer/deoptimizer.h"
#include "src/execution/isolate.h"
#include "src/execution/v8threads.h"
diff --git a/deps/v8/src/debug/debug-interface.h b/deps/v8/src/debug/debug-interface.h
index 59bc6d0863..5f10e2a55a 100644
--- a/deps/v8/src/debug/debug-interface.h
+++ b/deps/v8/src/debug/debug-interface.h
@@ -5,6 +5,8 @@
#ifndef V8_DEBUG_DEBUG_INTERFACE_H_
#define V8_DEBUG_DEBUG_INTERFACE_H_
+#include <memory>
+
#include "include/v8-inspector.h"
#include "include/v8-util.h"
#include "include/v8.h"
@@ -157,6 +159,7 @@ class WasmScript : public Script {
int NumFunctions() const;
int NumImportedFunctions() const;
+ MemorySpan<const uint8_t> Bytecode() const;
std::pair<int, int> GetFunctionRange(int function_index) const;
@@ -468,9 +471,15 @@ enum class NativeAccessorType {
int64_t GetNextRandomInt64(v8::Isolate* isolate);
+enum class EvaluateGlobalMode {
+ kDefault,
+ kDisableBreaks,
+ kDisableBreaksAndThrowOnSideEffect
+};
+
V8_EXPORT_PRIVATE v8::MaybeLocal<v8::Value> EvaluateGlobal(
v8::Isolate* isolate, v8::Local<v8::String> source,
- bool throw_on_side_effect);
+ EvaluateGlobalMode mode);
int GetDebuggingId(v8::Local<v8::Function> function);
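
The boolean `throw_on_side_effect` parameter becomes a three-state `EvaluateGlobalMode`, adding a previously inexpressible middle ground. A hedged usage sketch of the embedder-facing entry point (context setup elided; the signature matches the declaration above):

    #include "v8.h"  // assumed include paths
    #include "src/debug/debug-interface.h"

    v8::MaybeLocal<v8::Value> EvalNoBreaks(v8::Isolate* isolate,
                                           v8::Local<v8::String> source) {
      // The old throw_on_side_effect == true maps to the strictest mode,
      // kDisableBreaksAndThrowOnSideEffect. The new middle ground below
      // suppresses breakpoints but permits side effects, which the old
      // bool could not express:
      return v8::debug::EvaluateGlobal(
          isolate, source, v8::debug::EvaluateGlobalMode::kDisableBreaks);
    }
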
diff --git a/deps/v8/src/debug/debug-scopes.cc b/deps/v8/src/debug/debug-scopes.cc
index 4569780d00..512b85fb09 100644
--- a/deps/v8/src/debug/debug-scopes.cc
+++ b/deps/v8/src/debug/debug-scopes.cc
@@ -84,6 +84,117 @@ void ScopeIterator::Restart() {
UnwrapEvaluationContext();
}
+namespace {
+
+// Takes the scope of a parsed script, a function and a break location
+// inside the function. The result is the innermost lexical scope around
+// the break point, which serves as the starting point of the ScopeIterator,
+// together with the scope of the function that was passed in (the closure
+// scope).
+//
+// The start scope is guaranteed to be either the closure scope itself,
+// or a child of the closure scope.
+class ScopeChainRetriever {
+ public:
+ ScopeChainRetriever(DeclarationScope* scope, Handle<JSFunction> function,
+ int position)
+ : scope_(scope),
+ break_scope_start_(function->shared().StartPosition()),
+ break_scope_end_(function->shared().EndPosition()),
+ is_default_constructor_(
+ IsDefaultConstructor(function->shared().kind())),
+ position_(position) {
+ DCHECK_NOT_NULL(scope);
+ RetrieveScopes();
+ }
+
+ DeclarationScope* ClosureScope() { return closure_scope_; }
+ Scope* StartScope() { return start_scope_; }
+
+ private:
+ DeclarationScope* scope_;
+ const int break_scope_start_;
+ const int break_scope_end_;
+ const bool is_default_constructor_;
+ const int position_;
+
+ DeclarationScope* closure_scope_ = nullptr;
+ Scope* start_scope_ = nullptr;
+
+ void RetrieveScopes() {
+ if (is_default_constructor_) {
+ // Even though the DefaultBaseConstructor is a child of a Class scope, the
+ // source positions are *not* nested. This means the actual scope for the
+ // DefaultBaseConstructor needs to be found by doing a DFS.
+ RetrieveScopeChainDefaultConstructor(scope_);
+ } else {
+ RetrieveScopeChain();
+ }
+ DCHECK_NOT_NULL(closure_scope_);
+ DCHECK_NOT_NULL(start_scope_);
+ }
+
+ bool RetrieveScopeChainDefaultConstructor(Scope* scope) {
+ const int beg_pos = scope->start_position();
+ const int end_pos = scope->end_position();
+ if (beg_pos == position_ && end_pos == position_) {
+ DCHECK(scope->is_function_scope());
+ DCHECK(
+ IsDefaultConstructor(scope->AsDeclarationScope()->function_kind()));
+ start_scope_ = scope;
+ closure_scope_ = scope->AsDeclarationScope();
+ return true;
+ }
+
+ for (Scope* inner_scope = scope->inner_scope(); inner_scope != nullptr;
+ inner_scope = inner_scope->sibling()) {
+ if (RetrieveScopeChainDefaultConstructor(inner_scope)) return true;
+ }
+ return false;
+ }
+
+ void RetrieveScopeChain() {
+ Scope* parent = nullptr;
+ Scope* current = scope_;
+ SetClosureScopeIfFound(current);
+
+ while (parent != current) {
+ parent = current;
+ for (Scope* inner_scope = current->inner_scope(); inner_scope != nullptr;
+ inner_scope = inner_scope->sibling()) {
+ if (SetClosureScopeIfFound(inner_scope) ||
+ ContainsPosition(inner_scope)) {
+ current = inner_scope;
+ break;
+ }
+ }
+ }
+ start_scope_ = current;
+ }
+
+ bool SetClosureScopeIfFound(Scope* scope) {
+ const int start = scope->start_position();
+ const int end = scope->end_position();
+ if (start == break_scope_start_ && end == break_scope_end_) {
+ closure_scope_ = scope->AsDeclarationScope();
+ return true;
+ }
+ return false;
+ }
+
+ bool ContainsPosition(Scope* scope) {
+ const int start = scope->start_position();
+ const int end = scope->end_position();
+ // In case the closure_scope_ hasn't been found yet, we are less strict
+ // about recursing downwards. This might be the case for nested arrow
+ // functions that have the same end position.
+ const bool position_fits_end =
+ closure_scope_ ? position_ < end : position_ <= end;
+ return start < position_ && position_fits_end;
+ }
+};
+
+} // namespace
+
void ScopeIterator::TryParseAndRetrieveScopes(ScopeIterator::Option option) {
// Catch the case when the debugger stops in an internal function.
Handle<SharedFunctionInfo> shared_info(function_->shared(), isolate_);
@@ -105,7 +216,6 @@ void ScopeIterator::TryParseAndRetrieveScopes(ScopeIterator::Option option) {
return;
}
- DCHECK_NE(IGNORE_NESTED_SCOPES, option);
bool ignore_nested_scopes = false;
if (shared_info->HasBreakInfo() && frame_inspector_ != nullptr) {
// The source position at return is always the end of the function,
@@ -123,44 +233,39 @@ void ScopeIterator::TryParseAndRetrieveScopes(ScopeIterator::Option option) {
}
// Reparse the code and analyze the scopes.
- // Check whether we are in global, eval or function code.
- if (scope_info->scope_type() == FUNCTION_SCOPE) {
- // Inner function.
- info_ = new ParseInfo(isolate_, shared_info);
- } else {
- // Global or eval code.
- Handle<Script> script(Script::cast(shared_info->script()), isolate_);
- info_ = new ParseInfo(isolate_, script);
- if (scope_info->scope_type() == EVAL_SCOPE) {
- info_->set_eval();
- if (!context_->IsNativeContext()) {
- info_->set_outer_scope_info(handle(context_->scope_info(), isolate_));
- }
- // Language mode may be inherited from the eval caller.
- // Retrieve it from shared function info.
- info_->set_language_mode(shared_info->language_mode());
- } else if (scope_info->scope_type() == MODULE_SCOPE) {
- DCHECK(info_->is_module());
- } else {
- DCHECK_EQ(SCRIPT_SCOPE, scope_info->scope_type());
+ Handle<Script> script(Script::cast(shared_info->script()), isolate_);
+ info_ = new ParseInfo(isolate_, script);
+ info_->set_eager();
+ if (scope_info->scope_type() == EVAL_SCOPE || script->is_wrapped()) {
+ info_->set_eval();
+ if (!context_->IsNativeContext()) {
+ info_->set_outer_scope_info(handle(context_->scope_info(), isolate_));
}
+ // Language mode may be inherited from the eval caller.
+ // Retrieve it from shared function info.
+ info_->set_language_mode(shared_info->language_mode());
+ } else if (scope_info->scope_type() == MODULE_SCOPE) {
+ DCHECK(info_->is_module());
+ } else {
+ DCHECK(scope_info->scope_type() == SCRIPT_SCOPE ||
+ scope_info->scope_type() == FUNCTION_SCOPE);
}
if (parsing::ParseAny(info_, shared_info, isolate_) &&
Rewriter::Rewrite(info_)) {
info_->ast_value_factory()->Internalize(isolate_);
- closure_scope_ = info_->literal()->scope();
+ DeclarationScope* literal_scope = info_->literal()->scope();
- if (option == COLLECT_NON_LOCALS) {
- DCHECK(non_locals_.is_null());
- non_locals_ = info_->literal()->scope()->CollectNonLocals(
- isolate_, info_, StringSet::New(isolate_));
- if (!closure_scope_->has_this_declaration() &&
- closure_scope_->HasThisReference()) {
- non_locals_ = StringSet::Add(isolate_, non_locals_,
- isolate_->factory()->this_string());
- }
- }
+ ScopeChainRetriever scope_chain_retriever(literal_scope, function_,
+ GetSourcePosition());
+ start_scope_ = scope_chain_retriever.StartScope();
+ current_scope_ = start_scope_;
+
+ // In case of a FUNCTION_SCOPE, the ScopeIterator expects
+ // {closure_scope_} to be set to the scope of the function.
+ closure_scope_ = scope_info->scope_type() == FUNCTION_SCOPE
+ ? scope_chain_retriever.ClosureScope()
+ : literal_scope;
CHECK(DeclarationScope::Analyze(info_));
if (ignore_nested_scopes) {
@@ -169,9 +274,8 @@ void ScopeIterator::TryParseAndRetrieveScopes(ScopeIterator::Option option) {
if (closure_scope_->NeedsContext()) {
context_ = handle(context_->closure_context(), isolate_);
}
- } else {
- RetrieveScopeChain(closure_scope_);
}
+
UnwrapEvaluationContext();
} else {
// A failed reparse indicates that the preparser has diverged from the
@@ -260,6 +364,38 @@ bool ScopeIterator::HasContext() const {
return !InInnerScope() || current_scope_->NeedsContext();
}
+void ScopeIterator::AdvanceOneScope() {
+ if (current_scope_->NeedsContext()) {
+ DCHECK(!context_->previous().is_null());
+ context_ = handle(context_->previous(), isolate_);
+ }
+ DCHECK(current_scope_->outer_scope() != nullptr);
+ current_scope_ = current_scope_->outer_scope();
+}
+
+void ScopeIterator::AdvanceToNonHiddenScope() {
+ do {
+ AdvanceOneScope();
+ } while (current_scope_->is_hidden());
+}
+
+void ScopeIterator::AdvanceContext() {
+ DCHECK(!context_->IsNativeContext());
+ context_ = handle(context_->previous(), isolate_);
+
+ // While advancing one context, we need to advance at least one
+ // scope, and then keep going until we hit the next scope that
+ // actually requires a context. All the locals collected along the
+ // way build the blacklist for debug-evaluate for this context.
+ locals_ = StringSet::New(isolate_);
+ do {
+ if (!current_scope_ || !current_scope_->outer_scope()) break;
+
+ current_scope_ = current_scope_->outer_scope();
+ CollectLocalsFromCurrentScope();
+ } while (!current_scope_->NeedsContext());
+}
+
void ScopeIterator::Next() {
DCHECK(!Done());
@@ -283,19 +419,17 @@ void ScopeIterator::Next() {
context_ = handle(context_->previous(), isolate_);
}
} else if (!inner) {
- DCHECK(!context_->IsNativeContext());
- context_ = handle(context_->previous(), isolate_);
+ AdvanceContext();
} else {
DCHECK_NOT_NULL(current_scope_);
- do {
- if (current_scope_->NeedsContext()) {
- DCHECK(!context_->previous().is_null());
- context_ = handle(context_->previous(), isolate_);
- }
- DCHECK_IMPLIES(InInnerScope(), current_scope_->outer_scope() != nullptr);
- current_scope_ = current_scope_->outer_scope();
- // Repeat to skip hidden scopes.
- } while (current_scope_->is_hidden());
+ AdvanceToNonHiddenScope();
+
+ if (!InInnerScope() && current_scope_ != closure_scope_) {
+ // Edge case when we just went past {closure_scope_}. In this
+ // case we already need to start collecting locals for the blacklist.
+ locals_ = StringSet::New(isolate_);
+ CollectLocalsFromCurrentScope();
+ }
}
UnwrapEvaluationContext();
@@ -453,7 +587,20 @@ bool ScopeIterator::SetVariableValue(Handle<String> name,
return false;
}
-Handle<StringSet> ScopeIterator::GetNonLocals() { return non_locals_; }
+bool ScopeIterator::ClosureScopeHasThisReference() const {
+ return !closure_scope_->has_this_declaration() &&
+ closure_scope_->HasThisReference();
+}
+
+void ScopeIterator::CollectLocalsFromCurrentScope() {
+ DCHECK(locals_->IsStringSet());
+ for (Variable* var : *current_scope_->locals()) {
+ if (var->location() == VariableLocation::PARAMETER ||
+ var->location() == VariableLocation::LOCAL) {
+ locals_ = StringSet::Add(isolate_, locals_, var->name());
+ }
+ }
+}
#ifdef DEBUG
// Debug print of the content of the current scope.
@@ -524,31 +671,6 @@ int ScopeIterator::GetSourcePosition() {
}
}
-void ScopeIterator::RetrieveScopeChain(DeclarationScope* scope) {
- DCHECK_NOT_NULL(scope);
-
- const int position = GetSourcePosition();
-
- Scope* parent = nullptr;
- Scope* current = scope;
- while (parent != current) {
- parent = current;
- for (Scope* inner_scope = current->inner_scope(); inner_scope != nullptr;
- inner_scope = inner_scope->sibling()) {
- int beg_pos = inner_scope->start_position();
- int end_pos = inner_scope->end_position();
- DCHECK((beg_pos >= 0 && end_pos >= 0) || inner_scope->is_hidden());
- if (beg_pos < position && position < end_pos) {
- current = inner_scope;
- break;
- }
- }
- }
-
- start_scope_ = current;
- current_scope_ = current;
-}
-
void ScopeIterator::VisitScriptScope(const Visitor& visitor) const {
Handle<JSGlobalObject> global(context_->global_object(), isolate_);
Handle<ScriptContextTable> script_contexts(
@@ -884,9 +1006,10 @@ bool ScopeIterator::SetContextVariableValue(Handle<String> variable_name,
VariableMode mode;
InitializationFlag flag;
MaybeAssignedFlag maybe_assigned_flag;
+ IsStaticFlag is_static_flag;
int slot_index =
ScopeInfo::ContextSlotIndex(context_->scope_info(), *variable_name, &mode,
- &flag, &maybe_assigned_flag);
+ &flag, &maybe_assigned_flag, &is_static_flag);
if (slot_index < 0) return false;
context_->set(slot_index, *new_value);
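
The whitelist of resolvable non-locals is inverted into a blacklist of stack-allocated names that must stop the lookup. A hedged model of the rule the debug-evaluate contexts now implement, using hypothetical types (the real logic lives in Context::Lookup):

    #include <optional>
    #include <set>
    #include <string>
    #include <unordered_map>

    // Hypothetical model of one debug-evaluate context in the chain.
    struct DebugEvalContext {
      std::unordered_map<std::string, int> materialized;  // stack locals by name
      std::set<std::string> blacklist;                    // shadowed names
      const DebugEvalContext* wrapped = nullptr;          // original context
    };

    std::optional<int> Lookup(const DebugEvalContext& ctx,
                              const std::string& name) {
      // 1. Materialized stack variables win.
      auto it = ctx.materialized.find(name);
      if (it != ctx.materialized.end()) return it->second;
      // 2. A blacklisted name is shadowed by a stack local, so abort the
      //    walk rather than resolve it to an outer context slot.
      if (ctx.blacklist.count(name)) return std::nullopt;
      // 3. Otherwise continue into the wrapped original context chain.
      return ctx.wrapped ? Lookup(*ctx.wrapped, name) : std::nullopt;
    }
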
diff --git a/deps/v8/src/debug/debug-scopes.h b/deps/v8/src/debug/debug-scopes.h
index 5c3361619a..29d84ed640 100644
--- a/deps/v8/src/debug/debug-scopes.h
+++ b/deps/v8/src/debug/debug-scopes.h
@@ -41,7 +41,7 @@ class ScopeIterator {
static const int kScopeDetailsFunctionIndex = 5;
static const int kScopeDetailsSize = 6;
- enum Option { DEFAULT, IGNORE_NESTED_SCOPES, COLLECT_NON_LOCALS };
+ enum Option { DEFAULT, COLLECT_NON_LOCALS };
ScopeIterator(Isolate* isolate, FrameInspector* frame_inspector,
Option options = DEFAULT);
@@ -77,8 +77,10 @@ class ScopeIterator {
// Set variable value and return true on success.
bool SetVariableValue(Handle<String> variable_name, Handle<Object> new_value);
+ bool ClosureScopeHasThisReference() const;
+
// Return the set of locals collected for the debug-evaluate blacklist.
- Handle<StringSet> GetNonLocals();
+ Handle<StringSet> GetLocals() { return locals_; }
// Similar to JSFunction::GetName, return the function's name or its
// inferred name.
@@ -110,7 +112,7 @@ class ScopeIterator {
Handle<JSFunction> function_;
Handle<Context> context_;
Handle<Script> script_;
- Handle<StringSet> non_locals_;
+ Handle<StringSet> locals_;
DeclarationScope* closure_scope_ = nullptr;
Scope* start_scope_ = nullptr;
Scope* current_scope_ = nullptr;
@@ -120,12 +122,15 @@ class ScopeIterator {
return frame_inspector_->javascript_frame();
}
+ void AdvanceOneScope();
+ void AdvanceToNonHiddenScope();
+ void AdvanceContext();
+ void CollectLocalsFromCurrentScope();
+
int GetSourcePosition();
void TryParseAndRetrieveScopes(ScopeIterator::Option option);
- void RetrieveScopeChain(DeclarationScope* scope);
-
void UnwrapEvaluationContext();
using Visitor =
diff --git a/deps/v8/src/debug/debug-stack-trace-iterator.cc b/deps/v8/src/debug/debug-stack-trace-iterator.cc
index 4f691e63a2..12f492dbd2 100644
--- a/deps/v8/src/debug/debug-stack-trace-iterator.cc
+++ b/deps/v8/src/debug/debug-stack-trace-iterator.cc
@@ -87,25 +87,26 @@ v8::MaybeLocal<v8::Value> DebugStackTraceIterator::GetReceiver() const {
// Arrow function defined in top level function without references to
// variables may have NativeContext as context.
if (!context->IsFunctionContext()) return v8::MaybeLocal<v8::Value>();
- ScopeIterator scope_iterator(isolate_, frame_inspector_.get(),
- ScopeIterator::COLLECT_NON_LOCALS);
+ ScopeIterator scope_iterator(isolate_, frame_inspector_.get());
// We look up this variable in the function context only when it is used in
// an arrow function; otherwise V8 can optimize it out.
- if (!scope_iterator.GetNonLocals()->Has(isolate_,
- isolate_->factory()->this_string()))
+ if (!scope_iterator.ClosureScopeHasThisReference()) {
return v8::MaybeLocal<v8::Value>();
+ }
DisallowHeapAllocation no_gc;
VariableMode mode;
InitializationFlag flag;
MaybeAssignedFlag maybe_assigned_flag;
+ IsStaticFlag is_static_flag;
int slot_index = ScopeInfo::ContextSlotIndex(
context->scope_info(), ReadOnlyRoots(isolate_->heap()).this_string(),
- &mode, &flag, &maybe_assigned_flag);
+ &mode, &flag, &maybe_assigned_flag, &is_static_flag);
if (slot_index < 0) return v8::MaybeLocal<v8::Value>();
Handle<Object> value = handle(context->get(slot_index), isolate_);
if (value->IsTheHole(isolate_)) return v8::MaybeLocal<v8::Value>();
return Utils::ToLocal(value);
}
+
Handle<Object> value = frame_inspector_->GetReceiver();
if (value.is_null() || (value->IsSmi() || !value->IsTheHole(isolate_))) {
return Utils::ToLocal(value);
diff --git a/deps/v8/src/debug/debug-stack-trace-iterator.h b/deps/v8/src/debug/debug-stack-trace-iterator.h
index 15b8a85c5e..3319bc15f5 100644
--- a/deps/v8/src/debug/debug-stack-trace-iterator.h
+++ b/deps/v8/src/debug/debug-stack-trace-iterator.h
@@ -5,6 +5,8 @@
#ifndef V8_DEBUG_DEBUG_STACK_TRACE_ITERATOR_H_
#define V8_DEBUG_DEBUG_STACK_TRACE_ITERATOR_H_
+#include <memory>
+
#include "src/debug/debug-frames.h"
#include "src/debug/debug-interface.h"
#include "src/execution/frames.h"
diff --git a/deps/v8/src/debug/debug-type-profile.h b/deps/v8/src/debug/debug-type-profile.h
index 16f739e453..f06af0c471 100644
--- a/deps/v8/src/debug/debug-type-profile.h
+++ b/deps/v8/src/debug/debug-type-profile.h
@@ -5,6 +5,7 @@
#ifndef V8_DEBUG_DEBUG_TYPE_PROFILE_H_
#define V8_DEBUG_DEBUG_TYPE_PROFILE_H_
+#include <memory>
#include <vector>
#include "src/debug/debug-interface.h"
diff --git a/deps/v8/src/debug/debug.cc b/deps/v8/src/debug/debug.cc
index aa308150ac..27f30d8c05 100644
--- a/deps/v8/src/debug/debug.cc
+++ b/deps/v8/src/debug/debug.cc
@@ -622,9 +622,7 @@ bool Debug::SetBreakPointForScript(Handle<Script> script,
Handle<BreakPoint> break_point =
isolate_->factory()->NewBreakPoint(*id, condition);
if (script->type() == Script::TYPE_WASM) {
- Handle<WasmModuleObject> module_object(
- WasmModuleObject::cast(script->wasm_module_object()), isolate_);
- return WasmModuleObject::SetBreakPoint(module_object, source_position,
+ return WasmModuleObject::SetBreakPoint(script, source_position,
break_point);
}
@@ -1039,7 +1037,7 @@ void Debug::PrepareStep(StepAction step_action) {
// and deoptimize every frame along the way.
bool in_current_frame = true;
for (; !frames_it.done(); frames_it.Advance()) {
- // TODO(clemensh): Implement stepping out from JS to wasm.
+ // TODO(clemensb): Implement stepping out from JS to wasm.
if (frames_it.frame()->is_wasm()) continue;
JavaScriptFrame* frame = JavaScriptFrame::cast(frames_it.frame());
if (last_step_action() == StepIn) {
@@ -1069,7 +1067,7 @@ void Debug::PrepareStep(StepAction step_action) {
thread_local_.target_frame_count_ = current_frame_count;
V8_FALLTHROUGH;
case StepIn:
- // TODO(clemensh): Implement stepping from JS into wasm.
+ // TODO(clemensb): Implement stepping from JS into wasm.
FloodWithOneShot(shared);
break;
}
@@ -1171,7 +1169,7 @@ void Debug::PrepareFunctionForDebugExecution(
if (debug_info->flags() & DebugInfo::kPreparedForDebugExecution) return;
// Make a copy of the bytecode array if available.
- Handle<Object> maybe_original_bytecode_array =
+ Handle<HeapObject> maybe_original_bytecode_array =
isolate_->factory()->undefined_value();
if (shared->HasBytecodeArray()) {
Handle<BytecodeArray> original_bytecode_array =
@@ -1250,7 +1248,7 @@ void Debug::InstallDebugBreakTrampoline() {
JSObject object = JSObject::cast(obj);
DescriptorArray descriptors = object.map().instance_descriptors();
- for (int i = 0; i < object.map().NumberOfOwnDescriptors(); ++i) {
+ for (InternalIndex i : object.map().IterateOwnDescriptors()) {
if (descriptors.GetDetails(i).kind() == PropertyKind::kAccessor) {
Object value = descriptors.GetStrongValue(i);
if (!value.IsAccessorPair()) continue;
@@ -1901,6 +1899,7 @@ bool Debug::CanBreakAtEntry(Handle<SharedFunctionInfo> shared) {
bool Debug::SetScriptSource(Handle<Script> script, Handle<String> source,
bool preview, debug::LiveEditResult* result) {
DebugScope debug_scope(this);
+ feature_tracker()->Track(DebugFeatureTracker::kLiveEdit);
running_live_edit_ = true;
LiveEdit::PatchScript(isolate_, script, source, preview, result);
running_live_edit_ = false;
@@ -1968,11 +1967,11 @@ void Debug::UpdateState() {
if (is_active) {
// Note that the debug context could have already been loaded to
// bootstrap test cases.
- isolate_->compilation_cache()->Disable();
+ isolate_->compilation_cache()->DisableScriptAndEval();
is_active = true;
feature_tracker()->Track(DebugFeatureTracker::kActive);
} else {
- isolate_->compilation_cache()->Enable();
+ isolate_->compilation_cache()->EnableScriptAndEval();
Unload();
}
is_active_ = is_active;
diff --git a/deps/v8/src/debug/debug.h b/deps/v8/src/debug/debug.h
index eef89f9372..73bcceb4a2 100644
--- a/deps/v8/src/debug/debug.h
+++ b/deps/v8/src/debug/debug.h
@@ -5,6 +5,7 @@
#ifndef V8_DEBUG_DEBUG_H_
#define V8_DEBUG_DEBUG_H_
+#include <memory>
#include <vector>
#include "src/codegen/source-position-table.h"
diff --git a/deps/v8/src/debug/interface-types.h b/deps/v8/src/debug/interface-types.h
index 2375827b1b..a4204bb739 100644
--- a/deps/v8/src/debug/interface-types.h
+++ b/deps/v8/src/debug/interface-types.h
@@ -129,7 +129,7 @@ class ConsoleCallArguments : private v8::FunctionCallbackInfo<v8::Value> {
}
explicit ConsoleCallArguments(const v8::FunctionCallbackInfo<v8::Value>&);
- explicit ConsoleCallArguments(internal::BuiltinArguments&);
+ explicit ConsoleCallArguments(const internal::BuiltinArguments&);
};
class ConsoleContext {
diff --git a/deps/v8/src/deoptimizer/arm/deoptimizer-arm.cc b/deps/v8/src/deoptimizer/arm/deoptimizer-arm.cc
index 2befb70264..45ff06eb70 100644
--- a/deps/v8/src/deoptimizer/arm/deoptimizer-arm.cc
+++ b/deps/v8/src/deoptimizer/arm/deoptimizer-arm.cc
@@ -123,6 +123,17 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
__ vstr(d0, r1, dst_offset);
}
+ // Mark the stack as not iterable for the CPU profiler which won't be able to
+ // walk the stack without the return address.
+ {
+ UseScratchRegisterScope temps(masm);
+ Register is_iterable = temps.Acquire();
+ Register zero = r4;
+ __ Move(is_iterable, ExternalReference::stack_is_iterable_address(isolate));
+ __ mov(zero, Operand(0));
+ __ strb(zero, MemOperand(is_iterable));
+ }
+
// Remove the saved registers from the stack.
__ add(sp, sp, Operand(kSavedRegistersAreaSize));
@@ -209,6 +220,15 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
// Restore the registers from the stack.
__ ldm(ia_w, sp, restored_regs); // all but pc registers.
+ {
+ UseScratchRegisterScope temps(masm);
+ Register is_iterable = temps.Acquire();
+ Register one = r4;
+ __ Move(is_iterable, ExternalReference::stack_is_iterable_address(isolate));
+ __ mov(one, Operand(1));
+ __ strb(one, MemOperand(is_iterable));
+ }
+
// Remove sp, lr and pc.
__ Drop(3);
{
@@ -218,6 +238,7 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
__ pop(lr);
__ Jump(scratch);
}
+
__ stop();
}
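
Each architecture's deopt entry now brackets the window in which frames have no return address by toggling a per-isolate byte that the sampling CPU profiler consults. A hedged C++ model of the protocol (the real flag lives behind ExternalReference::stack_is_iterable_address):

    #include <atomic>
    #include <cstdint>

    // Hypothetical model of the per-isolate flag.
    std::atomic<uint8_t> stack_is_iterable{1};

    void DeoptEntryModel() {
      stack_is_iterable.store(0);  // frames lack a return address; don't walk
      // ... copy registers out, build and materialize output frames ...
      stack_is_iterable.store(1);  // stack is walkable again
      // ... pop continuation and jump ...
    }

    // The sampling profiler checks the byte before attempting to unwind:
    bool CanSampleStack() { return stack_is_iterable.load() != 0; }
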
diff --git a/deps/v8/src/deoptimizer/arm64/deoptimizer-arm64.cc b/deps/v8/src/deoptimizer/arm64/deoptimizer-arm64.cc
index 82ae764e50..17091259d6 100644
--- a/deps/v8/src/deoptimizer/arm64/deoptimizer-arm64.cc
+++ b/deps/v8/src/deoptimizer/arm64/deoptimizer-arm64.cc
@@ -189,6 +189,15 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
CopyRegListToFrame(masm, x1, FrameDescription::double_registers_offset(),
saved_double_registers, x2, x3, kDoubleRegistersOffset);
+ // Mark the stack as not iterable for the CPU profiler which won't be able to
+ // walk the stack without the return address.
+ {
+ UseScratchRegisterScope temps(masm);
+ Register is_iterable = temps.AcquireX();
+ __ Mov(is_iterable, ExternalReference::stack_is_iterable_address(isolate));
+ __ strb(xzr, MemOperand(is_iterable));
+ }
+
// Remove the saved registers from the stack.
DCHECK_EQ(kSavedRegistersAreaSize % kXRegSize, 0);
__ Drop(kSavedRegistersAreaSize / kXRegSize);
@@ -251,6 +260,15 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
RestoreRegList(masm, saved_double_registers, x1,
FrameDescription::double_registers_offset());
+ {
+ UseScratchRegisterScope temps(masm);
+ Register is_iterable = temps.AcquireX();
+ Register one = x4;
+ __ Mov(is_iterable, ExternalReference::stack_is_iterable_address(isolate));
+ __ Mov(one, Operand(1));
+ __ strb(one, MemOperand(is_iterable));
+ }
+
// TODO(all): ARM copies a lot (if not all) of the last output frame onto the
// stack, then pops it all into registers. Here, we try to load it directly
// into the relevant registers. Is this correct? If so, we should improve the
diff --git a/deps/v8/src/deoptimizer/deoptimize-reason.h b/deps/v8/src/deoptimizer/deoptimize-reason.h
index ac2273460a..71eaa8b626 100644
--- a/deps/v8/src/deoptimizer/deoptimize-reason.h
+++ b/deps/v8/src/deoptimizer/deoptimize-reason.h
@@ -48,7 +48,6 @@ namespace internal {
V(NotASymbol, "not a Symbol") \
V(OutOfBounds, "out of bounds") \
V(Overflow, "overflow") \
- V(ReceiverNotAGlobalProxy, "receiver was not a global proxy") \
V(Smi, "Smi") \
V(Unknown, "(unknown)") \
V(ValueMismatch, "value mismatch") \
diff --git a/deps/v8/src/deoptimizer/deoptimizer.cc b/deps/v8/src/deoptimizer/deoptimizer.cc
index 64551c6899..fcb4c27d0b 100644
--- a/deps/v8/src/deoptimizer/deoptimizer.cc
+++ b/deps/v8/src/deoptimizer/deoptimizer.cc
@@ -357,6 +357,9 @@ void Deoptimizer::DeoptimizeMarkedCodeForContext(NativeContext native_context) {
for (Code code : codes) {
isolate->heap()->InvalidateCodeDeoptimizationData(code);
}
+
+ native_context.GetOSROptimizedCodeCache().EvictMarkedCode(
+ native_context.GetIsolate());
}
void Deoptimizer::DeoptimizeAll(Isolate* isolate) {
@@ -375,6 +378,7 @@ void Deoptimizer::DeoptimizeAll(Isolate* isolate) {
while (!context.IsUndefined(isolate)) {
NativeContext native_context = NativeContext::cast(context);
MarkAllCodeForContext(native_context);
+ OSROptimizedCodeCache::Clear(native_context);
DeoptimizeMarkedCodeForContext(native_context);
context = native_context.next_context_link();
}
@@ -432,6 +436,13 @@ void Deoptimizer::DeoptimizeFunction(JSFunction function, Code code) {
code.set_deopt_already_counted(true);
}
DeoptimizeMarkedCodeForContext(function.context().native_context());
+ // TODO(mythria): Ideally EvictMarkedCode should compact the cache without
+ // having to explicitly call this. We don't do this currently because
+ // compacting causes GC and DeoptimizeMarkedCodeForContext uses raw
+ // pointers. Update DeoptimizeMarkedCodeForContext to use handles and remove
+ // this call from here.
+ OSROptimizedCodeCache::Compact(
+ Handle<NativeContext>(function.context().native_context(), isolate));
}
}
@@ -3640,8 +3651,7 @@ void TranslatedState::EnsurePropertiesAllocatedAndMarked(
// Set markers for the double properties.
Handle<DescriptorArray> descriptors(map->instance_descriptors(), isolate());
- int field_count = map->NumberOfOwnDescriptors();
- for (int i = 0; i < field_count; i++) {
+ for (InternalIndex i : map->IterateOwnDescriptors()) {
FieldIndex index = FieldIndex::ForDescriptor(*map, i);
if (descriptors->GetDetails(i).representation().IsDouble() &&
!index.is_inobject()) {
@@ -3673,10 +3683,9 @@ void TranslatedState::EnsureJSObjectAllocated(TranslatedValue* slot,
Handle<ByteArray> object_storage = AllocateStorageFor(slot);
// Now we handle the interesting (JSObject) case.
Handle<DescriptorArray> descriptors(map->instance_descriptors(), isolate());
- int field_count = map->NumberOfOwnDescriptors();
// Set markers for the double properties.
- for (int i = 0; i < field_count; i++) {
+ for (InternalIndex i : map->IterateOwnDescriptors()) {
FieldIndex index = FieldIndex::ForDescriptor(*map, i);
if (descriptors->GetDetails(i).representation().IsDouble() &&
index.is_inobject()) {
@@ -3712,8 +3721,7 @@ void TranslatedState::InitializeJSObjectAt(
CHECK_GE(slot->GetChildrenCount(), 2);
// Notify the concurrent marker about the layout change.
- isolate()->heap()->NotifyObjectLayoutChange(
- *object_storage, slot->GetChildrenCount() * kTaggedSize, no_allocation);
+ isolate()->heap()->NotifyObjectLayoutChange(*object_storage, no_allocation);
// Fill the property array field.
{
@@ -3772,8 +3780,7 @@ void TranslatedState::InitializeObjectWithTaggedFieldsAt(
}
// Notify the concurrent marker about the layout change.
- isolate()->heap()->NotifyObjectLayoutChange(
- *object_storage, slot->GetChildrenCount() * kTaggedSize, no_allocation);
+ isolate()->heap()->NotifyObjectLayoutChange(*object_storage, no_allocation);
// Write the fields to the object.
for (int i = 1; i < slot->GetChildrenCount(); i++) {
diff --git a/deps/v8/src/deoptimizer/deoptimizer.h b/deps/v8/src/deoptimizer/deoptimizer.h
index 6d0a350aac..beb2a9aa50 100644
--- a/deps/v8/src/deoptimizer/deoptimizer.h
+++ b/deps/v8/src/deoptimizer/deoptimizer.h
@@ -488,14 +488,14 @@ class Deoptimizer : public Malloced {
DeoptimizeKind* type);
// Code generation support.
- static int input_offset() { return OFFSET_OF(Deoptimizer, input_); }
+ static int input_offset() { return offsetof(Deoptimizer, input_); }
static int output_count_offset() {
- return OFFSET_OF(Deoptimizer, output_count_);
+ return offsetof(Deoptimizer, output_count_);
}
- static int output_offset() { return OFFSET_OF(Deoptimizer, output_); }
+ static int output_offset() { return offsetof(Deoptimizer, output_); }
static int caller_frame_top_offset() {
- return OFFSET_OF(Deoptimizer, caller_frame_top_);
+ return offsetof(Deoptimizer, caller_frame_top_);
}
V8_EXPORT_PRIVATE static int GetDeoptimizedCodeCount(Isolate* isolate);
@@ -731,11 +731,11 @@ class FrameDescription {
int parameter_count() { return parameter_count_; }
static int registers_offset() {
- return OFFSET_OF(FrameDescription, register_values_.registers_);
+ return offsetof(FrameDescription, register_values_.registers_);
}
static int double_registers_offset() {
- return OFFSET_OF(FrameDescription, register_values_.double_registers_);
+ return offsetof(FrameDescription, register_values_.double_registers_);
}
static int frame_size_offset() {
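
The custom OFFSET_OF macro gives way to the standard offsetof, a constant expression the compiler understands. A minimal illustration with simplified stand-ins for FrameDescription (the nested member designator is the same form the patch uses):

    #include <cstddef>
    #include <cstdint>

    // Simplified stand-ins for FrameDescription and its register block.
    struct RegisterValues { intptr_t registers_[16]; };
    struct FrameDescriptionLike {
      uintptr_t frame_size_;
      RegisterValues register_values_;
    };

    constexpr size_t kRegistersOffset =
        offsetof(FrameDescriptionLike, register_values_.registers_);
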
diff --git a/deps/v8/src/deoptimizer/ppc/deoptimizer-ppc.cc b/deps/v8/src/deoptimizer/ppc/deoptimizer-ppc.cc
index 864e9dbe36..4036b73443 100644
--- a/deps/v8/src/deoptimizer/ppc/deoptimizer-ppc.cc
+++ b/deps/v8/src/deoptimizer/ppc/deoptimizer-ppc.cc
@@ -113,6 +113,17 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
__ stfd(d0, MemOperand(r4, dst_offset));
}
+ // Mark the stack as not iterable for the CPU profiler which won't be able to
+ // walk the stack without the return address.
+ {
+ UseScratchRegisterScope temps(masm);
+ Register is_iterable = temps.Acquire();
+ Register zero = r7;
+ __ Move(is_iterable, ExternalReference::stack_is_iterable_address(isolate));
+ __ li(zero, Operand(0));
+ __ stb(zero, MemOperand(is_iterable));
+ }
+
// Remove the saved registers from the stack.
__ addi(sp, sp, Operand(kSavedRegistersAreaSize));
@@ -210,12 +221,22 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
{
UseScratchRegisterScope temps(masm);
+ Register is_iterable = temps.Acquire();
+ Register one = r7;
+ __ Move(is_iterable, ExternalReference::stack_is_iterable_address(isolate));
+ __ li(one, Operand(1));
+ __ stb(one, MemOperand(is_iterable));
+ }
+
+ {
+ UseScratchRegisterScope temps(masm);
Register scratch = temps.Acquire();
__ pop(scratch); // get continuation, leave pc on stack
__ pop(r0);
__ mtlr(r0);
__ Jump(scratch);
}
+
__ stop();
}
diff --git a/deps/v8/src/deoptimizer/s390/deoptimizer-s390.cc b/deps/v8/src/deoptimizer/s390/deoptimizer-s390.cc
index 616a57ba0e..7ea6e56b8c 100644
--- a/deps/v8/src/deoptimizer/s390/deoptimizer-s390.cc
+++ b/deps/v8/src/deoptimizer/s390/deoptimizer-s390.cc
@@ -40,7 +40,7 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
}
// Push all GPRs onto the stack
- __ lay(sp, MemOperand(sp, -kNumberOfRegisters * kPointerSize));
+ __ lay(sp, MemOperand(sp, -kNumberOfRegisters * kSystemPointerSize));
__ StoreMultipleP(r0, sp, MemOperand(sp)); // Save all 16 registers
__ mov(r1, Operand(ExternalReference::Create(
@@ -48,7 +48,7 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
__ StoreP(fp, MemOperand(r1));
const int kSavedRegistersAreaSize =
- (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize;
+ (kNumberOfRegisters * kSystemPointerSize) + kDoubleRegsSize;
// The bailout id is passed using r10
__ LoadRR(r4, r10);
@@ -79,7 +79,7 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
// r6: Fp-to-sp delta.
// Parm6: isolate is passed on the stack.
__ mov(r7, Operand(ExternalReference::isolate_address(isolate)));
- __ StoreP(r7, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize));
+ __ StoreP(r7, MemOperand(sp, kStackFrameExtraParamSlot * kSystemPointerSize));
// Call Deoptimizer::New().
{
@@ -94,13 +94,14 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
// Copy core registers into FrameDescription::registers_[kNumRegisters].
// DCHECK_EQ(Register::kNumRegisters, kNumberOfRegisters);
// __ mvc(MemOperand(r3, FrameDescription::registers_offset()),
- // MemOperand(sp), kNumberOfRegisters * kPointerSize);
+ // MemOperand(sp), kNumberOfRegisters * kSystemPointerSize);
// Copy core registers into FrameDescription::registers_[kNumRegisters].
// TODO(john.yan): optimize the following code by using mvc instruction
DCHECK_EQ(Register::kNumRegisters, kNumberOfRegisters);
for (int i = 0; i < kNumberOfRegisters; i++) {
- int offset = (i * kPointerSize) + FrameDescription::registers_offset();
- __ LoadP(r4, MemOperand(sp, i * kPointerSize));
+ int offset =
+ (i * kSystemPointerSize) + FrameDescription::registers_offset();
+ __ LoadP(r4, MemOperand(sp, i * kSystemPointerSize));
__ StoreP(r4, MemOperand(r3, offset));
}
@@ -110,12 +111,24 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
int code = config->GetAllocatableDoubleCode(i);
int dst_offset = code * kDoubleSize + double_regs_offset;
- int src_offset = code * kDoubleSize + kNumberOfRegisters * kPointerSize;
+ int src_offset =
+ code * kDoubleSize + kNumberOfRegisters * kSystemPointerSize;
// TODO(joransiu): MVC opportunity
__ LoadDouble(d0, MemOperand(sp, src_offset));
__ StoreDouble(d0, MemOperand(r3, dst_offset));
}
+ // Mark the stack as not iterable for the CPU profiler which won't be able to
+ // walk the stack without the return address.
+ {
+ UseScratchRegisterScope temps(masm);
+ Register is_iterable = temps.Acquire();
+ Register zero = r6;
+ __ Move(is_iterable, ExternalReference::stack_is_iterable_address(isolate));
+ __ lhi(zero, Operand(0));
+ __ StoreByte(zero, MemOperand(is_iterable));
+ }
+
// Remove the saved registers from the stack.
__ la(sp, MemOperand(sp, kSavedRegistersAreaSize));
@@ -134,7 +147,7 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
__ bind(&pop_loop);
__ pop(r6);
__ StoreP(r6, MemOperand(r5, 0));
- __ la(r5, MemOperand(r5, kPointerSize));
+ __ la(r5, MemOperand(r5, kSystemPointerSize));
__ bind(&pop_loop_header);
__ CmpP(r4, sp);
__ bne(&pop_loop);
@@ -158,7 +171,7 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
// r3 = one past the last FrameDescription**.
__ LoadlW(r3, MemOperand(r2, Deoptimizer::output_count_offset()));
__ LoadP(r6, MemOperand(r2, Deoptimizer::output_offset())); // r6 is output_.
- __ ShiftLeftP(r3, r3, Operand(kPointerSizeLog2));
+ __ ShiftLeftP(r3, r3, Operand(kSystemPointerSizeLog2));
__ AddP(r3, r6, r3);
__ b(&outer_loop_header, Label::kNear);
@@ -178,7 +191,7 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
__ CmpP(r5, Operand::Zero());
__ bne(&inner_push_loop); // test for gt?
- __ AddP(r6, r6, Operand(kPointerSize));
+ __ AddP(r6, r6, Operand(kSystemPointerSize));
__ bind(&outer_loop_header);
__ CmpP(r6, r3);
__ blt(&outer_push_loop);
@@ -200,15 +213,26 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
// Restore the registers from the last output frame.
__ LoadRR(r1, r4);
for (int i = kNumberOfRegisters - 1; i > 0; i--) {
- int offset = (i * kPointerSize) + FrameDescription::registers_offset();
+ int offset =
+ (i * kSystemPointerSize) + FrameDescription::registers_offset();
if ((restored_regs & (1 << i)) != 0) {
__ LoadP(ToRegister(i), MemOperand(r1, offset));
}
}
+ {
+ UseScratchRegisterScope temps(masm);
+ Register is_iterable = temps.Acquire();
+ Register one = r6;
+ __ Move(is_iterable, ExternalReference::stack_is_iterable_address(isolate));
+ __ lhi(one, Operand(1));
+ __ StoreByte(one, MemOperand(is_iterable));
+ }
+
__ pop(ip); // get continuation, leave pc on stack
__ pop(r14);
__ Jump(ip);
+
__ stop();
}
diff --git a/deps/v8/src/deoptimizer/x64/deoptimizer-x64.cc b/deps/v8/src/deoptimizer/x64/deoptimizer-x64.cc
index 29c81f195c..03d7c759c0 100644
--- a/deps/v8/src/deoptimizer/x64/deoptimizer-x64.cc
+++ b/deps/v8/src/deoptimizer/x64/deoptimizer-x64.cc
@@ -81,7 +81,7 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
// On windows put the arguments on the stack (PrepareCallCFunction
// has created space for this). On linux pass the arguments in r8 and r9.
-#ifdef _WIN64
+#ifdef V8_TARGET_OS_WIN
__ movq(Operand(rsp, 4 * kSystemPointerSize), arg5);
__ LoadAddress(arg5, ExternalReference::isolate_address(isolate));
__ movq(Operand(rsp, 5 * kSystemPointerSize), arg5);
diff --git a/deps/v8/src/diagnostics/arm/disasm-arm.cc b/deps/v8/src/diagnostics/arm/disasm-arm.cc
index 51b6594e70..891ab0662e 100644
--- a/deps/v8/src/diagnostics/arm/disasm-arm.cc
+++ b/deps/v8/src/diagnostics/arm/disasm-arm.cc
@@ -590,7 +590,7 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
}
case 't': { // 'target: target of branch instructions
DCHECK(STRING_STARTS_WITH(format, "target"));
- int off = (instr->SImmed24Value() << 2) + 8;
+ int off = (static_cast<uint32_t>(instr->SImmed24Value()) << 2) + 8u;
out_buffer_pos_ += SNPrintF(
out_buffer_ + out_buffer_pos_, "%+d -> %s", off,
converter_.NameOfAddress(reinterpret_cast<byte*>(instr) + off));
@@ -1890,6 +1890,17 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
op, size, Vd, Vn, Vm);
break;
}
+ case 0x4: {
+ if (instr->Bit(4) == 0) {
+ // vshl.s<size> Qd, Qm, Qn.
+ out_buffer_pos_ +=
+ SNPrintF(out_buffer_ + out_buffer_pos_,
+ "vshl.s%d q%d, q%d, q%d", size, Vd, Vm, Vn);
+ } else {
+ Unknown(instr);
+ }
+ break;
+ }
case 0x6: {
// vmin/vmax.s<size> Qd, Qm, Qn.
const char* op = instr->Bit(4) == 1 ? "vmin" : "vmax";
@@ -2083,6 +2094,17 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
op, size, Vd, Vn, Vm);
break;
}
+ case 0x4: {
+ if (instr->Bit(4) == 0) {
+ // vshl.u<size> Qd, Qm, Qn.
+ out_buffer_pos_ +=
+ SNPrintF(out_buffer_ + out_buffer_pos_,
+ "vshl.u%d q%d, q%d, q%d", size, Vd, Vm, Vn);
+ } else {
+ Unknown(instr);
+ }
+ break;
+ }
case 0x6: {
// vmin/vmax.u<size> Qd, Qm, Qn.
const char* op = instr->Bit(4) == 1 ? "vmin" : "vmax";
diff --git a/deps/v8/src/diagnostics/arm64/disasm-arm64.cc b/deps/v8/src/diagnostics/arm64/disasm-arm64.cc
index 7141cdf283..db14689ad1 100644
--- a/deps/v8/src/diagnostics/arm64/disasm-arm64.cc
+++ b/deps/v8/src/diagnostics/arm64/disasm-arm64.cc
@@ -3840,8 +3840,8 @@ int DisassemblingDecoder::SubstituteImmediateField(Instruction* instr,
case 'L': {
switch (format[2]) {
case 'L': { // ILLiteral - Immediate Load Literal.
- AppendToOutput("pc%+" PRId32, instr->ImmLLiteral()
- << kLoadLiteralScaleLog2);
+ AppendToOutput("pc%+" PRId32,
+ instr->ImmLLiteral() * kLoadLiteralScale);
return 9;
}
case 'S': { // ILS - Immediate Load/Store.
@@ -3960,7 +3960,7 @@ int DisassemblingDecoder::SubstituteImmediateField(Instruction* instr,
unsigned rd_index, rn_index;
unsigned imm5 = instr->ImmNEON5();
unsigned imm4 = instr->ImmNEON4();
- int tz = CountTrailingZeros(imm5, 32);
+ int tz = base::bits::CountTrailingZeros(imm5);
if (tz <= 3) { // Defined for 0 <= tz <= 3 only.
rd_index = imm5 >> (tz + 1);
rn_index = imm4 >> tz;
@@ -4179,7 +4179,7 @@ int DisassemblingDecoder::SubstituteBranchTargetField(Instruction* instr,
default:
UNREACHABLE();
}
- offset <<= kInstrSizeLog2;
+ offset *= kInstrSize;
char sign = '+';
if (offset < 0) {
sign = '-';
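
The disassembler changes above replace left shifts of possibly negative offsets with multiplications or unsigned-domain shifts; shifting a negative signed value left is undefined behaviour before C++20. A minimal sketch of both patterns used by the patch:

    #include <cstdint>

    // Scale by multiplication instead of shifting (offset *= kInstrSize):
    int32_t ScaleBranchOffset(int32_t offset, int32_t instr_size) {
      return offset * instr_size;
    }

    // Where a shift is kept, perform it on the unsigned representation,
    // mirroring (static_cast<uint32_t>(SImmed24Value()) << 2) + 8u:
    int32_t ScaleImmed24(int32_t simmed24) {
      return static_cast<int32_t>((static_cast<uint32_t>(simmed24) << 2) + 8u);
    }
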
diff --git a/deps/v8/src/diagnostics/basic-block-profiler.h b/deps/v8/src/diagnostics/basic-block-profiler.h
index 960b4b43e1..9639e0b661 100644
--- a/deps/v8/src/diagnostics/basic-block-profiler.h
+++ b/deps/v8/src/diagnostics/basic-block-profiler.h
@@ -7,6 +7,7 @@
#include <iosfwd>
#include <list>
+#include <memory>
#include <string>
#include <vector>
diff --git a/deps/v8/src/diagnostics/ia32/disasm-ia32.cc b/deps/v8/src/diagnostics/ia32/disasm-ia32.cc
index e8c9588bbe..ead0a5a709 100644
--- a/deps/v8/src/diagnostics/ia32/disasm-ia32.cc
+++ b/deps/v8/src/diagnostics/ia32/disasm-ia32.cc
@@ -1057,6 +1057,10 @@ int DisassemblerIA32::AVXInstruction(byte* data) {
AppendToBuffer("vmovaps %s,", NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
break;
+ case 0x51:
+ AppendToBuffer("vsqrtps %s,", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+ break;
case 0x52:
AppendToBuffer("vrsqrtps %s,", NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
@@ -1075,6 +1079,11 @@ int DisassemblerIA32::AVXInstruction(byte* data) {
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
+ case 0x56:
+ AppendToBuffer("vorps %s,%s,", NameOfXMMRegister(regop),
+ NameOfXMMRegister(vvvv));
+ current += PrintRightXMMOperand(current);
+ break;
case 0x57:
AppendToBuffer("vxorps %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
@@ -1138,11 +1147,25 @@ int DisassemblerIA32::AVXInstruction(byte* data) {
int mod, regop, rm, vvvv = vex_vreg();
get_modrm(*current, &mod, &regop, &rm);
switch (opcode) {
+ case 0x28:
+ AppendToBuffer("vmovapd %s,", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+ break;
case 0x54:
AppendToBuffer("vandpd %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
+ case 0x55:
+ AppendToBuffer("vandnpd %s,%s,", NameOfXMMRegister(regop),
+ NameOfXMMRegister(vvvv));
+ current += PrintRightXMMOperand(current);
+ break;
+ case 0x56:
+ AppendToBuffer("vorpd %s,%s,", NameOfXMMRegister(regop),
+ NameOfXMMRegister(vvvv));
+ current += PrintRightXMMOperand(current);
+ break;
case 0x57:
AppendToBuffer("vxorpd %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
@@ -1200,11 +1223,26 @@ int DisassemblerIA32::AVXInstruction(byte* data) {
current++;
AppendToBuffer(",%u", *current++);
break;
+ case 0x73:
+ AppendToBuffer("vps%sq %s,%s", sf_str[regop / 2],
+ NameOfXMMRegister(vvvv), NameOfXMMRegister(rm));
+ current++;
+ AppendToBuffer(",%u", *current++);
+ break;
case 0x7E:
AppendToBuffer("vmovd ");
current += PrintRightOperand(current);
AppendToBuffer(",%s", NameOfXMMRegister(regop));
break;
+ case 0xC2: {
+ const char* const pseudo_op[] = {"eq", "lt", "le", "unord", "neq"};
+ AppendToBuffer("vcmppd %s,%s,", NameOfXMMRegister(regop),
+ NameOfXMMRegister(vvvv));
+ current += PrintRightXMMOperand(current);
+ AppendToBuffer(", (%s)", pseudo_op[*current]);
+ current++;
+ break;
+ }
case 0xC4:
AppendToBuffer("vpinsrw %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
@@ -1212,6 +1250,13 @@ int DisassemblerIA32::AVXInstruction(byte* data) {
AppendToBuffer(",%d", Imm8(current));
current++;
break;
+ case 0xC6:
+ AppendToBuffer("vshufpd %s,%s,", NameOfXMMRegister(regop),
+ NameOfXMMRegister(vvvv));
+ current += PrintRightXMMOperand(current);
+ AppendToBuffer(",%d", Imm8(current));
+ current++;
+ break;
#define DECLARE_SSE_AVX_DIS_CASE(instruction, notUsed1, notUsed2, opcode) \
case 0x##opcode: { \
AppendToBuffer("v" #instruction " %s,%s,", NameOfXMMRegister(regop), \
@@ -1763,17 +1808,17 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
get_modrm(*data, &mod, &regop, &rm);
AppendToBuffer("ucomiss %s,", NameOfXMMRegister(regop));
data += PrintRightXMMOperand(data);
- } else if (f0byte >= 0x52 && f0byte <= 0x5F) {
+ } else if (f0byte >= 0x51 && f0byte <= 0x5F) {
const char* const pseudo_op[] = {
- "rsqrtps", "rcpps", "andps", "andnps", "orps",
- "xorps", "addps", "mulps", "cvtps2pd", "cvtdq2ps",
- "subps", "minps", "divps", "maxps",
+ "sqrtps", "rsqrtps", "rcpps", "andps", "andnps",
+ "orps", "xorps", "addps", "mulps", "cvtps2pd",
+ "cvtdq2ps", "subps", "minps", "divps", "maxps",
};
data += 2;
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("%s %s,", pseudo_op[f0byte - 0x52],
+ AppendToBuffer("%s %s,", pseudo_op[f0byte - 0x51],
NameOfXMMRegister(regop));
data += PrintRightXMMOperand(data);
} else if (f0byte == 0x50) {
@@ -2026,7 +2071,13 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
data += 2;
} else if (*data == 0x0F) {
data++;
- if (*data == 0x38) {
+ if (*data == 0x28) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("movapd %s,", NameOfXMMRegister(regop));
+ data += PrintRightXMMOperand(data);
+ } else if (*data == 0x38) {
data++;
byte op = *data;
data++;
@@ -2160,27 +2211,31 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
AppendToBuffer("movmskpd %s,%s", NameOfCPURegister(regop),
NameOfXMMRegister(rm));
data++;
- } else if (*data == 0x54) {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("andpd %s,%s", NameOfXMMRegister(regop),
- NameOfXMMRegister(rm));
- data++;
- } else if (*data == 0x56) {
+ } else if (*data >= 0x54 && *data <= 0x59) {
+ const char* const pseudo_op[] = {
+ "andpd", "andnpd", "orpd", "xorpd", "addpd", "mulpd",
+ };
+ byte op = *data;
data++;
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("orpd %s,%s", NameOfXMMRegister(regop),
- NameOfXMMRegister(rm));
- data++;
- } else if (*data == 0x57) {
+ AppendToBuffer("%s %s,", pseudo_op[op - 0x54],
+ NameOfXMMRegister(regop));
+ data += PrintRightXMMOperand(data);
+ } else if (*data >= 0x5c && *data <= 0x5f) {
+ const char* const pseudo_op[] = {
+ "subpd",
+ "minpd",
+ "divpd",
+ "maxpd",
+ };
+ byte op = *data;
data++;
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("xorpd %s,%s", NameOfXMMRegister(regop),
- NameOfXMMRegister(rm));
- data++;
+ AppendToBuffer("%s %s,", pseudo_op[op - 0x5c],
+ NameOfXMMRegister(regop));
+ data += PrintRightXMMOperand(data);
} else if (*data == 0x6E) {
data++;
int mod, regop, rm;
@@ -2257,6 +2312,15 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
AppendToBuffer("movd ");
data += PrintRightOperand(data);
AppendToBuffer(",%s", NameOfXMMRegister(regop));
+ } else if (*data == 0xC2) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ const char* const pseudo_op[] = {"eq", "lt", "le", "unord", "neq"};
+ AppendToBuffer("cmppd %s, ", NameOfXMMRegister(regop));
+ data += PrintRightXMMOperand(data);
+ AppendToBuffer(", (%s)", pseudo_op[*data]);
+ data++;
} else if (*data == 0xC4) {
data++;
int mod, regop, rm;
@@ -2265,6 +2329,15 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
data += PrintRightOperand(data);
AppendToBuffer(",%d", Imm8(data));
data++;
+ } else if (*data == 0xC6) {
+ // shufpd xmm, xmm/m128, imm8
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("shufpd %s,", NameOfXMMRegister(regop));
+ data += PrintRightXMMOperand(data);
+ AppendToBuffer(",%d", Imm8(data));
+ data++;
} else if (*data == 0xE7) {
data++;
int mod, regop, rm;
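
Several of the ia32 hunks above collapse near-identical else-if branches into a mnemonic table indexed by (opcode - base). A self-contained sketch of that decode pattern over the same 0x54..0x59 group; the function name is illustrative, not V8's:

#include <cstdint>
#include <cstdio>

// Decode opcodes 0x54..0x59 by indexing a mnemonic table with (op - 0x54),
// mirroring the consolidation in disasm-ia32.cc above.
const char* PackedDoubleMnemonic(uint8_t op) {
  static const char* const kMnemonics[] = {
      "andpd", "andnpd", "orpd", "xorpd", "addpd", "mulpd",
  };
  if (op < 0x54 || op > 0x59) return nullptr;  // not in this group
  return kMnemonics[op - 0x54];
}

int main() {
  std::printf("%s\n", PackedDoubleMnemonic(0x56));  // prints "orpd"
  return 0;
}
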
diff --git a/deps/v8/src/diagnostics/objects-debug.cc b/deps/v8/src/diagnostics/objects-debug.cc
index 6860ead022..9c4b176dc6 100644
--- a/deps/v8/src/diagnostics/objects-debug.cc
+++ b/deps/v8/src/diagnostics/objects-debug.cc
@@ -26,6 +26,7 @@
#include "src/objects/field-type.h"
#include "src/objects/foreign-inl.h"
#include "src/objects/free-space-inl.h"
+#include "src/objects/function-kind.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/js-array-inl.h"
#include "src/objects/layout-descriptor.h"
@@ -258,25 +259,25 @@ void HeapObject::HeapObjectVerify(Isolate* isolate) {
case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
JSObject::cast(*this).JSObjectVerify(isolate);
break;
- case WASM_MODULE_TYPE:
+ case WASM_MODULE_OBJECT_TYPE:
WasmModuleObject::cast(*this).WasmModuleObjectVerify(isolate);
break;
- case WASM_TABLE_TYPE:
+ case WASM_TABLE_OBJECT_TYPE:
WasmTableObject::cast(*this).WasmTableObjectVerify(isolate);
break;
- case WASM_MEMORY_TYPE:
+ case WASM_MEMORY_OBJECT_TYPE:
WasmMemoryObject::cast(*this).WasmMemoryObjectVerify(isolate);
break;
- case WASM_GLOBAL_TYPE:
+ case WASM_GLOBAL_OBJECT_TYPE:
WasmGlobalObject::cast(*this).WasmGlobalObjectVerify(isolate);
break;
- case WASM_EXCEPTION_TYPE:
+ case WASM_EXCEPTION_OBJECT_TYPE:
WasmExceptionObject::cast(*this).WasmExceptionObjectVerify(isolate);
break;
- case WASM_INSTANCE_TYPE:
+ case WASM_INSTANCE_OBJECT_TYPE:
WasmInstanceObject::cast(*this).WasmInstanceObjectVerify(isolate);
break;
- case JS_ARGUMENTS_TYPE:
+ case JS_ARGUMENTS_OBJECT_TYPE:
JSArgumentsObject::cast(*this).JSArgumentsObjectVerify(isolate);
break;
case JS_GENERATOR_OBJECT_TYPE:
@@ -365,10 +366,10 @@ void HeapObject::HeapObjectVerify(Isolate* isolate) {
case JS_PROMISE_TYPE:
JSPromise::cast(*this).JSPromiseVerify(isolate);
break;
- case JS_REGEXP_TYPE:
+ case JS_REG_EXP_TYPE:
JSRegExp::cast(*this).JSRegExpVerify(isolate);
break;
- case JS_REGEXP_STRING_ITERATOR_TYPE:
+ case JS_REG_EXP_STRING_ITERATOR_TYPE:
JSRegExpStringIterator::cast(*this).JSRegExpStringIteratorVerify(isolate);
break;
case FILLER_TYPE:
@@ -425,34 +426,34 @@ void HeapObject::HeapObjectVerify(Isolate* isolate) {
CodeDataContainer::cast(*this).CodeDataContainerVerify(isolate);
break;
#ifdef V8_INTL_SUPPORT
- case JS_INTL_V8_BREAK_ITERATOR_TYPE:
+ case JS_V8_BREAK_ITERATOR_TYPE:
JSV8BreakIterator::cast(*this).JSV8BreakIteratorVerify(isolate);
break;
- case JS_INTL_COLLATOR_TYPE:
+ case JS_COLLATOR_TYPE:
JSCollator::cast(*this).JSCollatorVerify(isolate);
break;
- case JS_INTL_DATE_TIME_FORMAT_TYPE:
+ case JS_DATE_TIME_FORMAT_TYPE:
JSDateTimeFormat::cast(*this).JSDateTimeFormatVerify(isolate);
break;
- case JS_INTL_LIST_FORMAT_TYPE:
+ case JS_LIST_FORMAT_TYPE:
JSListFormat::cast(*this).JSListFormatVerify(isolate);
break;
- case JS_INTL_LOCALE_TYPE:
+ case JS_LOCALE_TYPE:
JSLocale::cast(*this).JSLocaleVerify(isolate);
break;
- case JS_INTL_NUMBER_FORMAT_TYPE:
+ case JS_NUMBER_FORMAT_TYPE:
JSNumberFormat::cast(*this).JSNumberFormatVerify(isolate);
break;
- case JS_INTL_PLURAL_RULES_TYPE:
+ case JS_PLURAL_RULES_TYPE:
JSPluralRules::cast(*this).JSPluralRulesVerify(isolate);
break;
- case JS_INTL_RELATIVE_TIME_FORMAT_TYPE:
+ case JS_RELATIVE_TIME_FORMAT_TYPE:
JSRelativeTimeFormat::cast(*this).JSRelativeTimeFormatVerify(isolate);
break;
- case JS_INTL_SEGMENT_ITERATOR_TYPE:
+ case JS_SEGMENT_ITERATOR_TYPE:
JSSegmentIterator::cast(*this).JSSegmentIteratorVerify(isolate);
break;
- case JS_INTL_SEGMENTER_TYPE:
+ case JS_SEGMENTER_TYPE:
JSSegmenter::cast(*this).JSSegmenterVerify(isolate);
break;
#endif // V8_INTL_SUPPORT
@@ -512,8 +513,6 @@ void BytecodeArray::BytecodeArrayVerify(Isolate* isolate) {
USE_TORQUE_VERIFIER(FreeSpace)
-USE_TORQUE_VERIFIER(FeedbackCell)
-
void FeedbackVector::FeedbackVectorVerify(Isolate* isolate) {
TorqueGeneratedClassVerifiers::FeedbackVectorVerify(*this, isolate);
MaybeObject code = optimized_code_weak_or_smi();
@@ -590,7 +589,7 @@ void JSObject::JSObjectVerify(Isolate* isolate) {
bool is_transitionable_fast_elements_kind =
IsTransitionableFastElementsKind(map().elements_kind());
- for (int i = 0; i < map().NumberOfOwnDescriptors(); i++) {
+ for (InternalIndex i : map().IterateOwnDescriptors()) {
PropertyDetails details = descriptors.GetDetails(i);
if (details.location() == kField) {
DCHECK_EQ(kData, details.kind());
@@ -668,7 +667,7 @@ void Map::MapVerify(Isolate* isolate) {
CHECK(!is_dictionary_map());
CHECK(!is_access_check_needed());
DescriptorArray const descriptors = instance_descriptors();
- for (int i = 0; i < NumberOfOwnDescriptors(); ++i) {
+ for (InternalIndex i : IterateOwnDescriptors()) {
CHECK(!descriptors.GetKey(i).IsInterestingSymbol());
}
}
@@ -803,9 +802,9 @@ void DescriptorArray::DescriptorArrayVerify(Isolate* isolate) {
// Check that properties with private symbols names are non-enumerable, and
// that fields are in order.
int expected_field_index = 0;
- for (int descriptor = 0; descriptor < number_of_descriptors();
- descriptor++) {
- Object key = *(GetDescriptorSlot(descriptor) + kEntryKeyIndex);
+ for (InternalIndex descriptor :
+ InternalIndex::Range(number_of_descriptors())) {
+ Object key = *(GetDescriptorSlot(descriptor.as_int()) + kEntryKeyIndex);
// number_of_descriptors() may be out of sync with the actual descriptors
// written during descriptor array construction.
if (key.IsUndefined(isolate)) continue;
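
Several hunks above migrate raw `int` descriptor loops to range-based iteration over a typed index. A rough sketch of such a typed-index range (a simplification; V8's real InternalIndex has a richer API):

#include <cstdio>

// A minimal typed index with a Range() usable in range-based for,
// approximating the InternalIndex usage introduced above.
class Index {
 public:
  explicit Index(int v) : value_(v) {}
  int as_int() const { return value_; }
  bool operator!=(Index other) const { return value_ != other.value_; }
  Index& operator++() { ++value_; return *this; }
  Index operator*() const { return *this; }  // iterator yields itself

  class Range {
   public:
    explicit Range(int size) : size_(size) {}
    Index begin() const { return Index(0); }
    Index end() const { return Index(size_); }
   private:
    int size_;
  };

 private:
  int value_;
};

int main() {
  for (Index i : Index::Range(3)) std::printf("[%d]\n", i.as_int());
  return 0;
}
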
@@ -1050,7 +1049,7 @@ void SharedFunctionInfo::SharedFunctionInfoVerify(Isolate* isolate) {
if (scope_info().length() > 0) {
ScopeInfo info = scope_info();
CHECK(kind() == info.function_kind());
- CHECK_EQ(kind() == kModule, info.scope_type() == MODULE_SCOPE);
+ CHECK_EQ(internal::IsModule(kind()), info.scope_type() == MODULE_SCOPE);
}
if (IsApiFunction()) {
@@ -1449,7 +1448,7 @@ void JSRegExp::JSRegExpVerify(Isolate* isolate) {
CHECK(arr.get(JSRegExp::kIrregexpCaptureCountIndex).IsSmi());
CHECK(arr.get(JSRegExp::kIrregexpMaxRegisterCountIndex).IsSmi());
- CHECK(arr.get(JSRegExp::kIrregexpTierUpTicksIndex).IsSmi());
+ CHECK(arr.get(JSRegExp::kIrregexpTicksUntilTierUpIndex).IsSmi());
break;
}
default:
@@ -1543,10 +1542,18 @@ void Module::ModuleVerify(Isolate* isolate) {
void SourceTextModule::SourceTextModuleVerify(Isolate* isolate) {
TorqueGeneratedClassVerifiers::SourceTextModuleVerify(*this, isolate);
- CHECK((status() >= kEvaluating && code().IsSourceTextModuleInfo()) ||
- (status() == kInstantiated && code().IsJSGeneratorObject()) ||
- (status() == kInstantiating && code().IsJSFunction()) ||
- (code().IsSharedFunctionInfo()));
+ if (status() == kErrored) {
+ CHECK(code().IsSourceTextModuleInfo());
+ } else if (status() == kEvaluating || status() == kEvaluated) {
+ CHECK(code().IsJSGeneratorObject());
+ } else {
+ CHECK((status() == kInstantiated && code().IsJSGeneratorObject()) ||
+ (status() == kInstantiating && code().IsJSFunction()) ||
+ (status() == kPreInstantiating && code().IsSharedFunctionInfo()) ||
+ (status() == kUninstantiated && code().IsSharedFunctionInfo()));
+ CHECK(top_level_capability().IsUndefined() && !AsyncParentModuleCount() &&
+ !pending_async_dependencies() && !async_evaluating());
+ }
CHECK_EQ(requested_modules().length(), info().module_requests().length());
}
@@ -1679,8 +1686,6 @@ void StoreHandler::StoreHandlerVerify(Isolate* isolate) {
USE_TORQUE_VERIFIER(AccessorInfo)
-USE_TORQUE_VERIFIER(AccessorPair)
-
void CallHandlerInfo::CallHandlerInfoVerify(Isolate* isolate) {
TorqueGeneratedClassVerifiers::CallHandlerInfoVerify(*this, isolate);
CHECK(map() == ReadOnlyRoots(isolate).side_effect_call_handler_info_map() ||
@@ -1733,8 +1738,6 @@ void NormalizedMapCache::NormalizedMapCacheVerify(Isolate* isolate) {
}
}
-USE_TORQUE_VERIFIER(DebugInfo)
-
USE_TORQUE_VERIFIER(StackFrameInfo)
void PreparseData::PreparseDataVerify(Isolate* isolate) {
@@ -1749,19 +1752,6 @@ void PreparseData::PreparseDataVerify(Isolate* isolate) {
}
}
-void UncompiledDataWithPreparseData::UncompiledDataWithPreparseDataVerify(
- Isolate* isolate) {
- CHECK(IsUncompiledDataWithPreparseData());
- VerifyPointer(isolate, inferred_name());
- VerifyPointer(isolate, preparse_data());
-}
-
-void UncompiledDataWithoutPreparseData::UncompiledDataWithoutPreparseDataVerify(
- Isolate* isolate) {
- CHECK(IsUncompiledDataWithoutPreparseData());
- VerifyPointer(isolate, inferred_name());
-}
-
USE_TORQUE_VERIFIER(InterpreterData)
#ifdef V8_INTL_SUPPORT
diff --git a/deps/v8/src/diagnostics/objects-printer.cc b/deps/v8/src/diagnostics/objects-printer.cc
index 39614091c7..20afb9e520 100644
--- a/deps/v8/src/diagnostics/objects-printer.cc
+++ b/deps/v8/src/diagnostics/objects-printer.cc
@@ -215,25 +215,25 @@ void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT
case JS_API_OBJECT_TYPE:
case JS_SPECIAL_API_OBJECT_TYPE:
case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
- case JS_ARGUMENTS_TYPE:
+ case JS_ARGUMENTS_OBJECT_TYPE:
case JS_ERROR_TYPE:
// TODO(titzer): debug printing for more wasm objects
- case WASM_EXCEPTION_TYPE:
+ case WASM_EXCEPTION_OBJECT_TYPE:
JSObject::cast(*this).JSObjectPrint(os);
break;
- case WASM_MODULE_TYPE:
+ case WASM_MODULE_OBJECT_TYPE:
WasmModuleObject::cast(*this).WasmModuleObjectPrint(os);
break;
- case WASM_MEMORY_TYPE:
+ case WASM_MEMORY_OBJECT_TYPE:
WasmMemoryObject::cast(*this).WasmMemoryObjectPrint(os);
break;
- case WASM_TABLE_TYPE:
+ case WASM_TABLE_OBJECT_TYPE:
WasmTableObject::cast(*this).WasmTableObjectPrint(os);
break;
- case WASM_GLOBAL_TYPE:
+ case WASM_GLOBAL_OBJECT_TYPE:
WasmGlobalObject::cast(*this).WasmGlobalObjectPrint(os);
break;
- case WASM_INSTANCE_TYPE:
+ case WASM_INSTANCE_OBJECT_TYPE:
WasmInstanceObject::cast(*this).WasmInstanceObjectPrint(os);
break;
case JS_ASYNC_FUNCTION_OBJECT_TYPE:
@@ -247,10 +247,10 @@ void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT
case JS_ARRAY_TYPE:
JSArray::cast(*this).JSArrayPrint(os);
break;
- case JS_REGEXP_TYPE:
+ case JS_REG_EXP_TYPE:
JSRegExp::cast(*this).JSRegExpPrint(os);
break;
- case JS_REGEXP_STRING_ITERATOR_TYPE:
+ case JS_REG_EXP_STRING_ITERATOR_TYPE:
JSRegExpStringIterator::cast(*this).JSRegExpStringIteratorPrint(os);
break;
case ODDBALL_TYPE:
@@ -362,34 +362,34 @@ void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT
JSDataView::cast(*this).JSDataViewPrint(os);
break;
#ifdef V8_INTL_SUPPORT
- case JS_INTL_V8_BREAK_ITERATOR_TYPE:
+ case JS_V8_BREAK_ITERATOR_TYPE:
JSV8BreakIterator::cast(*this).JSV8BreakIteratorPrint(os);
break;
- case JS_INTL_COLLATOR_TYPE:
+ case JS_COLLATOR_TYPE:
JSCollator::cast(*this).JSCollatorPrint(os);
break;
- case JS_INTL_DATE_TIME_FORMAT_TYPE:
+ case JS_DATE_TIME_FORMAT_TYPE:
JSDateTimeFormat::cast(*this).JSDateTimeFormatPrint(os);
break;
- case JS_INTL_LIST_FORMAT_TYPE:
+ case JS_LIST_FORMAT_TYPE:
JSListFormat::cast(*this).JSListFormatPrint(os);
break;
- case JS_INTL_LOCALE_TYPE:
+ case JS_LOCALE_TYPE:
JSLocale::cast(*this).JSLocalePrint(os);
break;
- case JS_INTL_NUMBER_FORMAT_TYPE:
+ case JS_NUMBER_FORMAT_TYPE:
JSNumberFormat::cast(*this).JSNumberFormatPrint(os);
break;
- case JS_INTL_PLURAL_RULES_TYPE:
+ case JS_PLURAL_RULES_TYPE:
JSPluralRules::cast(*this).JSPluralRulesPrint(os);
break;
- case JS_INTL_RELATIVE_TIME_FORMAT_TYPE:
+ case JS_RELATIVE_TIME_FORMAT_TYPE:
JSRelativeTimeFormat::cast(*this).JSRelativeTimeFormatPrint(os);
break;
- case JS_INTL_SEGMENT_ITERATOR_TYPE:
+ case JS_SEGMENT_ITERATOR_TYPE:
JSSegmentIterator::cast(*this).JSSegmentIteratorPrint(os);
break;
- case JS_INTL_SEGMENTER_TYPE:
+ case JS_SEGMENTER_TYPE:
JSSegmenter::cast(*this).JSSegmenterPrint(os);
break;
#endif // V8_INTL_SUPPORT
@@ -477,8 +477,8 @@ bool JSObject::PrintProperties(std::ostream& os) { // NOLINT
if (HasFastProperties()) {
DescriptorArray descs = map().instance_descriptors();
int nof_inobject_properties = map().GetInObjectProperties();
- int i = 0;
- for (; i < map().NumberOfOwnDescriptors(); i++) {
+ for (InternalIndex i :
+ InternalIndex::Range(map().NumberOfOwnDescriptors())) {
os << "\n ";
descs.GetKey(i).NamePrint(os);
os << ": ";
@@ -506,7 +506,7 @@ bool JSObject::PrintProperties(std::ostream& os) { // NOLINT
os << " properties[" << field_index << "]";
}
}
- return i > 0;
+ return map().NumberOfOwnDescriptors() > 0;
} else if (IsJSGlobalObject()) {
JSGlobalObject::cast(*this).global_dictionary().Print(os);
} else {
@@ -1379,7 +1379,6 @@ void JSArrayBuffer::JSArrayBufferPrint(std::ostream& os) { // NOLINT
if (is_detachable()) os << "\n - detachable";
if (was_detached()) os << "\n - detached";
if (is_shared()) os << "\n - shared";
- if (is_wasm_memory()) os << "\n - is_wasm_memory";
JSObjectPrintBody(os, *this, !was_detached());
}
@@ -1389,6 +1388,12 @@ void JSTypedArray::JSTypedArrayPrint(std::ostream& os) { // NOLINT
os << "\n - byte_offset: " << byte_offset();
os << "\n - byte_length: " << byte_length();
os << "\n - length: " << length();
+ os << "\n - data_ptr: " << DataPtr();
+ Tagged_t base_ptr = static_cast<Tagged_t>(base_pointer().ptr());
+ os << "\n - base_pointer: "
+ << reinterpret_cast<void*>(static_cast<Address>(base_ptr));
+ os << "\n - external_pointer: "
+ << reinterpret_cast<void*>(external_pointer());
if (!buffer().IsJSArrayBuffer()) {
os << "\n <invalid buffer>\n";
return;
@@ -1627,7 +1632,7 @@ void Code::CodePrint(std::ostream& os) { // NOLINT
os << "\n";
#ifdef ENABLE_DISASSEMBLER
if (FLAG_use_verbose_printer) {
- Disassemble(nullptr, os);
+ Disassemble(nullptr, os, GetIsolate());
}
#endif
}
@@ -1911,9 +1916,6 @@ void WasmModuleObject::WasmModuleObjectPrint(std::ostream& os) { // NOLINT
if (has_asm_js_offset_table()) {
os << "\n - asm_js_offset_table: " << Brief(asm_js_offset_table());
}
- if (has_breakpoint_infos()) {
- os << "\n - breakpoint_infos: " << Brief(breakpoint_infos());
- }
os << "\n";
}
@@ -2146,6 +2148,9 @@ void Script::ScriptPrint(std::ostream& os) { // NOLINT
os << "\n - wrapped arguments: " << Brief(wrapped_arguments());
}
os << "\n - eval from position: " << eval_from_position();
+ if (has_wasm_breakpoint_infos()) {
+ os << "\n - wasm_breakpoint_infos: " << Brief(wasm_breakpoint_infos());
+ }
os << "\n - shared function infos: " << Brief(shared_function_infos());
os << "\n";
}
@@ -2280,6 +2285,7 @@ void ScopeInfo::ScopeInfoPrint(std::ostream& os) { // NOLINT
os << "\n - receiver: " << ReceiverVariableField::decode(flags);
}
if (HasClassBrand()) os << "\n - has class brand";
+ if (HasSavedClassVariableIndex()) os << "\n - has saved class variable index";
if (HasNewTarget()) os << "\n - needs new target";
if (HasFunctionName()) {
os << "\n - function name(" << FunctionVariableField::decode(flags)
@@ -2578,9 +2584,9 @@ void Map::MapPrint(std::ostream& os) { // NOLINT
}
void DescriptorArray::PrintDescriptors(std::ostream& os) {
- for (int i = 0; i < number_of_descriptors(); i++) {
+ for (InternalIndex i : InternalIndex::Range(number_of_descriptors())) {
Name key = GetKey(i);
- os << "\n [" << i << "]: ";
+ os << "\n [" << i.as_int() << "]: ";
#ifdef OBJECT_PRINT
key.NamePrint(os);
#else
@@ -2592,7 +2598,8 @@ void DescriptorArray::PrintDescriptors(std::ostream& os) {
os << "\n";
}
-void DescriptorArray::PrintDescriptorDetails(std::ostream& os, int descriptor,
+void DescriptorArray::PrintDescriptorDetails(std::ostream& os,
+ InternalIndex descriptor,
PropertyDetails::PrintMode mode) {
PropertyDetails details = GetDetails(descriptor);
details.PrintAsFastTo(os, mode);
@@ -2655,7 +2662,7 @@ void TransitionsAccessor::PrintOneTransition(std::ostream& os, Name key,
} else {
DCHECK(!IsSpecialTransition(roots, key));
os << "(transition to ";
- int descriptor = target.LastAdded();
+ InternalIndex descriptor = target.LastAdded();
DescriptorArray descriptors = target.instance_descriptors();
descriptors.PrintDescriptorDetails(os, descriptor,
PropertyDetails::kForTransitions);
@@ -2733,7 +2740,7 @@ void TransitionsAccessor::PrintTransitionTree(std::ostream& os, int level,
os << " ";
DCHECK(!IsSpecialTransition(ReadOnlyRoots(isolate_), key));
os << "to ";
- int descriptor = target.LastAdded();
+ InternalIndex descriptor = target.LastAdded();
DescriptorArray descriptors = target.instance_descriptors();
descriptors.PrintDescriptorDetails(os, descriptor,
PropertyDetails::kForTransitions);
@@ -2816,7 +2823,7 @@ V8_EXPORT_PRIVATE extern void _v8_internal_Print_Code(void* object) {
}
#ifdef ENABLE_DISASSEMBLER
i::StdoutStream os;
- code.Disassemble(nullptr, os, address);
+ code.Disassemble(nullptr, os, isolate, address);
#else // ENABLE_DISASSEMBLER
code.Print();
#endif // ENABLE_DISASSEMBLER
diff --git a/deps/v8/src/diagnostics/unwinding-info-win64.cc b/deps/v8/src/diagnostics/unwinding-info-win64.cc
index 5a2fc73631..6cc53da51f 100644
--- a/deps/v8/src/diagnostics/unwinding-info-win64.cc
+++ b/deps/v8/src/diagnostics/unwinding-info-win64.cc
@@ -16,37 +16,6 @@
#error "Unsupported OS"
#endif // V8_OS_WIN_X64
-// Forward declaration to keep this independent of Win8
-NTSYSAPI
-DWORD
-NTAPI
-RtlAddGrowableFunctionTable(
- _Out_ PVOID* DynamicTable,
- _In_reads_(MaximumEntryCount) PRUNTIME_FUNCTION FunctionTable,
- _In_ DWORD EntryCount,
- _In_ DWORD MaximumEntryCount,
- _In_ ULONG_PTR RangeBase,
- _In_ ULONG_PTR RangeEnd
- );
-
-
-NTSYSAPI
-void
-NTAPI
-RtlGrowFunctionTable(
- _Inout_ PVOID DynamicTable,
- _In_ DWORD NewEntryCount
- );
-
-
-NTSYSAPI
-void
-NTAPI
-RtlDeleteGrowableFunctionTable(
- _In_ PVOID DynamicTable
- );
-
-
namespace v8 {
namespace internal {
namespace win64_unwindinfo {
diff --git a/deps/v8/src/diagnostics/x64/disasm-x64.cc b/deps/v8/src/diagnostics/x64/disasm-x64.cc
index aada6a4381..2195556af7 100644
--- a/deps/v8/src/diagnostics/x64/disasm-x64.cc
+++ b/deps/v8/src/diagnostics/x64/disasm-x64.cc
@@ -91,9 +91,9 @@ static const ByteMnemonic zero_operands_instr[] = {
{0x61, UNSET_OP_ORDER, "popad"}, {0x9C, UNSET_OP_ORDER, "pushfd"},
{0x9D, UNSET_OP_ORDER, "popfd"}, {0x9E, UNSET_OP_ORDER, "sahf"},
{0x99, UNSET_OP_ORDER, "cdq"}, {0x9B, UNSET_OP_ORDER, "fwait"},
- {0xA4, UNSET_OP_ORDER, "movs"}, {0xA5, UNSET_OP_ORDER, "movs"},
- {0xA6, UNSET_OP_ORDER, "cmps"}, {0xA7, UNSET_OP_ORDER, "cmps"},
- {-1, UNSET_OP_ORDER, ""}};
+ {0xAB, UNSET_OP_ORDER, "stos"}, {0xA4, UNSET_OP_ORDER, "movs"},
+ {0xA5, UNSET_OP_ORDER, "movs"}, {0xA6, UNSET_OP_ORDER, "cmps"},
+ {0xA7, UNSET_OP_ORDER, "cmps"}, {-1, UNSET_OP_ORDER, ""}};
static const ByteMnemonic call_jump_instr[] = {{0xE8, UNSET_OP_ORDER, "call"},
{0xE9, UNSET_OP_ORDER, "jmp"},
@@ -1845,7 +1845,9 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
current += 1;
} else {
const char* mnemonic;
- if (opcode == 0x54) {
+ if (opcode == 0x51) {
+ mnemonic = "sqrtpd";
+ } else if (opcode == 0x54) {
mnemonic = "andpd";
} else if (opcode == 0x55) {
mnemonic = "andnpd";
@@ -2432,13 +2434,13 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
byte_size_operand_ = idesc.byte_size_operation;
switch (idesc.type) {
case ZERO_OPERANDS_INSTR:
- if (current >= 0xA4 && current <= 0xA7) {
+ if ((current >= 0xA4 && current <= 0xA7) ||
+ (current >= 0xAA && current <= 0xAD)) {
// String move or compare operations.
if (group_1_prefix_ == REP_PREFIX) {
// REP.
AppendToBuffer("rep ");
}
- if (rex_w()) AppendToBuffer("REX.W ");
AppendToBuffer("%s%c", idesc.mnem, operand_size_code());
} else {
AppendToBuffer("%s%c", idesc.mnem, operand_size_code());
diff --git a/deps/v8/src/execution/arguments-inl.h b/deps/v8/src/execution/arguments-inl.h
index ecdc4ef359..4565f5d265 100644
--- a/deps/v8/src/execution/arguments-inl.h
+++ b/deps/v8/src/execution/arguments-inl.h
@@ -14,15 +14,15 @@ namespace v8 {
namespace internal {
template <class S>
-Handle<S> Arguments::at(int index) {
+Handle<S> Arguments::at(int index) const {
return Handle<S>::cast(at<Object>(index));
}
-int Arguments::smi_at(int index) {
+int Arguments::smi_at(int index) const {
return Smi::ToInt(Object(*address_of_arg_at(index)));
}
-double Arguments::number_at(int index) { return (*this)[index].Number(); }
+double Arguments::number_at(int index) const { return (*this)[index].Number(); }
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/execution/arguments.h b/deps/v8/src/execution/arguments.h
index 8f07dd9db3..77bbe62dfc 100644
--- a/deps/v8/src/execution/arguments.h
+++ b/deps/v8/src/execution/arguments.h
@@ -37,24 +37,26 @@ class Arguments {
DCHECK_GE(length_, 0);
}
- Object operator[](int index) { return Object(*address_of_arg_at(index)); }
+ Object operator[](int index) const {
+ return Object(*address_of_arg_at(index));
+ }
template <class S = Object>
- inline Handle<S> at(int index);
+ inline Handle<S> at(int index) const;
- inline int smi_at(int index);
+ inline int smi_at(int index) const;
- inline double number_at(int index);
+ inline double number_at(int index) const;
inline void set_at(int index, Object value) {
*address_of_arg_at(index) = value.ptr();
}
- inline FullObjectSlot slot_at(int index) {
+ inline FullObjectSlot slot_at(int index) const {
return FullObjectSlot(address_of_arg_at(index));
}
- inline Address* address_of_arg_at(int index) {
+ inline Address* address_of_arg_at(int index) const {
DCHECK_LT(static_cast<uint32_t>(index), static_cast<uint32_t>(length_));
return reinterpret_cast<Address*>(reinterpret_cast<Address>(arguments_) -
index * kSystemPointerSize);
@@ -64,8 +66,8 @@ class Arguments {
int length() const { return static_cast<int>(length_); }
// Arguments on the stack are in reverse order (compared to an array).
- FullObjectSlot first_slot() { return slot_at(length() - 1); }
- FullObjectSlot last_slot() { return slot_at(0); }
+ FullObjectSlot first_slot() const { return slot_at(length() - 1); }
+ FullObjectSlot last_slot() const { return slot_at(0); }
private:
intptr_t length_;
@@ -73,7 +75,7 @@ class Arguments {
};
template <>
-inline Handle<Object> Arguments::at(int index) {
+inline Handle<Object> Arguments::at(int index) const {
return Handle<Object>(address_of_arg_at(index));
}
diff --git a/deps/v8/src/execution/arm/simulator-arm.cc b/deps/v8/src/execution/arm/simulator-arm.cc
index 2677135096..841ff4bfd4 100644
--- a/deps/v8/src/execution/arm/simulator-arm.cc
+++ b/deps/v8/src/execution/arm/simulator-arm.cc
@@ -12,6 +12,8 @@
#include "src/base/bits.h"
#include "src/base/lazy-instance.h"
+#include "src/base/memory.h"
+#include "src/base/overflowing-math.h"
#include "src/codegen/arm/constants-arm.h"
#include "src/codegen/assembler-inl.h"
#include "src/codegen/macro-assembler.h"
@@ -899,16 +901,14 @@ int Simulator::ReadW(int32_t addr) {
// check the alignment here.
base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
local_monitor_.NotifyLoad(addr);
- intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
- return *ptr;
+ return base::ReadUnalignedValue<intptr_t>(addr);
}
int Simulator::ReadExW(int32_t addr) {
base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
local_monitor_.NotifyLoadExcl(addr, TransactionSize::Word);
GlobalMonitor::Get()->NotifyLoadExcl_Locked(addr, &global_monitor_processor_);
- intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
- return *ptr;
+ return base::ReadUnalignedValue<intptr_t>(addr);
}
void Simulator::WriteW(int32_t addr, int value) {
@@ -917,8 +917,7 @@ void Simulator::WriteW(int32_t addr, int value) {
base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
local_monitor_.NotifyStore(addr);
GlobalMonitor::Get()->NotifyStore_Locked(addr, &global_monitor_processor_);
- intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
- *ptr = value;
+ base::WriteUnalignedValue<intptr_t>(addr, value);
}
int Simulator::WriteExW(int32_t addr, int value) {
@@ -926,8 +925,7 @@ int Simulator::WriteExW(int32_t addr, int value) {
if (local_monitor_.NotifyStoreExcl(addr, TransactionSize::Word) &&
GlobalMonitor::Get()->NotifyStoreExcl_Locked(
addr, &global_monitor_processor_)) {
- intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
- *ptr = value;
+ base::WriteUnalignedValue<intptr_t>(addr, value);
return 0;
} else {
return 1;
@@ -939,8 +937,7 @@ uint16_t Simulator::ReadHU(int32_t addr) {
// check the alignment here.
base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
local_monitor_.NotifyLoad(addr);
- uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
- return *ptr;
+ return base::ReadUnalignedValue<uint16_t>(addr);
}
int16_t Simulator::ReadH(int32_t addr) {
@@ -948,16 +945,14 @@ int16_t Simulator::ReadH(int32_t addr) {
// check the alignment here.
base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
local_monitor_.NotifyLoad(addr);
- int16_t* ptr = reinterpret_cast<int16_t*>(addr);
- return *ptr;
+ return base::ReadUnalignedValue<int16_t>(addr);
}
uint16_t Simulator::ReadExHU(int32_t addr) {
base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
local_monitor_.NotifyLoadExcl(addr, TransactionSize::HalfWord);
GlobalMonitor::Get()->NotifyLoadExcl_Locked(addr, &global_monitor_processor_);
- uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
- return *ptr;
+ return base::ReadUnalignedValue<uint16_t>(addr);
}
void Simulator::WriteH(int32_t addr, uint16_t value) {
@@ -966,8 +961,7 @@ void Simulator::WriteH(int32_t addr, uint16_t value) {
base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
local_monitor_.NotifyStore(addr);
GlobalMonitor::Get()->NotifyStore_Locked(addr, &global_monitor_processor_);
- uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
- *ptr = value;
+ base::WriteUnalignedValue(addr, value);
}
void Simulator::WriteH(int32_t addr, int16_t value) {
@@ -976,8 +970,7 @@ void Simulator::WriteH(int32_t addr, int16_t value) {
base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
local_monitor_.NotifyStore(addr);
GlobalMonitor::Get()->NotifyStore_Locked(addr, &global_monitor_processor_);
- int16_t* ptr = reinterpret_cast<int16_t*>(addr);
- *ptr = value;
+ base::WriteUnalignedValue(addr, value);
}
int Simulator::WriteExH(int32_t addr, uint16_t value) {
@@ -985,8 +978,7 @@ int Simulator::WriteExH(int32_t addr, uint16_t value) {
if (local_monitor_.NotifyStoreExcl(addr, TransactionSize::HalfWord) &&
GlobalMonitor::Get()->NotifyStoreExcl_Locked(
addr, &global_monitor_processor_)) {
- uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
- *ptr = value;
+ base::WriteUnalignedValue(addr, value);
return 0;
} else {
return 1;
@@ -996,39 +988,34 @@ int Simulator::WriteExH(int32_t addr, uint16_t value) {
uint8_t Simulator::ReadBU(int32_t addr) {
base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
local_monitor_.NotifyLoad(addr);
- uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
- return *ptr;
+ return base::ReadUnalignedValue<uint8_t>(addr);
}
int8_t Simulator::ReadB(int32_t addr) {
base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
local_monitor_.NotifyLoad(addr);
- int8_t* ptr = reinterpret_cast<int8_t*>(addr);
- return *ptr;
+ return base::ReadUnalignedValue<int8_t>(addr);
}
uint8_t Simulator::ReadExBU(int32_t addr) {
base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
local_monitor_.NotifyLoadExcl(addr, TransactionSize::Byte);
GlobalMonitor::Get()->NotifyLoadExcl_Locked(addr, &global_monitor_processor_);
- uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
- return *ptr;
+ return base::ReadUnalignedValue<uint8_t>(addr);
}
void Simulator::WriteB(int32_t addr, uint8_t value) {
base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
local_monitor_.NotifyStore(addr);
GlobalMonitor::Get()->NotifyStore_Locked(addr, &global_monitor_processor_);
- uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
- *ptr = value;
+ base::WriteUnalignedValue(addr, value);
}
void Simulator::WriteB(int32_t addr, int8_t value) {
base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
local_monitor_.NotifyStore(addr);
GlobalMonitor::Get()->NotifyStore_Locked(addr, &global_monitor_processor_);
- int8_t* ptr = reinterpret_cast<int8_t*>(addr);
- *ptr = value;
+ base::WriteUnalignedValue(addr, value);
}
int Simulator::WriteExB(int32_t addr, uint8_t value) {
@@ -1036,8 +1023,7 @@ int Simulator::WriteExB(int32_t addr, uint8_t value) {
if (local_monitor_.NotifyStoreExcl(addr, TransactionSize::Byte) &&
GlobalMonitor::Get()->NotifyStoreExcl_Locked(
addr, &global_monitor_processor_)) {
- uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
- *ptr = value;
+ base::WriteUnalignedValue(addr, value);
return 0;
} else {
return 1;
@@ -1049,16 +1035,14 @@ int32_t* Simulator::ReadDW(int32_t addr) {
// check the alignment here.
base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
local_monitor_.NotifyLoad(addr);
- int32_t* ptr = reinterpret_cast<int32_t*>(addr);
- return ptr;
+ return reinterpret_cast<int32_t*>(addr);
}
int32_t* Simulator::ReadExDW(int32_t addr) {
base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
local_monitor_.NotifyLoadExcl(addr, TransactionSize::DoubleWord);
GlobalMonitor::Get()->NotifyLoadExcl_Locked(addr, &global_monitor_processor_);
- int32_t* ptr = reinterpret_cast<int32_t*>(addr);
- return ptr;
+ return reinterpret_cast<int32_t*>(addr);
}
void Simulator::WriteDW(int32_t addr, int32_t value1, int32_t value2) {
@@ -1067,9 +1051,8 @@ void Simulator::WriteDW(int32_t addr, int32_t value1, int32_t value2) {
base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
local_monitor_.NotifyStore(addr);
GlobalMonitor::Get()->NotifyStore_Locked(addr, &global_monitor_processor_);
- int32_t* ptr = reinterpret_cast<int32_t*>(addr);
- *ptr++ = value1;
- *ptr = value2;
+ base::WriteUnalignedValue(addr, value1);
+ base::WriteUnalignedValue(addr + sizeof(value1), value2);
}
int Simulator::WriteExDW(int32_t addr, int32_t value1, int32_t value2) {
@@ -1077,9 +1060,8 @@ int Simulator::WriteExDW(int32_t addr, int32_t value1, int32_t value2) {
if (local_monitor_.NotifyStoreExcl(addr, TransactionSize::DoubleWord) &&
GlobalMonitor::Get()->NotifyStoreExcl_Locked(
addr, &global_monitor_processor_)) {
- intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
- *ptr++ = value1;
- *ptr = value2;
+ base::WriteUnalignedValue(addr, value1);
+ base::WriteUnalignedValue(addr + sizeof(value1), value2);
return 0;
} else {
return 1;
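
The simulator hunks above swap raw *reinterpret_cast<T*>(addr) loads and stores for base::ReadUnalignedValue/WriteUnalignedValue, since dereferencing a misaligned pointer is undefined behavior. Such helpers are commonly memcpy-based; a minimal sketch under that assumption (not V8's actual implementation):

#include <cassert>
#include <cstdint>
#include <cstring>

// memcpy-based unaligned access: defined behavior for any address,
// unlike dereferencing a misaligned T*.
template <typename T>
T ReadUnaligned(uintptr_t addr) {
  T value;
  std::memcpy(&value, reinterpret_cast<const void*>(addr), sizeof(T));
  return value;
}

template <typename T>
void WriteUnaligned(uintptr_t addr, T value) {
  std::memcpy(reinterpret_cast<void*>(addr), &value, sizeof(T));
}

int main() {
  alignas(4) unsigned char buf[8] = {};
  uintptr_t misaligned = reinterpret_cast<uintptr_t>(buf) + 1;
  WriteUnaligned<uint32_t>(misaligned, 0xDEADBEEFu);
  assert(ReadUnaligned<uint32_t>(misaligned) == 0xDEADBEEFu);
  return 0;
}
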
@@ -1291,9 +1273,9 @@ int32_t Simulator::GetShiftRm(Instruction* instr, bool* carry_out) {
if (shift_amount == 0) {
*carry_out = c_flag_;
} else {
- result <<= (shift_amount - 1);
+ result = static_cast<uint32_t>(result) << (shift_amount - 1);
*carry_out = (result < 0);
- result <<= 1;
+ result = static_cast<uint32_t>(result) << 1;
}
break;
}
@@ -1316,9 +1298,7 @@ int32_t Simulator::GetShiftRm(Instruction* instr, bool* carry_out) {
if (shift_amount == 0) {
*carry_out = c_flag_;
} else {
- uint32_t left = static_cast<uint32_t>(result) >> shift_amount;
- uint32_t right = static_cast<uint32_t>(result) << (32 - shift_amount);
- result = right | left;
+ result = base::bits::RotateRight32(result, shift_amount);
*carry_out = (static_cast<uint32_t>(result) >> 31) != 0;
}
break;
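
base::bits::RotateRight32 replaces the hand-rolled shift pair above because `x << (32 - n)` is undefined behavior when n == 0. A sketch of a UB-free rotate, assuming the usual masked-shift formulation:

#include <cassert>
#include <cstdint>

// Rotate right without undefined behavior: both shift counts stay in
// [0, 31] even when shift == 0, thanks to the & 31 masks.
uint32_t RotateRight32(uint32_t value, uint32_t shift) {
  return (value >> (shift & 31)) | (value << ((32 - shift) & 31));
}

int main() {
  assert(RotateRight32(0x80000001u, 1) == 0xC0000000u);
  assert(RotateRight32(0x12345678u, 0) == 0x12345678u);  // no UB at shift 0
  return 0;
}
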
@@ -1358,9 +1338,9 @@ int32_t Simulator::GetShiftRm(Instruction* instr, bool* carry_out) {
if (shift_amount == 0) {
*carry_out = c_flag_;
} else if (shift_amount < 32) {
- result <<= (shift_amount - 1);
+ result = static_cast<uint32_t>(result) << (shift_amount - 1);
*carry_out = (result < 0);
- result <<= 1;
+ result = static_cast<uint32_t>(result) << 1;
} else if (shift_amount == 32) {
*carry_out = (result & 1) == 1;
result = 0;
@@ -1395,9 +1375,8 @@ int32_t Simulator::GetShiftRm(Instruction* instr, bool* carry_out) {
if (shift_amount == 0) {
*carry_out = c_flag_;
} else {
- uint32_t left = static_cast<uint32_t>(result) >> shift_amount;
- uint32_t right = static_cast<uint32_t>(result) << (32 - shift_amount);
- result = right | left;
+ // Avoid undefined behavior. Rotating by a multiple of 32 is a no-op.
+ result = base::bits::RotateRight32(result, shift_amount & 31);
*carry_out = (static_cast<uint32_t>(result) >> 31) != 0;
}
break;
@@ -1580,6 +1559,34 @@ using SimulatorRuntimeDirectGetterCall = void (*)(int32_t arg0, int32_t arg1);
using SimulatorRuntimeProfilingGetterCall = void (*)(int32_t arg0, int32_t arg1,
void* arg2);
+// Separate for fine-grained UBSan blacklisting. Casting any given C++
+// function to {SimulatorRuntimeCall} is undefined behavior; but since
+// the target function can indeed be any function that's exposed via
+// the "fast C call" mechanism, we can't reconstruct its signature here.
+int64_t UnsafeGenericFunctionCall(intptr_t function, int32_t arg0, int32_t arg1,
+ int32_t arg2, int32_t arg3, int32_t arg4,
+ int32_t arg5, int32_t arg6, int32_t arg7,
+ int32_t arg8, int32_t arg9) {
+ SimulatorRuntimeCall target =
+ reinterpret_cast<SimulatorRuntimeCall>(function);
+ return target(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9);
+}
+void UnsafeDirectApiCall(intptr_t function, int32_t arg0) {
+ SimulatorRuntimeDirectApiCall target =
+ reinterpret_cast<SimulatorRuntimeDirectApiCall>(function);
+ target(arg0);
+}
+void UnsafeProfilingApiCall(intptr_t function, int32_t arg0, int32_t arg1) {
+ SimulatorRuntimeProfilingApiCall target =
+ reinterpret_cast<SimulatorRuntimeProfilingApiCall>(function);
+ target(arg0, Redirection::ReverseRedirection(arg1));
+}
+void UnsafeDirectGetterCall(intptr_t function, int32_t arg0, int32_t arg1) {
+ SimulatorRuntimeDirectGetterCall target =
+ reinterpret_cast<SimulatorRuntimeDirectGetterCall>(function);
+ target(arg0, arg1);
+}
+
// Software interrupt instructions are used by the simulator to call into the
// C-based V8 runtime.
void Simulator::SoftwareInterrupt(Instruction* instr) {
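
Hoisting each reinterpret_cast-and-call into a small named function, as above, lets a UBSan blacklist entry target just that symbol while callers stay fully instrumented. A sketch of the pattern with a hypothetical two-argument signature:

#include <cstdint>

using GenericCall = int64_t (*)(int32_t, int32_t);

// Only this function needs a suppression entry (e.g. "fun:UnsafeCall"
// in the UBSan blacklist file); everything else stays checked.
int64_t UnsafeCall(intptr_t function, int32_t a, int32_t b) {
  GenericCall target = reinterpret_cast<GenericCall>(function);
  return target(a, b);
}

int64_t Add(int32_t a, int32_t b) { return static_cast<int64_t>(a) + b; }

int main() {
  return UnsafeCall(reinterpret_cast<intptr_t>(&Add), 2, 3) == 5 ? 0 : 1;
}
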
@@ -1710,9 +1717,7 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
PrintF("\n");
}
CHECK(stack_aligned);
- SimulatorRuntimeDirectApiCall target =
- reinterpret_cast<SimulatorRuntimeDirectApiCall>(external);
- target(arg0);
+ UnsafeDirectApiCall(external, arg0);
} else if (redirection->type() == ExternalReference::PROFILING_API_CALL) {
if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
PrintF("Call to host function at %p args %08x %08x",
@@ -1723,9 +1728,7 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
PrintF("\n");
}
CHECK(stack_aligned);
- SimulatorRuntimeProfilingApiCall target =
- reinterpret_cast<SimulatorRuntimeProfilingApiCall>(external);
- target(arg0, Redirection::ReverseRedirection(arg1));
+ UnsafeProfilingApiCall(external, arg0, arg1);
} else if (redirection->type() == ExternalReference::DIRECT_GETTER_CALL) {
if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
PrintF("Call to host function at %p args %08x %08x",
@@ -1736,9 +1739,7 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
PrintF("\n");
}
CHECK(stack_aligned);
- SimulatorRuntimeDirectGetterCall target =
- reinterpret_cast<SimulatorRuntimeDirectGetterCall>(external);
- target(arg0, arg1);
+ UnsafeDirectGetterCall(external, arg0, arg1);
} else if (redirection->type() ==
ExternalReference::PROFILING_GETTER_CALL) {
if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
@@ -1757,14 +1758,12 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
// builtin call.
DCHECK(redirection->type() == ExternalReference::BUILTIN_CALL ||
redirection->type() == ExternalReference::BUILTIN_CALL_PAIR);
- SimulatorRuntimeCall target =
- reinterpret_cast<SimulatorRuntimeCall>(external);
if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
PrintF(
"Call to host function at %p "
"args %08x, %08x, %08x, %08x, %08x, %08x, %08x, %08x, %08x, %08x",
- reinterpret_cast<void*>(FUNCTION_ADDR(target)), arg0, arg1, arg2,
- arg3, arg4, arg5, arg6, arg7, arg8, arg9);
+ reinterpret_cast<void*>(external), arg0, arg1, arg2, arg3, arg4,
+ arg5, arg6, arg7, arg8, arg9);
if (!stack_aligned) {
PrintF(" with unaligned stack %08x\n", get_register(sp));
}
@@ -1772,7 +1771,8 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
}
CHECK(stack_aligned);
int64_t result =
- target(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9);
+ UnsafeGenericFunctionCall(external, arg0, arg1, arg2, arg3, arg4,
+ arg5, arg6, arg7, arg8, arg9);
int32_t lo_res = static_cast<int32_t>(result);
int32_t hi_res = static_cast<int32_t>(result >> 32);
if (::v8::internal::FLAG_trace_sim) {
@@ -1938,7 +1938,7 @@ void Simulator::DecodeType01(Instruction* instr) {
// Rn field to encode it.
// Format(instr, "mul'cond's 'rn, 'rm, 'rs");
int rd = rn; // Remap the rn field to the Rd register.
- int32_t alu_out = rm_val * rs_val;
+ int32_t alu_out = base::MulWithWraparound(rm_val, rs_val);
set_register(rd, alu_out);
if (instr->HasS()) {
SetNZFlags(alu_out);
@@ -1952,13 +1952,13 @@ void Simulator::DecodeType01(Instruction* instr) {
// Rn field to encode the Rd register and the Rd field to encode
// the Rn register.
// Format(instr, "mla'cond's 'rn, 'rm, 'rs, 'rd");
- int32_t mul_out = rm_val * rs_val;
- int32_t result = acc_value + mul_out;
+ int32_t mul_out = base::MulWithWraparound(rm_val, rs_val);
+ int32_t result = base::AddWithWraparound(acc_value, mul_out);
set_register(rn, result);
} else {
// Format(instr, "mls'cond's 'rn, 'rm, 'rs, 'rd");
- int32_t mul_out = rm_val * rs_val;
- int32_t result = acc_value - mul_out;
+ int32_t mul_out = base::MulWithWraparound(rm_val, rs_val);
+ int32_t result = base::SubWithWraparound(acc_value, mul_out);
set_register(rn, result);
}
}
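
The base::MulWithWraparound/AddWithWraparound/SubWithWraparound calls introduced above give two's-complement wraparound by detouring through unsigned arithmetic, where overflow is defined modular behavior. A sketch under that assumption:

#include <cassert>
#include <cstdint>

// Signed overflow is UB; unsigned arithmetic wraps mod 2^32 by definition.
// The final unsigned-to-signed cast is implementation-defined before C++20
// but two's-complement on every platform this code targets.
int32_t AddWithWraparound(int32_t a, int32_t b) {
  return static_cast<int32_t>(static_cast<uint32_t>(a) +
                              static_cast<uint32_t>(b));
}

int32_t MulWithWraparound(int32_t a, int32_t b) {
  return static_cast<int32_t>(static_cast<uint32_t>(a) *
                              static_cast<uint32_t>(b));
}

int main() {
  assert(AddWithWraparound(INT32_MAX, 1) == INT32_MIN);  // wraps, no UB
  assert(MulWithWraparound(1 << 16, 1 << 16) == 0);      // 2^32 mod 2^32
  return 0;
}
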
@@ -2096,7 +2096,7 @@ void Simulator::DecodeType01(Instruction* instr) {
// Format(instr, "'memop'cond'sign'h 'rd, ['rn], -'rm");
DCHECK(!instr->HasW());
addr = rn_val;
- rn_val -= rm_val;
+ rn_val = base::SubWithWraparound(rn_val, rm_val);
set_register(rn, rn_val);
break;
}
@@ -2104,13 +2104,13 @@ void Simulator::DecodeType01(Instruction* instr) {
// Format(instr, "'memop'cond'sign'h 'rd, ['rn], +'rm");
DCHECK(!instr->HasW());
addr = rn_val;
- rn_val += rm_val;
+ rn_val = base::AddWithWraparound(rn_val, rm_val);
set_register(rn, rn_val);
break;
}
case db_x: {
// Format(instr, "'memop'cond'sign'h 'rd, ['rn, -'rm]'w");
- rn_val -= rm_val;
+ rn_val = base::SubWithWraparound(rn_val, rm_val);
addr = rn_val;
if (instr->HasW()) {
set_register(rn, rn_val);
@@ -2119,7 +2119,7 @@ void Simulator::DecodeType01(Instruction* instr) {
}
case ib_x: {
// Format(instr, "'memop'cond'sign'h 'rd, ['rn, +'rm]'w");
- rn_val += rm_val;
+ rn_val = base::AddWithWraparound(rn_val, rm_val);
addr = rn_val;
if (instr->HasW()) {
set_register(rn, rn_val);
@@ -2139,7 +2139,7 @@ void Simulator::DecodeType01(Instruction* instr) {
// Format(instr, "'memop'cond'sign'h 'rd, ['rn], #-'off8");
DCHECK(!instr->HasW());
addr = rn_val;
- rn_val -= imm_val;
+ rn_val = base::SubWithWraparound(rn_val, imm_val);
set_register(rn, rn_val);
break;
}
@@ -2147,13 +2147,13 @@ void Simulator::DecodeType01(Instruction* instr) {
// Format(instr, "'memop'cond'sign'h 'rd, ['rn], #+'off8");
DCHECK(!instr->HasW());
addr = rn_val;
- rn_val += imm_val;
+ rn_val = base::AddWithWraparound(rn_val, imm_val);
set_register(rn, rn_val);
break;
}
case db_x: {
// Format(instr, "'memop'cond'sign'h 'rd, ['rn, #-'off8]'w");
- rn_val -= imm_val;
+ rn_val = base::SubWithWraparound(rn_val, imm_val);
addr = rn_val;
if (instr->HasW()) {
set_register(rn, rn_val);
@@ -2162,7 +2162,7 @@ void Simulator::DecodeType01(Instruction* instr) {
}
case ib_x: {
// Format(instr, "'memop'cond'sign'h 'rd, ['rn, #+'off8]'w");
- rn_val += imm_val;
+ rn_val = base::AddWithWraparound(rn_val, imm_val);
addr = rn_val;
if (instr->HasW()) {
set_register(rn, rn_val);
@@ -2328,7 +2328,7 @@ void Simulator::DecodeType01(Instruction* instr) {
case SUB: {
// Format(instr, "sub'cond's 'rd, 'rn, 'shift_rm");
// Format(instr, "sub'cond's 'rd, 'rn, 'imm");
- alu_out = rn_val - shifter_operand;
+ alu_out = base::SubWithWraparound(rn_val, shifter_operand);
set_register(rd, alu_out);
if (instr->HasS()) {
SetNZFlags(alu_out);
@@ -2341,7 +2341,7 @@ void Simulator::DecodeType01(Instruction* instr) {
case RSB: {
// Format(instr, "rsb'cond's 'rd, 'rn, 'shift_rm");
// Format(instr, "rsb'cond's 'rd, 'rn, 'imm");
- alu_out = shifter_operand - rn_val;
+ alu_out = base::SubWithWraparound(shifter_operand, rn_val);
set_register(rd, alu_out);
if (instr->HasS()) {
SetNZFlags(alu_out);
@@ -2354,7 +2354,7 @@ void Simulator::DecodeType01(Instruction* instr) {
case ADD: {
// Format(instr, "add'cond's 'rd, 'rn, 'shift_rm");
// Format(instr, "add'cond's 'rd, 'rn, 'imm");
- alu_out = rn_val + shifter_operand;
+ alu_out = base::AddWithWraparound(rn_val, shifter_operand);
set_register(rd, alu_out);
if (instr->HasS()) {
SetNZFlags(alu_out);
@@ -2367,7 +2367,8 @@ void Simulator::DecodeType01(Instruction* instr) {
case ADC: {
// Format(instr, "adc'cond's 'rd, 'rn, 'shift_rm");
// Format(instr, "adc'cond's 'rd, 'rn, 'imm");
- alu_out = rn_val + shifter_operand + GetCarry();
+ alu_out = base::AddWithWraparound(
+ base::AddWithWraparound(rn_val, shifter_operand), GetCarry());
set_register(rd, alu_out);
if (instr->HasS()) {
SetNZFlags(alu_out);
@@ -2380,7 +2381,9 @@ void Simulator::DecodeType01(Instruction* instr) {
case SBC: {
// Format(instr, "sbc'cond's 'rd, 'rn, 'shift_rm");
// Format(instr, "sbc'cond's 'rd, 'rn, 'imm");
- alu_out = (rn_val - shifter_operand) - (GetCarry() ? 0 : 1);
+ alu_out = base::SubWithWraparound(
+ base::SubWithWraparound(rn_val, shifter_operand),
+ (GetCarry() ? 0 : 1));
set_register(rd, alu_out);
if (instr->HasS()) {
SetNZFlags(alu_out);
@@ -2430,7 +2433,7 @@ void Simulator::DecodeType01(Instruction* instr) {
if (instr->HasS()) {
// Format(instr, "cmp'cond 'rn, 'shift_rm");
// Format(instr, "cmp'cond 'rn, 'imm");
- alu_out = rn_val - shifter_operand;
+ alu_out = base::SubWithWraparound(rn_val, shifter_operand);
SetNZFlags(alu_out);
SetCFlag(!BorrowFrom(rn_val, shifter_operand));
SetVFlag(OverflowFrom(alu_out, rn_val, shifter_operand, false));
@@ -2447,7 +2450,7 @@ void Simulator::DecodeType01(Instruction* instr) {
if (instr->HasS()) {
// Format(instr, "cmn'cond 'rn, 'shift_rm");
// Format(instr, "cmn'cond 'rn, 'imm");
- alu_out = rn_val + shifter_operand;
+ alu_out = base::AddWithWraparound(rn_val, shifter_operand);
SetNZFlags(alu_out);
SetCFlag(CarryFrom(rn_val, shifter_operand));
SetVFlag(OverflowFrom(alu_out, rn_val, shifter_operand, true));
@@ -2937,7 +2940,7 @@ void Simulator::DecodeType3(Instruction* instr) {
} else {
// sbfx - signed bitfield extract.
int32_t rm_val = get_register(instr->RmValue());
- int32_t extr_val = rm_val << (31 - msbit);
+ int32_t extr_val = static_cast<uint32_t>(rm_val) << (31 - msbit);
extr_val = extr_val >> (31 - widthminus1);
set_register(instr->RdValue(), extr_val);
}
@@ -2969,7 +2972,7 @@ void Simulator::DecodeType3(Instruction* instr) {
return;
} else {
// Format(instr, "'memop'cond'b 'rd, ['rn, +'shift_rm]'w");
- addr = rn_val + shifter_operand;
+ addr = base::AddWithWraparound(rn_val, shifter_operand);
if (instr->HasW()) {
set_register(rn, addr);
}
@@ -3010,7 +3013,8 @@ void Simulator::DecodeType4(Instruction* instr) {
void Simulator::DecodeType5(Instruction* instr) {
// Format(instr, "b'l'cond 'target");
- int off = (instr->SImmed24Value() << 2);
+ int off =
+ static_cast<int>(static_cast<uint32_t>(instr->SImmed24Value()) << 2);
intptr_t pc_address = get_pc();
if (instr->HasLink()) {
set_register(lr, pc_address + kInstrSize);
@@ -3259,14 +3263,14 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
if (instr->SzValue() == 0x1) {
double dn_value = get_double_from_d_register(vn).get_scalar();
double dm_value = get_double_from_d_register(vm).get_scalar();
- double dd_value = dn_value / dm_value;
+ double dd_value = base::Divide(dn_value, dm_value);
div_zero_vfp_flag_ = (dm_value == 0);
dd_value = canonicalizeNaN(dd_value);
set_d_register_from_double(vd, dd_value);
} else {
float sn_value = get_float_from_s_register(n).get_scalar();
float sm_value = get_float_from_s_register(m).get_scalar();
- float sd_value = sn_value / sm_value;
+ float sd_value = base::Divide(sn_value, sm_value);
div_zero_vfp_flag_ = (sm_value == 0);
sd_value = canonicalizeNaN(sd_value);
set_s_register_from_float(d, sd_value);
@@ -3594,10 +3598,22 @@ int VFPConversionSaturate(double val, bool unsigned_res) {
int32_t Simulator::ConvertDoubleToInt(double val, bool unsigned_integer,
VFPRoundingMode mode) {
- // TODO(jkummerow): These casts are undefined behavior if the integral
- // part of {val} does not fit into the destination type.
- int32_t result =
- unsigned_integer ? static_cast<uint32_t>(val) : static_cast<int32_t>(val);
+ int32_t result;
+ if (unsigned_integer) {
+ // The FastD2UI helper does not have the rounding behavior we want here
+ // (it doesn't guarantee any particular rounding, and it doesn't check
+ // for or handle overflow), so do the conversion by hand.
+ using limits = std::numeric_limits<uint32_t>;
+ if (val > limits::max()) {
+ result = limits::max();
+ } else if (!(val >= 0)) { // Negated comparison to catch NaNs.
+ result = 0;
+ } else {
+ result = static_cast<uint32_t>(val);
+ }
+ } else {
+ result = FastD2IChecked(val);
+ }
inv_op_vfp_flag_ = get_inv_op_vfp_flag(mode, val, unsigned_integer);
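
The rewritten ConvertDoubleToInt above clamps out-of-range and NaN inputs instead of relying on a cast whose behavior is undefined when the value doesn't fit. A standalone sketch of the unsigned path:

#include <cassert>
#include <cmath>
#include <cstdint>
#include <limits>

// Saturating double -> uint32_t: clamp above max, send NaN and negatives
// to 0. The `!(val >= 0)` form is true for NaN as well as for negatives.
uint32_t SaturatingD2U(double val) {
  using limits = std::numeric_limits<uint32_t>;
  if (val > limits::max()) return limits::max();
  if (!(val >= 0)) return 0;
  return static_cast<uint32_t>(val);  // in range, so the cast is defined
}

int main() {
  assert(SaturatingD2U(1e20) == 0xFFFFFFFFu);
  assert(SaturatingD2U(-1.5) == 0);
  assert(SaturatingD2U(std::nan("")) == 0);
  assert(SaturatingD2U(42.9) == 42);
  return 0;
}
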
@@ -3617,7 +3633,9 @@ int32_t Simulator::ConvertDoubleToInt(double val, bool unsigned_integer,
result += val_sign;
} else if (abs_diff == 0.5) {
// Round to even if exactly halfway.
- result = ((result % 2) == 0) ? result : result + val_sign;
+ result = ((result % 2) == 0)
+ ? result
+ : base::AddWithWraparound(result, val_sign);
}
break;
}
@@ -3873,7 +3891,11 @@ void Neg(Simulator* simulator, int Vd, int Vm) {
T src[kElems];
simulator->get_neon_register<T, SIZE>(Vm, src);
for (int i = 0; i < kElems; i++) {
- src[i] = -src[i];
+ if (src[i] != std::numeric_limits<T>::min()) {
+ src[i] = -src[i];
+ } else {
+ // The respective minimum (negative) value maps to itself.
+ }
}
simulator->set_neon_register<T, SIZE>(Vd, src);
}
@@ -3998,6 +4020,17 @@ void Sub(Simulator* simulator, int Vd, int Vm, int Vn) {
simulator->set_neon_register<T, SIZE>(Vd, src1);
}
+namespace {
+uint32_t Multiply(uint32_t a, uint32_t b) { return a * b; }
+uint8_t Multiply(uint8_t a, uint8_t b) { return a * b; }
+// 16-bit integers are special due to C++'s implicit conversion rules.
+// See https://bugs.llvm.org/show_bug.cgi?id=25580.
+uint16_t Multiply(uint16_t a, uint16_t b) {
+ uint32_t result = static_cast<uint32_t>(a) * static_cast<uint32_t>(b);
+ return static_cast<uint16_t>(result);
+}
+} // namespace
+
template <typename T, int SIZE>
void Mul(Simulator* simulator, int Vd, int Vm, int Vn) {
static const int kElems = SIZE / sizeof(T);
@@ -4005,7 +4038,7 @@ void Mul(Simulator* simulator, int Vd, int Vm, int Vn) {
simulator->get_neon_register<T, SIZE>(Vn, src1);
simulator->get_neon_register<T, SIZE>(Vm, src2);
for (int i = 0; i < kElems; i++) {
- src1[i] *= src2[i];
+ src1[i] = Multiply(src1[i], src2[i]);
}
simulator->set_neon_register<T, SIZE>(Vd, src1);
}
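
The Multiply overloads above exist because uint16_t operands are promoted to int before `*`, so a large product overflows signed int, which is UB (the llvm.org bug cited in the comment). A sketch of the failure mode and the widening fix:

#include <cassert>
#include <cstdint>

// With uint16_t, `a * b` promotes both operands to int; 0xFFFF * 0xFFFF
// then overflows int (UB). Widening to uint32_t first keeps it defined.
uint16_t MulU16(uint16_t a, uint16_t b) {
  uint32_t wide = static_cast<uint32_t>(a) * static_cast<uint32_t>(b);
  return static_cast<uint16_t>(wide);  // truncate mod 2^16
}

int main() {
  assert(MulU16(0xFFFF, 0xFFFF) == 0x0001);  // (2^16 - 1)^2 mod 2^16
  return 0;
}
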
@@ -4090,7 +4123,8 @@ void ShiftByRegister(Simulator* simulator, int Vd, int Vm, int Vn) {
if (shift_value >= size) {
src[i] = 0;
} else {
- src[i] <<= shift_value;
+ using unsignedT = typename std::make_unsigned<T>::type;
+ src[i] = static_cast<unsignedT>(src[i]) << shift_value;
}
} else {
// If the shift value is greater than or equal to size, always end up with -1.
@@ -5721,7 +5755,7 @@ void Simulator::Execute() {
// should be stopping at a particular executed instruction.
while (program_counter != end_sim_pc) {
Instruction* instr = reinterpret_cast<Instruction*>(program_counter);
- icount_++;
+ icount_ = base::AddWithWraparound(icount_, 1);
InstructionDecode(instr);
program_counter = get_pc();
}
@@ -5730,7 +5764,7 @@ void Simulator::Execute() {
// we reach the particular instruction count.
while (program_counter != end_sim_pc) {
Instruction* instr = reinterpret_cast<Instruction*>(program_counter);
- icount_++;
+ icount_ = base::AddWithWraparound(icount_, 1);
if (icount_ == ::v8::internal::FLAG_stop_sim_at) {
ArmDebugger dbg(this);
dbg.Debug();
diff --git a/deps/v8/src/execution/arm64/simulator-arm64.cc b/deps/v8/src/execution/arm64/simulator-arm64.cc
index 71fedd5b2f..d3a73cbad8 100644
--- a/deps/v8/src/execution/arm64/simulator-arm64.cc
+++ b/deps/v8/src/execution/arm64/simulator-arm64.cc
@@ -12,6 +12,7 @@
#include <type_traits>
#include "src/base/lazy-instance.h"
+#include "src/base/overflowing-math.h"
#include "src/codegen/arm64/decoder-arm64-inl.h"
#include "src/codegen/assembler-inl.h"
#include "src/codegen/macro-assembler.h"
@@ -154,6 +155,22 @@ void Simulator::CallImpl(Address entry, CallArgument* args) {
set_sp(original_stack);
}
+#ifdef DEBUG
+namespace {
+int PopLowestIndexAsCode(CPURegList* list) {
+ if (list->IsEmpty()) {
+ return -1;
+ }
+ RegList reg_list = list->list();
+ int index = base::bits::CountTrailingZeros(reg_list);
+ DCHECK((1LL << index) & reg_list);
+ list->Remove(index);
+
+ return index;
+}
+} // namespace
+#endif
+
void Simulator::CheckPCSComplianceAndRun() {
// Adjust JS-based stack limit to C-based stack limit.
isolate_->stack_guard()->AdjustStackLimitForSimulator();
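
The DEBUG-only PopLowestIndexAsCode helper above pulls register codes out of a bitmask via count-trailing-zeros. A sketch of the same walk over a hypothetical register mask; __builtin_ctzll is a GCC/Clang intrinsic, assumed here in place of base::bits::CountTrailingZeros:

#include <cstdint>
#include <cstdio>

// Pop the index of the lowest set bit from a register bitmask.
int PopLowestIndex(uint64_t* list) {
  if (*list == 0) return -1;
  int index = __builtin_ctzll(*list);  // count trailing zeros
  *list &= *list - 1;                  // clear that bit
  return index;
}

int main() {
  uint64_t regs = (1u << 3) | (1u << 7);  // hypothetical x3 and x7
  while (regs != 0) std::printf("x%d\n", PopLowestIndex(&regs));
  return 0;
}
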
@@ -171,10 +188,10 @@ void Simulator::CheckPCSComplianceAndRun() {
for (int i = 0; i < kNumberOfCalleeSavedRegisters; i++) {
// x31 is not a caller saved register, so no need to specify if we want
// the stack or zero.
- saved_registers[i] = xreg(register_list.PopLowestIndex().code());
+ saved_registers[i] = xreg(PopLowestIndexAsCode(&register_list));
}
for (int i = 0; i < kNumberOfCalleeSavedVRegisters; i++) {
- saved_fpregisters[i] = dreg_bits(fpregister_list.PopLowestIndex().code());
+ saved_fpregisters[i] = dreg_bits(PopLowestIndexAsCode(&fpregister_list));
}
int64_t original_stack = sp();
#endif
@@ -186,11 +203,11 @@ void Simulator::CheckPCSComplianceAndRun() {
register_list = kCalleeSaved;
fpregister_list = kCalleeSavedV;
for (int i = 0; i < kNumberOfCalleeSavedRegisters; i++) {
- DCHECK_EQ(saved_registers[i], xreg(register_list.PopLowestIndex().code()));
+ DCHECK_EQ(saved_registers[i], xreg(PopLowestIndexAsCode(&register_list)));
}
for (int i = 0; i < kNumberOfCalleeSavedVRegisters; i++) {
DCHECK(saved_fpregisters[i] ==
- dreg_bits(fpregister_list.PopLowestIndex().code()));
+ dreg_bits(PopLowestIndexAsCode(&fpregister_list)));
}
// Corrupt caller-saved registers minus the return registers.
@@ -217,13 +234,13 @@ void Simulator::CheckPCSComplianceAndRun() {
void Simulator::CorruptRegisters(CPURegList* list, uint64_t value) {
if (list->type() == CPURegister::kRegister) {
while (!list->IsEmpty()) {
- unsigned code = list->PopLowestIndex().code();
+ unsigned code = PopLowestIndexAsCode(list);
set_xreg(code, value | code);
}
} else {
DCHECK_EQ(list->type(), CPURegister::kVRegister);
while (!list->IsEmpty()) {
- unsigned code = list->PopLowestIndex().code();
+ unsigned code = PopLowestIndexAsCode(list);
set_dreg_bits(code, value | code);
}
}
@@ -414,6 +431,34 @@ using SimulatorRuntimeDirectGetterCall = void (*)(int64_t arg0, int64_t arg1);
using SimulatorRuntimeProfilingGetterCall = void (*)(int64_t arg0, int64_t arg1,
void* arg2);
+// Separate for fine-grained UBSan blacklisting. Casting any given C++
+// function to {SimulatorRuntimeCall} is undefined behavior; but since
+// the target function can indeed be any function that's exposed via
+// the "fast C call" mechanism, we can't reconstruct its signature here.
+ObjectPair UnsafeGenericFunctionCall(int64_t function, int64_t arg0,
+ int64_t arg1, int64_t arg2, int64_t arg3,
+ int64_t arg4, int64_t arg5, int64_t arg6,
+ int64_t arg7, int64_t arg8, int64_t arg9) {
+ SimulatorRuntimeCall target =
+ reinterpret_cast<SimulatorRuntimeCall>(function);
+ return target(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9);
+}
+void UnsafeDirectApiCall(int64_t function, int64_t arg0) {
+ SimulatorRuntimeDirectApiCall target =
+ reinterpret_cast<SimulatorRuntimeDirectApiCall>(function);
+ target(arg0);
+}
+void UnsafeProfilingApiCall(int64_t function, int64_t arg0, void* arg1) {
+ SimulatorRuntimeProfilingApiCall target =
+ reinterpret_cast<SimulatorRuntimeProfilingApiCall>(function);
+ target(arg0, arg1);
+}
+void UnsafeDirectGetterCall(int64_t function, int64_t arg0, int64_t arg1) {
+ SimulatorRuntimeDirectGetterCall target =
+ reinterpret_cast<SimulatorRuntimeDirectGetterCall>(function);
+ target(arg0, arg1);
+}
+
void Simulator::DoRuntimeCall(Instruction* instr) {
Redirection* redirection = Redirection::FromInstruction(instr);
@@ -515,10 +560,8 @@ void Simulator::DoRuntimeCall(Instruction* instr) {
", "
"0x%016" PRIx64 ", 0x%016" PRIx64,
arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9);
- SimulatorRuntimeCall target =
- reinterpret_cast<SimulatorRuntimeCall>(external);
- ObjectPair result =
- target(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9);
+ ObjectPair result = UnsafeGenericFunctionCall(
+ external, arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9);
TraceSim("Returned: {%p, %p}\n", reinterpret_cast<void*>(result.x),
reinterpret_cast<void*>(result.y));
#ifdef DEBUG
@@ -532,10 +575,8 @@ void Simulator::DoRuntimeCall(Instruction* instr) {
case ExternalReference::DIRECT_API_CALL: {
// void f(v8::FunctionCallbackInfo&)
TraceSim("Type: DIRECT_API_CALL\n");
- SimulatorRuntimeDirectApiCall target =
- reinterpret_cast<SimulatorRuntimeDirectApiCall>(external);
TraceSim("Arguments: 0x%016" PRIx64 "\n", xreg(0));
- target(xreg(0));
+ UnsafeDirectApiCall(external, xreg(0));
TraceSim("No return value.");
#ifdef DEBUG
CorruptAllCallerSavedCPURegisters();
@@ -606,11 +647,9 @@ void Simulator::DoRuntimeCall(Instruction* instr) {
case ExternalReference::DIRECT_GETTER_CALL: {
// void f(Local<String> property, PropertyCallbackInfo& info)
TraceSim("Type: DIRECT_GETTER_CALL\n");
- SimulatorRuntimeDirectGetterCall target =
- reinterpret_cast<SimulatorRuntimeDirectGetterCall>(external);
TraceSim("Arguments: 0x%016" PRIx64 ", 0x%016" PRIx64 "\n", xreg(0),
xreg(1));
- target(xreg(0), xreg(1));
+ UnsafeDirectGetterCall(external, xreg(0), xreg(1));
TraceSim("No return value.");
#ifdef DEBUG
CorruptAllCallerSavedCPURegisters();
@@ -621,11 +660,9 @@ void Simulator::DoRuntimeCall(Instruction* instr) {
case ExternalReference::PROFILING_API_CALL: {
// void f(v8::FunctionCallbackInfo&, v8::FunctionCallback)
TraceSim("Type: PROFILING_API_CALL\n");
- SimulatorRuntimeProfilingApiCall target =
- reinterpret_cast<SimulatorRuntimeProfilingApiCall>(external);
void* arg1 = Redirection::ReverseRedirection(xreg(1));
TraceSim("Arguments: 0x%016" PRIx64 ", %p\n", xreg(0), arg1);
- target(xreg(0), arg1);
+ UnsafeProfilingApiCall(external, xreg(0), arg1);
TraceSim("No return value.");
#ifdef DEBUG
CorruptAllCallerSavedCPURegisters();
@@ -849,10 +886,12 @@ T Simulator::ShiftOperand(T value, Shift shift_type, unsigned amount) {
if (amount == 0) {
return value;
}
+ // Larger shift {amount}s would be undefined behavior in C++.
+ DCHECK(amount < sizeof(value) * kBitsPerByte);
switch (shift_type) {
case LSL:
- return value << amount;
+ return static_cast<unsignedT>(value) << amount;
case LSR:
return static_cast<unsignedT>(value) >> amount;
case ASR:
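// A standalone sketch of why LSL now shifts the unsigned representation
// (illustrative helper, not V8 API): before C++20, left-shifting a negative
// signed value is undefined behavior, and any shift by at least the bit
// width is undefined regardless of signedness; hence the DCHECK above.
// Shifting the unsigned image and converting back produces the expected
// two's-complement bits on the platforms V8 targets.
#include <cstdint>
#include <type_traits>

template <typename T>
T LogicalShiftLeft(T value, unsigned amount) {
  using U = typename std::make_unsigned<T>::type;
  // Caller guarantees amount < bit width, as the simulator does.
  return static_cast<T>(static_cast<U>(value) << amount);
}
// LogicalShiftLeft<int32_t>(-1, 4) == -16, with no UB along the way.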
@@ -873,6 +912,7 @@ T Simulator::ExtendValue(T value, Extend extend_type, unsigned left_shift) {
const unsigned kSignExtendBShift = (sizeof(T) - 1) * 8;
const unsigned kSignExtendHShift = (sizeof(T) - 2) * 8;
const unsigned kSignExtendWShift = (sizeof(T) - 4) * 8;
+ using unsignedT = typename std::make_unsigned<T>::type;
switch (extend_type) {
case UXTB:
@@ -885,13 +925,19 @@ T Simulator::ExtendValue(T value, Extend extend_type, unsigned left_shift) {
value &= kWordMask;
break;
case SXTB:
- value = (value << kSignExtendBShift) >> kSignExtendBShift;
+ value =
+ static_cast<T>(static_cast<unsignedT>(value) << kSignExtendBShift) >>
+ kSignExtendBShift;
break;
case SXTH:
- value = (value << kSignExtendHShift) >> kSignExtendHShift;
+ value =
+ static_cast<T>(static_cast<unsignedT>(value) << kSignExtendHShift) >>
+ kSignExtendHShift;
break;
case SXTW:
- value = (value << kSignExtendWShift) >> kSignExtendWShift;
+ value =
+ static_cast<T>(static_cast<unsignedT>(value) << kSignExtendWShift) >>
+ kSignExtendWShift;
break;
case UXTX:
case SXTX:
@@ -899,7 +945,7 @@ T Simulator::ExtendValue(T value, Extend extend_type, unsigned left_shift) {
default:
UNREACHABLE();
}
- return value << left_shift;
+ return static_cast<T>(static_cast<unsignedT>(value) << left_shift);
}
template <typename T>
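// A standalone sketch of the SXTB/SXTH/SXTW rewrite above (illustrative,
// not V8 API): the old `(value << shift) >> shift` left-shifted a possibly
// negative signed value, which is undefined behavior before C++20. Shifting
// the unsigned image left, converting back, and arithmetic-right-shifting
// the signed result performs the same sign extension without UB (the right
// shift of a negative value is implementation-defined before C++20, but
// arithmetic on all toolchains V8 supports).
#include <cstdint>

int64_t SignExtendByte(int64_t value) {
  const unsigned kShift = (sizeof(int64_t) - 1) * 8;  // 56
  uint64_t shifted = static_cast<uint64_t>(value) << kShift;
  return static_cast<int64_t>(shifted) >> kShift;
}
// SignExtendByte(0xFF) == -1, SignExtendByte(0x7F) == 127.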
@@ -2283,7 +2329,9 @@ void Simulator::VisitConditionalSelect(Instruction* instr) {
break;
case CSNEG_w:
case CSNEG_x:
- new_val = (uint64_t)(-(int64_t)new_val);
+ // Simulate two's complement (instead of casting to signed and negating)
+ // to avoid undefined behavior on signed overflow.
+ new_val = (~new_val) + 1;
break;
default:
UNIMPLEMENTED();
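// A standalone sketch of the CSNEG change (illustrative, not V8 API):
// negating via `-(int64_t)x` overflows, and is therefore undefined
// behavior, when x holds INT64_MIN's bit pattern. Unsigned arithmetic
// wraps by definition, so `~x + 1` computes the two's-complement negation
// for every input.
#include <cstdint>

uint64_t TwosComplementNegate(uint64_t bits) { return ~bits + 1; }
// TwosComplementNegate(1) == UINT64_MAX (the bit pattern of -1);
// TwosComplementNegate(0x8000000000000000u) == 0x8000000000000000u, no UB.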
@@ -2446,23 +2494,27 @@ void Simulator::VisitDataProcessing3Source(Instruction* instr) {
switch (instr->Mask(DataProcessing3SourceMask)) {
case MADD_w:
case MADD_x:
- result = xreg(instr->Ra()) + (xreg(instr->Rn()) * xreg(instr->Rm()));
+ result = base::AddWithWraparound(
+ xreg(instr->Ra()),
+ base::MulWithWraparound(xreg(instr->Rn()), xreg(instr->Rm())));
break;
case MSUB_w:
case MSUB_x:
- result = xreg(instr->Ra()) - (xreg(instr->Rn()) * xreg(instr->Rm()));
+ result = base::SubWithWraparound(
+ xreg(instr->Ra()),
+ base::MulWithWraparound(xreg(instr->Rn()), xreg(instr->Rm())));
break;
case SMADDL_x:
- result = xreg(instr->Ra()) + (rn_s32 * rm_s32);
+ result = base::AddWithWraparound(xreg(instr->Ra()), (rn_s32 * rm_s32));
break;
case SMSUBL_x:
- result = xreg(instr->Ra()) - (rn_s32 * rm_s32);
+ result = base::SubWithWraparound(xreg(instr->Ra()), (rn_s32 * rm_s32));
break;
case UMADDL_x:
- result = xreg(instr->Ra()) + (rn_u32 * rm_u32);
+ result = static_cast<uint64_t>(xreg(instr->Ra())) + (rn_u32 * rm_u32);
break;
case UMSUBL_x:
- result = xreg(instr->Ra()) - (rn_u32 * rm_u32);
+ result = static_cast<uint64_t>(xreg(instr->Ra())) - (rn_u32 * rm_u32);
break;
case SMULH_x:
DCHECK_EQ(instr->Ra(), kZeroRegCode);
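// A standalone sketch of what a wraparound helper such as
// base::AddWithWraparound boils down to (hand-written here for
// illustration; the real helpers live in V8's base library): signed
// overflow is undefined behavior, so the arithmetic is carried out on the
// unsigned representation, where wraparound is well defined, and the
// result converted back.
#include <cstdint>

int64_t AddWithWraparoundSketch(int64_t a, int64_t b) {
  return static_cast<int64_t>(static_cast<uint64_t>(a) +
                              static_cast<uint64_t>(b));
}
// AddWithWraparoundSketch(INT64_MAX, 1) == INT64_MIN here; a plain `a + b`
// would be UB.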
@@ -2488,10 +2540,10 @@ void Simulator::BitfieldHelper(Instruction* instr) {
T diff = S - R;
T mask;
if (diff >= 0) {
- mask = diff < reg_size - 1 ? (static_cast<T>(1) << (diff + 1)) - 1
+ mask = diff < reg_size - 1 ? (static_cast<unsignedT>(1) << (diff + 1)) - 1
: static_cast<T>(-1);
} else {
- uint64_t umask = ((1LL << (S + 1)) - 1);
+ uint64_t umask = ((1ULL << (S + 1)) - 1);
umask = (umask >> R) | (umask << (reg_size - R));
mask = static_cast<T>(umask);
diff += reg_size;
@@ -2522,11 +2574,15 @@ void Simulator::BitfieldHelper(Instruction* instr) {
T dst = inzero ? 0 : reg<T>(instr->Rd());
T src = reg<T>(instr->Rn());
// Rotate source bitfield into place.
- T result = (static_cast<unsignedT>(src) >> R) | (src << (reg_size - R));
+ T result = R == 0 ? src
+ : (static_cast<unsignedT>(src) >> R) |
+ (static_cast<unsignedT>(src) << (reg_size - R));
// Determine the sign extension.
- T topbits_preshift = (static_cast<T>(1) << (reg_size - diff - 1)) - 1;
- T signbits = (extend && ((src >> S) & 1) ? topbits_preshift : 0)
- << (diff + 1);
+ T topbits_preshift = (static_cast<unsignedT>(1) << (reg_size - diff - 1)) - 1;
+ T signbits =
+ diff >= reg_size - 1
+ ? 0
+ : ((extend && ((src >> S) & 1) ? topbits_preshift : 0) << (diff + 1));
// Merge sign extension, dest/zero and bitfield.
result = signbits | (result & mask) | (dst & ~mask);
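// A standalone sketch of the rotate guard above (illustrative, not V8
// API): when R == 0 the old code evaluated `src << (reg_size - 0)`, a
// shift by the full register width, which is undefined behavior.
// Special-casing zero yields a well-defined rotate:
#include <cstdint>

uint32_t RotateRight(uint32_t value, unsigned r) {  // requires r < 32
  return r == 0 ? value : (value >> r) | (value << (32 - r));
}
// RotateRight(0x80000001u, 1) == 0xC0000000u.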
diff --git a/deps/v8/src/execution/frames.cc b/deps/v8/src/execution/frames.cc
index 3b334739da..04768a365c 100644
--- a/deps/v8/src/execution/frames.cc
+++ b/deps/v8/src/execution/frames.cc
@@ -553,14 +553,6 @@ StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator,
return WASM_EXIT;
case wasm::WasmCode::kWasmToJsWrapper:
return WASM_TO_JS;
- case wasm::WasmCode::kRuntimeStub:
- // Some stubs, like e.g. {WasmCode::kWasmCompileLazy} build their own
- // specialized frame which already carries a type marker.
- // TODO(mstarzinger): This is only needed for the case where embedded
- // builtins are disabled. It can be removed once all non-embedded
- // builtins are gone.
- if (StackFrame::IsTypeMarker(marker)) break;
- return STUB;
case wasm::WasmCode::kInterpreterEntry:
return WASM_INTERPRETER_ENTRY;
default:
@@ -1079,13 +1071,12 @@ Address StubFrame::GetCallerStackPointer() const {
return fp() + ExitFrameConstants::kCallerSPOffset;
}
-int StubFrame::LookupExceptionHandlerInTable(int* stack_slots) {
+int StubFrame::LookupExceptionHandlerInTable() {
Code code = LookupCode();
DCHECK(code.is_turbofanned());
DCHECK_EQ(code.kind(), Code::BUILTIN);
HandlerTable table(code);
int pc_offset = static_cast<int>(pc() - code.InstructionStart());
- *stack_slots = code.stack_slots();
return table.LookupReturn(pc_offset);
}
@@ -1271,6 +1262,7 @@ void JavaScriptFrame::CollectFunctionAndOffsetForICStats(JSFunction function,
if (maybe_script.IsScript()) {
Script script = Script::cast(maybe_script);
ic_info.line_num = script.GetLineNumber(source_pos) + 1;
+ ic_info.column_num = script.GetColumnNumber(source_pos);
ic_info.script_name = ic_stats->GetOrCacheScriptName(script);
}
}
@@ -1627,7 +1619,7 @@ void OptimizedFrame::Summarize(std::vector<FrameSummary>* frames) const {
}
int OptimizedFrame::LookupExceptionHandlerInTable(
- int* stack_slots, HandlerTable::CatchPrediction* prediction) {
+ int* data, HandlerTable::CatchPrediction* prediction) {
// We cannot perform exception prediction on optimized code. Instead, we need
// to use FrameSummary to find the corresponding code offset in unoptimized
// code to perform prediction there.
@@ -1635,7 +1627,7 @@ int OptimizedFrame::LookupExceptionHandlerInTable(
Code code = LookupCode();
HandlerTable table(code);
int pc_offset = static_cast<int>(pc() - code.InstructionStart());
- if (stack_slots) *stack_slots = code.stack_slots();
+ DCHECK_NULL(data); // Data is unused; no value is written through it.
// When the return pc has been replaced by a trampoline there won't be
// a handler for this trampoline. Thus we need to use the return pc that
@@ -1676,8 +1668,8 @@ DeoptimizationData OptimizedFrame::GetDeoptimizationData(
Object OptimizedFrame::receiver() const {
Code code = LookupCode();
if (code.kind() == Code::BUILTIN) {
- Address argc_ptr = fp() + OptimizedBuiltinFrameConstants::kArgCOffset;
- intptr_t argc = *reinterpret_cast<intptr_t*>(argc_ptr);
+ intptr_t argc = static_cast<int>(
+ Memory<intptr_t>(fp() + OptimizedBuiltinFrameConstants::kArgCOffset));
intptr_t args_size =
(StandardFrameConstants::kFixedSlotCountAboveFp + argc) *
kSystemPointerSize;
@@ -1950,15 +1942,13 @@ bool WasmCompiledFrame::at_to_number_conversion() const {
return !!pos;
}
-int WasmCompiledFrame::LookupExceptionHandlerInTable(int* stack_slots) {
- DCHECK_NOT_NULL(stack_slots);
+int WasmCompiledFrame::LookupExceptionHandlerInTable() {
wasm::WasmCode* code =
isolate()->wasm_engine()->code_manager()->LookupCode(pc());
if (!code->IsAnonymous() && code->handler_table_size() > 0) {
HandlerTable table(code->handler_table(), code->handler_table_size(),
HandlerTable::kReturnAddressBasedEncoding);
int pc_offset = static_cast<int>(pc() - code->instruction_start());
- *stack_slots = static_cast<int>(code->stack_slots());
return table.LookupReturn(pc_offset);
}
return -1;
diff --git a/deps/v8/src/execution/frames.h b/deps/v8/src/execution/frames.h
index d1e7a7890d..165ff85464 100644
--- a/deps/v8/src/execution/frames.h
+++ b/deps/v8/src/execution/frames.h
@@ -145,7 +145,12 @@ class StackFrame {
intptr_t type = marker >> kSmiTagSize;
// TODO(petermarshall): There is a bug in the arm simulators that causes
// invalid frame markers.
-#if !(defined(USE_SIMULATOR) && (V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_ARM))
+#if defined(USE_SIMULATOR) && (V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_ARM)
+ if (static_cast<uintptr_t>(type) >= Type::NUMBER_OF_TYPES) {
+ // Appease UBSan.
+ return Type::NUMBER_OF_TYPES;
+ }
+#else
DCHECK_LT(static_cast<uintptr_t>(type), Type::NUMBER_OF_TYPES);
#endif
return static_cast<Type>(type);
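// A standalone sketch of the clamp above (illustrative enum, not V8 API):
// converting an integer that lies outside an enumeration's range of values
// to that enum type is undefined behavior, and UBSan's enum checks flag
// such values, so corrupt markers are mapped to the NUMBER_OF_TYPES
// sentinel instead of being cast blindly.
enum FrameKind { kEntry, kExit, kStub, kNumFrameKinds };

FrameKind ToFrameKind(unsigned raw) {
  return raw >= kNumFrameKinds ? kNumFrameKinds
                               : static_cast<FrameKind>(raw);
}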
@@ -733,7 +738,7 @@ class JavaScriptFrame : public StandardFrame {
// Lookup exception handler for current {pc}, returns -1 if none found. Also
// returns data associated with the handler site specific to the frame type:
- // - OptimizedFrame : Data is the stack slot count of the entire frame.
+ // - OptimizedFrame : Data is unused; no value is written through it.
// - InterpretedFrame: Data is the register index holding the context.
virtual int LookupExceptionHandlerInTable(
int* data, HandlerTable::CatchPrediction* prediction);
@@ -783,10 +788,8 @@ class StubFrame : public StandardFrame {
Code unchecked_code() const override;
// Lookup exception handler for current {pc}, returns -1 if none found. Only
- // TurboFan stub frames are supported. Also returns data associated with the
- // handler site:
- // - TurboFan stub: Data is the stack slot count of the entire frame.
- int LookupExceptionHandlerInTable(int* data);
+ // TurboFan stub frames are supported.
+ int LookupExceptionHandlerInTable();
protected:
inline explicit StubFrame(StackFrameIteratorBase* iterator);
@@ -938,9 +941,8 @@ class WasmCompiledFrame : public StandardFrame {
void Print(StringStream* accumulator, PrintMode mode,
int index) const override;
- // Lookup exception handler for current {pc}, returns -1 if none found. Also
- // returns the stack slot count of the entire frame.
- int LookupExceptionHandlerInTable(int* data);
+ // Lookup exception handler for current {pc}, returns -1 if none found.
+ int LookupExceptionHandlerInTable();
// Determine the code for the frame.
Code unchecked_code() const override;
diff --git a/deps/v8/src/execution/isolate-inl.h b/deps/v8/src/execution/isolate-inl.h
index e1b021b921..091b185a30 100644
--- a/deps/v8/src/execution/isolate-inl.h
+++ b/deps/v8/src/execution/isolate-inl.h
@@ -113,61 +113,6 @@ Isolate::ExceptionScope::~ExceptionScope() {
NATIVE_CONTEXT_FIELDS(NATIVE_CONTEXT_FIELD_ACCESSOR)
#undef NATIVE_CONTEXT_FIELD_ACCESSOR
-bool Isolate::IsArrayConstructorIntact() {
- Cell array_constructor_cell =
- Cell::cast(root(RootIndex::kArrayConstructorProtector));
- return array_constructor_cell.value() == Smi::FromInt(kProtectorValid);
-}
-
-bool Isolate::IsTypedArraySpeciesLookupChainIntact() {
- PropertyCell species_cell =
- PropertyCell::cast(root(RootIndex::kTypedArraySpeciesProtector));
- return species_cell.value().IsSmi() &&
- Smi::ToInt(species_cell.value()) == kProtectorValid;
-}
-
-bool Isolate::IsPromiseSpeciesLookupChainIntact() {
- PropertyCell species_cell =
- PropertyCell::cast(root(RootIndex::kPromiseSpeciesProtector));
- return species_cell.value().IsSmi() &&
- Smi::ToInt(species_cell.value()) == kProtectorValid;
-}
-
-bool Isolate::IsStringLengthOverflowIntact() {
- Cell string_length_cell = Cell::cast(root(RootIndex::kStringLengthProtector));
- return string_length_cell.value() == Smi::FromInt(kProtectorValid);
-}
-
-bool Isolate::IsArrayBufferDetachingIntact() {
- PropertyCell buffer_detaching =
- PropertyCell::cast(root(RootIndex::kArrayBufferDetachingProtector));
- return buffer_detaching.value() == Smi::FromInt(kProtectorValid);
-}
-
-bool Isolate::IsArrayIteratorLookupChainIntact() {
- PropertyCell array_iterator_cell =
- PropertyCell::cast(root(RootIndex::kArrayIteratorProtector));
- return array_iterator_cell.value() == Smi::FromInt(kProtectorValid);
-}
-
-bool Isolate::IsMapIteratorLookupChainIntact() {
- PropertyCell map_iterator_cell =
- PropertyCell::cast(root(RootIndex::kMapIteratorProtector));
- return map_iterator_cell.value() == Smi::FromInt(kProtectorValid);
-}
-
-bool Isolate::IsSetIteratorLookupChainIntact() {
- PropertyCell set_iterator_cell =
- PropertyCell::cast(root(RootIndex::kSetIteratorProtector));
- return set_iterator_cell.value() == Smi::FromInt(kProtectorValid);
-}
-
-bool Isolate::IsStringIteratorLookupChainIntact() {
- PropertyCell string_iterator_cell =
- PropertyCell::cast(root(RootIndex::kStringIteratorProtector));
- return string_iterator_cell.value() == Smi::FromInt(kProtectorValid);
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/execution/isolate.cc b/deps/v8/src/execution/isolate.cc
index 3ba39562b1..2b6bb76d8a 100644
--- a/deps/v8/src/execution/isolate.cc
+++ b/deps/v8/src/execution/isolate.cc
@@ -15,7 +15,6 @@
#include "src/api/api-inl.h"
#include "src/ast/ast-value-factory.h"
#include "src/ast/scopes.h"
-#include "src/base/adapters.h"
#include "src/base/hashmap.h"
#include "src/base/platform/platform.h"
#include "src/base/sys-info.h"
@@ -36,6 +35,7 @@
#include "src/execution/isolate-inl.h"
#include "src/execution/messages.h"
#include "src/execution/microtask-queue.h"
+#include "src/execution/protectors-inl.h"
#include "src/execution/runtime-profiler.h"
#include "src/execution/simulator.h"
#include "src/execution/v8threads.h"
@@ -51,6 +51,7 @@
#include "src/logging/counters.h"
#include "src/logging/log.h"
#include "src/numbers/hash-seed-inl.h"
+#include "src/objects/backing-store.h"
#include "src/objects/elements.h"
#include "src/objects/frame-array-inl.h"
#include "src/objects/hash-table-inl.h"
@@ -320,7 +321,9 @@ Isolate::FindOrAllocatePerThreadDataForThisThread() {
base::MutexGuard lock_guard(&thread_data_table_mutex_);
per_thread = thread_data_table_.Lookup(thread_id);
if (per_thread == nullptr) {
- base::OS::AdjustSchedulingParams();
+ if (FLAG_adjust_os_scheduling_parameters) {
+ base::OS::AdjustSchedulingParams();
+ }
per_thread = new PerIsolateThreadData(this, thread_id);
thread_data_table_.Insert(per_thread);
}
@@ -1091,12 +1094,14 @@ Handle<Object> CaptureStackTrace(Isolate* isolate, Handle<Object> caller,
} else {
Handle<JSAsyncGeneratorObject> async_generator_object =
Handle<JSAsyncGeneratorObject>::cast(generator_object);
- Handle<AsyncGeneratorRequest> async_generator_request(
- AsyncGeneratorRequest::cast(async_generator_object->queue()),
- isolate);
- Handle<JSPromise> promise(
- JSPromise::cast(async_generator_request->promise()), isolate);
- CaptureAsyncStackTrace(isolate, promise, &builder);
+ Handle<Object> queue(async_generator_object->queue(), isolate);
+ if (!queue->IsUndefined(isolate)) {
+ Handle<AsyncGeneratorRequest> async_generator_request =
+ Handle<AsyncGeneratorRequest>::cast(queue);
+ Handle<JSPromise> promise(
+ JSPromise::cast(async_generator_request->promise()), isolate);
+ CaptureAsyncStackTrace(isolate, promise, &builder);
+ }
}
}
} else {
@@ -1701,22 +1706,20 @@ Object Isolate::UnwindAndFindHandler() {
// currently being executed.
wasm::WasmCodeRefScope code_ref_scope;
WasmCompiledFrame* wasm_frame = static_cast<WasmCompiledFrame*>(frame);
- int stack_slots = 0; // Will contain stack slot count of frame.
- int offset = wasm_frame->LookupExceptionHandlerInTable(&stack_slots);
+ wasm::WasmCode* wasm_code =
+ wasm_engine()->code_manager()->LookupCode(frame->pc());
+ int offset = wasm_frame->LookupExceptionHandlerInTable();
if (offset < 0) break;
// Compute the stack pointer from the frame pointer. This ensures that
// argument slots on the stack are dropped as returning would.
Address return_sp = frame->fp() +
StandardFrameConstants::kFixedFrameSizeAboveFp -
- stack_slots * kSystemPointerSize;
+ wasm_code->stack_slots() * kSystemPointerSize;
// This is going to be handled by Wasm, so we need to set the TLS flag
// again. It was cleared above assuming the frame would be unwound.
trap_handler::SetThreadInWasm();
- // Gather information from the frame.
- wasm::WasmCode* wasm_code =
- wasm_engine()->code_manager()->LookupCode(frame->pc());
return FoundHandler(Context(), wasm_code->instruction_start(), offset,
wasm_code->constant_pool(), return_sp, frame->fp());
}
@@ -1735,18 +1738,14 @@ Object Isolate::UnwindAndFindHandler() {
// For optimized frames we perform a lookup in the handler table.
if (!catchable_by_js) break;
OptimizedFrame* js_frame = static_cast<OptimizedFrame*>(frame);
- int stack_slots = 0; // Will contain stack slot count of frame.
- int offset =
- js_frame->LookupExceptionHandlerInTable(&stack_slots, nullptr);
+ Code code = frame->LookupCode();
+ int offset = js_frame->LookupExceptionHandlerInTable(nullptr, nullptr);
if (offset < 0) break;
// Compute the stack pointer from the frame pointer. This ensures
// that argument slots on the stack are dropped as returning would.
Address return_sp = frame->fp() +
StandardFrameConstants::kFixedFrameSizeAboveFp -
- stack_slots * kSystemPointerSize;
-
- // Gather information from the frame.
- Code code = frame->LookupCode();
+ code.stack_slots() * kSystemPointerSize;
// TODO(bmeurer): Turbofanned BUILTIN frames appear as OPTIMIZED,
// but do not have a code kind of OPTIMIZED_FUNCTION.
@@ -1767,31 +1766,24 @@ Object Isolate::UnwindAndFindHandler() {
// Some stubs are able to handle exceptions.
if (!catchable_by_js) break;
StubFrame* stub_frame = static_cast<StubFrame*>(frame);
+#ifdef DEBUG
wasm::WasmCodeRefScope code_ref_scope;
- wasm::WasmCode* wasm_code =
- wasm_engine()->code_manager()->LookupCode(frame->pc());
- if (wasm_code != nullptr) {
- // It is safe to skip Wasm runtime stubs as none of them contain local
- // exception handlers.
- CHECK_EQ(wasm::WasmCode::kRuntimeStub, wasm_code->kind());
- CHECK_EQ(0, wasm_code->handler_table_size());
- break;
- }
+ DCHECK_NULL(wasm_engine()->code_manager()->LookupCode(frame->pc()));
+#endif // DEBUG
Code code = stub_frame->LookupCode();
if (!code.IsCode() || code.kind() != Code::BUILTIN ||
!code.has_handler_table() || !code.is_turbofanned()) {
break;
}
- int stack_slots = 0; // Will contain stack slot count of frame.
- int offset = stub_frame->LookupExceptionHandlerInTable(&stack_slots);
+ int offset = stub_frame->LookupExceptionHandlerInTable();
if (offset < 0) break;
// Compute the stack pointer from the frame pointer. This ensures
// that argument slots on the stack are dropped as returning would.
Address return_sp = frame->fp() +
StandardFrameConstants::kFixedFrameSizeAboveFp -
- stack_slots * kSystemPointerSize;
+ code.stack_slots() * kSystemPointerSize;
return FoundHandler(Context(), code.InstructionStart(), offset,
code.constant_pool(), return_sp, frame->fp());
@@ -2063,7 +2055,7 @@ void Isolate::PrintCurrentStackTrace(FILE* out) {
for (int i = 0; i < frames->length(); ++i) {
Handle<StackTraceFrame> frame(StackTraceFrame::cast(frames->get(i)), this);
- SerializeStackTraceFrame(this, frame, builder);
+ SerializeStackTraceFrame(this, frame, &builder);
}
Handle<String> stack_trace = builder.Finish().ToHandleChecked();
@@ -2821,7 +2813,7 @@ Isolate* Isolate::New(IsolateAllocationMode mode) {
// IsolateAllocator allocates the memory for the Isolate object according to
// the given allocation mode.
std::unique_ptr<IsolateAllocator> isolate_allocator =
- base::make_unique<IsolateAllocator>(mode);
+ std::make_unique<IsolateAllocator>(mode);
// Construct Isolate object in the allocated memory.
void* isolate_ptr = isolate_allocator->isolate_memory();
Isolate* isolate = new (isolate_ptr) Isolate(std::move(isolate_allocator));
@@ -2986,7 +2978,7 @@ void Isolate::Deinit() {
optimizing_compile_dispatcher_ = nullptr;
}
- wasm_engine()->memory_tracker()->DeleteSharedMemoryObjectsOnIsolate(this);
+ BackingStore::RemoveSharedWasmMemoryObjects(this);
heap_.mark_compact_collector()->EnsureSweepingCompleted();
heap_.memory_allocator()->unmapper()->EnsureUnmappingCompleted();
@@ -3805,308 +3797,12 @@ bool Isolate::IsInAnyContext(Object object, uint32_t index) {
return false;
}
-bool Isolate::IsNoElementsProtectorIntact(Context context) {
- PropertyCell no_elements_cell = heap()->no_elements_protector();
- bool cell_reports_intact =
- no_elements_cell.value().IsSmi() &&
- Smi::ToInt(no_elements_cell.value()) == kProtectorValid;
-
-#ifdef DEBUG
- Context native_context = context.native_context();
-
- Map root_array_map =
- native_context.GetInitialJSArrayMap(GetInitialFastElementsKind());
- JSObject initial_array_proto = JSObject::cast(
- native_context.get(Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
- JSObject initial_object_proto = JSObject::cast(
- native_context.get(Context::INITIAL_OBJECT_PROTOTYPE_INDEX));
- JSObject initial_string_proto = JSObject::cast(
- native_context.get(Context::INITIAL_STRING_PROTOTYPE_INDEX));
-
- if (root_array_map.is_null() || initial_array_proto == initial_object_proto) {
- // We are in the bootstrapping process, and the entire check sequence
- // shouldn't be performed.
- return cell_reports_intact;
- }
-
- // Check that the array prototype hasn't been altered WRT empty elements.
- if (root_array_map.prototype() != initial_array_proto) {
- DCHECK_EQ(false, cell_reports_intact);
- return cell_reports_intact;
- }
-
- FixedArrayBase elements = initial_array_proto.elements();
- ReadOnlyRoots roots(heap());
- if (elements != roots.empty_fixed_array() &&
- elements != roots.empty_slow_element_dictionary()) {
- DCHECK_EQ(false, cell_reports_intact);
- return cell_reports_intact;
- }
-
- // Check that the Object.prototype hasn't been altered WRT empty elements.
- elements = initial_object_proto.elements();
- if (elements != roots.empty_fixed_array() &&
- elements != roots.empty_slow_element_dictionary()) {
- DCHECK_EQ(false, cell_reports_intact);
- return cell_reports_intact;
- }
-
- // Check that the Array.prototype has the Object.prototype as its
- // [[Prototype]] and that the Object.prototype has a null [[Prototype]].
- PrototypeIterator iter(this, initial_array_proto);
- if (iter.IsAtEnd() || iter.GetCurrent() != initial_object_proto) {
- DCHECK_EQ(false, cell_reports_intact);
- DCHECK(!has_pending_exception());
- return cell_reports_intact;
- }
- iter.Advance();
- if (!iter.IsAtEnd()) {
- DCHECK_EQ(false, cell_reports_intact);
- DCHECK(!has_pending_exception());
- return cell_reports_intact;
- }
- DCHECK(!has_pending_exception());
-
- // Check that the String.prototype hasn't been altered WRT empty elements.
- elements = initial_string_proto.elements();
- if (elements != roots.empty_fixed_array() &&
- elements != roots.empty_slow_element_dictionary()) {
- DCHECK_EQ(false, cell_reports_intact);
- return cell_reports_intact;
- }
-
- // Check that the String.prototype has the Object.prototype
- // as its [[Prototype]] still.
- if (initial_string_proto.map().prototype() != initial_object_proto) {
- DCHECK_EQ(false, cell_reports_intact);
- return cell_reports_intact;
- }
-#endif
-
- return cell_reports_intact;
-}
-
-bool Isolate::IsNoElementsProtectorIntact() {
- return Isolate::IsNoElementsProtectorIntact(context());
-}
-
-bool Isolate::IsIsConcatSpreadableLookupChainIntact() {
- Cell is_concat_spreadable_cell = heap()->is_concat_spreadable_protector();
- bool is_is_concat_spreadable_set =
- Smi::ToInt(is_concat_spreadable_cell.value()) == kProtectorInvalid;
-#ifdef DEBUG
- Map root_array_map =
- raw_native_context().GetInitialJSArrayMap(GetInitialFastElementsKind());
- if (root_array_map.is_null()) {
- // Ignore the value of is_concat_spreadable during bootstrap.
- return !is_is_concat_spreadable_set;
- }
- Handle<Object> array_prototype(array_function()->prototype(), this);
- Handle<Symbol> key = factory()->is_concat_spreadable_symbol();
- Handle<Object> value;
- LookupIterator it(this, array_prototype, key);
- if (it.IsFound() && !JSReceiver::GetDataProperty(&it)->IsUndefined(this)) {
- // TODO(cbruni): Currently we do not revert if we unset the
- // @@isConcatSpreadable property on Array.prototype or Object.prototype
- // hence the reverse implication doesn't hold.
- DCHECK(is_is_concat_spreadable_set);
- return false;
- }
-#endif // DEBUG
-
- return !is_is_concat_spreadable_set;
-}
-
-bool Isolate::IsIsConcatSpreadableLookupChainIntact(JSReceiver receiver) {
- if (!IsIsConcatSpreadableLookupChainIntact()) return false;
- return !receiver.HasProxyInPrototype(this);
-}
-
-bool Isolate::IsPromiseHookProtectorIntact() {
- PropertyCell promise_hook_cell = heap()->promise_hook_protector();
- bool is_promise_hook_protector_intact =
- Smi::ToInt(promise_hook_cell.value()) == kProtectorValid;
- DCHECK_IMPLIES(is_promise_hook_protector_intact,
- !promise_hook_or_async_event_delegate_);
- DCHECK_IMPLIES(is_promise_hook_protector_intact,
- !promise_hook_or_debug_is_active_or_async_event_delegate_);
- return is_promise_hook_protector_intact;
-}
-
-bool Isolate::IsPromiseResolveLookupChainIntact() {
- Cell promise_resolve_cell = heap()->promise_resolve_protector();
- bool is_promise_resolve_protector_intact =
- Smi::ToInt(promise_resolve_cell.value()) == kProtectorValid;
- return is_promise_resolve_protector_intact;
-}
-
-bool Isolate::IsPromiseThenLookupChainIntact() {
- PropertyCell promise_then_cell = heap()->promise_then_protector();
- bool is_promise_then_protector_intact =
- Smi::ToInt(promise_then_cell.value()) == kProtectorValid;
- return is_promise_then_protector_intact;
-}
-
-bool Isolate::IsPromiseThenLookupChainIntact(Handle<JSReceiver> receiver) {
- DisallowHeapAllocation no_gc;
- if (!receiver->IsJSPromise()) return false;
- if (!IsInAnyContext(receiver->map().prototype(),
- Context::PROMISE_PROTOTYPE_INDEX)) {
- return false;
- }
- return IsPromiseThenLookupChainIntact();
-}
-
void Isolate::UpdateNoElementsProtectorOnSetElement(Handle<JSObject> object) {
DisallowHeapAllocation no_gc;
if (!object->map().is_prototype_map()) return;
- if (!IsNoElementsProtectorIntact()) return;
+ if (!Protectors::IsNoElementsIntact(this)) return;
if (!IsArrayOrObjectOrStringPrototype(*object)) return;
- PropertyCell::SetValueWithInvalidation(
- this, "no_elements_protector", factory()->no_elements_protector(),
- handle(Smi::FromInt(kProtectorInvalid), this));
-}
-
-void Isolate::TraceProtectorInvalidation(const char* protector_name) {
- static constexpr char kInvalidateProtectorTracingCategory[] =
- "V8.InvalidateProtector";
- static constexpr char kInvalidateProtectorTracingArg[] = "protector-name";
-
- DCHECK(FLAG_trace_protector_invalidation);
-
- // TODO(jgruber): Remove the PrintF once tracing can output to stdout.
- i::PrintF("Invalidating protector cell %s in isolate %p\n", protector_name,
- this);
- TRACE_EVENT_INSTANT1("v8", kInvalidateProtectorTracingCategory,
- TRACE_EVENT_SCOPE_THREAD, kInvalidateProtectorTracingArg,
- protector_name);
-}
-
-void Isolate::InvalidateIsConcatSpreadableProtector() {
- DCHECK(factory()->is_concat_spreadable_protector()->value().IsSmi());
- DCHECK(IsIsConcatSpreadableLookupChainIntact());
- if (FLAG_trace_protector_invalidation) {
- TraceProtectorInvalidation("is_concat_spreadable_protector");
- }
- factory()->is_concat_spreadable_protector()->set_value(
- Smi::FromInt(kProtectorInvalid));
- DCHECK(!IsIsConcatSpreadableLookupChainIntact());
-}
-
-void Isolate::InvalidateArrayConstructorProtector() {
- DCHECK(factory()->array_constructor_protector()->value().IsSmi());
- DCHECK(IsArrayConstructorIntact());
- if (FLAG_trace_protector_invalidation) {
- TraceProtectorInvalidation("array_constructor_protector");
- }
- factory()->array_constructor_protector()->set_value(
- Smi::FromInt(kProtectorInvalid));
- DCHECK(!IsArrayConstructorIntact());
-}
-
-void Isolate::InvalidateTypedArraySpeciesProtector() {
- DCHECK(factory()->typed_array_species_protector()->value().IsSmi());
- DCHECK(IsTypedArraySpeciesLookupChainIntact());
- PropertyCell::SetValueWithInvalidation(
- this, "typed_array_species_protector",
- factory()->typed_array_species_protector(),
- handle(Smi::FromInt(kProtectorInvalid), this));
- DCHECK(!IsTypedArraySpeciesLookupChainIntact());
-}
-
-void Isolate::InvalidatePromiseSpeciesProtector() {
- DCHECK(factory()->promise_species_protector()->value().IsSmi());
- DCHECK(IsPromiseSpeciesLookupChainIntact());
- PropertyCell::SetValueWithInvalidation(
- this, "promise_species_protector", factory()->promise_species_protector(),
- handle(Smi::FromInt(kProtectorInvalid), this));
- DCHECK(!IsPromiseSpeciesLookupChainIntact());
-}
-
-void Isolate::InvalidateStringLengthOverflowProtector() {
- DCHECK(factory()->string_length_protector()->value().IsSmi());
- DCHECK(IsStringLengthOverflowIntact());
- if (FLAG_trace_protector_invalidation) {
- TraceProtectorInvalidation("string_length_protector");
- }
- factory()->string_length_protector()->set_value(
- Smi::FromInt(kProtectorInvalid));
- DCHECK(!IsStringLengthOverflowIntact());
-}
-
-void Isolate::InvalidateArrayIteratorProtector() {
- DCHECK(factory()->array_iterator_protector()->value().IsSmi());
- DCHECK(IsArrayIteratorLookupChainIntact());
- PropertyCell::SetValueWithInvalidation(
- this, "array_iterator_protector", factory()->array_iterator_protector(),
- handle(Smi::FromInt(kProtectorInvalid), this));
- DCHECK(!IsArrayIteratorLookupChainIntact());
-}
-
-void Isolate::InvalidateMapIteratorProtector() {
- DCHECK(factory()->map_iterator_protector()->value().IsSmi());
- DCHECK(IsMapIteratorLookupChainIntact());
- PropertyCell::SetValueWithInvalidation(
- this, "map_iterator_protector", factory()->map_iterator_protector(),
- handle(Smi::FromInt(kProtectorInvalid), this));
- DCHECK(!IsMapIteratorLookupChainIntact());
-}
-
-void Isolate::InvalidateSetIteratorProtector() {
- DCHECK(factory()->set_iterator_protector()->value().IsSmi());
- DCHECK(IsSetIteratorLookupChainIntact());
- PropertyCell::SetValueWithInvalidation(
- this, "set_iterator_protector", factory()->set_iterator_protector(),
- handle(Smi::FromInt(kProtectorInvalid), this));
- DCHECK(!IsSetIteratorLookupChainIntact());
-}
-
-void Isolate::InvalidateStringIteratorProtector() {
- DCHECK(factory()->string_iterator_protector()->value().IsSmi());
- DCHECK(IsStringIteratorLookupChainIntact());
- PropertyCell::SetValueWithInvalidation(
- this, "string_iterator_protector", factory()->string_iterator_protector(),
- handle(Smi::FromInt(kProtectorInvalid), this));
- DCHECK(!IsStringIteratorLookupChainIntact());
-}
-
-void Isolate::InvalidateArrayBufferDetachingProtector() {
- DCHECK(factory()->array_buffer_detaching_protector()->value().IsSmi());
- DCHECK(IsArrayBufferDetachingIntact());
- PropertyCell::SetValueWithInvalidation(
- this, "array_buffer_detaching_protector",
- factory()->array_buffer_detaching_protector(),
- handle(Smi::FromInt(kProtectorInvalid), this));
- DCHECK(!IsArrayBufferDetachingIntact());
-}
-
-void Isolate::InvalidatePromiseHookProtector() {
- DCHECK(factory()->promise_hook_protector()->value().IsSmi());
- DCHECK(IsPromiseHookProtectorIntact());
- PropertyCell::SetValueWithInvalidation(
- this, "promise_hook_protector", factory()->promise_hook_protector(),
- handle(Smi::FromInt(kProtectorInvalid), this));
- DCHECK(!IsPromiseHookProtectorIntact());
-}
-
-void Isolate::InvalidatePromiseResolveProtector() {
- DCHECK(factory()->promise_resolve_protector()->value().IsSmi());
- DCHECK(IsPromiseResolveLookupChainIntact());
- if (FLAG_trace_protector_invalidation) {
- TraceProtectorInvalidation("promise_resolve_protector");
- }
- factory()->promise_resolve_protector()->set_value(
- Smi::FromInt(kProtectorInvalid));
- DCHECK(!IsPromiseResolveLookupChainIntact());
-}
-
-void Isolate::InvalidatePromiseThenProtector() {
- DCHECK(factory()->promise_then_protector()->value().IsSmi());
- DCHECK(IsPromiseThenLookupChainIntact());
- PropertyCell::SetValueWithInvalidation(
- this, "promise_then_protector", factory()->promise_then_protector(),
- handle(Smi::FromInt(kProtectorInvalid), this));
- DCHECK(!IsPromiseThenLookupChainIntact());
+ Protectors::InvalidateNoElements(this);
}
bool Isolate::IsAnyInitialArrayPrototype(Handle<JSArray> array) {
@@ -4256,9 +3952,9 @@ void Isolate::PromiseHookStateUpdated() {
bool promise_hook_or_debug_is_active_or_async_event_delegate =
promise_hook_or_async_event_delegate || debug()->is_active();
if (promise_hook_or_debug_is_active_or_async_event_delegate &&
- IsPromiseHookProtectorIntact()) {
+ Protectors::IsPromiseHookIntact(this)) {
HandleScope scope(this);
- InvalidatePromiseHookProtector();
+ Protectors::InvalidatePromiseHook(this);
}
promise_hook_or_async_event_delegate_ = promise_hook_or_async_event_delegate;
promise_hook_or_debug_is_active_or_async_event_delegate_ =
@@ -4584,6 +4280,15 @@ void Isolate::AddDetachedContext(Handle<Context> context) {
heap()->set_detached_contexts(*detached_contexts);
}
+void Isolate::AddSharedWasmMemory(Handle<WasmMemoryObject> memory_object) {
+ HandleScope scope(this);
+ Handle<WeakArrayList> shared_wasm_memories =
+ factory()->shared_wasm_memories();
+ shared_wasm_memories = WeakArrayList::AddToEnd(
+ this, shared_wasm_memories, MaybeObjectHandle::Weak(memory_object));
+ heap()->set_shared_wasm_memories(*shared_wasm_memories);
+}
+
void Isolate::CheckDetachedContextsAfterGC() {
HandleScope scope(this);
Handle<WeakArrayList> detached_contexts = factory()->detached_contexts();
diff --git a/deps/v8/src/execution/isolate.h b/deps/v8/src/execution/isolate.h
index 4eadb42438..20aea6066c 100644
--- a/deps/v8/src/execution/isolate.h
+++ b/deps/v8/src/execution/isolate.h
@@ -1163,87 +1163,8 @@ class Isolate final : private HiddenFactory {
#endif // V8_INTL_SUPPORT
- static const int kProtectorValid = 1;
- static const int kProtectorInvalid = 0;
-
- inline bool IsArrayConstructorIntact();
-
- // The version with an explicit context parameter can be used when
- // Isolate::context is not set up, e.g. when calling directly into C++ from
- // CSA.
- bool IsNoElementsProtectorIntact(Context context);
- V8_EXPORT_PRIVATE bool IsNoElementsProtectorIntact();
-
bool IsArrayOrObjectOrStringPrototype(Object object);
- inline bool IsTypedArraySpeciesLookupChainIntact();
-
- // Check that the @@species protector is intact, which guards the lookup of
- // "constructor" on JSPromise instances, whose [[Prototype]] is the initial
- // %PromisePrototype%, and the Symbol.species lookup on the
- // %PromisePrototype%.
- inline bool IsPromiseSpeciesLookupChainIntact();
-
- bool IsIsConcatSpreadableLookupChainIntact();
- bool IsIsConcatSpreadableLookupChainIntact(JSReceiver receiver);
- inline bool IsStringLengthOverflowIntact();
- inline bool IsArrayIteratorLookupChainIntact();
-
- // The MapIterator protector protects the original iteration behaviors of
- // Map.prototype.keys(), Map.prototype.values(), and Set.prototype.entries().
- // It does not protect the original iteration behavior of
- // Map.prototype[Symbol.iterator](). The protector is invalidated when:
- // * The 'next' property is set on an object where the property holder is the
- // %MapIteratorPrototype% (e.g. because the object is that very prototype).
- // * The 'Symbol.iterator' property is set on an object where the property
- // holder is the %IteratorPrototype%. Note that this also invalidates the
- // SetIterator protector (see below).
- inline bool IsMapIteratorLookupChainIntact();
-
- // The SetIterator protector protects the original iteration behavior of
- // Set.prototype.keys(), Set.prototype.values(), Set.prototype.entries(),
- // and Set.prototype[Symbol.iterator](). The protector is invalidated when:
- // * The 'next' property is set on an object where the property holder is the
- // %SetIteratorPrototype% (e.g. because the object is that very prototype).
- // * The 'Symbol.iterator' property is set on an object where the property
- // holder is the %SetPrototype% OR %IteratorPrototype%. This means that
- // setting Symbol.iterator on a MapIterator object can also invalidate the
- // SetIterator protector, and vice versa, setting Symbol.iterator on a
- // SetIterator object can also invalidate the MapIterator. This is an over-
- // approximation for the sake of simplicity.
- inline bool IsSetIteratorLookupChainIntact();
-
- // The StringIteratorProtector protects the original string iteration behavior
- // for primitive strings. As long as the StringIteratorProtector is valid,
- // iterating over a primitive string is guaranteed to be unobservable from
- // user code and can thus be cut short. More specifically, the protector gets
- // invalidated as soon as either String.prototype[Symbol.iterator] or
- // String.prototype[Symbol.iterator]().next is modified. This guarantee does
- // not apply to string objects (as opposed to primitives), since they could
- // define their own Symbol.iterator.
- // String.prototype itself does not need to be protected, since it is
- // non-configurable and non-writable.
- inline bool IsStringIteratorLookupChainIntact();
-
- // Make sure we do check for detached array buffers.
- inline bool IsArrayBufferDetachingIntact();
-
- // Disable promise optimizations if promise (debug) hooks have ever been
- // active, because those can observe promises.
- bool IsPromiseHookProtectorIntact();
-
- // Make sure a lookup of "resolve" on the %Promise% intrinsic object
- // yields the initial Promise.resolve method.
- bool IsPromiseResolveLookupChainIntact();
-
- // Make sure a lookup of "then" on any JSPromise whose [[Prototype]] is the
- // initial %PromisePrototype% yields the initial method. In addition this
- // protector also guards the negative lookup of "then" on the intrinsic
- // %ObjectPrototype%, meaning that such lookups are guaranteed to yield
- // undefined without triggering any side-effects.
- bool IsPromiseThenLookupChainIntact();
- bool IsPromiseThenLookupChainIntact(Handle<JSReceiver> receiver);
-
// On intent to set an element in object, make sure that appropriate
// notifications occur if the set is on the elements of the array or
// object prototype. Also ensure that changes to prototype chain between
@@ -1259,24 +1180,6 @@ class Isolate final : private HiddenFactory {
UpdateNoElementsProtectorOnSetElement(object);
}
- // The `protector_name` C string must be statically allocated.
- void TraceProtectorInvalidation(const char* protector_name);
-
- void InvalidateArrayConstructorProtector();
- void InvalidateTypedArraySpeciesProtector();
- void InvalidateRegExpSpeciesProtector(Handle<NativeContext> native_context);
- void InvalidatePromiseSpeciesProtector();
- void InvalidateIsConcatSpreadableProtector();
- void InvalidateStringLengthOverflowProtector();
- void InvalidateArrayIteratorProtector();
- void InvalidateMapIteratorProtector();
- void InvalidateSetIteratorProtector();
- void InvalidateStringIteratorProtector();
- void InvalidateArrayBufferDetachingProtector();
- V8_EXPORT_PRIVATE void InvalidatePromiseHookProtector();
- void InvalidatePromiseResolveProtector();
- void InvalidatePromiseThenProtector();
-
// Returns true if array is the initial array prototype in any native context.
bool IsAnyInitialArrayPrototype(Handle<JSArray> array);
@@ -1406,6 +1309,8 @@ class Isolate final : private HiddenFactory {
void AddDetachedContext(Handle<Context> context);
void CheckDetachedContextsAfterGC();
+ void AddSharedWasmMemory(Handle<WasmMemoryObject> memory_object);
+
std::vector<Object>* partial_snapshot_cache() {
return &partial_snapshot_cache_;
}
@@ -1513,6 +1418,11 @@ class Isolate final : private HiddenFactory {
bool HasPrepareStackTraceCallback() const;
void SetAddCrashKeyCallback(AddCrashKeyCallback callback);
+ void AddCrashKey(CrashKeyId id, const std::string& value) {
+ if (add_crash_key_callback_) {
+ add_crash_key_callback_(id, value);
+ }
+ }
void SetRAILMode(RAILMode rail_mode);
diff --git a/deps/v8/src/execution/messages.cc b/deps/v8/src/execution/messages.cc
index 63d1e2be1f..96fb94cd4e 100644
--- a/deps/v8/src/execution/messages.cc
+++ b/deps/v8/src/execution/messages.cc
@@ -603,7 +603,7 @@ int WasmStackFrame::GetColumnNumber() { return GetModuleOffset(); }
int WasmStackFrame::GetModuleOffset() const {
const int function_offset =
- wasm_instance_->module_object().GetFunctionOffset(wasm_func_index_);
+ GetWasmFunctionOffset(wasm_instance_->module(), wasm_func_index_);
return function_offset + GetPosition();
}
@@ -631,7 +631,7 @@ Handle<Object> AsmJsWasmStackFrame::GetReceiver() const {
}
Handle<Object> AsmJsWasmStackFrame::GetFunction() const {
- // TODO(clemensh): Return lazily created JSFunction.
+ // TODO(clemensb): Return lazily created JSFunction.
return Null();
}
@@ -894,7 +894,7 @@ MaybeHandle<Object> ErrorUtils::FormatStackTrace(Isolate* isolate,
Handle<StackTraceFrame> frame(StackTraceFrame::cast(elems->get(i)),
isolate);
- SerializeStackTraceFrame(isolate, frame, builder);
+ SerializeStackTraceFrame(isolate, frame, &builder);
if (isolate->has_pending_exception()) {
// CallSite.toString threw. Parts of the current frame might have been
diff --git a/deps/v8/src/execution/protectors.cc b/deps/v8/src/execution/protectors.cc
index 3ac07eede3..b5b4c47a1b 100644
--- a/deps/v8/src/execution/protectors.cc
+++ b/deps/v8/src/execution/protectors.cc
@@ -16,12 +16,32 @@
namespace v8 {
namespace internal {
+namespace {
+void TraceProtectorInvalidation(const char* protector_name) {
+ DCHECK(FLAG_trace_protector_invalidation);
+ static constexpr char kInvalidateProtectorTracingCategory[] =
+ "V8.InvalidateProtector";
+ static constexpr char kInvalidateProtectorTracingArg[] = "protector-name";
+
+ // TODO(jgruber): Remove the PrintF once tracing can output to stdout.
+ i::PrintF("Invalidating protector cell %s\n", protector_name);
+ TRACE_EVENT_INSTANT1("v8", kInvalidateProtectorTracingCategory,
+ TRACE_EVENT_SCOPE_THREAD, kInvalidateProtectorTracingArg,
+ protector_name);
+}
+} // namespace
+
#define INVALIDATE_PROTECTOR_ON_NATIVE_CONTEXT_DEFINITION(name, cell) \
void Protectors::Invalidate##name(Isolate* isolate, \
Handle<NativeContext> native_context) { \
DCHECK_EQ(*native_context, isolate->raw_native_context()); \
DCHECK(native_context->cell().value().IsSmi()); \
DCHECK(Is##name##Intact(native_context)); \
+ if (FLAG_trace_protector_invalidation) { \
+ TraceProtectorInvalidation(#name); \
+ } \
Handle<PropertyCell> species_cell(native_context->cell(), isolate); \
PropertyCell::SetValueWithInvalidation( \
isolate, #cell, species_cell, \
@@ -36,6 +56,9 @@ DECLARED_PROTECTORS_ON_NATIVE_CONTEXT(
void Protectors::Invalidate##name(Isolate* isolate) { \
DCHECK(isolate->factory()->cell()->value().IsSmi()); \
DCHECK(Is##name##Intact(isolate)); \
+ if (FLAG_trace_protector_invalidation) { \
+ TraceProtectorInvalidation(#name); \
+ } \
PropertyCell::SetValueWithInvalidation( \
isolate, #cell, isolate->factory()->cell(), \
handle(Smi::FromInt(kProtectorInvalid), isolate)); \
diff --git a/deps/v8/src/execution/protectors.h b/deps/v8/src/execution/protectors.h
index 5c54613bb1..4601f16cf0 100644
--- a/deps/v8/src/execution/protectors.h
+++ b/deps/v8/src/execution/protectors.h
@@ -18,19 +18,82 @@ class Protectors : public AllStatic {
#define DECLARED_PROTECTORS_ON_NATIVE_CONTEXT(V) \
V(RegExpSpeciesLookupChainProtector, regexp_species_protector)
-#define DECLARED_PROTECTORS_ON_ISOLATE(V) \
- V(ArraySpeciesLookupChain, ArraySpeciesProtector, array_species_protector)
+#define DECLARED_PROTECTORS_ON_ISOLATE(V) \
+ V(ArrayBufferDetaching, ArrayBufferDetachingProtector, \
+ array_buffer_detaching_protector) \
+ V(ArrayConstructor, ArrayConstructorProtector, array_constructor_protector) \
+ V(ArrayIteratorLookupChain, ArrayIteratorProtector, \
+ array_iterator_protector) \
+ V(ArraySpeciesLookupChain, ArraySpeciesProtector, array_species_protector) \
+ V(IsConcatSpreadableLookupChain, IsConcatSpreadableProtector, \
+ is_concat_spreadable_protector) \
+ V(NoElements, NoElementsProtector, no_elements_protector) \
+ \
+ /* The MapIterator protector protects the original iteration behaviors */ \
+ /* of Map.prototype.keys(), Map.prototype.values(), and */ \
+ /* Set.prototype.entries(). It does not protect the original iteration */ \
+ /* behavior of Map.prototype[Symbol.iterator](). */ \
+ /* The protector is invalidated when: */ \
+ /* * The 'next' property is set on an object where the property holder */ \
+ /* is the %MapIteratorPrototype% (e.g. because the object is that very */ \
+ /* prototype). */ \
+ /* * The 'Symbol.iterator' property is set on an object where the */ \
+ /* property holder is the %IteratorPrototype%. Note that this also */ \
+ /* invalidates the SetIterator protector (see below). */ \
+ V(MapIteratorLookupChain, MapIteratorProtector, map_iterator_protector) \
+ V(PromiseHook, PromiseHookProtector, promise_hook_protector) \
+ V(PromiseThenLookupChain, PromiseThenProtector, promise_then_protector) \
+ V(PromiseResolveLookupChain, PromiseResolveProtector, \
+ promise_resolve_protector) \
+ V(PromiseSpeciesLookupChain, PromiseSpeciesProtector, \
+ promise_species_protector) \
+ \
+ /* The SetIterator protector protects the original iteration behavior of */ \
+ /* Set.prototype.keys(), Set.prototype.values(), */ \
+ /* Set.prototype.entries(), and Set.prototype[Symbol.iterator](). The */ \
+ /* protector is invalidated when: */ \
+ /* * The 'next' property is set on an object where the property holder */ \
+ /* is the %SetIteratorPrototype% (e.g. because the object is that very */ \
+ /* prototype). */ \
+ /* * The 'Symbol.iterator' property is set on an object where the */ \
+ /* property holder is the %SetPrototype% OR %IteratorPrototype%. This */ \
+ /* means that setting Symbol.iterator on a MapIterator object can also */ \
+ /* invalidate the SetIterator protector, and vice versa, setting */ \
+ /* Symbol.iterator on a SetIterator object can also invalidate the */ \
+ /* MapIterator. This is an over-approximation for the sake of */ \
+ /* simplicity. */ \
+ V(SetIteratorLookupChain, SetIteratorProtector, set_iterator_protector) \
+ \
+ /* The StringIteratorProtector protects the original string iteration */ \
+ /* behavior for primitive strings. As long as the */ \
+ /* StringIteratorProtector is valid, iterating over a primitive string */ \
+ /* is guaranteed to be unobservable from user code and can thus be cut */ \
+ /* short. More specifically, the protector gets invalidated as soon as */ \
+ /* either String.prototype[Symbol.iterator] or */ \
+ /* String.prototype[Symbol.iterator]().next is modified. This guarantee */ \
+ /* does not apply to string objects (as opposed to primitives), since */ \
+ /* they could define their own Symbol.iterator. */ \
+ /* String.prototype itself does not need to be protected, since it is */ \
+ /* non-configurable and non-writable. */ \
+ V(StringIteratorLookupChain, StringIteratorProtector, \
+ string_iterator_protector) \
+ V(StringLengthOverflowLookupChain, StringLengthProtector, \
+ string_length_protector) \
+ V(TypedArraySpeciesLookupChain, TypedArraySpeciesProtector, \
+ typed_array_species_protector)
+
+#define DECLARE_PROTECTOR_ON_NATIVE_CONTEXT(name, unused_cell) \
+ V8_EXPORT_PRIVATE static inline bool Is##name##Intact( \
+ Handle<NativeContext> native_context); \
+ V8_EXPORT_PRIVATE static void Invalidate##name( \
+ Isolate* isolate, Handle<NativeContext> native_context);
-#define DECLARE_PROTECTOR_ON_NATIVE_CONTEXT(name, unused_cell) \
- static inline bool Is##name##Intact(Handle<NativeContext> native_context); \
- static void Invalidate##name(Isolate* isolate, \
- Handle<NativeContext> native_context);
DECLARED_PROTECTORS_ON_NATIVE_CONTEXT(DECLARE_PROTECTOR_ON_NATIVE_CONTEXT)
#undef DECLARE_PROTECTOR_ON_NATIVE_CONTEXT
#define DECLARE_PROTECTOR_ON_ISOLATE(name, unused_root_index, unused_cell) \
- static inline bool Is##name##Intact(Isolate* isolate); \
- static void Invalidate##name(Isolate* isolate);
+ V8_EXPORT_PRIVATE static inline bool Is##name##Intact(Isolate* isolate); \
+ V8_EXPORT_PRIVATE static void Invalidate##name(Isolate* isolate);
DECLARED_PROTECTORS_ON_ISOLATE(DECLARE_PROTECTOR_ON_ISOLATE)
#undef DECLARE_PROTECTOR_ON_ISOLATE
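// A minimal standalone sketch of the X-macro scheme above (names
// illustrative, not V8 API): a single list macro drives both the
// declarations and the definitions, so adding a protector is a one-line
// change and the stringified name feeds tracing for free.
#include <cstdio>

#define PROTECTOR_LIST(V) \
  V(NoElements)           \
  V(ArraySpecies)

struct Protectors {
#define DECLARE_PROTECTOR(name)   \
  static bool Is##name##Intact(); \
  static void Invalidate##name();
  PROTECTOR_LIST(DECLARE_PROTECTOR)
#undef DECLARE_PROTECTOR
};

#define DEFINE_PROTECTOR(name)                                   \
  static bool name##_intact = true;                              \
  bool Protectors::Is##name##Intact() { return name##_intact; }  \
  void Protectors::Invalidate##name() {                          \
    std::printf("Invalidating protector %s\n", #name);           \
    name##_intact = false;                                       \
  }
PROTECTOR_LIST(DEFINE_PROTECTOR)
#undef DEFINE_PROTECTOR
// Call sites then read: if (Protectors::IsNoElementsIntact()) { ... }
// and Protectors::InvalidateNoElements();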
diff --git a/deps/v8/src/execution/s390/frame-constants-s390.h b/deps/v8/src/execution/s390/frame-constants-s390.h
index a48a78fd42..34ae136aad 100644
--- a/deps/v8/src/execution/s390/frame-constants-s390.h
+++ b/deps/v8/src/execution/s390/frame-constants-s390.h
@@ -14,7 +14,7 @@ namespace internal {
class EntryFrameConstants : public AllStatic {
public:
static constexpr int kCallerFPOffset =
- -(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
+ -(StandardFrameConstants::kFixedFrameSizeFromFp + kSystemPointerSize);
// Stack offsets for arguments passed to JSEntry.
static constexpr int kArgvOffset = 20 * kSystemPointerSize;
};
@@ -25,13 +25,13 @@ class ExitFrameConstants : public TypedFrameConstants {
DEFINE_TYPED_FRAME_SIZES(1);
// The caller fields are below the frame pointer on the stack.
- static constexpr int kCallerFPOffset = 0 * kPointerSize;
+ static constexpr int kCallerFPOffset = 0 * kSystemPointerSize;
// The calling JS function is below FP.
- static constexpr int kCallerPCOffset = 1 * kPointerSize;
+ static constexpr int kCallerPCOffset = 1 * kSystemPointerSize;
// FP-relative displacement of the caller's SP. It points just
// below the saved PC.
- static constexpr int kCallerSPDisplacement = 2 * kPointerSize;
+ static constexpr int kCallerSPDisplacement = 2 * kSystemPointerSize;
};
class WasmCompileLazyFrameConstants : public TypedFrameConstants {
@@ -47,7 +47,7 @@ class WasmCompileLazyFrameConstants : public TypedFrameConstants {
static constexpr int kWasmInstanceOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(0);
static constexpr int kFixedFrameSizeFromFp =
TypedFrameConstants::kFixedFrameSizeFromFp +
- kNumberOfSavedGpParamRegs * kPointerSize +
+ kNumberOfSavedGpParamRegs * kSystemPointerSize +
kNumberOfSavedFpParamRegs * kDoubleSize;
};
@@ -56,13 +56,13 @@ class JavaScriptFrameConstants : public AllStatic {
// FP-relative.
static constexpr int kLocal0Offset =
StandardFrameConstants::kExpressionsOffset;
- static constexpr int kLastParameterOffset = +2 * kPointerSize;
+ static constexpr int kLastParameterOffset = +2 * kSystemPointerSize;
static constexpr int kFunctionOffset =
StandardFrameConstants::kFunctionOffset;
// Caller SP-relative.
- static constexpr int kParam0Offset = -2 * kPointerSize;
- static constexpr int kReceiverOffset = -1 * kPointerSize;
+ static constexpr int kParam0Offset = -2 * kSystemPointerSize;
+ static constexpr int kReceiverOffset = -1 * kSystemPointerSize;
};
} // namespace internal
diff --git a/deps/v8/src/execution/simulator.h b/deps/v8/src/execution/simulator.h
index 9f98f2039b..9c5cae7e97 100644
--- a/deps/v8/src/execution/simulator.h
+++ b/deps/v8/src/execution/simulator.h
@@ -115,15 +115,26 @@ class GeneratedCode {
#ifdef USE_SIMULATOR
// Defined in simulator-base.h.
Return Call(Args... args) {
+#if defined(V8_TARGET_OS_WIN) && !defined(V8_OS_WIN)
+ FATAL("Generated code execution not possible during cross-compilation.");
+#endif // defined(V8_TARGET_OS_WIN) && !defined(V8_OS_WIN)
return Simulator::current(isolate_)->template Call<Return>(
reinterpret_cast<Address>(fn_ptr_), args...);
}
- DISABLE_CFI_ICALL Return CallIrregexp(Args... args) { return Call(args...); }
+ DISABLE_CFI_ICALL Return CallIrregexp(Args... args) {
+#if defined(V8_TARGET_OS_WIN) && !defined(V8_OS_WIN)
+ FATAL("Generated code execution not possible during cross-compilation.");
+#endif // defined(V8_TARGET_OS_WIN) && !defined(V8_OS_WIN)
+ return Call(args...);
+ }
#else
DISABLE_CFI_ICALL Return Call(Args... args) {
// When running without a simulator we call the entry directly.
+#if defined(V8_TARGET_OS_WIN) && !defined(V8_OS_WIN)
+ FATAL("Generated code execution not possible during cross-compilation.");
+#endif // defined(V8_TARGET_OS_WIN) && !defined(V8_OS_WIN)
#if V8_OS_AIX
// AIX ABI requires function descriptors (FD). Artificially create a pseudo
// FD to ensure correct dispatch to generated code. The 'volatile'
@@ -141,6 +152,9 @@ class GeneratedCode {
DISABLE_CFI_ICALL Return CallIrregexp(Args... args) {
// When running without a simulator we call the entry directly.
+#if defined(V8_TARGET_OS_WIN) && !defined(V8_OS_WIN)
+ FATAL("Generated code execution not possible during cross-compilation.");
+#endif // defined(V8_TARGET_OS_WIN) && !defined(V8_OS_WIN)
return fn_ptr_(args...);
}
#endif // USE_SIMULATOR
diff --git a/deps/v8/src/execution/stack-guard.cc b/deps/v8/src/execution/stack-guard.cc
index 1cf4c4605a..d37327f1c3 100644
--- a/deps/v8/src/execution/stack-guard.cc
+++ b/deps/v8/src/execution/stack-guard.cc
@@ -10,6 +10,7 @@
#include "src/execution/runtime-profiler.h"
#include "src/execution/simulator.h"
#include "src/logging/counters.h"
+#include "src/objects/backing-store.h"
#include "src/roots/roots-inl.h"
#include "src/utils/memcopy.h"
#include "src/wasm/wasm-engine.h"
@@ -86,6 +87,8 @@ void StackGuard::PushInterruptsScope(InterruptsScope* scope) {
current->intercepted_flags_ &= ~scope->intercept_mask_;
}
thread_local_.interrupt_flags_ |= restored_flags;
+
+ if (has_pending_interrupts(access)) set_interrupt_limits(access);
}
if (!has_pending_interrupts(access)) reset_limits(access);
// Add scope to the chain.
@@ -271,8 +274,7 @@ Object StackGuard::HandleInterrupts() {
if (TestAndClear(&interrupt_flags, GROW_SHARED_MEMORY)) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"),
"V8.WasmGrowSharedMemory");
- isolate_->wasm_engine()->memory_tracker()->UpdateSharedMemoryInstances(
- isolate_);
+ BackingStore::UpdateSharedWasmMemoryObjects(isolate_);
}
if (TestAndClear(&interrupt_flags, DEOPT_MARKED_ALLOCATION_SITES)) {
@@ -305,8 +307,6 @@ Object StackGuard::HandleInterrupts() {
}
isolate_->counters()->stack_interrupts()->Increment();
- isolate_->counters()->runtime_profiler_ticks()->Increment();
- isolate_->runtime_profiler()->MarkCandidatesForOptimization();
return ReadOnlyRoots(isolate_).undefined_value();
}
diff --git a/deps/v8/src/execution/x64/frame-constants-x64.h b/deps/v8/src/execution/x64/frame-constants-x64.h
index 5af35b1b3b..49d69829f0 100644
--- a/deps/v8/src/execution/x64/frame-constants-x64.h
+++ b/deps/v8/src/execution/x64/frame-constants-x64.h
@@ -13,7 +13,7 @@ namespace internal {
class EntryFrameConstants : public AllStatic {
public:
-#ifdef _WIN64
+#ifdef V8_TARGET_OS_WIN
static constexpr int kCalleeSaveXMMRegisters = 10;
static constexpr int kXMMRegisterSize = 16;
static constexpr int kXMMRegistersBlockSize =
diff --git a/deps/v8/src/extensions/free-buffer-extension.cc b/deps/v8/src/extensions/free-buffer-extension.cc
index 975e9543c8..9fdfe920eb 100644
--- a/deps/v8/src/extensions/free-buffer-extension.cc
+++ b/deps/v8/src/extensions/free-buffer-extension.cc
@@ -21,9 +21,8 @@ void FreeBufferExtension::FreeBuffer(
const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Local<v8::ArrayBuffer> arrayBuffer = args[0].As<v8::ArrayBuffer>();
v8::ArrayBuffer::Contents contents = arrayBuffer->Externalize();
- Isolate* isolate = reinterpret_cast<Isolate*>(args.GetIsolate());
- isolate->array_buffer_allocator()->Free(contents.Data(),
- contents.ByteLength());
+ contents.Deleter()(contents.Data(), contents.ByteLength(),
+ contents.DeleterData());
}
} // namespace internal
diff --git a/deps/v8/src/extensions/gc-extension.cc b/deps/v8/src/extensions/gc-extension.cc
index 4f446627fd..fddd40b352 100644
--- a/deps/v8/src/extensions/gc-extension.cc
+++ b/deps/v8/src/extensions/gc-extension.cc
@@ -4,23 +4,160 @@
#include "src/extensions/gc-extension.h"
+#include "include/v8.h"
#include "src/base/platform/platform.h"
+#include "src/execution/isolate.h"
+#include "src/heap/heap.h"
+#include "src/tasks/cancelable-task.h"
namespace v8 {
namespace internal {
+namespace {
+
+enum class ExecutionType { kAsync, kSync };
+
+struct GCOptions {
+ v8::Isolate::GarbageCollectionType type;
+ ExecutionType execution;
+};
+
+Maybe<bool> IsProperty(v8::Isolate* isolate, v8::Local<v8::Context> ctx,
+ v8::Local<v8::Object> object, const char* key,
+ const char* value) {
+ auto k = v8::String::NewFromUtf8(isolate, key).ToLocalChecked();
+  // Get will return undefined for non-existing keys, which will make
+ // StrictEquals fail.
+ auto maybe_property = object->Get(ctx, k);
+ if (maybe_property.IsEmpty()) return Nothing<bool>();
+ return Just<bool>(maybe_property.ToLocalChecked()->StrictEquals(
+ v8::String::NewFromUtf8(isolate, value).ToLocalChecked()));
+}
+
+Maybe<GCOptions> Parse(v8::Isolate* isolate,
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ // Default values.
+ auto options =
+ GCOptions{v8::Isolate::GarbageCollectionType::kFullGarbageCollection,
+ ExecutionType::kSync};
+ bool found_options_object = false;
+
+ if (args.Length() > 0 && args[0]->IsObject()) {
+ v8::HandleScope scope(isolate);
+ auto ctx = isolate->GetCurrentContext();
+ auto param = v8::Local<v8::Object>::Cast(args[0]);
+ auto maybe_type = IsProperty(isolate, ctx, param, "type", "minor");
+ if (maybe_type.IsNothing()) return Nothing<GCOptions>();
+ if (maybe_type.ToChecked()) {
+ found_options_object = true;
+ options.type =
+ v8::Isolate::GarbageCollectionType::kMinorGarbageCollection;
+ }
+ auto maybe_execution =
+ IsProperty(isolate, ctx, param, "execution", "async");
+ if (maybe_execution.IsNothing()) return Nothing<GCOptions>();
+ if (maybe_execution.ToChecked()) {
+ found_options_object = true;
+ options.execution = ExecutionType::kAsync;
+ }
+ }
+
+  // If no options object is present, default to the legacy behavior.
+ if (!found_options_object) {
+ options.type =
+ args[0]->BooleanValue(isolate)
+ ? v8::Isolate::GarbageCollectionType::kMinorGarbageCollection
+ : v8::Isolate::GarbageCollectionType::kFullGarbageCollection;
+ }
+
+ return Just<GCOptions>(options);
+}
+
+void InvokeGC(v8::Isolate* isolate, v8::Isolate::GarbageCollectionType type,
+ v8::EmbedderHeapTracer::EmbedderStackState embedder_stack_state) {
+ Heap* heap = reinterpret_cast<Isolate*>(isolate)->heap();
+ switch (type) {
+ case v8::Isolate::GarbageCollectionType::kMinorGarbageCollection:
+ heap->CollectGarbage(i::NEW_SPACE, i::GarbageCollectionReason::kTesting,
+ kGCCallbackFlagForced);
+ break;
+ case v8::Isolate::GarbageCollectionType::kFullGarbageCollection:
+ heap->SetEmbedderStackStateForNextFinalizaton(embedder_stack_state);
+ heap->PreciseCollectAllGarbage(i::Heap::kNoGCFlags,
+ i::GarbageCollectionReason::kTesting,
+ kGCCallbackFlagForced);
+ break;
+ }
+}
+
+class AsyncGC final : public CancelableTask {
+ public:
+ ~AsyncGC() final = default;
+
+ AsyncGC(v8::Isolate* isolate, v8::Local<v8::Promise::Resolver> resolver,
+ v8::Isolate::GarbageCollectionType type)
+ : CancelableTask(reinterpret_cast<Isolate*>(isolate)),
+ isolate_(isolate),
+ ctx_(isolate, isolate->GetCurrentContext()),
+ resolver_(isolate, resolver),
+ type_(type) {}
+
+ void RunInternal() final {
+ v8::HandleScope scope(isolate_);
+ InvokeGC(isolate_, type_,
+ v8::EmbedderHeapTracer::EmbedderStackState::kEmpty);
+ auto resolver = v8::Local<v8::Promise::Resolver>::New(isolate_, resolver_);
+ auto ctx = Local<v8::Context>::New(isolate_, ctx_);
+ resolver->Resolve(ctx, v8::Undefined(isolate_)).ToChecked();
+ }
+
+ private:
+ v8::Isolate* isolate_;
+ v8::Persistent<v8::Context> ctx_;
+ v8::Persistent<v8::Promise::Resolver> resolver_;
+ v8::Isolate::GarbageCollectionType type_;
+
+ DISALLOW_COPY_AND_ASSIGN(AsyncGC);
+};
+
+} // namespace
v8::Local<v8::FunctionTemplate> GCExtension::GetNativeFunctionTemplate(
v8::Isolate* isolate, v8::Local<v8::String> str) {
return v8::FunctionTemplate::New(isolate, GCExtension::GC);
}
-
void GCExtension::GC(const v8::FunctionCallbackInfo<v8::Value>& args) {
- args.GetIsolate()->RequestGarbageCollectionForTesting(
- args[0]->BooleanValue(args.GetIsolate())
- ? v8::Isolate::kMinorGarbageCollection
- : v8::Isolate::kFullGarbageCollection);
+ v8::Isolate* isolate = args.GetIsolate();
+
+ // Immediate bailout if no arguments are provided.
+ if (args.Length() == 0) {
+ InvokeGC(isolate,
+ v8::Isolate::GarbageCollectionType::kFullGarbageCollection,
+ v8::EmbedderHeapTracer::EmbedderStackState::kUnknown);
+ return;
+ }
+
+ auto maybe_options = Parse(isolate, args);
+ if (maybe_options.IsNothing()) return;
+ GCOptions options = maybe_options.ToChecked();
+ switch (options.execution) {
+ case ExecutionType::kSync:
+ InvokeGC(isolate, options.type,
+ v8::EmbedderHeapTracer::EmbedderStackState::kUnknown);
+ break;
+ case ExecutionType::kAsync: {
+ v8::HandleScope scope(isolate);
+ auto resolver = v8::Promise::Resolver::New(isolate->GetCurrentContext())
+ .ToLocalChecked();
+ args.GetReturnValue().Set(resolver->GetPromise());
+ auto task_runner =
+ V8::GetCurrentPlatform()->GetForegroundTaskRunner(isolate);
+ CHECK(task_runner->NonNestableTasksEnabled());
+ task_runner->PostNonNestableTask(
+ std::make_unique<AsyncGC>(isolate, resolver, options.type));
+ } break;
+ }
}
} // namespace internal
diff --git a/deps/v8/src/extensions/gc-extension.h b/deps/v8/src/extensions/gc-extension.h
index 7b517637f7..061bb725a3 100644
--- a/deps/v8/src/extensions/gc-extension.h
+++ b/deps/v8/src/extensions/gc-extension.h
@@ -11,6 +11,21 @@
namespace v8 {
namespace internal {
+// Provides garbage collection on invoking |fun_name|(options), where
+// - options is a dictionary-like object. See supported properties below.
+// - calling with no parameter is equivalent to the options:
+//     {type: 'major', execution: 'sync'}.
+// - a truthy parameter that does not set any options is equivalent to:
+//     {type: 'minor', execution: 'sync'}.
+//
+// Supported options:
+// - type: 'major' or 'minor' for full GC and Scavenge, respectively.
+// - execution: 'sync' or 'async' for synchronous and asynchronous execution,
+//   respectively.
+// - Defaults to {type: 'major', execution: 'sync'}.
+//
+// When asynchronous execution is requested, returns a Promise that resolves
+// once the GC is done; otherwise returns undefined.
class GCExtension : public v8::Extension {
public:
explicit GCExtension(const char* fun_name)
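A hedged usage sketch for the contract documented above. RegisterExtension is V8's standard extension hook; wiring it up this way, and exposing the function as "gc", is an assumption for illustration rather than part of this patch:

#include <memory>

#include "include/v8.h"
#include "src/extensions/gc-extension.h"

void InstallGCForTesting() {
  // Registers a global gc() for scripts. With this patch it accepts an
  // options dictionary; from JavaScript the calls look like:
  //   gc();                                    // {type:'major', execution:'sync'}
  //   gc({type: 'minor'});                     // synchronous Scavenge
  //   gc({type: 'major', execution: 'async'}); // returns a Promise
  v8::RegisterExtension(std::make_unique<v8::internal::GCExtension>("gc"));
}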
diff --git a/deps/v8/src/flags/flag-definitions.h b/deps/v8/src/flags/flag-definitions.h
index c7c07e6dc6..b05c36ccdd 100644
--- a/deps/v8/src/flags/flag-definitions.h
+++ b/deps/v8/src/flags/flag-definitions.h
@@ -204,32 +204,33 @@ DEFINE_IMPLICATION(harmony_import_meta, harmony_dynamic_import)
// Features that are still work in progress (behind individual flags).
#define HARMONY_INPROGRESS_BASE(V) \
- V(harmony_private_methods, "harmony private methods in class literals") \
V(harmony_regexp_sequence, "RegExp Unicode sequence properties") \
V(harmony_weak_refs, "harmony weak references") \
- V(harmony_optional_chaining, "harmony optional chaining syntax") \
- V(harmony_nullish, "harmony nullish operator")
+ V(harmony_regexp_match_indices, "harmony regexp match indices") \
+ V(harmony_top_level_await, "harmony top level await")
#ifdef V8_INTL_SUPPORT
-#define HARMONY_INPROGRESS(V) \
- HARMONY_INPROGRESS_BASE(V) \
- V(harmony_intl_dateformat_quarter, "Add quarter option to DateTimeFormat")
+#define HARMONY_INPROGRESS(V) HARMONY_INPROGRESS_BASE(V)
#else
#define HARMONY_INPROGRESS(V) HARMONY_INPROGRESS_BASE(V)
#endif
// Features that are complete (but still behind --harmony/es-staging flag).
-#define HARMONY_STAGED_BASE(V)
+#define HARMONY_STAGED_BASE(V) \
+ V(harmony_optional_chaining, "harmony optional chaining syntax") \
+ V(harmony_nullish, "harmony nullish operator") \
+ V(harmony_private_methods, "harmony private methods in class literals")
#ifdef V8_INTL_SUPPORT
-#define HARMONY_STAGED(V) \
- HARMONY_STAGED_BASE(V) \
- V(harmony_intl_add_calendar_numbering_system, \
- "Add calendar and numberingSystem to DateTimeFormat") \
- V(harmony_intl_dateformat_day_period, \
- "Add dayPeriod option to DateTimeFormat") \
- V(harmony_intl_dateformat_fractional_second_digits, \
- "Add fractionalSecondDigits option to DateTimeFormat") \
+#define HARMONY_STAGED(V) \
+ HARMONY_STAGED_BASE(V) \
+ V(harmony_intl_add_calendar_numbering_system, \
+ "Add calendar and numberingSystem to DateTimeFormat") \
+ V(harmony_intl_dateformat_day_period, \
+ "Add dayPeriod option to DateTimeFormat") \
+ V(harmony_intl_dateformat_fractional_second_digits, \
+ "Add fractionalSecondDigits option to DateTimeFormat") \
+ V(harmony_intl_other_calendars, "DateTimeFormat other calendars") \
V(harmony_intl_segmenter, "Intl.Segmenter")
#else
#define HARMONY_STAGED(V) HARMONY_STAGED_BASE(V)
@@ -245,12 +246,7 @@ DEFINE_IMPLICATION(harmony_import_meta, harmony_dynamic_import)
V(harmony_promise_all_settled, "harmony Promise.allSettled")
#ifdef V8_INTL_SUPPORT
-#define HARMONY_SHIPPING(V) \
- HARMONY_SHIPPING_BASE(V) \
- V(harmony_intl_bigint, "BigInt.prototype.toLocaleString") \
- V(harmony_intl_date_format_range, "DateTimeFormat formatRange") \
- V(harmony_intl_datetime_style, "dateStyle timeStyle for DateTimeFormat") \
- V(harmony_intl_numberformat_unified, "Unified Intl.NumberFormat Features")
+#define HARMONY_SHIPPING(V) HARMONY_SHIPPING_BASE(V)
#else
#define HARMONY_SHIPPING(V) HARMONY_SHIPPING_BASE(V)
#endif
@@ -390,7 +386,7 @@ DEFINE_BOOL(enable_one_shot_optimization, true,
"only be executed once")
// Flag for sealed, frozen elements kind instead of dictionary elements kind
-DEFINE_BOOL_READONLY(enable_sealed_frozen_elements_kind, false,
+DEFINE_BOOL_READONLY(enable_sealed_frozen_elements_kind, true,
"Enable sealed, frozen elements kind")
// Flags for data representation optimizations
@@ -469,6 +465,12 @@ DEFINE_BOOL(trace_track_allocation_sites, false,
DEFINE_BOOL(trace_migration, false, "trace object migration")
DEFINE_BOOL(trace_generalization, false, "trace map generalization")
+// Flags for TurboProp.
+DEFINE_BOOL(turboprop, false,
+ "enable experimental turboprop mid-tier compiler.")
+DEFINE_NEG_IMPLICATION(turboprop, turbo_inlining)
+DEFINE_NEG_IMPLICATION(turboprop, inline_accessors)
+
// Flags for concurrent recompilation.
DEFINE_BOOL(concurrent_recompilation, true,
"optimizing hot functions asynchronously on a separate thread")
@@ -485,9 +487,12 @@ DEFINE_BOOL(concurrent_inlining, false,
DEFINE_IMPLICATION(future, concurrent_inlining)
DEFINE_BOOL(trace_heap_broker_verbose, false,
"trace the heap broker verbosely (all reports)")
+DEFINE_BOOL(trace_heap_broker_memory, false,
+ "trace the heap broker memory (refs analysis and zone numbers)")
DEFINE_BOOL(trace_heap_broker, false,
"trace the heap broker (reports on missing data only)")
DEFINE_IMPLICATION(trace_heap_broker_verbose, trace_heap_broker)
+DEFINE_IMPLICATION(trace_heap_broker_memory, trace_heap_broker)
// Flags for stress-testing the compiler.
DEFINE_INT(stress_runs, 0, "number of stress runs")
@@ -499,7 +504,7 @@ DEFINE_BOOL(print_deopt_stress, false, "print number of possible deopt points")
DEFINE_BOOL(opt, true, "use adaptive optimizations")
DEFINE_BOOL(turbo_sp_frame_access, false,
"use stack pointer-relative access to frame wherever possible")
-DEFINE_BOOL(turbo_control_flow_aware_allocation, false,
+DEFINE_BOOL(turbo_control_flow_aware_allocation, true,
"consider control flow while allocating registers")
DEFINE_STRING(turbo_filter, "*", "optimization filter for TurboFan compiler")
@@ -608,8 +613,6 @@ DEFINE_BOOL(turbo_store_elimination, true,
DEFINE_BOOL(trace_store_elimination, false, "trace store elimination")
DEFINE_BOOL(turbo_rewrite_far_jumps, true,
"rewrite far to near jumps (ia32,x64)")
-DEFINE_BOOL(experimental_inline_promise_constructor, true,
- "inline the Promise constructor in TurboFan")
DEFINE_BOOL(
stress_gc_during_compilation, false,
"simulate GC/compiler thread race related to https://crbug.com/v8/8520")
@@ -730,9 +733,6 @@ DEFINE_BOOL(wasm_math_intrinsics, true,
DEFINE_BOOL(wasm_shared_engine, true,
"shares one wasm engine between all isolates within a process")
DEFINE_IMPLICATION(future, wasm_shared_engine)
-DEFINE_BOOL(wasm_shared_code, true,
- "shares code underlying a wasm module when it is transferred")
-DEFINE_IMPLICATION(future, wasm_shared_code)
DEFINE_BOOL(wasm_trap_handler, true,
"use signal handlers to catch out of bounds memory access in wasm"
" (currently Linux x86_64 only)")
@@ -750,7 +750,7 @@ DEFINE_BOOL(wasm_lazy_compilation, false,
"enable lazy compilation for all wasm modules")
DEFINE_DEBUG_BOOL(trace_wasm_lazy_compilation, false,
"trace lazy compilation of wasm functions")
-DEFINE_BOOL(wasm_grow_shared_memory, false,
+DEFINE_BOOL(wasm_grow_shared_memory, true,
"allow growing shared WebAssembly memory objects")
DEFINE_BOOL(wasm_lazy_validation, false,
"enable lazy validation for lazily compiled wasm functions")
@@ -762,6 +762,11 @@ DEFINE_BOOL(wasm_code_gc, true, "enable garbage collection of wasm code")
DEFINE_BOOL(trace_wasm_code_gc, false, "trace garbage collection of wasm code")
DEFINE_BOOL(stress_wasm_code_gc, false,
"stress test garbage collection of wasm code")
+DEFINE_BOOL(wasm_far_jump_table, true,
+ "use multiple separate code spaces that might require far jumps "
+ "between them")
+DEFINE_INT(wasm_max_initial_code_space_reservation, 0,
+ "maximum size of the initial wasm code space reservation (in MB)")
// Profiler flags.
DEFINE_INT(frame_count, 1, "number of stack frames inspected by the profiler")
@@ -885,6 +890,7 @@ DEFINE_BOOL(trace_gc_object_stats, false,
DEFINE_BOOL(trace_zone_stats, false, "trace zone memory usage")
DEFINE_BOOL(track_retaining_path, false,
"enable support for tracking retaining path")
+DEFINE_DEBUG_BOOL(trace_backing_store, false, "trace backing store events")
DEFINE_BOOL(concurrent_array_buffer_freeing, true,
"free array buffer allocations on a background thread")
DEFINE_INT(gc_stats, 0, "Used by tracing internally to enable gc statistics")
@@ -961,6 +967,10 @@ DEFINE_BOOL(gc_experiment_less_compaction, false,
DEFINE_BOOL(disable_abortjs, false, "disables AbortJS runtime function")
+DEFINE_BOOL(randomize_all_allocations, false,
+ "randomize virtual memory reservations by ignoring any hints "
+ "passed when allocating pages")
+
DEFINE_BOOL(manual_evacuation_candidates_selection, false,
"Test mode only flag. It allows an unit test to select evacuation "
"candidates pages (requires --stress_compaction).")
@@ -1227,6 +1237,8 @@ DEFINE_BOOL(print_all_exceptions, false,
DEFINE_BOOL(
detailed_error_stack_trace, false,
"includes arguments for each function call in the error stack frames array")
+DEFINE_BOOL(adjust_os_scheduling_parameters, true,
+ "adjust OS specific scheduling params for the isolate")
// runtime.cc
DEFINE_BOOL(runtime_call_stats, false, "report runtime call counts and times")
@@ -1254,9 +1266,26 @@ DEFINE_UINT(serialization_chunk_size, 4096,
DEFINE_BOOL(regexp_optimization, true, "generate optimized regexp code")
DEFINE_BOOL(regexp_mode_modifiers, false, "enable inline flags in regexp.")
DEFINE_BOOL(regexp_interpret_all, false, "interpret all regexp code")
-DEFINE_BOOL(regexp_tier_up, false,
- "enable regexp interpreter and tier up to the compiler")
-DEFINE_NEG_IMPLICATION(regexp_interpret_all, regexp_tier_up)
+#ifdef V8_TARGET_BIG_ENDIAN
+#define REGEXP_PEEPHOLE_OPTIMIZATION_BOOL false
+#else
+#define REGEXP_PEEPHOLE_OPTIMIZATION_BOOL true
+#endif
+DEFINE_BOOL(regexp_tier_up, true,
+ "enable regexp interpreter and tier up to the compiler after the "
+ "number of executions set by the tier up ticks flag")
+DEFINE_INT(regexp_tier_up_ticks, 1,
+ "set the number of executions for the regexp interpreter before "
+ "tiering-up to the compiler")
+DEFINE_BOOL(regexp_peephole_optimization, REGEXP_PEEPHOLE_OPTIMIZATION_BOOL,
+ "enable peephole optimization for regexp bytecode")
+DEFINE_BOOL(trace_regexp_peephole_optimization, false,
+ "trace regexp bytecode peephole optimization")
+DEFINE_BOOL(trace_regexp_bytecodes, false, "trace regexp bytecode execution")
+DEFINE_BOOL(trace_regexp_assembler, false,
+ "trace regexp macro assembler calls.")
+DEFINE_BOOL(trace_regexp_parser, false, "trace regexp parsing")
+DEFINE_BOOL(trace_regexp_tier_up, false, "trace regexp tiering up execution")
// Testing flags test/cctest/test-{flags,api,serialization}.cc
DEFINE_BOOL(testing_bool_flag, true, "testing_bool_flag")
@@ -1391,11 +1420,6 @@ DEFINE_BOOL(trace_isolates, false, "trace isolate state changes")
// Regexp
DEFINE_BOOL(regexp_possessive_quantifier, false,
"enable possessive quantifier syntax for testing")
-DEFINE_BOOL(trace_regexp_bytecodes, false, "trace regexp bytecode execution")
-DEFINE_BOOL(trace_regexp_assembler, false,
- "trace regexp macro assembler calls.")
-DEFINE_BOOL(trace_regexp_parser, false, "trace regexp parsing")
-DEFINE_BOOL(trace_regexp_tier_up, false, "trace regexp tiering up execution")
// Debugger
DEFINE_BOOL(print_break_location, false, "print source location on debug break")
@@ -1498,6 +1522,11 @@ DEFINE_BOOL(interpreted_frames_native_stack, false,
"profilers).")
#endif
+// TODO(v8:9206, solanes): remove this when smi-corrupting reducer is fully on.
+DEFINE_BOOL_READONLY(turbo_decompression_elimination, true,
+ "enable the decompression elimination system when "
+ "pointer compression is enabled.")
+
//
// Disassembler only flags
//
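The --regexp-tier-up / --regexp-tier-up-ticks pair added above describes a counting tier-up policy: interpret the regexp bytecode for a fixed number of executions, then compile. A minimal sketch of that policy; the types and the threshold check are illustrative, not V8's implementation:

// Run through the bytecode interpreter until the regexp has executed
// ticks_threshold times, then compile it for subsequent executions.
struct RegExpState {
  int ticks = 0;
  bool has_native_code = false;
};

void ExecuteRegExp(RegExpState& re,
                   int ticks_threshold /* --regexp-tier-up-ticks, default 1 */) {
  if (!re.has_native_code && re.ticks++ >= ticks_threshold) {
    re.has_native_code = true;  // tier up: compile bytecode to machine code
  }
  // ... dispatch to the interpreter or the compiled code ...
}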
diff --git a/deps/v8/src/handles/global-handles.cc b/deps/v8/src/handles/global-handles.cc
index aed5b3fa83..416c65fabd 100644
--- a/deps/v8/src/handles/global-handles.cc
+++ b/deps/v8/src/handles/global-handles.cc
@@ -901,8 +901,13 @@ void GlobalHandles::IdentifyWeakUnmodifiedObjects(
DCHECK(node->is_root());
if (is_unmodified(node->location())) {
v8::Value* value = ToApi<v8::Value>(node->handle());
- node->set_root(tracer->IsRootForNonTracingGC(
- *reinterpret_cast<v8::TracedGlobal<v8::Value>*>(&value)));
+ if (node->has_destructor()) {
+ node->set_root(tracer->IsRootForNonTracingGC(
+ *reinterpret_cast<v8::TracedGlobal<v8::Value>*>(&value)));
+ } else {
+ node->set_root(tracer->IsRootForNonTracingGC(
+ *reinterpret_cast<v8::TracedReference<v8::Value>*>(&value)));
+ }
}
}
}
@@ -990,7 +995,7 @@ void GlobalHandles::IterateYoungWeakUnmodifiedRootsForPhantomHandles(
} else {
v8::Value* value = ToApi<v8::Value>(node->handle());
tracer->ResetHandleInNonTracingGC(
- *reinterpret_cast<v8::TracedGlobal<v8::Value>*>(&value));
+ *reinterpret_cast<v8::TracedReference<v8::Value>*>(&value));
DCHECK(!node->IsInUse());
}
@@ -1271,8 +1276,13 @@ void GlobalHandles::IterateTracedNodes(
for (TracedNode* node : *traced_nodes_) {
if (node->IsInUse()) {
v8::Value* value = ToApi<v8::Value>(node->handle());
- visitor->VisitTracedGlobalHandle(
- *reinterpret_cast<v8::TracedGlobal<v8::Value>*>(&value));
+ if (node->has_destructor()) {
+ visitor->VisitTracedGlobalHandle(
+ *reinterpret_cast<v8::TracedGlobal<v8::Value>*>(&value));
+ } else {
+ visitor->VisitTracedReference(
+ *reinterpret_cast<v8::TracedReference<v8::Value>*>(&value));
+ }
}
}
}
diff --git a/deps/v8/src/handles/global-handles.h b/deps/v8/src/handles/global-handles.h
index a07f7a772a..37b42a54b7 100644
--- a/deps/v8/src/handles/global-handles.h
+++ b/deps/v8/src/handles/global-handles.h
@@ -5,6 +5,7 @@
#ifndef V8_HANDLES_GLOBAL_HANDLES_H_
#define V8_HANDLES_GLOBAL_HANDLES_H_
+#include <memory>
#include <type_traits>
#include <utility>
#include <vector>
diff --git a/deps/v8/src/handles/handles.cc b/deps/v8/src/handles/handles.cc
index 7f320a271c..87c435061e 100644
--- a/deps/v8/src/handles/handles.cc
+++ b/deps/v8/src/handles/handles.cc
@@ -28,7 +28,7 @@ ASSERT_TRIVIALLY_COPYABLE(Handle<Object>);
ASSERT_TRIVIALLY_COPYABLE(MaybeHandle<Object>);
#ifdef DEBUG
-bool HandleBase::IsDereferenceAllowed(DereferenceCheckMode mode) const {
+bool HandleBase::IsDereferenceAllowed() const {
DCHECK_NOT_NULL(location_);
Object object(*location_);
if (object.IsSmi()) return true;
@@ -40,16 +40,7 @@ bool HandleBase::IsDereferenceAllowed(DereferenceCheckMode mode) const {
RootsTable::IsImmortalImmovable(root_index)) {
return true;
}
- if (!AllowHandleDereference::IsAllowed()) return false;
- if (mode == INCLUDE_DEFERRED_CHECK &&
- !AllowDeferredHandleDereference::IsAllowed()) {
- // Accessing cells, maps and internalized strings is safe.
- if (heap_object.IsCell()) return true;
- if (heap_object.IsMap()) return true;
- if (heap_object.IsInternalizedString()) return true;
- return !isolate->IsDeferredHandle(location_);
- }
- return true;
+ return AllowHandleDereference::IsAllowed();
}
#endif
@@ -188,13 +179,13 @@ DeferredHandleScope::DeferredHandleScope(Isolate* isolate)
}
DeferredHandleScope::~DeferredHandleScope() {
- impl_->isolate()->handle_scope_data()->level--;
DCHECK(handles_detached_);
- DCHECK(impl_->isolate()->handle_scope_data()->level == prev_level_);
+ impl_->isolate()->handle_scope_data()->level--;
+ DCHECK_EQ(impl_->isolate()->handle_scope_data()->level, prev_level_);
}
-DeferredHandles* DeferredHandleScope::Detach() {
- DeferredHandles* deferred = impl_->Detach(prev_limit_);
+std::unique_ptr<DeferredHandles> DeferredHandleScope::Detach() {
+ std::unique_ptr<DeferredHandles> deferred = impl_->Detach(prev_limit_);
HandleScopeData* data = impl_->isolate()->handle_scope_data();
data->next = prev_next_;
data->limit = prev_limit_;
diff --git a/deps/v8/src/handles/handles.h b/deps/v8/src/handles/handles.h
index 5f9b170d4b..2fea55d1a0 100644
--- a/deps/v8/src/handles/handles.h
+++ b/deps/v8/src/handles/handles.h
@@ -41,11 +41,8 @@ class HandleBase {
// Check if this handle refers to the exact same object as the other handle.
V8_INLINE bool is_identical_to(const HandleBase that) const {
- // Dereferencing deferred handles to check object equality is safe.
- SLOW_DCHECK((this->location_ == nullptr ||
- this->IsDereferenceAllowed(NO_DEFERRED_CHECK)) &&
- (that.location_ == nullptr ||
- that.IsDereferenceAllowed(NO_DEFERRED_CHECK)));
+ SLOW_DCHECK((this->location_ == nullptr || this->IsDereferenceAllowed()) &&
+ (that.location_ == nullptr || that.IsDereferenceAllowed()));
if (this->location_ == that.location_) return true;
if (this->location_ == nullptr || that.location_ == nullptr) return false;
return *this->location_ == *that.location_;
@@ -59,20 +56,16 @@ class HandleBase {
// Returns the address to where the raw pointer is stored.
V8_INLINE Address* location() const {
- SLOW_DCHECK(location_ == nullptr ||
- IsDereferenceAllowed(INCLUDE_DEFERRED_CHECK));
+ SLOW_DCHECK(location_ == nullptr || IsDereferenceAllowed());
return location_;
}
protected:
- enum DereferenceCheckMode { INCLUDE_DEFERRED_CHECK, NO_DEFERRED_CHECK };
#ifdef DEBUG
- bool V8_EXPORT_PRIVATE IsDereferenceAllowed(DereferenceCheckMode mode) const;
+ bool V8_EXPORT_PRIVATE IsDereferenceAllowed() const;
#else
V8_INLINE
- bool V8_EXPORT_PRIVATE IsDereferenceAllowed(DereferenceCheckMode mode) const {
- return true;
- }
+ bool V8_EXPORT_PRIVATE IsDereferenceAllowed() const { return true; }
#endif // DEBUG
// This uses type Address* as opposed to a pointer type to a typed
@@ -140,7 +133,7 @@ class Handle final : public HandleBase {
V8_INLINE T operator*() const {
    // unchecked_cast because we'd rather trust Handle<T> to contain a T than
// include all the respective -inl.h headers for SLOW_DCHECKs.
- SLOW_DCHECK(IsDereferenceAllowed(INCLUDE_DEFERRED_CHECK));
+ SLOW_DCHECK(IsDereferenceAllowed());
return T::unchecked_cast(Object(*location()));
}
@@ -318,7 +311,7 @@ class V8_EXPORT_PRIVATE DeferredHandleScope final {
// The DeferredHandles object returned stores the Handles created
// since the creation of this DeferredHandleScope. The Handles are
// alive as long as the DeferredHandles object is alive.
- DeferredHandles* Detach();
+ std::unique_ptr<DeferredHandles> Detach();
~DeferredHandleScope();
private:
diff --git a/deps/v8/src/heap/array-buffer-collector.cc b/deps/v8/src/heap/array-buffer-collector.cc
index b6d7df8191..672d5e68f0 100644
--- a/deps/v8/src/heap/array-buffer-collector.cc
+++ b/deps/v8/src/heap/array-buffer-collector.cc
@@ -14,33 +14,22 @@
namespace v8 {
namespace internal {
-namespace {
-
-void FreeAllocationsHelper(
- Heap* heap, const std::vector<JSArrayBuffer::Allocation>& allocations) {
- for (JSArrayBuffer::Allocation alloc : allocations) {
- JSArrayBuffer::FreeBackingStore(heap->isolate(), alloc);
- }
-}
-
-} // namespace
-
void ArrayBufferCollector::QueueOrFreeGarbageAllocations(
- std::vector<JSArrayBuffer::Allocation> allocations) {
+ std::vector<std::shared_ptr<BackingStore>> backing_stores) {
if (heap_->ShouldReduceMemory()) {
- FreeAllocationsHelper(heap_, allocations);
+ // Destruct the vector, which destructs the std::shared_ptrs, freeing
+ // the backing stores.
+ backing_stores.clear();
} else {
base::MutexGuard guard(&allocations_mutex_);
- allocations_.push_back(std::move(allocations));
+ allocations_.push_back(std::move(backing_stores));
}
}
void ArrayBufferCollector::PerformFreeAllocations() {
base::MutexGuard guard(&allocations_mutex_);
- for (const std::vector<JSArrayBuffer::Allocation>& allocations :
- allocations_) {
- FreeAllocationsHelper(heap_, allocations);
- }
+  // Destruct the vector, which destructs the vectors of std::shared_ptrs,
+ // freeing the backing stores if their refcount drops to zero.
allocations_.clear();
}
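The comments above lean on standard std::shared_ptr semantics: dropping the queue's references frees a backing store only when the last reference anywhere goes away. A self-contained illustration, where BackingStoreLike is a stand-in rather than V8's BackingStore:

#include <memory>
#include <vector>

struct BackingStoreLike {
  ~BackingStoreLike() { /* the actual memory would be released here */ }
};

int main() {
  std::vector<std::shared_ptr<BackingStoreLike>> queued;
  auto bs = std::make_shared<BackingStoreLike>();
  queued.push_back(bs);  // use_count == 2
  queued.clear();        // use_count == 1: nothing is freed yet
  bs.reset();            // use_count == 0: destructor runs, memory released
  return 0;
}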
diff --git a/deps/v8/src/heap/array-buffer-collector.h b/deps/v8/src/heap/array-buffer-collector.h
index 784092e936..2d060cc595 100644
--- a/deps/v8/src/heap/array-buffer-collector.h
+++ b/deps/v8/src/heap/array-buffer-collector.h
@@ -31,7 +31,7 @@ class ArrayBufferCollector {
//
// FreeAllocations() potentially triggers a background task for processing.
void QueueOrFreeGarbageAllocations(
- std::vector<JSArrayBuffer::Allocation> allocations);
+ std::vector<std::shared_ptr<BackingStore>> allocations);
// Calls FreeAllocations() on a background thread.
void FreeAllocations();
@@ -45,7 +45,7 @@ class ArrayBufferCollector {
Heap* const heap_;
base::Mutex allocations_mutex_;
- std::vector<std::vector<JSArrayBuffer::Allocation>> allocations_;
+ std::vector<std::vector<std::shared_ptr<BackingStore>>> allocations_;
};
} // namespace internal
diff --git a/deps/v8/src/heap/array-buffer-tracker-inl.h b/deps/v8/src/heap/array-buffer-tracker-inl.h
index 763300cffe..21106cee4b 100644
--- a/deps/v8/src/heap/array-buffer-tracker-inl.h
+++ b/deps/v8/src/heap/array-buffer-tracker-inl.h
@@ -12,16 +12,31 @@
#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/objects.h"
+#define TRACE_BS(...) \
+ do { \
+ if (FLAG_trace_backing_store) PrintF(__VA_ARGS__); \
+ } while (false)
+
namespace v8 {
namespace internal {
-void ArrayBufferTracker::RegisterNew(Heap* heap, JSArrayBuffer buffer) {
- if (buffer.backing_store() == nullptr) return;
+inline size_t PerIsolateAccountingLength(JSArrayBuffer buffer) {
+ // TODO(titzer): SharedArrayBuffers and shared WasmMemorys cause problems with
+ // accounting for per-isolate external memory. In particular, sharing the same
+ // array buffer or memory multiple times, which happens in stress tests, can
+ // cause overcounting, leading to GC thrashing. Fix with global accounting?
+ return buffer.is_shared() ? 0 : buffer.byte_length();
+}
+
+void ArrayBufferTracker::RegisterNew(
+ Heap* heap, JSArrayBuffer buffer,
+ std::shared_ptr<BackingStore> backing_store) {
+ if (!backing_store) return;
// ArrayBuffer tracking works only for small objects.
DCHECK(!heap->IsLargeObject(buffer));
+ DCHECK_EQ(backing_store->buffer_start(), buffer.backing_store());
- const size_t length = buffer.byte_length();
Page* page = Page::FromHeapObject(buffer);
{
base::MutexGuard guard(page->mutex());
@@ -31,44 +46,63 @@ void ArrayBufferTracker::RegisterNew(Heap* heap, JSArrayBuffer buffer) {
tracker = page->local_tracker();
}
DCHECK_NOT_NULL(tracker);
- tracker->Add(buffer, length);
+ TRACE_BS("ABT:reg bs=%p mem=%p (length=%zu) cnt=%ld\n",
+ backing_store.get(), backing_store->buffer_start(),
+ backing_store->byte_length(), backing_store.use_count());
+ tracker->Add(buffer, std::move(backing_store));
}
// TODO(wez): Remove backing-store from external memory accounting.
// We may go over the limit of externally allocated memory here. We call the
// api function to trigger a GC in this case.
+ const size_t length = PerIsolateAccountingLength(buffer);
reinterpret_cast<v8::Isolate*>(heap->isolate())
->AdjustAmountOfExternalAllocatedMemory(length);
}
-void ArrayBufferTracker::Unregister(Heap* heap, JSArrayBuffer buffer) {
- if (buffer.backing_store() == nullptr) return;
+std::shared_ptr<BackingStore> ArrayBufferTracker::Unregister(
+ Heap* heap, JSArrayBuffer buffer) {
+ std::shared_ptr<BackingStore> backing_store;
+ const size_t length = PerIsolateAccountingLength(buffer);
Page* page = Page::FromHeapObject(buffer);
- const size_t length = buffer.byte_length();
{
base::MutexGuard guard(page->mutex());
LocalArrayBufferTracker* tracker = page->local_tracker();
DCHECK_NOT_NULL(tracker);
- tracker->Remove(buffer, length);
+ backing_store = tracker->Remove(buffer);
}
// TODO(wez): Remove backing-store from external memory accounting.
heap->update_external_memory(-static_cast<intptr_t>(length));
+ return backing_store;
+}
+
+std::shared_ptr<BackingStore> ArrayBufferTracker::Lookup(Heap* heap,
+ JSArrayBuffer buffer) {
+ if (buffer.backing_store() == nullptr) return {};
+
+ Page* page = Page::FromHeapObject(buffer);
+ base::MutexGuard guard(page->mutex());
+ LocalArrayBufferTracker* tracker = page->local_tracker();
+ DCHECK_NOT_NULL(tracker);
+ return tracker->Lookup(buffer);
}
template <typename Callback>
void LocalArrayBufferTracker::Free(Callback should_free) {
size_t freed_memory = 0;
- Isolate* isolate = page_->heap()->isolate();
for (TrackingData::iterator it = array_buffers_.begin();
it != array_buffers_.end();) {
// Unchecked cast because the map might already be dead at this point.
JSArrayBuffer buffer = JSArrayBuffer::unchecked_cast(it->first);
- const size_t length = it->second.length;
+ const size_t length = PerIsolateAccountingLength(buffer);
if (should_free(buffer)) {
- JSArrayBuffer::FreeBackingStore(isolate, it->second);
+      // Destroy the shared pointer, perhaps freeing the backing store.
+ TRACE_BS("ABT:die bs=%p mem=%p (length=%zu) cnt=%ld\n",
+ it->second.get(), it->second->buffer_start(),
+ it->second->byte_length(), it->second.use_count());
it = array_buffers_.erase(it);
freed_memory += length;
} else {
@@ -97,35 +131,60 @@ void ArrayBufferTracker::FreeDead(Page* page, MarkingState* marking_state) {
}
}
-void LocalArrayBufferTracker::Add(JSArrayBuffer buffer, size_t length) {
+void LocalArrayBufferTracker::Add(JSArrayBuffer buffer,
+ std::shared_ptr<BackingStore> backing_store) {
+ auto length = PerIsolateAccountingLength(buffer);
page_->IncrementExternalBackingStoreBytes(
ExternalBackingStoreType::kArrayBuffer, length);
- AddInternal(buffer, length);
+ AddInternal(buffer, std::move(backing_store));
}
-void LocalArrayBufferTracker::AddInternal(JSArrayBuffer buffer, size_t length) {
- auto ret = array_buffers_.insert(
- {buffer,
- {buffer.backing_store(), length, buffer.backing_store(),
- buffer.is_wasm_memory()}});
+void LocalArrayBufferTracker::AddInternal(
+ JSArrayBuffer buffer, std::shared_ptr<BackingStore> backing_store) {
+ auto ret = array_buffers_.insert({buffer, std::move(backing_store)});
USE(ret);
// Check that we indeed inserted a new value and did not overwrite an existing
// one (which would be a bug).
DCHECK(ret.second);
}
-void LocalArrayBufferTracker::Remove(JSArrayBuffer buffer, size_t length) {
- page_->DecrementExternalBackingStoreBytes(
- ExternalBackingStoreType::kArrayBuffer, length);
-
+std::shared_ptr<BackingStore> LocalArrayBufferTracker::Remove(
+ JSArrayBuffer buffer) {
TrackingData::iterator it = array_buffers_.find(buffer);
+
// Check that we indeed find a key to remove.
DCHECK(it != array_buffers_.end());
- DCHECK_EQ(length, it->second.length);
+
+ // Steal the underlying shared pointer before erasing the entry.
+ std::shared_ptr<BackingStore> backing_store = std::move(it->second);
+
+ TRACE_BS("ABT:rm bs=%p mem=%p (length=%zu) cnt=%ld\n", backing_store.get(),
+ backing_store->buffer_start(), backing_store->byte_length(),
+ backing_store.use_count());
+
+ // Erase the entry.
array_buffers_.erase(it);
+
+ // Update accounting.
+ auto length = PerIsolateAccountingLength(buffer);
+ page_->DecrementExternalBackingStoreBytes(
+ ExternalBackingStoreType::kArrayBuffer, length);
+
+ return backing_store;
+}
+
+std::shared_ptr<BackingStore> LocalArrayBufferTracker::Lookup(
+ JSArrayBuffer buffer) {
+ TrackingData::iterator it = array_buffers_.find(buffer);
+ if (it != array_buffers_.end()) {
+ return it->second;
+ }
+ return {};
}
+#undef TRACE_BS
+
} // namespace internal
} // namespace v8
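A worked example of why PerIsolateAccountingLength above reports 0 for shared buffers: the same backing store can be registered in several isolates, and charging each isolate the full length would overstate external memory and, per the TODO, provoke GC thrashing. The numbers below are illustrative:

#include <cassert>
#include <cstddef>

size_t PerIsolateLength(bool is_shared, size_t byte_length) {
  return is_shared ? 0 : byte_length;  // shared buffers are not charged
}

int main() {
  constexpr size_t kMiB = 1024 * 1024;
  // One 64 MiB shared wasm memory registered in two isolates: naive
  // accounting would report 128 MiB of external memory; this rule reports 0.
  assert(2 * PerIsolateLength(true, 64 * kMiB) == 0);
  // A non-shared buffer lives in exactly one isolate and is charged in full.
  assert(PerIsolateLength(false, 64 * kMiB) == 64 * kMiB);
  return 0;
}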
diff --git a/deps/v8/src/heap/array-buffer-tracker.cc b/deps/v8/src/heap/array-buffer-tracker.cc
index fdca6e8df2..b284a65f66 100644
--- a/deps/v8/src/heap/array-buffer-tracker.cc
+++ b/deps/v8/src/heap/array-buffer-tracker.cc
@@ -11,6 +11,11 @@
#include "src/heap/heap.h"
#include "src/heap/spaces.h"
+#define TRACE_BS(...) \
+ do { \
+ if (FLAG_trace_backing_store) PrintF(__VA_ARGS__); \
+ } while (false)
+
namespace v8 {
namespace internal {
@@ -20,7 +25,7 @@ LocalArrayBufferTracker::~LocalArrayBufferTracker() {
template <typename Callback>
void LocalArrayBufferTracker::Process(Callback callback) {
- std::vector<JSArrayBuffer::Allocation> backing_stores_to_free;
+ std::vector<std::shared_ptr<BackingStore>> backing_stores_to_free;
TrackingData kept_array_buffers;
JSArrayBuffer new_buffer;
@@ -32,8 +37,9 @@ void LocalArrayBufferTracker::Process(Callback callback) {
DCHECK_EQ(page_, Page::FromHeapObject(old_buffer));
const CallbackResult result = callback(old_buffer, &new_buffer);
if (result == kKeepEntry) {
- kept_array_buffers.insert(*it);
+ kept_array_buffers.insert(std::move(*it));
} else if (result == kUpdateEntry) {
+ DCHECK_EQ(old_buffer.byte_length(), new_buffer.byte_length());
DCHECK(!new_buffer.is_null());
Page* target_page = Page::FromHeapObject(new_buffer);
{
@@ -44,22 +50,28 @@ void LocalArrayBufferTracker::Process(Callback callback) {
tracker = target_page->local_tracker();
}
DCHECK_NOT_NULL(tracker);
- const size_t length = it->second.length;
+ const size_t length = PerIsolateAccountingLength(old_buffer);
// We should decrement before adding to avoid potential overflows in
// the external memory counters.
- DCHECK_EQ(it->first.is_wasm_memory(), it->second.is_wasm_memory);
- tracker->AddInternal(new_buffer, length);
+ tracker->AddInternal(new_buffer, std::move(it->second));
MemoryChunk::MoveExternalBackingStoreBytes(
ExternalBackingStoreType::kArrayBuffer,
static_cast<MemoryChunk*>(page_),
static_cast<MemoryChunk*>(target_page), length);
}
} else if (result == kRemoveEntry) {
- freed_memory += it->second.length;
- // We pass backing_store() and stored length to the collector for freeing
- // the backing store. Wasm allocations will go through their own tracker
- // based on the backing store.
- backing_stores_to_free.push_back(it->second);
+ freed_memory += PerIsolateAccountingLength(old_buffer);
+ auto backing_store = std::move(it->second);
+ TRACE_BS("ABT:queue bs=%p mem=%p (length=%zu) cnt=%ld\n",
+ backing_store.get(), backing_store->buffer_start(),
+ backing_store->byte_length(), backing_store.use_count());
+ if (!backing_store->is_shared()) {
+ // Only retain non-shared backing stores. For shared backing stores,
+ // drop the shared_ptr right away, since this should be cheap,
+          // as it only updates a refcount, except for the last reference,
+          // which destructs the store; that case is rare.
+ backing_stores_to_free.push_back(backing_store);
+ }
} else {
UNREACHABLE();
}
@@ -147,3 +159,4 @@ void ArrayBufferTracker::TearDown(Heap* heap) {
} // namespace internal
} // namespace v8
+#undef TRACE_BS
diff --git a/deps/v8/src/heap/array-buffer-tracker.h b/deps/v8/src/heap/array-buffer-tracker.h
index b7950c2506..156c226406 100644
--- a/deps/v8/src/heap/array-buffer-tracker.h
+++ b/deps/v8/src/heap/array-buffer-tracker.h
@@ -9,6 +9,7 @@
#include "src/base/platform/mutex.h"
#include "src/common/globals.h"
+#include "src/objects/backing-store.h"
#include "src/objects/js-array-buffer.h"
#include "src/utils/allocation.h"
@@ -31,8 +32,12 @@ class ArrayBufferTracker : public AllStatic {
// Register/unregister a new JSArrayBuffer |buffer| for tracking. Guards all
// access to the tracker by taking the page lock for the corresponding page.
- inline static void RegisterNew(Heap* heap, JSArrayBuffer buffer);
- inline static void Unregister(Heap* heap, JSArrayBuffer buffer);
+ inline static void RegisterNew(Heap* heap, JSArrayBuffer buffer,
+ std::shared_ptr<BackingStore>);
+ inline static std::shared_ptr<BackingStore> Unregister(Heap* heap,
+ JSArrayBuffer buffer);
+ inline static std::shared_ptr<BackingStore> Lookup(Heap* heap,
+ JSArrayBuffer buffer);
// Identifies all backing store pointers for dead JSArrayBuffers in new space.
// Does not take any locks and can only be called during Scavenge.
@@ -70,8 +75,10 @@ class LocalArrayBufferTracker {
explicit LocalArrayBufferTracker(Page* page) : page_(page) {}
~LocalArrayBufferTracker();
- inline void Add(JSArrayBuffer buffer, size_t length);
- inline void Remove(JSArrayBuffer buffer, size_t length);
+ inline void Add(JSArrayBuffer buffer,
+ std::shared_ptr<BackingStore> backing_store);
+ inline std::shared_ptr<BackingStore> Remove(JSArrayBuffer buffer);
+ inline std::shared_ptr<BackingStore> Lookup(JSArrayBuffer buffer);
// Frees up array buffers.
//
@@ -105,17 +112,13 @@ class LocalArrayBufferTracker {
}
};
- // Keep track of the backing store and the corresponding length at time of
- // registering. The length is accessed from JavaScript and can be a
- // HeapNumber. The reason for tracking the length is that in the case of
- // length being a HeapNumber, the buffer and its length may be stored on
- // different memory pages, making it impossible to guarantee order of freeing.
using TrackingData =
- std::unordered_map<JSArrayBuffer, JSArrayBuffer::Allocation, Hasher>;
+ std::unordered_map<JSArrayBuffer, std::shared_ptr<BackingStore>, Hasher>;
// Internal version of add that does not update counters. Requires separate
// logic for updating external memory counters.
- inline void AddInternal(JSArrayBuffer buffer, size_t length);
+ inline void AddInternal(JSArrayBuffer buffer,
+ std::shared_ptr<BackingStore> backing_store);
Page* page_;
// The set contains raw heap pointers which are removed by the GC upon
diff --git a/deps/v8/src/heap/basic-memory-chunk.h b/deps/v8/src/heap/basic-memory-chunk.h
index 65fc072bd2..c0d4ade522 100644
--- a/deps/v8/src/heap/basic-memory-chunk.h
+++ b/deps/v8/src/heap/basic-memory-chunk.h
@@ -10,12 +10,19 @@
#include "src/base/atomic-utils.h"
#include "src/common/globals.h"
#include "src/heap/marking.h"
+#include "src/heap/slot-set.h"
namespace v8 {
namespace internal {
class MemoryChunk;
+enum RememberedSetType {
+ OLD_TO_NEW,
+ OLD_TO_OLD,
+ NUMBER_OF_REMEMBERED_SET_TYPES
+};
+
class BasicMemoryChunk {
public:
enum Flag {
@@ -170,6 +177,11 @@ class BasicMemoryChunk {
static const intptr_t kHeapOffset = kMarkBitmapOffset + kSystemPointerSize;
static const intptr_t kHeaderSentinelOffset =
kHeapOffset + kSystemPointerSize;
+ static const intptr_t kAreaStartOffset =
+ kHeaderSentinelOffset + kSystemPointerSize;
+ static const intptr_t kAreaEndOffset = kAreaStartOffset + kSystemPointerSize;
+ static const intptr_t kOldToNewSlotSetOffset =
+ kAreaEndOffset + kSystemPointerSize;
static const size_t kHeaderSize =
kSizeOffset + kSizetSize // size_t size
@@ -178,7 +190,8 @@ class BasicMemoryChunk {
+ kSystemPointerSize // Heap* heap_
+ kSystemPointerSize // Address header_sentinel_
+ kSystemPointerSize // Address area_start_
- + kSystemPointerSize; // Address area_end_
+ + kSystemPointerSize // Address area_end_
+ + kSystemPointerSize * NUMBER_OF_REMEMBERED_SET_TYPES; // SlotSet* array
protected:
// Overall size of the chunk, including the header and guards.
@@ -204,6 +217,11 @@ class BasicMemoryChunk {
Address area_start_;
Address area_end_;
+  // A single slot set for small pages (of size kPageSize) or an array of
+  // slot sets for large pages. In the latter case the number of entries in
+  // the array is ceil(size() / kPageSize).
+ SlotSet* slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES];
+
friend class BasicMemoryChunkValidator;
};
@@ -221,6 +239,8 @@ class BasicMemoryChunkValidator {
offsetof(BasicMemoryChunk, heap_));
STATIC_ASSERT(BasicMemoryChunk::kHeaderSentinelOffset ==
offsetof(BasicMemoryChunk, header_sentinel_));
+ STATIC_ASSERT(BasicMemoryChunk::kOldToNewSlotSetOffset ==
+ offsetof(BasicMemoryChunk, slot_set_));
};
} // namespace internal
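The slot_set_ comment above implies ceil division for large pages. A small compile-time sketch of that arithmetic; kPageSize here is an illustrative constant standing in for V8's page size:

#include <cstddef>

constexpr size_t kPageSize = 256 * 1024;  // illustrative, not V8's constant

constexpr size_t SlotSetBuckets(size_t chunk_size) {
  return (chunk_size + kPageSize - 1) / kPageSize;  // ceil(size / kPageSize)
}

static_assert(SlotSetBuckets(kPageSize) == 1, "small page: a single slot set");
static_assert(SlotSetBuckets(3 * kPageSize + 1) == 4, "large page rounds up");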
diff --git a/deps/v8/src/heap/concurrent-marking.cc b/deps/v8/src/heap/concurrent-marking.cc
index 12bb28f1c8..6a155c78ea 100644
--- a/deps/v8/src/heap/concurrent-marking.cc
+++ b/deps/v8/src/heap/concurrent-marking.cc
@@ -8,7 +8,6 @@
#include <unordered_map>
#include "include/v8config.h"
-#include "src/base/template-utils.h"
#include "src/execution/isolate.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-inl.h"
@@ -225,6 +224,9 @@ class ConcurrentMarkingVisitor final
}
if (weak_ref.target().IsHeapObject()) {
HeapObject target = HeapObject::cast(weak_ref.target());
+#ifdef THREAD_SANITIZER
+ MemoryChunk::FromHeapObject(target)->SynchronizedHeapLoad();
+#endif
if (marking_state_.IsBlackOrGrey(target)) {
// Record the slot inside the JSWeakRef, since the
// VisitJSObjectSubclass above didn't visit it.
@@ -247,6 +249,9 @@ class ConcurrentMarkingVisitor final
WeakCell::BodyDescriptor::IterateBody(map, weak_cell, size, this);
if (weak_cell.target().IsHeapObject()) {
HeapObject target = HeapObject::cast(weak_cell.target());
+#ifdef THREAD_SANITIZER
+ MemoryChunk::FromHeapObject(target)->SynchronizedHeapLoad();
+#endif
if (marking_state_.IsBlackOrGrey(target)) {
// Record the slot inside the WeakCell, since the IterateBody above
// didn't visit it.
@@ -478,6 +483,9 @@ class ConcurrentMarkingVisitor final
ObjectSlot key_slot =
table.RawFieldOfElementAt(EphemeronHashTable::EntryToIndex(i));
HeapObject key = HeapObject::cast(table.KeyAt(i));
+#ifdef THREAD_SANITIZER
+ MemoryChunk::FromHeapObject(key)->SynchronizedHeapLoad();
+#endif
MarkCompactCollector::RecordSlot(table, key_slot, key);
ObjectSlot value_slot =
@@ -491,6 +499,9 @@ class ConcurrentMarkingVisitor final
if (value_obj.IsHeapObject()) {
HeapObject value = HeapObject::cast(value_obj);
+#ifdef THREAD_SANITIZER
+ MemoryChunk::FromHeapObject(value)->SynchronizedHeapLoad();
+#endif
MarkCompactCollector::RecordSlot(table, value_slot, value);
// Revisit ephemerons with both key and value unreachable at end
@@ -864,8 +875,7 @@ void ConcurrentMarking::ScheduleTasks() {
DCHECK(FLAG_parallel_marking || FLAG_concurrent_marking);
DCHECK(!heap_->IsTearingDown());
base::MutexGuard guard(&pending_lock_);
- DCHECK_EQ(0, pending_task_count_);
- if (task_count_ == 0) {
+ if (total_task_count_ == 0) {
static const int num_cores =
V8::GetCurrentPlatform()->NumberOfWorkerThreads() + 1;
#if defined(V8_OS_MACOSX)
@@ -873,15 +883,18 @@ void ConcurrentMarking::ScheduleTasks() {
// marking on competing hyper-threads (regresses Octane/Splay). As such,
// only use num_cores/2, leaving one of those for the main thread.
// TODO(ulan): Use all cores on Mac 10.12+.
- task_count_ = Max(1, Min(kMaxTasks, (num_cores / 2) - 1));
+ total_task_count_ = Max(1, Min(kMaxTasks, (num_cores / 2) - 1));
#else // defined(OS_MACOSX)
// On other platforms use all logical cores, leaving one for the main
// thread.
- task_count_ = Max(1, Min(kMaxTasks, num_cores - 1));
+ total_task_count_ = Max(1, Min(kMaxTasks, num_cores - 1));
#endif // defined(OS_MACOSX)
+ DCHECK_LE(total_task_count_, kMaxTasks);
+ // One task is for the main thread.
+ STATIC_ASSERT(kMaxTasks + 1 <= MarkingWorklist::kMaxNumTasks);
}
// Task id 0 is for the main thread.
- for (int i = 1; i <= task_count_; i++) {
+ for (int i = 1; i <= total_task_count_; i++) {
if (!is_pending_[i]) {
if (FLAG_trace_concurrent_marking) {
heap_->isolate()->PrintWithTimestamp(
@@ -894,12 +907,12 @@ void ConcurrentMarking::ScheduleTasks() {
is_pending_[i] = true;
++pending_task_count_;
auto task =
- base::make_unique<Task>(heap_->isolate(), this, &task_state_[i], i);
+ std::make_unique<Task>(heap_->isolate(), this, &task_state_[i], i);
cancelable_id_[i] = task->id();
V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task));
}
}
- DCHECK_EQ(task_count_, pending_task_count_);
+ DCHECK_EQ(total_task_count_, pending_task_count_);
}
void ConcurrentMarking::RescheduleTasksIfNeeded() {
@@ -907,11 +920,15 @@ void ConcurrentMarking::RescheduleTasksIfNeeded() {
if (heap_->IsTearingDown()) return;
{
base::MutexGuard guard(&pending_lock_);
- if (pending_task_count_ > 0) return;
+ // The total task count is initialized in ScheduleTasks from
+ // NumberOfWorkerThreads of the platform.
+ if (total_task_count_ > 0 && pending_task_count_ == total_task_count_) {
+ return;
+ }
}
if (!shared_->IsGlobalPoolEmpty() ||
- !weak_objects_->current_ephemerons.IsEmpty() ||
- !weak_objects_->discovered_ephemerons.IsEmpty()) {
+ !weak_objects_->current_ephemerons.IsGlobalPoolEmpty() ||
+ !weak_objects_->discovered_ephemerons.IsGlobalPoolEmpty()) {
ScheduleTasks();
}
}
@@ -925,7 +942,7 @@ bool ConcurrentMarking::Stop(StopRequest stop_request) {
if (stop_request != StopRequest::COMPLETE_TASKS_FOR_TESTING) {
CancelableTaskManager* task_manager =
heap_->isolate()->cancelable_task_manager();
- for (int i = 1; i <= task_count_; i++) {
+ for (int i = 1; i <= total_task_count_; i++) {
if (is_pending_[i]) {
if (task_manager->TryAbort(cancelable_id_[i]) ==
TryAbortResult::kTaskAborted) {
@@ -940,7 +957,7 @@ bool ConcurrentMarking::Stop(StopRequest stop_request) {
while (pending_task_count_ > 0) {
pending_condition_.Wait(&pending_lock_);
}
- for (int i = 1; i <= task_count_; i++) {
+ for (int i = 1; i <= total_task_count_; i++) {
DCHECK(!is_pending_[i]);
}
return true;
@@ -956,7 +973,7 @@ bool ConcurrentMarking::IsStopped() {
void ConcurrentMarking::FlushMemoryChunkData(
MajorNonAtomicMarkingState* marking_state) {
DCHECK_EQ(pending_task_count_, 0);
- for (int i = 1; i <= task_count_; i++) {
+ for (int i = 1; i <= total_task_count_; i++) {
MemoryChunkDataMap& memory_chunk_data = task_state_[i].memory_chunk_data;
for (auto& pair : memory_chunk_data) {
// ClearLiveness sets the live bytes to zero.
@@ -978,7 +995,7 @@ void ConcurrentMarking::FlushMemoryChunkData(
}
void ConcurrentMarking::ClearMemoryChunkData(MemoryChunk* chunk) {
- for (int i = 1; i <= task_count_; i++) {
+ for (int i = 1; i <= total_task_count_; i++) {
auto it = task_state_[i].memory_chunk_data.find(chunk);
if (it != task_state_[i].memory_chunk_data.end()) {
it->second.live_bytes = 0;
@@ -989,7 +1006,7 @@ void ConcurrentMarking::ClearMemoryChunkData(MemoryChunk* chunk) {
size_t ConcurrentMarking::TotalMarkedBytes() {
size_t result = 0;
- for (int i = 1; i <= task_count_; i++) {
+ for (int i = 1; i <= total_task_count_; i++) {
result +=
base::AsAtomicWord::Relaxed_Load<size_t>(&task_state_[i].marked_bytes);
}
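A worked example of the total_task_count_ formulas in ScheduleTasks above; the kMaxTasks value is an assumption for illustration:

#include <algorithm>

constexpr int kMaxTasks = 7;  // illustrative compile-time cap

int TotalMarkingTasks(int num_cores /* NumberOfWorkerThreads() + 1 */,
                      bool is_macosx) {
  // macOS: use half the cores to dodge competing hyper-threads, minus one
  // for the main thread; elsewhere: all logical cores minus one.
  return is_macosx ? std::max(1, std::min(kMaxTasks, num_cores / 2 - 1))
                   : std::max(1, std::min(kMaxTasks, num_cores - 1));
}
// An 8-core macOS host gets 8/2 - 1 = 3 marking tasks; the same host
// elsewhere gets min(7, 8 - 1) = 7. Task id 0 stays with the main thread.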
diff --git a/deps/v8/src/heap/concurrent-marking.h b/deps/v8/src/heap/concurrent-marking.h
index be2fc03d46..c08a9c47b0 100644
--- a/deps/v8/src/heap/concurrent-marking.h
+++ b/deps/v8/src/heap/concurrent-marking.h
@@ -5,6 +5,8 @@
#ifndef V8_HEAP_CONCURRENT_MARKING_H_
#define V8_HEAP_CONCURRENT_MARKING_H_
+#include <memory>
+
#include "include/v8-platform.h"
#include "src/base/atomic-utils.h"
#include "src/base/platform/condition-variable.h"
@@ -86,8 +88,6 @@ class V8_EXPORT_PRIVATE ConcurrentMarking {
// scavenge and is going to be re-used.
void ClearMemoryChunkData(MemoryChunk* chunk);
- int TaskCount() { return task_count_; }
-
// Checks if all threads are stopped.
bool IsStopped();
@@ -124,7 +124,7 @@ class V8_EXPORT_PRIVATE ConcurrentMarking {
int pending_task_count_ = 0;
bool is_pending_[kMaxTasks + 1] = {};
CancelableTaskManager::Id cancelable_id_[kMaxTasks + 1] = {};
- int task_count_ = 0;
+ int total_task_count_ = 0;
};
} // namespace internal
diff --git a/deps/v8/src/heap/embedder-tracing.h b/deps/v8/src/heap/embedder-tracing.h
index 7c67ccfab7..a150f2c26a 100644
--- a/deps/v8/src/heap/embedder-tracing.h
+++ b/deps/v8/src/heap/embedder-tracing.h
@@ -57,7 +57,12 @@ class V8_EXPORT_PRIVATE LocalEmbedderHeapTracer final {
bool IsRootForNonTracingGC(const v8::TracedGlobal<v8::Value>& handle) {
return !InUse() || remote_tracer_->IsRootForNonTracingGC(handle);
}
- void ResetHandleInNonTracingGC(const v8::TracedGlobal<v8::Value>& handle) {
+
+ bool IsRootForNonTracingGC(const v8::TracedReference<v8::Value>& handle) {
+ return !InUse() || remote_tracer_->IsRootForNonTracingGC(handle);
+ }
+
+ void ResetHandleInNonTracingGC(const v8::TracedReference<v8::Value>& handle) {
    // Resetting is only called when IsRootForNonTracingGC returns false,
    // which can only happen when the EmbedderHeapTracer is set on API level.
DCHECK(InUse());
diff --git a/deps/v8/src/heap/factory-inl.h b/deps/v8/src/heap/factory-inl.h
index bcad5d2714..f0f61bbb2c 100644
--- a/deps/v8/src/heap/factory-inl.h
+++ b/deps/v8/src/heap/factory-inl.h
@@ -43,44 +43,41 @@ Handle<String> Factory::NewSubString(Handle<String> str, int begin, int end) {
return NewProperSubString(str, begin, end);
}
-Handle<Object> Factory::NewNumberFromSize(size_t value,
- AllocationType allocation) {
+Handle<Object> Factory::NewNumberFromSize(size_t value) {
// We can't use Smi::IsValid() here because that operates on a signed
// intptr_t, and casting from size_t could create a bogus sign bit.
if (value <= static_cast<size_t>(Smi::kMaxValue)) {
return Handle<Object>(Smi::FromIntptr(static_cast<intptr_t>(value)),
isolate());
}
- return NewNumber(static_cast<double>(value), allocation);
+ return NewNumber(static_cast<double>(value));
}
-Handle<Object> Factory::NewNumberFromInt64(int64_t value,
- AllocationType allocation) {
+Handle<Object> Factory::NewNumberFromInt64(int64_t value) {
if (value <= std::numeric_limits<int32_t>::max() &&
value >= std::numeric_limits<int32_t>::min() &&
Smi::IsValid(static_cast<int32_t>(value))) {
return Handle<Object>(Smi::FromInt(static_cast<int32_t>(value)), isolate());
}
- return NewNumber(static_cast<double>(value), allocation);
+ return NewNumber(static_cast<double>(value));
}
-Handle<HeapNumber> Factory::NewHeapNumber(double value,
- AllocationType allocation) {
- Handle<HeapNumber> heap_number = NewHeapNumber(allocation);
+template <AllocationType allocation>
+Handle<HeapNumber> Factory::NewHeapNumber(double value) {
+ Handle<HeapNumber> heap_number = NewHeapNumber<allocation>();
heap_number->set_value(value);
return heap_number;
}
-Handle<HeapNumber> Factory::NewHeapNumberFromBits(uint64_t bits,
- AllocationType allocation) {
- Handle<HeapNumber> heap_number = NewHeapNumber(allocation);
+template <AllocationType allocation>
+Handle<HeapNumber> Factory::NewHeapNumberFromBits(uint64_t bits) {
+ Handle<HeapNumber> heap_number = NewHeapNumber<allocation>();
heap_number->set_value_as_bits(bits);
return heap_number;
}
-Handle<HeapNumber> Factory::NewHeapNumberWithHoleNaN(
- AllocationType allocation) {
- return NewHeapNumberFromBits(kHoleNanInt64, allocation);
+Handle<HeapNumber> Factory::NewHeapNumberWithHoleNaN() {
+ return NewHeapNumberFromBits(kHoleNanInt64);
}
Handle<JSArray> Factory::NewJSArrayWithElements(Handle<FixedArrayBase> elements,
diff --git a/deps/v8/src/heap/factory.cc b/deps/v8/src/heap/factory.cc
index 9bf46be6e8..721682f00f 100644
--- a/deps/v8/src/heap/factory.cc
+++ b/deps/v8/src/heap/factory.cc
@@ -11,6 +11,7 @@
#include "src/builtins/constants-table-builder.h"
#include "src/codegen/compiler.h"
#include "src/execution/isolate-inl.h"
+#include "src/execution/protectors-inl.h"
#include "src/heap/heap-inl.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/mark-compact-inl.h"
@@ -117,11 +118,11 @@ MaybeHandle<Code> Factory::CodeBuilder::BuildInternal(
CodePageCollectionMemoryModificationScope code_allocation(heap);
HeapObject result;
if (retry_allocation_or_fail) {
- result =
- heap->AllocateRawWithRetryOrFail(object_size, AllocationType::kCode);
+ result = heap->AllocateRawWith<Heap::kRetryOrFail>(object_size,
+ AllocationType::kCode);
} else {
- result =
- heap->AllocateRawWithLightRetry(object_size, AllocationType::kCode);
+ result = heap->AllocateRawWith<Heap::kLightRetry>(object_size,
+ AllocationType::kCode);
// Return an empty handle if we cannot allocate the code object.
if (result.is_null()) return MaybeHandle<Code>();
}
@@ -209,8 +210,8 @@ HeapObject Factory::AllocateRawWithImmortalMap(int size,
AllocationType allocation,
Map map,
AllocationAlignment alignment) {
- HeapObject result = isolate()->heap()->AllocateRawWithRetryOrFail(
- size, allocation, alignment);
+ HeapObject result = isolate()->heap()->AllocateRawWith<Heap::kRetryOrFail>(
+ size, allocation, AllocationOrigin::kRuntime, alignment);
result.set_map_after_allocation(map, SKIP_WRITE_BARRIER);
return result;
}
@@ -222,7 +223,7 @@ HeapObject Factory::AllocateRawWithAllocationSite(
int size = map->instance_size();
if (!allocation_site.is_null()) size += AllocationMemento::kSize;
HeapObject result =
- isolate()->heap()->AllocateRawWithRetryOrFail(size, allocation);
+ isolate()->heap()->AllocateRawWith<Heap::kRetryOrFail>(size, allocation);
WriteBarrierMode write_barrier_mode = allocation == AllocationType::kYoung
? SKIP_WRITE_BARRIER
: UPDATE_WRITE_BARRIER;
@@ -247,7 +248,7 @@ void Factory::InitializeAllocationMemento(AllocationMemento memento,
HeapObject Factory::AllocateRawArray(int size, AllocationType allocation) {
HeapObject result =
- isolate()->heap()->AllocateRawWithRetryOrFail(size, allocation);
+ isolate()->heap()->AllocateRawWith<Heap::kRetryOrFail>(size, allocation);
if (size > kMaxRegularHeapObjectSize && FLAG_use_marking_progress_bar) {
MemoryChunk* chunk = MemoryChunk::FromHeapObject(result);
chunk->SetFlag<AccessMode::ATOMIC>(MemoryChunk::HAS_PROGRESS_BAR);
@@ -275,7 +276,7 @@ HeapObject Factory::New(Handle<Map> map, AllocationType allocation) {
DCHECK(map->instance_type() != MAP_TYPE);
int size = map->instance_size();
HeapObject result =
- isolate()->heap()->AllocateRawWithRetryOrFail(size, allocation);
+ isolate()->heap()->AllocateRawWith<Heap::kRetryOrFail>(size, allocation);
// New space objects are allocated white.
WriteBarrierMode write_barrier_mode = allocation == AllocationType::kYoung
? SKIP_WRITE_BARRIER
@@ -289,8 +290,8 @@ Handle<HeapObject> Factory::NewFillerObject(int size, bool double_align,
AllocationOrigin origin) {
AllocationAlignment alignment = double_align ? kDoubleAligned : kWordAligned;
Heap* heap = isolate()->heap();
- HeapObject result =
- heap->AllocateRawWithRetryOrFail(size, allocation, origin, alignment);
+ HeapObject result = heap->AllocateRawWith<Heap::kRetryOrFail>(
+ size, allocation, origin, alignment);
heap->CreateFillerObjectAt(result.address(), size, ClearRecordedSlots::kNo);
return Handle<HeapObject>(result, isolate());
}
@@ -323,17 +324,6 @@ Handle<Tuple2> Factory::NewTuple2(Handle<Object> value1, Handle<Object> value2,
return result;
}
-Handle<Tuple3> Factory::NewTuple3(Handle<Object> value1, Handle<Object> value2,
- Handle<Object> value3,
- AllocationType allocation) {
- Handle<Tuple3> result =
- Handle<Tuple3>::cast(NewStruct(TUPLE3_TYPE, allocation));
- result->set_value1(*value1);
- result->set_value2(*value2);
- result->set_value3(*value3);
- return result;
-}
-
Handle<ArrayBoilerplateDescription> Factory::NewArrayBoilerplateDescription(
ElementsKind elements_kind, Handle<FixedArrayBase> constant_values) {
Handle<ArrayBoilerplateDescription> result =
@@ -358,24 +348,23 @@ Handle<TemplateObjectDescription> Factory::NewTemplateObjectDescription(
Handle<Oddball> Factory::NewOddball(Handle<Map> map, const char* to_string,
Handle<Object> to_number,
- const char* type_of, byte kind,
- AllocationType allocation) {
- Handle<Oddball> oddball(Oddball::cast(New(map, allocation)), isolate());
+ const char* type_of, byte kind) {
+ Handle<Oddball> oddball(Oddball::cast(New(map, AllocationType::kReadOnly)),
+ isolate());
Oddball::Initialize(isolate(), oddball, to_string, to_number, type_of, kind);
return oddball;
}
-Handle<Oddball> Factory::NewSelfReferenceMarker(AllocationType allocation) {
+Handle<Oddball> Factory::NewSelfReferenceMarker() {
return NewOddball(self_reference_marker_map(), "self_reference_marker",
handle(Smi::FromInt(-1), isolate()), "undefined",
- Oddball::kSelfReferenceMarker, allocation);
+ Oddball::kSelfReferenceMarker);
}
-Handle<PropertyArray> Factory::NewPropertyArray(int length,
- AllocationType allocation) {
+Handle<PropertyArray> Factory::NewPropertyArray(int length) {
DCHECK_LE(0, length);
if (length == 0) return empty_property_array();
- HeapObject result = AllocateRawFixedArray(length, allocation);
+ HeapObject result = AllocateRawFixedArray(length, AllocationType::kYoung);
result.set_map_after_allocation(*property_array_map(), SKIP_WRITE_BARRIER);
Handle<PropertyArray> array(PropertyArray::cast(result), isolate());
array->initialize_length(length);
@@ -419,7 +408,7 @@ Handle<T> Factory::NewWeakFixedArrayWithMap(RootIndex map_root_index,
DCHECK_LT(0, length);
HeapObject result =
- AllocateRawArray(WeakFixedArray::SizeFor(length), allocation);
+ AllocateRawArray(WeakFixedArray::SizeFor(length), AllocationType::kOld);
Map map = Map::cast(isolate()->root(map_root_index));
result.set_map_after_allocation(map, SKIP_WRITE_BARRIER);
@@ -485,8 +474,7 @@ Handle<FixedArray> Factory::NewFixedArrayWithHoles(int length,
*the_hole_value(), allocation);
}
-Handle<FixedArray> Factory::NewUninitializedFixedArray(
- int length, AllocationType allocation) {
+Handle<FixedArray> Factory::NewUninitializedFixedArray(int length) {
DCHECK_LE(0, length);
if (length == 0) return empty_fixed_array();
@@ -494,30 +482,30 @@ Handle<FixedArray> Factory::NewUninitializedFixedArray(
// array. After getting canary/performance coverage, either remove the
// function or revert to returning uninitialized array.
return NewFixedArrayWithFiller(RootIndex::kFixedArrayMap, length,
- *undefined_value(), allocation);
+ *undefined_value(), AllocationType::kYoung);
}
Handle<ClosureFeedbackCellArray> Factory::NewClosureFeedbackCellArray(
- int length, AllocationType allocation) {
+ int length) {
if (length == 0) return empty_closure_feedback_cell_array();
Handle<ClosureFeedbackCellArray> feedback_cell_array =
NewFixedArrayWithMap<ClosureFeedbackCellArray>(
- RootIndex::kClosureFeedbackCellArrayMap, length, allocation);
+ RootIndex::kClosureFeedbackCellArrayMap, length,
+ AllocationType::kYoung);
return feedback_cell_array;
}
Handle<FeedbackVector> Factory::NewFeedbackVector(
Handle<SharedFunctionInfo> shared,
- Handle<ClosureFeedbackCellArray> closure_feedback_cell_array,
- AllocationType allocation) {
+ Handle<ClosureFeedbackCellArray> closure_feedback_cell_array) {
int length = shared->feedback_metadata().slot_count();
DCHECK_LE(0, length);
int size = FeedbackVector::SizeFor(length);
- HeapObject result =
- AllocateRawWithImmortalMap(size, allocation, *feedback_vector_map());
+ HeapObject result = AllocateRawWithImmortalMap(size, AllocationType::kOld,
+ *feedback_vector_map());
Handle<FeedbackVector> vector(FeedbackVector::cast(result), isolate());
vector->set_shared_function_info(*shared);
vector->set_optimized_code_weak_or_smi(MaybeObject::FromSmi(Smi::FromEnum(
@@ -534,13 +522,12 @@ Handle<FeedbackVector> Factory::NewFeedbackVector(
return vector;
}
-Handle<EmbedderDataArray> Factory::NewEmbedderDataArray(
- int length, AllocationType allocation) {
+Handle<EmbedderDataArray> Factory::NewEmbedderDataArray(int length) {
DCHECK_LE(0, length);
int size = EmbedderDataArray::SizeFor(length);
- HeapObject result =
- AllocateRawWithImmortalMap(size, allocation, *embedder_data_array_map());
+ HeapObject result = AllocateRawWithImmortalMap(size, AllocationType::kYoung,
+ *embedder_data_array_map());
Handle<EmbedderDataArray> array(EmbedderDataArray::cast(result), isolate());
array->set_length(length);
@@ -589,25 +576,23 @@ Handle<ObjectBoilerplateDescription> Factory::NewObjectBoilerplateDescription(
return description;
}
-Handle<FixedArrayBase> Factory::NewFixedDoubleArray(int length,
- AllocationType allocation) {
+Handle<FixedArrayBase> Factory::NewFixedDoubleArray(int length) {
if (length == 0) return empty_fixed_array();
if (length < 0 || length > FixedDoubleArray::kMaxLength) {
isolate()->heap()->FatalProcessOutOfMemory("invalid array length");
}
int size = FixedDoubleArray::SizeFor(length);
Map map = *fixed_double_array_map();
- HeapObject result =
- AllocateRawWithImmortalMap(size, allocation, map, kDoubleAligned);
+ HeapObject result = AllocateRawWithImmortalMap(size, AllocationType::kYoung,
+ map, kDoubleAligned);
Handle<FixedDoubleArray> array(FixedDoubleArray::cast(result), isolate());
array->set_length(length);
return array;
}
-Handle<FixedArrayBase> Factory::NewFixedDoubleArrayWithHoles(
- int length, AllocationType allocation) {
+Handle<FixedArrayBase> Factory::NewFixedDoubleArrayWithHoles(int length) {
DCHECK_LE(0, length);
- Handle<FixedArrayBase> array = NewFixedDoubleArray(length, allocation);
+ Handle<FixedArrayBase> array = NewFixedDoubleArray(length);
if (length > 0) {
Handle<FixedDoubleArray>::cast(array)->FillWithHoles(0, length);
}
@@ -633,11 +618,10 @@ Handle<FeedbackMetadata> Factory::NewFeedbackMetadata(
return data;
}
-Handle<FrameArray> Factory::NewFrameArray(int number_of_frames,
- AllocationType allocation) {
+Handle<FrameArray> Factory::NewFrameArray(int number_of_frames) {
DCHECK_LE(0, number_of_frames);
- Handle<FixedArray> result = NewFixedArrayWithHoles(
- FrameArray::LengthFor(number_of_frames), allocation);
+ Handle<FixedArray> result =
+ NewFixedArrayWithHoles(FrameArray::LengthFor(number_of_frames));
result->set(FrameArray::kFrameCountIndex, Smi::kZero);
return Handle<FrameArray>::cast(result);
}
@@ -1438,7 +1422,7 @@ Handle<Context> Factory::NewContext(RootIndex map_root_index, int size,
Map map = Map::cast(isolate()->root(map_root_index));
HeapObject result = AllocateRawWithImmortalMap(size, allocation, map);
Handle<Context> context(Context::cast(result), isolate());
- context->set_length(variadic_part_length);
+ context->initialize_length_and_extension_bit(variadic_part_length);
DCHECK_EQ(context->SizeFromMap(map), size);
if (size > Context::kTodoHeaderSize) {
ObjectSlot start = context->RawField(Context::kTodoHeaderSize);
@@ -1461,6 +1445,7 @@ Handle<NativeContext> Factory::NewNativeContext() {
context->set_math_random_index(Smi::zero());
context->set_serialized_objects(*empty_fixed_array());
context->set_microtask_queue(nullptr);
+ context->set_osr_code_cache(*empty_weak_fixed_array());
return context;
}
@@ -1549,8 +1534,8 @@ Handle<Context> Factory::NewDebugEvaluateContext(Handle<Context> previous,
Handle<ScopeInfo> scope_info,
Handle<JSReceiver> extension,
Handle<Context> wrapped,
- Handle<StringSet> whitelist) {
- STATIC_ASSERT(Context::WHITE_LIST_INDEX == Context::MIN_CONTEXT_SLOTS + 1);
+ Handle<StringSet> blacklist) {
+ STATIC_ASSERT(Context::BLACK_LIST_INDEX == Context::MIN_CONTEXT_SLOTS + 1);
DCHECK(scope_info->IsDebugEvaluateScope());
Handle<HeapObject> ext = extension.is_null()
? Handle<HeapObject>::cast(the_hole_value())
@@ -1565,7 +1550,7 @@ Handle<Context> Factory::NewDebugEvaluateContext(Handle<Context> previous,
c->set_native_context(previous->native_context());
c->set_extension(*ext);
if (!wrapped.is_null()) c->set(Context::WRAPPED_CONTEXT_INDEX, *wrapped);
- if (!whitelist.is_null()) c->set(Context::WHITE_LIST_INDEX, *whitelist);
+ if (!blacklist.is_null()) c->set(Context::BLACK_LIST_INDEX, *blacklist);
return c;
}
@@ -1648,20 +1633,16 @@ Handle<AccessorInfo> Factory::NewAccessorInfo() {
return info;
}
-Handle<Script> Factory::NewScript(Handle<String> source,
- AllocationType allocation) {
- return NewScriptWithId(source, isolate()->heap()->NextScriptId(), allocation);
+Handle<Script> Factory::NewScript(Handle<String> source) {
+ return NewScriptWithId(source, isolate()->heap()->NextScriptId());
}
-Handle<Script> Factory::NewScriptWithId(Handle<String> source, int script_id,
- AllocationType allocation) {
- DCHECK(allocation == AllocationType::kOld ||
- allocation == AllocationType::kReadOnly);
+Handle<Script> Factory::NewScriptWithId(Handle<String> source, int script_id) {
// Create and initialize script object.
Heap* heap = isolate()->heap();
ReadOnlyRoots roots(heap);
Handle<Script> script =
- Handle<Script>::cast(NewStruct(SCRIPT_TYPE, allocation));
+ Handle<Script>::cast(NewStruct(SCRIPT_TYPE, AllocationType::kOld));
script->set_source(*source);
script->set_name(roots.undefined_value());
script->set_id(script_id);
@@ -1748,20 +1729,19 @@ Handle<PromiseResolveThenableJobTask> Factory::NewPromiseResolveThenableJobTask(
return microtask;
}
-Handle<Foreign> Factory::NewForeign(Address addr, AllocationType allocation) {
+Handle<Foreign> Factory::NewForeign(Address addr) {
// Statically ensure that it is safe to allocate foreigns in paged spaces.
STATIC_ASSERT(Foreign::kSize <= kMaxRegularHeapObjectSize);
Map map = *foreign_map();
- HeapObject result =
- AllocateRawWithImmortalMap(map.instance_size(), allocation, map);
+ HeapObject result = AllocateRawWithImmortalMap(map.instance_size(),
+ AllocationType::kYoung, map);
Handle<Foreign> foreign(Foreign::cast(result), isolate());
foreign->set_foreign_address(addr);
return foreign;
}
Handle<ByteArray> Factory::NewByteArray(int length, AllocationType allocation) {
- DCHECK_LE(0, length);
- if (length > ByteArray::kMaxLength) {
+ if (length < 0 || length > ByteArray::kMaxLength) {
isolate()->heap()->FatalProcessOutOfMemory("invalid array length");
}
int size = ByteArray::SizeFor(length);
@@ -1776,8 +1756,7 @@ Handle<ByteArray> Factory::NewByteArray(int length, AllocationType allocation) {
Handle<BytecodeArray> Factory::NewBytecodeArray(
int length, const byte* raw_bytecodes, int frame_size, int parameter_count,
Handle<FixedArray> constant_pool) {
- DCHECK_LE(0, length);
- if (length > BytecodeArray::kMaxLength) {
+ if (length < 0 || length > BytecodeArray::kMaxLength) {
isolate()->heap()->FatalProcessOutOfMemory("invalid array length");
}
// Bytecode array is AllocationType::kOld, so constant pool array should be
@@ -1806,7 +1785,6 @@ Handle<BytecodeArray> Factory::NewBytecodeArray(
}
Handle<Cell> Factory::NewCell(Handle<Object> value) {
- AllowDeferredHandleDereference convert_to_cell;
STATIC_ASSERT(Cell::kSize <= kMaxRegularHeapObjectSize);
HeapObject result = AllocateRawWithImmortalMap(
Cell::kSize, AllocationType::kOld, *cell_map());
@@ -1816,7 +1794,6 @@ Handle<Cell> Factory::NewCell(Handle<Object> value) {
}
Handle<FeedbackCell> Factory::NewNoClosuresCell(Handle<HeapObject> value) {
- AllowDeferredHandleDereference convert_to_cell;
HeapObject result = AllocateRawWithImmortalMap(FeedbackCell::kAlignedSize,
AllocationType::kOld, *no_closures_cell_map());
Handle<FeedbackCell> cell(FeedbackCell::cast(result), isolate());
@@ -1827,7 +1804,6 @@ Handle<FeedbackCell> Factory::NewNoClosuresCell(Handle<HeapObject> value) {
}
Handle<FeedbackCell> Factory::NewOneClosureCell(Handle<HeapObject> value) {
- AllowDeferredHandleDereference convert_to_cell;
HeapObject result = AllocateRawWithImmortalMap(FeedbackCell::kAlignedSize,
AllocationType::kOld, *one_closure_cell_map());
Handle<FeedbackCell> cell(FeedbackCell::cast(result), isolate());
@@ -1838,7 +1814,6 @@ Handle<FeedbackCell> Factory::NewOneClosureCell(Handle<HeapObject> value) {
}
Handle<FeedbackCell> Factory::NewManyClosuresCell(Handle<HeapObject> value) {
- AllowDeferredHandleDereference convert_to_cell;
HeapObject result = AllocateRawWithImmortalMap(FeedbackCell::kAlignedSize,
AllocationType::kOld, *many_closures_cell_map());
Handle<FeedbackCell> cell(FeedbackCell::cast(result), isolate());
@@ -1864,15 +1839,13 @@ Handle<PropertyCell> Factory::NewPropertyCell(Handle<Name> name,
}
Handle<DescriptorArray> Factory::NewDescriptorArray(int number_of_descriptors,
- int slack,
- AllocationType allocation) {
- DCHECK(Heap::IsRegularObjectAllocation(allocation));
+ int slack) {
int number_of_all_descriptors = number_of_descriptors + slack;
// Zero-length case must be handled outside.
DCHECK_LT(0, number_of_all_descriptors);
int size = DescriptorArray::SizeFor(number_of_all_descriptors);
- HeapObject obj =
- isolate()->heap()->AllocateRawWithRetryOrFail(size, allocation);
+ HeapObject obj = isolate()->heap()->AllocateRawWith<Heap::kRetryOrFail>(
+ size, AllocationType::kYoung);
obj.set_map_after_allocation(*descriptor_array_map(), SKIP_WRITE_BARRIER);
DescriptorArray array = DescriptorArray::cast(obj);
array.Initialize(*empty_enum_cache(), *undefined_value(),
@@ -1923,7 +1896,7 @@ Handle<Map> Factory::NewMap(InstanceType type, int instance_size,
!Map::CanHaveFastTransitionableElementsKind(type),
IsDictionaryElementsKind(elements_kind) ||
IsTerminalElementsKind(elements_kind));
- HeapObject result = isolate()->heap()->AllocateRawWithRetryOrFail(
+ HeapObject result = isolate()->heap()->AllocateRawWith<Heap::kRetryOrFail>(
Map::kSize, AllocationType::kMap);
result.set_map_after_allocation(*meta_map(), SKIP_WRITE_BARRIER);
return handle(InitializeMap(Map::cast(result), type, instance_size,
@@ -1985,23 +1958,23 @@ Handle<JSObject> Factory::CopyJSObjectWithAllocationSite(
// We can only clone regexps, normal objects, api objects, errors or arrays.
// Copying anything else will break invariants.
- CHECK(map->instance_type() == JS_REGEXP_TYPE ||
+ CHECK(map->instance_type() == JS_REG_EXP_TYPE ||
map->instance_type() == JS_OBJECT_TYPE ||
map->instance_type() == JS_ERROR_TYPE ||
map->instance_type() == JS_ARRAY_TYPE ||
map->instance_type() == JS_API_OBJECT_TYPE ||
- map->instance_type() == WASM_GLOBAL_TYPE ||
- map->instance_type() == WASM_INSTANCE_TYPE ||
- map->instance_type() == WASM_MEMORY_TYPE ||
- map->instance_type() == WASM_MODULE_TYPE ||
- map->instance_type() == WASM_TABLE_TYPE ||
+ map->instance_type() == WASM_GLOBAL_OBJECT_TYPE ||
+ map->instance_type() == WASM_INSTANCE_OBJECT_TYPE ||
+ map->instance_type() == WASM_MEMORY_OBJECT_TYPE ||
+ map->instance_type() == WASM_MODULE_OBJECT_TYPE ||
+ map->instance_type() == WASM_TABLE_OBJECT_TYPE ||
map->instance_type() == JS_SPECIAL_API_OBJECT_TYPE);
DCHECK(site.is_null() || AllocationSite::CanTrack(map->instance_type()));
int object_size = map->instance_size();
int adjusted_object_size =
site.is_null() ? object_size : object_size + AllocationMemento::kSize;
- HeapObject raw_clone = isolate()->heap()->AllocateRawWithRetryOrFail(
+ HeapObject raw_clone = isolate()->heap()->AllocateRawWith<Heap::kRetryOrFail>(
adjusted_object_size, AllocationType::kYoung);
DCHECK(Heap::InYoungGeneration(raw_clone) || FLAG_single_generation);
@@ -2062,6 +2035,13 @@ void initialize_length<PropertyArray>(Handle<PropertyArray> array, int length) {
array->initialize_length(length);
}
+inline void ZeroEmbedderFields(i::Handle<i::JSObject> obj) {
+ auto count = obj->GetEmbedderFieldCount();
+ for (int i = 0; i < count; i++) {
+ obj->SetEmbedderField(i, Smi::kZero);
+ }
+}
+
} // namespace
template <typename T>
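
The ZeroEmbedderFields helper above replaces a loop that was previously repeated at each call site; the JSArrayBufferView and JSPromise hunks below convert to it. A hedged fragment of the before/after shape, with promise standing in for any freshly allocated JSObject handle:

  // Before, repeated per call site:
  for (int i = 0; i < v8::Promise::kEmbedderFieldCount; i++) {
    promise->SetEmbedderField(i, Smi::kZero);
  }
  // After: one helper, driven by the object's own embedder field count.
  ZeroEmbedderFields(promise);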
@@ -2107,15 +2087,14 @@ Handle<FixedArray> Factory::CopyFixedArrayWithMap(Handle<FixedArray> array,
}
Handle<FixedArray> Factory::CopyFixedArrayAndGrow(Handle<FixedArray> array,
- int grow_by,
- AllocationType allocation) {
- return CopyArrayAndGrow(array, grow_by, allocation);
+ int grow_by) {
+ return CopyArrayAndGrow(array, grow_by, AllocationType::kYoung);
}
Handle<WeakFixedArray> Factory::CopyWeakFixedArrayAndGrow(
- Handle<WeakFixedArray> src, int grow_by, AllocationType allocation) {
+ Handle<WeakFixedArray> src, int grow_by) {
DCHECK(!src->IsTransitionArray()); // Compacted by GC, this code doesn't work
- return CopyArrayAndGrow(src, grow_by, allocation);
+ return CopyArrayAndGrow(src, grow_by, AllocationType::kOld);
}
Handle<WeakArrayList> Factory::CopyWeakArrayListAndGrow(
@@ -2142,8 +2121,8 @@ Handle<WeakArrayList> Factory::CopyWeakArrayListAndGrow(
}
Handle<PropertyArray> Factory::CopyPropertyArrayAndGrow(
- Handle<PropertyArray> array, int grow_by, AllocationType allocation) {
- return CopyArrayAndGrow(array, grow_by, allocation);
+ Handle<PropertyArray> array, int grow_by) {
+ return CopyArrayAndGrow(array, grow_by, AllocationType::kYoung);
}
Handle<FixedArray> Factory::CopyFixedArrayUpTo(Handle<FixedArray> array,
@@ -2187,8 +2166,8 @@ Handle<FixedDoubleArray> Factory::CopyFixedDoubleArray(
Handle<FixedDoubleArray> array) {
int len = array->length();
if (len == 0) return array;
- Handle<FixedDoubleArray> result = Handle<FixedDoubleArray>::cast(
- NewFixedDoubleArray(len, AllocationType::kYoung));
+ Handle<FixedDoubleArray> result =
+ Handle<FixedDoubleArray>::cast(NewFixedDoubleArray(len));
Heap::CopyBlock(
result->address() + FixedDoubleArray::kLengthOffset,
array->address() + FixedDoubleArray::kLengthOffset,
@@ -2196,32 +2175,39 @@ Handle<FixedDoubleArray> Factory::CopyFixedDoubleArray(
return result;
}
-Handle<Object> Factory::NewNumber(double value, AllocationType allocation) {
+template <AllocationType allocation>
+Handle<Object> Factory::NewNumber(double value) {
// Materialize as a SMI if possible.
int32_t int_value;
if (DoubleToSmiInteger(value, &int_value)) {
return handle(Smi::FromInt(int_value), isolate());
}
- return NewHeapNumber(value, allocation);
+ return NewHeapNumber<allocation>(value);
}
-Handle<Object> Factory::NewNumberFromInt(int32_t value,
- AllocationType allocation) {
+template Handle<Object> V8_EXPORT_PRIVATE
+Factory::NewNumber<AllocationType::kYoung>(double);
+template Handle<Object> V8_EXPORT_PRIVATE
+Factory::NewNumber<AllocationType::kOld>(double);
+template Handle<Object> V8_EXPORT_PRIVATE
+Factory::NewNumber<AllocationType::kReadOnly>(double);
+
+Handle<Object> Factory::NewNumberFromInt(int32_t value) {
if (Smi::IsValid(value)) return handle(Smi::FromInt(value), isolate());
// Bypass NewNumber to avoid various redundant checks.
- return NewHeapNumber(FastI2D(value), allocation);
+ return NewHeapNumber(FastI2D(value));
}
-Handle<Object> Factory::NewNumberFromUint(uint32_t value,
- AllocationType allocation) {
+Handle<Object> Factory::NewNumberFromUint(uint32_t value) {
int32_t int32v = static_cast<int32_t>(value);
if (int32v >= 0 && Smi::IsValid(int32v)) {
return handle(Smi::FromInt(int32v), isolate());
}
- return NewHeapNumber(FastUI2D(value), allocation);
+ return NewHeapNumber(FastUI2D(value));
}
-Handle<HeapNumber> Factory::NewHeapNumber(AllocationType allocation) {
+template <AllocationType allocation>
+Handle<HeapNumber> Factory::NewHeapNumber() {
STATIC_ASSERT(HeapNumber::kSize <= kMaxRegularHeapObjectSize);
Map map = *heap_number_map();
HeapObject result = AllocateRawWithImmortalMap(HeapNumber::kSize, allocation,
@@ -2229,10 +2215,17 @@ Handle<HeapNumber> Factory::NewHeapNumber(AllocationType allocation) {
return handle(HeapNumber::cast(result), isolate());
}
+template Handle<HeapNumber> V8_EXPORT_PRIVATE
+Factory::NewHeapNumber<AllocationType::kYoung>();
+template Handle<HeapNumber> V8_EXPORT_PRIVATE
+Factory::NewHeapNumber<AllocationType::kOld>();
+template Handle<HeapNumber> V8_EXPORT_PRIVATE
+Factory::NewHeapNumber<AllocationType::kReadOnly>();
+
Handle<HeapNumber> Factory::NewHeapNumberForCodeAssembler(double value) {
- return NewHeapNumber(value, isolate()->heap()->CanAllocateInReadOnlySpace()
- ? AllocationType::kReadOnly
- : AllocationType::kOld);
+ return isolate()->heap()->CanAllocateInReadOnlySpace()
+ ? NewHeapNumber<AllocationType::kReadOnly>(value)
+ : NewHeapNumber<AllocationType::kOld>(value);
}
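
Moving AllocationType into a template parameter makes the pretenuring decision at compile time; the explicit instantiations above (kYoung, kOld, kReadOnly) are what keep the out-of-line definitions usable from other translation units. A sketch of the resulting call sites, with factory standing for an isolate's i::Factory:

  Handle<Object> young = factory->NewNumber(0.5);  // defaults to kYoung
  Handle<Object> old_num = factory->NewNumber<AllocationType::kOld>(0.5);
  Handle<HeapNumber> raw = factory->NewHeapNumber<AllocationType::kOld>();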
Handle<FreshlyAllocatedBigInt> Factory::NewBigInt(int length,
@@ -2299,8 +2292,8 @@ Handle<Object> Factory::NewInvalidStringLengthError() {
FATAL("Aborting on invalid string length");
}
// Invalidate the "string length" protector.
- if (isolate()->IsStringLengthOverflowIntact()) {
- isolate()->InvalidateStringLengthOverflowProtector();
+ if (Protectors::IsStringLengthOverflowLookupChainIntact(isolate())) {
+ Protectors::InvalidateStringLengthOverflowLookupChain(isolate());
}
return NewRangeError(MessageTemplate::kInvalidStringLength);
}
@@ -2412,7 +2405,7 @@ Handle<JSFunction> Factory::NewFunction(const NewFunctionArgs& args) {
case JS_ARRAY_TYPE:
elements_kind = PACKED_SMI_ELEMENTS;
break;
- case JS_ARGUMENTS_TYPE:
+ case JS_ARGUMENTS_OBJECT_TYPE:
elements_kind = PACKED_ELEMENTS;
break;
default:
@@ -2679,8 +2672,8 @@ Handle<Code> Factory::CopyCode(Handle<Code> code) {
{
int obj_size = code->Size();
CodePageCollectionMemoryModificationScope code_allocation(heap);
- HeapObject result =
- heap->AllocateRawWithRetryOrFail(obj_size, AllocationType::kCode);
+ HeapObject result = heap->AllocateRawWith<Heap::kRetryOrFail>(
+ obj_size, AllocationType::kCode);
// Copy code object.
Address old_addr = code->address();
@@ -2696,7 +2689,9 @@ Handle<Code> Factory::CopyCode(Handle<Code> code) {
// allocation is on.
heap->incremental_marking()->ProcessBlackAllocatedObject(*new_code);
// Record all references to embedded objects in the new code object.
+#ifndef V8_DISABLE_WRITE_BARRIERS
WriteBarrierForCode(*new_code);
+#endif
}
#ifdef VERIFY_HEAP
@@ -2737,9 +2732,8 @@ Handle<JSObject> Factory::NewJSObject(Handle<JSFunction> constructor,
return NewJSObjectFromMap(map, allocation);
}
-Handle<JSObject> Factory::NewJSObjectWithNullProto(AllocationType allocation) {
- Handle<JSObject> result =
- NewJSObject(isolate()->object_function(), allocation);
+Handle<JSObject> Factory::NewJSObjectWithNullProto() {
+ Handle<JSObject> result = NewJSObject(isolate()->object_function());
Handle<Map> new_map = Map::Copy(
isolate(), Handle<Map>(result->map(), isolate()), "ObjectWithNullProto");
Map::SetPrototype(isolate(), new_map, null_value());
@@ -2776,7 +2770,7 @@ Handle<JSGlobalObject> Factory::NewJSGlobalObject(
// The global object might be created from an object template with accessors.
// Fill these accessors into the dictionary.
Handle<DescriptorArray> descs(map->instance_descriptors(), isolate());
- for (int i = 0; i < map->NumberOfOwnDescriptors(); i++) {
+ for (InternalIndex i : InternalIndex::Range(map->NumberOfOwnDescriptors())) {
PropertyDetails details = descs->GetDetails(i);
// Only accessors are expected.
DCHECK_EQ(kAccessor, details.kind());
@@ -2888,13 +2882,14 @@ Handle<JSObject> Factory::NewSlowJSObjectFromMap(
Handle<JSObject> Factory::NewSlowJSObjectWithPropertiesAndElements(
Handle<HeapObject> prototype, Handle<NameDictionary> properties,
- Handle<FixedArrayBase> elements, AllocationType allocation) {
+ Handle<FixedArrayBase> elements) {
Handle<Map> object_map = isolate()->slow_object_with_object_prototype_map();
if (object_map->prototype() != *prototype) {
object_map = Map::TransitionToPrototype(isolate(), object_map, prototype);
}
DCHECK(object_map->is_dictionary_map());
- Handle<JSObject> object = NewJSObjectFromMap(object_map, allocation);
+ Handle<JSObject> object =
+ NewJSObjectFromMap(object_map, AllocationType::kYoung);
object->set_raw_properties_or_hash(*properties);
if (*elements != ReadOnlyRoots(isolate()).empty_fixed_array()) {
DCHECK(elements->IsNumberDictionary());
@@ -3010,7 +3005,7 @@ Handle<JSModuleNamespace> Factory::NewJSModuleNamespace() {
Handle<JSModuleNamespace> module_namespace(
Handle<JSModuleNamespace>::cast(NewJSObjectFromMap(map)));
FieldIndex index = FieldIndex::ForDescriptor(
- *map, JSModuleNamespace::kToStringTagFieldIndex);
+ *map, InternalIndex(JSModuleNamespace::kToStringTagFieldIndex));
module_namespace->FastPropertyAtPut(index,
ReadOnlyRoots(isolate()).Module_string());
return module_namespace;
@@ -3042,6 +3037,7 @@ Handle<SourceTextModule> Factory::NewSourceTextModule(
Handle<FixedArray> requested_modules =
requested_modules_length > 0 ? NewFixedArray(requested_modules_length)
: empty_fixed_array();
+ Handle<ArrayList> async_parent_modules = ArrayList::New(isolate(), 0);
ReadOnlyRoots roots(isolate());
Handle<SourceTextModule> module(
@@ -3061,6 +3057,12 @@ Handle<SourceTextModule> Factory::NewSourceTextModule(
module->set_import_meta(roots.the_hole_value());
module->set_dfs_index(-1);
module->set_dfs_ancestor_index(-1);
+ module->set_top_level_capability(roots.undefined_value());
+ module->set_flags(0);
+ module->set_async(IsAsyncModule(code->kind()));
+ module->set_async_evaluating(false);
+ module->set_async_parent_modules(*async_parent_modules);
+ module->set_pending_async_dependencies(0);
return module;
}
@@ -3086,15 +3088,43 @@ Handle<SyntheticModule> Factory::NewSyntheticModule(
return module;
}
-Handle<JSArrayBuffer> Factory::NewJSArrayBuffer(SharedFlag shared,
- AllocationType allocation) {
- Handle<JSFunction> array_buffer_fun(
- shared == SharedFlag::kShared
- ? isolate()->native_context()->shared_array_buffer_fun()
- : isolate()->native_context()->array_buffer_fun(),
+Handle<JSArrayBuffer> Factory::NewJSArrayBuffer(
+ std::shared_ptr<BackingStore> backing_store, AllocationType allocation) {
+ Handle<Map> map(isolate()->native_context()->array_buffer_fun().initial_map(),
+ isolate());
+ auto result =
+ Handle<JSArrayBuffer>::cast(NewJSObjectFromMap(map, allocation));
+ result->Setup(SharedFlag::kNotShared, std::move(backing_store));
+ return result;
+}
+
+MaybeHandle<JSArrayBuffer> Factory::NewJSArrayBufferAndBackingStore(
+ size_t byte_length, InitializedFlag initialized,
+ AllocationType allocation) {
+ std::unique_ptr<BackingStore> backing_store = nullptr;
+
+ if (byte_length > 0) {
+ backing_store = BackingStore::Allocate(isolate(), byte_length,
+ SharedFlag::kNotShared, initialized);
+ if (!backing_store) return MaybeHandle<JSArrayBuffer>();
+ }
+ Handle<Map> map(isolate()->native_context()->array_buffer_fun().initial_map(),
+ isolate());
+ auto array_buffer =
+ Handle<JSArrayBuffer>::cast(NewJSObjectFromMap(map, allocation));
+ array_buffer->Setup(SharedFlag::kNotShared, std::move(backing_store));
+ return array_buffer;
+}
+
+Handle<JSArrayBuffer> Factory::NewJSSharedArrayBuffer(
+ std::shared_ptr<BackingStore> backing_store) {
+ Handle<Map> map(
+ isolate()->native_context()->shared_array_buffer_fun().initial_map(),
isolate());
- Handle<Map> map(array_buffer_fun->initial_map(), isolate());
- return Handle<JSArrayBuffer>::cast(NewJSObjectFromMap(map, allocation));
+ auto result = Handle<JSArrayBuffer>::cast(
+ NewJSObjectFromMap(map, AllocationType::kYoung));
+ result->Setup(SharedFlag::kShared, std::move(backing_store));
+ return result;
}
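
JSArrayBuffer creation now attaches its BackingStore up front instead of allocating an empty buffer that is wired to a store later. A hedged sketch of the new shape, using only the methods declared in the factory.h hunk below; the InitializedFlag enumerator name is assumed from backing-store.h, which this diff does not show:

  MaybeHandle<JSArrayBuffer> maybe = factory->NewJSArrayBufferAndBackingStore(
      1024, InitializedFlag::kZeroInitialized, AllocationType::kYoung);
  Handle<JSArrayBuffer> buffer;
  if (!maybe.ToHandle(&buffer)) {
    // byte_length > 0 and BackingStore::Allocate failed: no buffer exists.
  }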
Handle<JSIteratorResult> Factory::NewJSIteratorResult(Handle<Object> value,
@@ -3172,20 +3202,17 @@ void ForFixedTypedArray(ExternalArrayType array_type, size_t* element_size,
Handle<JSArrayBufferView> Factory::NewJSArrayBufferView(
Handle<Map> map, Handle<FixedArrayBase> elements,
- Handle<JSArrayBuffer> buffer, size_t byte_offset, size_t byte_length,
- AllocationType allocation) {
+ Handle<JSArrayBuffer> buffer, size_t byte_offset, size_t byte_length) {
CHECK_LE(byte_length, buffer->byte_length());
CHECK_LE(byte_offset, buffer->byte_length());
CHECK_LE(byte_offset + byte_length, buffer->byte_length());
- Handle<JSArrayBufferView> array_buffer_view =
- Handle<JSArrayBufferView>::cast(NewJSObjectFromMap(map, allocation));
+ Handle<JSArrayBufferView> array_buffer_view = Handle<JSArrayBufferView>::cast(
+ NewJSObjectFromMap(map, AllocationType::kYoung));
array_buffer_view->set_elements(*elements);
array_buffer_view->set_buffer(*buffer);
array_buffer_view->set_byte_offset(byte_offset);
array_buffer_view->set_byte_length(byte_length);
- for (int i = 0; i < v8::ArrayBufferView::kEmbedderFieldCount; i++) {
- array_buffer_view->SetEmbedderField(i, Smi::kZero);
- }
+ ZeroEmbedderFields(array_buffer_view);
DCHECK_EQ(array_buffer_view->GetEmbedderFieldCount(),
v8::ArrayBufferView::kEmbedderFieldCount);
return array_buffer_view;
@@ -3193,8 +3220,8 @@ Handle<JSArrayBufferView> Factory::NewJSArrayBufferView(
Handle<JSTypedArray> Factory::NewJSTypedArray(ExternalArrayType type,
Handle<JSArrayBuffer> buffer,
- size_t byte_offset, size_t length,
- AllocationType allocation) {
+ size_t byte_offset,
+ size_t length) {
size_t element_size;
ElementsKind elements_kind;
ForFixedTypedArray(type, &element_size, &elements_kind);
@@ -3219,24 +3246,21 @@ Handle<JSTypedArray> Factory::NewJSTypedArray(ExternalArrayType type,
default:
UNREACHABLE();
}
- Handle<JSTypedArray> typed_array = Handle<JSTypedArray>::cast(
- NewJSArrayBufferView(map, empty_byte_array(), buffer, byte_offset,
- byte_length, allocation));
+ Handle<JSTypedArray> typed_array =
+ Handle<JSTypedArray>::cast(NewJSArrayBufferView(
+ map, empty_byte_array(), buffer, byte_offset, byte_length));
typed_array->set_length(length);
- typed_array->set_external_pointer(
- reinterpret_cast<byte*>(buffer->backing_store()) + byte_offset);
- typed_array->set_base_pointer(Smi::kZero);
+ typed_array->SetOffHeapDataPtr(buffer->backing_store(), byte_offset);
return typed_array;
}
Handle<JSDataView> Factory::NewJSDataView(Handle<JSArrayBuffer> buffer,
size_t byte_offset,
- size_t byte_length,
- AllocationType allocation) {
+ size_t byte_length) {
Handle<Map> map(isolate()->native_context()->data_view_fun().initial_map(),
isolate());
Handle<JSDataView> obj = Handle<JSDataView>::cast(NewJSArrayBufferView(
- map, empty_fixed_array(), buffer, byte_offset, byte_length, allocation));
+ map, empty_fixed_array(), buffer, byte_offset, byte_length));
obj->set_data_pointer(static_cast<uint8_t*>(buffer->backing_store()) +
byte_offset);
return obj;
@@ -3499,11 +3523,6 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
share->clear_padding();
}
- // Link into the list.
- Handle<WeakArrayList> noscript_list = noscript_shared_function_infos();
- noscript_list = WeakArrayList::AddToEnd(isolate(), noscript_list,
- MaybeObjectHandle::Weak(share));
- isolate()->heap()->set_noscript_shared_function_infos(*noscript_list);
#ifdef VERIFY_HEAP
share->SharedFunctionInfoVerify(isolate());
@@ -3894,6 +3913,9 @@ void Factory::SetRegExpIrregexpData(Handle<JSRegExp> regexp,
JSRegExp::Flags flags, int capture_count) {
Handle<FixedArray> store = NewFixedArray(JSRegExp::kIrregexpDataSize);
Smi uninitialized = Smi::FromInt(JSRegExp::kUninitializedValue);
+ Smi ticks_until_tier_up = FLAG_regexp_tier_up
+ ? Smi::FromInt(FLAG_regexp_tier_up_ticks)
+ : uninitialized;
store->set(JSRegExp::kTagIndex, Smi::FromInt(type));
store->set(JSRegExp::kSourceIndex, *source);
store->set(JSRegExp::kFlagsIndex, Smi::FromInt(flags));
@@ -3904,7 +3926,7 @@ void Factory::SetRegExpIrregexpData(Handle<JSRegExp> regexp,
store->set(JSRegExp::kIrregexpMaxRegisterCountIndex, Smi::kZero);
store->set(JSRegExp::kIrregexpCaptureCountIndex, Smi::FromInt(capture_count));
store->set(JSRegExp::kIrregexpCaptureNameMapIndex, uninitialized);
- store->set(JSRegExp::kIrregexpTierUpTicksIndex, Smi::kZero);
+ store->set(JSRegExp::kIrregexpTicksUntilTierUpIndex, ticks_until_tier_up);
regexp->set_data(*store);
}
@@ -4141,19 +4163,18 @@ Handle<Map> Factory::CreateClassFunctionMap(Handle<JSFunction> empty_function) {
return map;
}
-Handle<JSPromise> Factory::NewJSPromiseWithoutHook(AllocationType allocation) {
- Handle<JSPromise> promise = Handle<JSPromise>::cast(
- NewJSObject(isolate()->promise_function(), allocation));
+Handle<JSPromise> Factory::NewJSPromiseWithoutHook() {
+ Handle<JSPromise> promise =
+ Handle<JSPromise>::cast(NewJSObject(isolate()->promise_function()));
promise->set_reactions_or_result(Smi::kZero);
promise->set_flags(0);
- for (int i = 0; i < v8::Promise::kEmbedderFieldCount; i++) {
- promise->SetEmbedderField(i, Smi::kZero);
- }
+ ZeroEmbedderFields(promise);
+ DCHECK_EQ(promise->GetEmbedderFieldCount(), v8::Promise::kEmbedderFieldCount);
return promise;
}
-Handle<JSPromise> Factory::NewJSPromise(AllocationType allocation) {
- Handle<JSPromise> promise = NewJSPromiseWithoutHook(allocation);
+Handle<JSPromise> Factory::NewJSPromise() {
+ Handle<JSPromise> promise = NewJSPromiseWithoutHook();
isolate()->RunPromiseHook(PromiseHookType::kInit, promise, undefined_value());
return promise;
}
diff --git a/deps/v8/src/heap/factory.h b/deps/v8/src/heap/factory.h
index 1e47926e8e..35de6425c9 100644
--- a/deps/v8/src/heap/factory.h
+++ b/deps/v8/src/heap/factory.h
@@ -74,7 +74,8 @@ class WeakCell;
struct SourceRange;
template <typename T>
class ZoneVector;
-enum class SharedFlag : uint32_t;
+enum class SharedFlag : uint8_t;
+enum class InitializedFlag : uint8_t;
enum FunctionMode {
kWithNameBit = 1 << 0,
@@ -107,14 +108,12 @@ enum FunctionMode {
// Interface for handle based allocation.
class V8_EXPORT_PRIVATE Factory {
public:
- Handle<Oddball> NewOddball(
- Handle<Map> map, const char* to_string, Handle<Object> to_number,
- const char* type_of, byte kind,
- AllocationType allocation = AllocationType::kReadOnly);
+ Handle<Oddball> NewOddball(Handle<Map> map, const char* to_string,
+ Handle<Object> to_number, const char* type_of,
+ byte kind);
// Marks self references within code generation.
- Handle<Oddball> NewSelfReferenceMarker(
- AllocationType allocation = AllocationType::kOld);
+ Handle<Oddball> NewSelfReferenceMarker();
// Allocates a fixed array-like object with given map and initialized with
// undefined values.
@@ -140,8 +139,7 @@ class V8_EXPORT_PRIVATE Factory {
int length, AllocationType allocation = AllocationType::kYoung);
// Allocates a property array initialized with undefined values.
- Handle<PropertyArray> NewPropertyArray(
- int length, AllocationType allocation = AllocationType::kYoung);
+ Handle<PropertyArray> NewPropertyArray(int length);
// Tries allocating a fixed array initialized with undefined values.
// In case of an allocation failure (OOM) an empty handle is returned.
// The caller has to manually signal an
@@ -156,24 +154,20 @@ class V8_EXPORT_PRIVATE Factory {
int length, AllocationType allocation = AllocationType::kYoung);
// Allocates an uninitialized fixed array. It must be filled by the caller.
- Handle<FixedArray> NewUninitializedFixedArray(
- int length, AllocationType allocation = AllocationType::kYoung);
+ Handle<FixedArray> NewUninitializedFixedArray(int length);
// Allocates a closure feedback cell array whose feedback cells are
// initialized with undefined values.
- Handle<ClosureFeedbackCellArray> NewClosureFeedbackCellArray(
- int num_slots, AllocationType allocation = AllocationType::kYoung);
+ Handle<ClosureFeedbackCellArray> NewClosureFeedbackCellArray(int num_slots);
// Allocates a feedback vector whose slots are initialized with undefined
// values.
Handle<FeedbackVector> NewFeedbackVector(
Handle<SharedFunctionInfo> shared,
- Handle<ClosureFeedbackCellArray> closure_feedback_cell_array,
- AllocationType allocation = AllocationType::kYoung);
+ Handle<ClosureFeedbackCellArray> closure_feedback_cell_array);
// Allocates a clean embedder data array with given capacity.
- Handle<EmbedderDataArray> NewEmbedderDataArray(
- int length, AllocationType allocation = AllocationType::kYoung);
+ Handle<EmbedderDataArray> NewEmbedderDataArray(int length);
// Allocates a fixed array for name-value pairs of boilerplate properties and
// calculates the number of properties we need to store in the backing store.
@@ -183,20 +177,17 @@ class V8_EXPORT_PRIVATE Factory {
// Allocate a new uninitialized fixed double array.
// The function returns a pre-allocated empty fixed array for length = 0,
// so the return type must be the general fixed array class.
- Handle<FixedArrayBase> NewFixedDoubleArray(
- int length, AllocationType allocation = AllocationType::kYoung);
+ Handle<FixedArrayBase> NewFixedDoubleArray(int length);
// Allocate a new fixed double array with hole values.
- Handle<FixedArrayBase> NewFixedDoubleArrayWithHoles(
- int size, AllocationType allocation = AllocationType::kYoung);
+ Handle<FixedArrayBase> NewFixedDoubleArrayWithHoles(int size);
// Allocates a FeedbackMetadata object and zeroes the data section.
Handle<FeedbackMetadata> NewFeedbackMetadata(
int slot_count, int feedback_cell_count,
AllocationType allocation = AllocationType::kOld);
- Handle<FrameArray> NewFrameArray(
- int number_of_frames, AllocationType allocation = AllocationType::kYoung);
+ Handle<FrameArray> NewFrameArray(int number_of_frames);
Handle<OrderedHashSet> NewOrderedHashSet();
Handle<OrderedHashMap> NewOrderedHashMap();
@@ -223,10 +214,6 @@ class V8_EXPORT_PRIVATE Factory {
Handle<Tuple2> NewTuple2(Handle<Object> value1, Handle<Object> value2,
AllocationType allocation);
- // Create a new Tuple3 struct.
- Handle<Tuple3> NewTuple3(Handle<Object> value1, Handle<Object> value2,
- Handle<Object> value3, AllocationType allocation);
-
// Create a new ArrayBoilerplateDescription struct.
Handle<ArrayBoilerplateDescription> NewArrayBoilerplateDescription(
ElementsKind elements_kind, Handle<FixedArrayBase> constant_values);
@@ -451,11 +438,8 @@ class V8_EXPORT_PRIVATE Factory {
Handle<AccessorInfo> NewAccessorInfo();
- Handle<Script> NewScript(Handle<String> source,
- AllocationType allocation = AllocationType::kOld);
- Handle<Script> NewScriptWithId(
- Handle<String> source, int script_id,
- AllocationType allocation = AllocationType::kOld);
+ Handle<Script> NewScript(Handle<String> source);
+ Handle<Script> NewScriptWithId(Handle<String> source, int script_id);
Handle<Script> CloneScript(Handle<Script> script);
Handle<BreakPointInfo> NewBreakPointInfo(int source_position);
@@ -479,8 +463,7 @@ class V8_EXPORT_PRIVATE Factory {
Handle<JSReceiver> thenable, Handle<Context> context);
// Foreign objects are pretenured when allocated by the bootstrapper.
- Handle<Foreign> NewForeign(
- Address addr, AllocationType allocation = AllocationType::kYoung);
+ Handle<Foreign> NewForeign(Address addr);
Handle<ByteArray> NewByteArray(
int length, AllocationType allocation = AllocationType::kYoung);
@@ -498,9 +481,8 @@ class V8_EXPORT_PRIVATE Factory {
Handle<FeedbackCell> NewOneClosureCell(Handle<HeapObject> value);
Handle<FeedbackCell> NewManyClosuresCell(Handle<HeapObject> value);
- Handle<DescriptorArray> NewDescriptorArray(
- int number_of_entries, int slack = 0,
- AllocationType allocation = AllocationType::kYoung);
+ Handle<DescriptorArray> NewDescriptorArray(int number_of_entries,
+ int slack = 0);
Handle<TransitionArray> NewTransitionArray(int number_of_transitions,
int slack = 0);
@@ -537,21 +519,18 @@ class V8_EXPORT_PRIVATE Factory {
Handle<FixedArray> CopyFixedArrayWithMap(Handle<FixedArray> array,
Handle<Map> map);
- Handle<FixedArray> CopyFixedArrayAndGrow(
- Handle<FixedArray> array, int grow_by,
- AllocationType allocation = AllocationType::kYoung);
+ Handle<FixedArray> CopyFixedArrayAndGrow(Handle<FixedArray> array,
+ int grow_by);
- Handle<WeakFixedArray> CopyWeakFixedArrayAndGrow(
- Handle<WeakFixedArray> array, int grow_by,
- AllocationType allocation = AllocationType::kYoung);
+ Handle<WeakFixedArray> CopyWeakFixedArrayAndGrow(Handle<WeakFixedArray> array,
+ int grow_by);
Handle<WeakArrayList> CopyWeakArrayListAndGrow(
Handle<WeakArrayList> array, int grow_by,
AllocationType allocation = AllocationType::kYoung);
- Handle<PropertyArray> CopyPropertyArrayAndGrow(
- Handle<PropertyArray> array, int grow_by,
- AllocationType allocation = AllocationType::kYoung);
+ Handle<PropertyArray> CopyPropertyArrayAndGrow(Handle<PropertyArray> array,
+ int grow_by);
Handle<FixedArray> CopyFixedArrayUpTo(
Handle<FixedArray> array, int new_len,
@@ -567,32 +546,28 @@ class V8_EXPORT_PRIVATE Factory {
// Numbers (e.g. literals) are pretenured by the parser.
// The return value may be a smi or a heap number.
- Handle<Object> NewNumber(double value,
- AllocationType allocation = AllocationType::kYoung);
-
- Handle<Object> NewNumberFromInt(
- int32_t value, AllocationType allocation = AllocationType::kYoung);
- Handle<Object> NewNumberFromUint(
- uint32_t value, AllocationType allocation = AllocationType::kYoung);
- inline Handle<Object> NewNumberFromSize(
- size_t value, AllocationType allocation = AllocationType::kYoung);
- inline Handle<Object> NewNumberFromInt64(
- int64_t value, AllocationType allocation = AllocationType::kYoung);
- inline Handle<HeapNumber> NewHeapNumber(
- double value, AllocationType allocation = AllocationType::kYoung);
- inline Handle<HeapNumber> NewHeapNumberFromBits(
- uint64_t bits, AllocationType allocation = AllocationType::kYoung);
+ template <AllocationType allocation = AllocationType::kYoung>
+ EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
+ Handle<Object> NewNumber(double value);
+ Handle<Object> NewNumberFromInt(int32_t value);
+ Handle<Object> NewNumberFromUint(uint32_t value);
+ inline Handle<Object> NewNumberFromSize(size_t value);
+ inline Handle<Object> NewNumberFromInt64(int64_t value);
+ template <AllocationType allocation = AllocationType::kYoung>
+ inline Handle<HeapNumber> NewHeapNumber(double value);
+ template <AllocationType allocation = AllocationType::kYoung>
+ inline Handle<HeapNumber> NewHeapNumberFromBits(uint64_t bits);
// Creates a heap number object whose value field is not yet set.
- Handle<HeapNumber> NewHeapNumber(
- AllocationType allocation = AllocationType::kYoung);
+ template <AllocationType allocation = AllocationType::kYoung>
+ EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
+ Handle<HeapNumber> NewHeapNumber();
// Creates a new HeapNumber in read-only space if possible, otherwise old
// space.
Handle<HeapNumber> NewHeapNumberForCodeAssembler(double value);
- inline Handle<HeapNumber> NewHeapNumberWithHoleNaN(
- AllocationType allocation = AllocationType::kYoung);
+ inline Handle<HeapNumber> NewHeapNumberWithHoleNaN();
// Allocates a new BigInt with {length} digits. Only to be used by
// MutableBigInt::New*.
@@ -609,8 +584,7 @@ class V8_EXPORT_PRIVATE Factory {
Handle<JSFunction> constructor,
AllocationType allocation = AllocationType::kYoung);
// JSObject without a prototype.
- Handle<JSObject> NewJSObjectWithNullProto(
- AllocationType allocation = AllocationType::kYoung);
+ Handle<JSObject> NewJSObjectWithNullProto();
// Global objects are pretenured and initialized based on a constructor.
Handle<JSGlobalObject> NewJSGlobalObject(Handle<JSFunction> constructor);
@@ -644,8 +618,7 @@ class V8_EXPORT_PRIVATE Factory {
// object will have dictionary elements.
Handle<JSObject> NewSlowJSObjectWithPropertiesAndElements(
Handle<HeapObject> prototype, Handle<NameDictionary> properties,
- Handle<FixedArrayBase> elements,
- AllocationType allocation = AllocationType::kYoung);
+ Handle<FixedArrayBase> elements);
// JS arrays are pretenured when allocated by the parser.
@@ -692,20 +665,27 @@ class V8_EXPORT_PRIVATE Factory {
v8::Module::SyntheticModuleEvaluationSteps evaluation_steps);
Handle<JSArrayBuffer> NewJSArrayBuffer(
- SharedFlag shared, AllocationType allocation = AllocationType::kYoung);
+ std::shared_ptr<BackingStore> backing_store,
+ AllocationType allocation = AllocationType::kYoung);
+
+ MaybeHandle<JSArrayBuffer> NewJSArrayBufferAndBackingStore(
+ size_t byte_length, InitializedFlag initialized,
+ AllocationType allocation = AllocationType::kYoung);
+
+ Handle<JSArrayBuffer> NewJSSharedArrayBuffer(
+ std::shared_ptr<BackingStore> backing_store);
static void TypeAndSizeForElementsKind(ElementsKind kind,
ExternalArrayType* array_type,
size_t* element_size);
// Creates a new JSTypedArray with the specified buffer.
- Handle<JSTypedArray> NewJSTypedArray(
- ExternalArrayType type, Handle<JSArrayBuffer> buffer, size_t byte_offset,
- size_t length, AllocationType allocation = AllocationType::kYoung);
+ Handle<JSTypedArray> NewJSTypedArray(ExternalArrayType type,
+ Handle<JSArrayBuffer> buffer,
+ size_t byte_offset, size_t length);
- Handle<JSDataView> NewJSDataView(
- Handle<JSArrayBuffer> buffer, size_t byte_offset, size_t byte_length,
- AllocationType allocation = AllocationType::kYoung);
+ Handle<JSDataView> NewJSDataView(Handle<JSArrayBuffer> buffer,
+ size_t byte_offset, size_t byte_length);
Handle<JSIteratorResult> NewJSIteratorResult(Handle<Object> value, bool done);
Handle<JSAsyncFromSyncIterator> NewJSAsyncFromSyncIterator(
@@ -922,10 +902,8 @@ class V8_EXPORT_PRIVATE Factory {
// Converts the given ToPrimitive hint to its string representation.
Handle<String> ToPrimitiveHintString(ToPrimitiveHint hint);
- Handle<JSPromise> NewJSPromiseWithoutHook(
- AllocationType allocation = AllocationType::kYoung);
- Handle<JSPromise> NewJSPromise(
- AllocationType allocation = AllocationType::kYoung);
+ Handle<JSPromise> NewJSPromiseWithoutHook();
+ Handle<JSPromise> NewJSPromise();
Handle<CallHandlerInfo> NewCallHandlerInfo(bool has_no_side_effect = false);
@@ -1034,8 +1012,7 @@ class V8_EXPORT_PRIVATE Factory {
Handle<JSArrayBufferView> NewJSArrayBufferView(
Handle<Map> map, Handle<FixedArrayBase> elements,
- Handle<JSArrayBuffer> buffer, size_t byte_offset, size_t byte_length,
- AllocationType allocation);
+ Handle<JSArrayBuffer> buffer, size_t byte_offset, size_t byte_length);
// Allocate memory for an uninitialized array (e.g., a FixedArray or similar).
HeapObject AllocateRawArray(int size, AllocationType allocation);
diff --git a/deps/v8/src/heap/heap-inl.h b/deps/v8/src/heap/heap-inl.h
index da803f3339..56f3590b8a 100644
--- a/deps/v8/src/heap/heap-inl.h
+++ b/deps/v8/src/heap/heap-inl.h
@@ -111,10 +111,6 @@ void Heap::SetRootStringTable(StringTable value) {
roots_table()[RootIndex::kStringTable] = value.ptr();
}
-void Heap::SetRootNoScriptSharedFunctionInfos(Object value) {
- roots_table()[RootIndex::kNoScriptSharedFunctionInfos] = value.ptr();
-}
-
void Heap::SetMessageListeners(TemplateList value) {
roots_table()[RootIndex::kMessageListeners] = value.ptr();
}
@@ -163,7 +159,7 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationType type,
AllocationAlignment alignment) {
DCHECK(AllowHandleAllocation::IsAllowed());
DCHECK(AllowHeapAllocation::IsAllowed());
- DCHECK(gc_state_ == NOT_IN_GC);
+ DCHECK_EQ(gc_state_, NOT_IN_GC);
#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
if (FLAG_random_gc_interval > 0 || FLAG_gc_interval >= 0) {
if (!always_allocate() && Heap::allocation_timeout_-- <= 0) {
@@ -180,8 +176,9 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationType type,
HeapObject object;
AllocationResult allocation;
- if (FLAG_single_generation && type == AllocationType::kYoung)
+ if (FLAG_single_generation && type == AllocationType::kYoung) {
type = AllocationType::kOld;
+ }
if (AllocationType::kYoung == type) {
if (large_object) {
@@ -212,9 +209,7 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationType type,
} else if (AllocationType::kMap == type) {
allocation = map_space_->AllocateRawUnaligned(size_in_bytes);
} else if (AllocationType::kReadOnly == type) {
-#ifdef V8_USE_SNAPSHOT
DCHECK(isolate_->serializer_enabled());
-#endif
DCHECK(!large_object);
DCHECK(CanAllocateInReadOnlySpace());
DCHECK_EQ(AllocationOrigin::kRuntime, origin);
@@ -242,6 +237,40 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationType type,
return allocation;
}
+template <Heap::AllocationRetryMode mode>
+HeapObject Heap::AllocateRawWith(int size, AllocationType allocation,
+ AllocationOrigin origin,
+ AllocationAlignment alignment) {
+ DCHECK(AllowHandleAllocation::IsAllowed());
+ DCHECK(AllowHeapAllocation::IsAllowed());
+ DCHECK_EQ(gc_state_, NOT_IN_GC);
+ Heap* heap = isolate()->heap();
+ Address* top = heap->NewSpaceAllocationTopAddress();
+ Address* limit = heap->NewSpaceAllocationLimitAddress();
+ if (allocation == AllocationType::kYoung &&
+ alignment == AllocationAlignment::kWordAligned &&
+ size <= kMaxRegularHeapObjectSize &&
+ (*limit - *top >= static_cast<unsigned>(size)) &&
+ V8_LIKELY(!FLAG_single_generation && FLAG_inline_new &&
+ FLAG_gc_interval == 0)) {
+ DCHECK(IsAligned(size, kTaggedSize));
+ HeapObject obj = HeapObject::FromAddress(*top);
+ *top += size;
+ heap->CreateFillerObjectAt(obj.address(), size, ClearRecordedSlots::kNo);
+ MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj.address(), size);
+ return obj;
+ }
+ switch (mode) {
+ case kLightRetry:
+ return AllocateRawWithLightRetrySlowPath(size, allocation, origin,
+ alignment);
+ case kRetryOrFail:
+ return AllocateRawWithRetryOrFailSlowPath(size, allocation, origin,
+ alignment);
+ }
+ UNREACHABLE();
+}
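+
The switch over the template parameter folds away at compile time: each instantiation inlines the bump-pointer fast path above and keeps only its own slow-path call. The two modes differ only in failure behavior, per the renamed slow paths at the bottom of heap.cc:

  // kLightRetry: a bounded GC-and-retry loop; may return a null HeapObject.
  HeapObject light = heap->AllocateRawWith<Heap::kLightRetry>(
      size, AllocationType::kOld);
  if (light.is_null()) { /* caller must handle the failed allocation */ }
  // kRetryOrFail: adds a last-resort GC and is not expected to return null,
  // which is why the Factory call sites use the result unchecked.
  HeapObject hard = heap->AllocateRawWith<Heap::kRetryOrFail>(
      size, AllocationType::kOld);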
+
void Heap::OnAllocationEvent(HeapObject object, int size_in_bytes) {
for (auto& tracker : allocation_trackers_) {
tracker->AllocationEvent(object.address(), size_in_bytes);
diff --git a/deps/v8/src/heap/heap-write-barrier-inl.h b/deps/v8/src/heap/heap-write-barrier-inl.h
index 5687284b1e..a0d9902006 100644
--- a/deps/v8/src/heap/heap-write-barrier-inl.h
+++ b/deps/v8/src/heap/heap-write-barrier-inl.h
@@ -212,6 +212,7 @@ inline void MarkingBarrierForDescriptorArray(Heap* heap, HeapObject host,
inline WriteBarrierMode GetWriteBarrierModeForObject(
HeapObject object, const DisallowHeapAllocation* promise) {
+ if (FLAG_disable_write_barriers) return SKIP_WRITE_BARRIER;
DCHECK(Heap_PageFlagsAreConsistent(object));
heap_internals::MemoryChunk* chunk =
heap_internals::MemoryChunk::FromHeapObject(object);
@@ -221,6 +222,9 @@ inline WriteBarrierMode GetWriteBarrierModeForObject(
}
inline bool ObjectInYoungGeneration(Object object) {
+ // TODO(rong): Fix caller of this function when we deploy
+ // v8_use_third_party_heap.
+ if (FLAG_single_generation) return false;
if (object.IsSmi()) return false;
return heap_internals::MemoryChunk::FromHeapObject(HeapObject::cast(object))
->InYoungGeneration();
diff --git a/deps/v8/src/heap/heap.cc b/deps/v8/src/heap/heap.cc
index ff3b34cfb4..45b2273c50 100644
--- a/deps/v8/src/heap/heap.cc
+++ b/deps/v8/src/heap/heap.cc
@@ -39,6 +39,7 @@
#include "src/heap/incremental-marking.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/mark-compact.h"
+#include "src/heap/memory-measurement.h"
#include "src/heap/memory-reducer.h"
#include "src/heap/object-stats.h"
#include "src/heap/objects-visiting-inl.h"
@@ -47,7 +48,6 @@
#include "src/heap/remembered-set.h"
#include "src/heap/scavenge-job.h"
#include "src/heap/scavenger-inl.h"
-#include "src/heap/store-buffer.h"
#include "src/heap/stress-marking-observer.h"
#include "src/heap/stress-scavenge-observer.h"
#include "src/heap/sweeper.h"
@@ -913,23 +913,6 @@ void Heap::RemoveAllocationObserversFromAllSpaces(
}
}
-class Heap::SkipStoreBufferScope {
- public:
- explicit SkipStoreBufferScope(StoreBuffer* store_buffer)
- : store_buffer_(store_buffer) {
- store_buffer_->MoveAllEntriesToRememberedSet();
- store_buffer_->SetMode(StoreBuffer::IN_GC);
- }
-
- ~SkipStoreBufferScope() {
- DCHECK(store_buffer_->Empty());
- store_buffer_->SetMode(StoreBuffer::NOT_IN_GC);
- }
-
- private:
- StoreBuffer* store_buffer_;
-};
-
namespace {
inline bool MakePretenureDecision(
AllocationSite site, AllocationSite::PretenureDecision current_decision,
@@ -1965,44 +1948,40 @@ bool Heap::PerformGarbageCollection(
size_t start_young_generation_size =
Heap::new_space()->Size() + new_lo_space()->SizeOfObjects();
- {
- Heap::SkipStoreBufferScope skip_store_buffer_scope(store_buffer_.get());
-
- switch (collector) {
- case MARK_COMPACTOR:
- UpdateOldGenerationAllocationCounter();
- // Perform mark-sweep with optional compaction.
- MarkCompact();
- old_generation_size_configured_ = true;
- // This should be updated before PostGarbageCollectionProcessing, which
- // can cause another GC. Take into account the objects promoted during
- // GC.
- old_generation_allocation_counter_at_last_gc_ +=
- static_cast<size_t>(promoted_objects_size_);
- old_generation_size_at_last_gc_ = OldGenerationSizeOfObjects();
- break;
- case MINOR_MARK_COMPACTOR:
- MinorMarkCompact();
- break;
- case SCAVENGER:
- if ((fast_promotion_mode_ &&
- CanExpandOldGeneration(new_space()->Size() +
- new_lo_space()->Size()))) {
- tracer()->NotifyYoungGenerationHandling(
- YoungGenerationHandling::kFastPromotionDuringScavenge);
- EvacuateYoungGeneration();
- } else {
- tracer()->NotifyYoungGenerationHandling(
- YoungGenerationHandling::kRegularScavenge);
-
- Scavenge();
- }
- break;
- }
+ switch (collector) {
+ case MARK_COMPACTOR:
+ UpdateOldGenerationAllocationCounter();
+ // Perform mark-sweep with optional compaction.
+ MarkCompact();
+ old_generation_size_configured_ = true;
+ // This should be updated before PostGarbageCollectionProcessing, which
+ // can cause another GC. Take into account the objects promoted during
+ // GC.
+ old_generation_allocation_counter_at_last_gc_ +=
+ static_cast<size_t>(promoted_objects_size_);
+ old_generation_size_at_last_gc_ = OldGenerationSizeOfObjects();
+ break;
+ case MINOR_MARK_COMPACTOR:
+ MinorMarkCompact();
+ break;
+ case SCAVENGER:
+ if ((fast_promotion_mode_ &&
+ CanExpandOldGeneration(new_space()->Size() +
+ new_lo_space()->Size()))) {
+ tracer()->NotifyYoungGenerationHandling(
+ YoungGenerationHandling::kFastPromotionDuringScavenge);
+ EvacuateYoungGeneration();
+ } else {
+ tracer()->NotifyYoungGenerationHandling(
+ YoungGenerationHandling::kRegularScavenge);
- ProcessPretenuringFeedback();
+ Scavenge();
+ }
+ break;
}
+ ProcessPretenuringFeedback();
+
UpdateSurvivalStatistics(static_cast<int>(start_young_generation_size));
ConfigureInitialOldGenerationSize();
@@ -2780,12 +2759,34 @@ HeapObject Heap::AlignWithFiller(HeapObject object, int object_size,
return object;
}
-void Heap::RegisterNewArrayBuffer(JSArrayBuffer buffer) {
- ArrayBufferTracker::RegisterNew(this, buffer);
+void* Heap::AllocateExternalBackingStore(
+ const std::function<void*(size_t)>& allocate, size_t byte_length) {
+ // TODO(ulan): Perform GCs proactively based on the byte_length and
+ // the current external backing store counters.
+ void* result = allocate(byte_length);
+ if (result) return result;
+ for (int i = 0; i < 2; i++) {
+ CollectGarbage(OLD_SPACE, GarbageCollectionReason::kExternalMemoryPressure);
+ result = allocate(byte_length);
+ if (result) return result;
+ }
+ isolate()->counters()->gc_last_resort_from_handles()->Increment();
+ CollectAllAvailableGarbage(GarbageCollectionReason::kExternalMemoryPressure);
+ return allocate(byte_length);
+}
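+
AllocateExternalBackingStore wraps an arbitrary allocation callback in a retry ladder: one plain attempt, two rounds of old-space GC, then a last-resort full GC. A hedged fragment of how an external allocator might route through it:

  void* memory = heap->AllocateExternalBackingStore(
      [](size_t length) { return calloc(1, length); }, byte_length);
  if (memory == nullptr) {
    // Even the last-resort collection could not free enough memory.
  }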
+
+void Heap::RegisterBackingStore(JSArrayBuffer buffer,
+ std::shared_ptr<BackingStore> backing_store) {
+ ArrayBufferTracker::RegisterNew(this, buffer, std::move(backing_store));
}
-void Heap::UnregisterArrayBuffer(JSArrayBuffer buffer) {
- ArrayBufferTracker::Unregister(this, buffer);
+std::shared_ptr<BackingStore> Heap::UnregisterBackingStore(
+ JSArrayBuffer buffer) {
+ return ArrayBufferTracker::Unregister(this, buffer);
+}
+
+std::shared_ptr<BackingStore> Heap::LookupBackingStore(JSArrayBuffer buffer) {
+ return ArrayBufferTracker::Lookup(this, buffer);
}
void Heap::ConfigureInitialOldGenerationSize() {
@@ -3387,16 +3388,23 @@ void Heap::RegisterDeserializedObjectsForBlackAllocation(
}
}
-void Heap::NotifyObjectLayoutChange(HeapObject object, int size,
- const DisallowHeapAllocation&) {
+void Heap::NotifyObjectLayoutChange(
+ HeapObject object, const DisallowHeapAllocation&,
+ InvalidateRecordedSlots invalidate_recorded_slots) {
if (incremental_marking()->IsMarking()) {
incremental_marking()->MarkBlackAndVisitObjectDueToLayoutChange(object);
if (incremental_marking()->IsCompacting() &&
+ invalidate_recorded_slots == InvalidateRecordedSlots::kYes &&
MayContainRecordedSlots(object)) {
MemoryChunk::FromHeapObject(object)
- ->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(object, size);
+ ->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(object);
}
}
+ if (invalidate_recorded_slots == InvalidateRecordedSlots::kYes &&
+ MayContainRecordedSlots(object)) {
+ MemoryChunk::FromHeapObject(object)
+ ->RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(object);
+ }
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
DCHECK(pending_layout_change_object_.is_null());
@@ -3684,8 +3692,7 @@ void Heap::MemoryPressureNotification(MemoryPressureLevel level,
isolate()->stack_guard()->RequestGC();
auto taskrunner = V8::GetCurrentPlatform()->GetForegroundTaskRunner(
reinterpret_cast<v8::Isolate*>(isolate()));
- taskrunner->PostTask(
- base::make_unique<MemoryPressureInterruptTask>(this));
+ taskrunner->PostTask(std::make_unique<MemoryPressureInterruptTask>(this));
}
}
}
@@ -3748,6 +3755,11 @@ bool Heap::InvokeNearHeapLimitCallback() {
return false;
}
+Handle<JSPromise> Heap::MeasureMemory(Handle<NativeContext> context,
+ v8::MeasureMemoryMode mode) {
+ return memory_measurement_->EnqueueRequest(context, mode);
+}
+
void Heap::CollectCodeStatistics() {
TRACE_EVENT0("v8", "Heap::CollectCodeStatistics");
CodeStatistics::ResetCodeAndMetadataStatistics(isolate());
@@ -4096,7 +4108,19 @@ void CollectSlots(MemoryChunk* chunk, Address start, Address end,
}
return KEEP_SLOT;
},
- SlotSet::PREFREE_EMPTY_BUCKETS);
+ SlotSet::FREE_EMPTY_BUCKETS);
+ if (direction == OLD_TO_NEW) {
+ CHECK(chunk->SweepingDone());
+ RememberedSetSweeping::Iterate(
+ chunk,
+ [start, end, untyped](MaybeObjectSlot slot) {
+ if (start <= slot.address() && slot.address() < end) {
+ untyped->insert(slot.address());
+ }
+ return KEEP_SLOT;
+ },
+ SlotSet::FREE_EMPTY_BUCKETS);
+ }
RememberedSet<direction>::IterateTyped(
chunk, [=](SlotType type, Address slot) {
if (start <= slot && slot < end) {
@@ -4117,7 +4141,6 @@ void Heap::VerifyRememberedSetFor(HeapObject object) {
std::set<Address> old_to_new;
std::set<std::pair<SlotType, Address> > typed_old_to_new;
if (!InYoungGeneration(object)) {
- store_buffer()->MoveAllEntriesToRememberedSet();
CollectSlots<OLD_TO_NEW>(chunk, start, end, &old_to_new, &typed_old_to_new);
OldToNewSlotVerifyingVisitor visitor(&old_to_new, &typed_old_to_new,
&this->ephemeron_remembered_set_);
@@ -4288,6 +4311,7 @@ void Heap::IterateStrongRoots(RootVisitor* v, VisitMode mode) {
FixStaleLeftTrimmedHandlesVisitor left_trim_visitor(this);
isolate_->handle_scope_implementer()->Iterate(&left_trim_visitor);
isolate_->handle_scope_implementer()->Iterate(v);
+ isolate_->IterateDeferredHandles(&left_trim_visitor);
isolate_->IterateDeferredHandles(v);
v->Synchronize(VisitorSynchronization::kHandleScope);
@@ -4879,9 +4903,9 @@ HeapObject Heap::EnsureImmovableCode(HeapObject heap_object, int object_size) {
return heap_object;
}
-HeapObject Heap::AllocateRawWithLightRetry(int size, AllocationType allocation,
- AllocationOrigin origin,
- AllocationAlignment alignment) {
+HeapObject Heap::AllocateRawWithLightRetrySlowPath(
+ int size, AllocationType allocation, AllocationOrigin origin,
+ AllocationAlignment alignment) {
HeapObject result;
AllocationResult alloc = AllocateRaw(size, allocation, origin, alignment);
if (alloc.To(&result)) {
@@ -4901,12 +4925,12 @@ HeapObject Heap::AllocateRawWithLightRetry(int size, AllocationType allocation,
return HeapObject();
}
-HeapObject Heap::AllocateRawWithRetryOrFail(int size, AllocationType allocation,
- AllocationOrigin origin,
- AllocationAlignment alignment) {
+HeapObject Heap::AllocateRawWithRetryOrFailSlowPath(
+ int size, AllocationType allocation, AllocationOrigin origin,
+ AllocationAlignment alignment) {
AllocationResult alloc;
HeapObject result =
- AllocateRawWithLightRetry(size, allocation, origin, alignment);
+ AllocateRawWithLightRetrySlowPath(size, allocation, origin, alignment);
if (!result.is_null()) return result;
isolate()->counters()->gc_last_resort_from_handles()->Increment();
@@ -4979,8 +5003,6 @@ void Heap::SetUp() {
memory_allocator_.reset(
new MemoryAllocator(isolate_, MaxReserved(), code_range_size_));
- store_buffer_.reset(new StoreBuffer(this));
-
mark_compact_collector_.reset(new MarkCompactCollector(this));
scavenger_collector_.reset(new ScavengerCollector(this));
@@ -5039,6 +5061,7 @@ void Heap::SetUpSpaces() {
#endif // ENABLE_MINOR_MC
array_buffer_collector_.reset(new ArrayBufferCollector(this));
gc_idle_time_handler_.reset(new GCIdleTimeHandler());
+ memory_measurement_.reset(new MemoryMeasurement(isolate()));
memory_reducer_.reset(new MemoryReducer(this));
if (V8_UNLIKELY(TracingFlags::is_gc_stats_enabled())) {
live_object_stats_.reset(new ObjectStats(this));
@@ -5049,8 +5072,6 @@ void Heap::SetUpSpaces() {
LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
LOG(isolate_, IntPtrTEvent("heap-available", Available()));
- store_buffer()->SetUp();
-
mark_compact_collector()->SetUp();
#ifdef ENABLE_MINOR_MC
if (minor_mark_compact_collector() != nullptr) {
@@ -5282,8 +5303,6 @@ void Heap::TearDown() {
space_[i] = nullptr;
}
- store_buffer()->TearDown();
-
memory_allocator()->TearDown();
StrongRootsList* next = nullptr;
@@ -5293,7 +5312,6 @@ void Heap::TearDown() {
}
strong_roots_list_ = nullptr;
- store_buffer_.reset();
memory_allocator_.reset();
}
@@ -5404,13 +5422,6 @@ void Heap::CompactWeakArrayLists(AllocationType allocation) {
DCHECK_IMPLIES(allocation == AllocationType::kOld, InOldSpace(*scripts));
scripts = CompactWeakArrayList(this, scripts, allocation);
set_script_list(*scripts);
-
- Handle<WeakArrayList> no_script_list(noscript_shared_function_infos(),
- isolate());
- DCHECK_IMPLIES(allocation == AllocationType::kOld,
- InOldSpace(*no_script_list));
- no_script_list = CompactWeakArrayList(this, no_script_list, allocation);
- set_noscript_shared_function_infos(*no_script_list);
}
void Heap::AddRetainedMap(Handle<Map> map) {
@@ -5511,53 +5522,55 @@ void Heap::CheckHandleCount() {
isolate_->handle_scope_implementer()->Iterate(&v);
}
-Address* Heap::store_buffer_top_address() {
- return store_buffer()->top_address();
-}
-
-// static
-intptr_t Heap::store_buffer_mask_constant() {
- return StoreBuffer::kStoreBufferMask;
-}
-
-// static
-Address Heap::store_buffer_overflow_function_address() {
- return FUNCTION_ADDR(StoreBuffer::StoreBufferOverflow);
-}
-
void Heap::ClearRecordedSlot(HeapObject object, ObjectSlot slot) {
+#ifndef V8_DISABLE_WRITE_BARRIERS
DCHECK(!IsLargeObject(object));
Page* page = Page::FromAddress(slot.address());
if (!page->InYoungGeneration()) {
DCHECK_EQ(page->owner_identity(), OLD_SPACE);
- store_buffer()->MoveAllEntriesToRememberedSet();
- RememberedSet<OLD_TO_NEW>::Remove(page, slot.address());
+
+ if (!page->SweepingDone()) {
+ RememberedSet<OLD_TO_NEW>::Remove(page, slot.address());
+ }
}
+#endif
+}
+
+// static
+int Heap::InsertIntoRememberedSetFromCode(MemoryChunk* chunk, Address slot) {
+ RememberedSet<OLD_TO_NEW>::Insert<AccessMode::NON_ATOMIC>(chunk, slot);
+ return 0;
}
#ifdef DEBUG
void Heap::VerifyClearedSlot(HeapObject object, ObjectSlot slot) {
+#ifndef V8_DISABLE_WRITE_BARRIERS
DCHECK(!IsLargeObject(object));
if (InYoungGeneration(object)) return;
Page* page = Page::FromAddress(slot.address());
DCHECK_EQ(page->owner_identity(), OLD_SPACE);
- store_buffer()->MoveAllEntriesToRememberedSet();
- CHECK(!RememberedSet<OLD_TO_NEW>::Contains(page, slot.address()));
- // Old to old slots are filtered with invalidated slots.
+ // Slots are filtered with invalidated slots.
+ CHECK_IMPLIES(RememberedSet<OLD_TO_NEW>::Contains(page, slot.address()),
+ page->RegisteredObjectWithInvalidatedSlots<OLD_TO_NEW>(object));
CHECK_IMPLIES(RememberedSet<OLD_TO_OLD>::Contains(page, slot.address()),
page->RegisteredObjectWithInvalidatedSlots<OLD_TO_OLD>(object));
+#endif
}
#endif
void Heap::ClearRecordedSlotRange(Address start, Address end) {
+#ifndef V8_DISABLE_WRITE_BARRIERS
Page* page = Page::FromAddress(start);
DCHECK(!page->IsLargePage());
if (!page->InYoungGeneration()) {
DCHECK_EQ(page->owner_identity(), OLD_SPACE);
- store_buffer()->MoveAllEntriesToRememberedSet();
- RememberedSet<OLD_TO_NEW>::RemoveRange(page, start, end,
- SlotSet::KEEP_EMPTY_BUCKETS);
+
+ if (!page->SweepingDone()) {
+ RememberedSet<OLD_TO_NEW>::RemoveRange(page, start, end,
+ SlotSet::KEEP_EMPTY_BUCKETS);
+ }
}
+#endif
}
PagedSpace* PagedSpaceIterator::Next() {
@@ -6164,8 +6177,8 @@ void Heap::WriteBarrierForCodeSlow(Code code) {
void Heap::GenerationalBarrierSlow(HeapObject object, Address slot,
HeapObject value) {
- Heap* heap = Heap::FromWritableHeapObject(object);
- heap->store_buffer()->InsertEntry(slot);
+ MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
+ RememberedSet<OLD_TO_NEW>::Insert<AccessMode::NON_ATOMIC>(chunk, slot);
}
void Heap::RecordEphemeronKeyWrite(EphemeronHashTable table, Address slot) {
@@ -6207,7 +6220,6 @@ void Heap::WriteBarrierForRangeImpl(MemoryChunk* source_page, HeapObject object,
STATIC_ASSERT(!(kModeMask & kDoEvacuationSlotRecording) ||
(kModeMask & kDoMarking));
- StoreBuffer* store_buffer = this->store_buffer();
IncrementalMarking* incremental_marking = this->incremental_marking();
MarkCompactCollector* collector = this->mark_compact_collector();
@@ -6218,7 +6230,8 @@ void Heap::WriteBarrierForRangeImpl(MemoryChunk* source_page, HeapObject object,
if ((kModeMask & kDoGenerational) &&
Heap::InYoungGeneration(value_heap_object)) {
- store_buffer->InsertEntry(slot.address());
+ RememberedSet<OLD_TO_NEW>::Insert<AccessMode::NON_ATOMIC>(source_page,
+ slot.address());
}
if ((kModeMask & kDoMarking) &&
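
The retry loop in Heap::AllocateExternalBackingStore above encodes a simple discipline: try the embedder-supplied callback, run up to two targeted collections between retries, and fall back to a full last-resort collection before giving up. A minimal self-contained sketch of that shape, where collect_once and collect_all are stand-ins for the heap's GC entry points, not real V8 APIs:

```cpp
#include <cstddef>
#include <functional>

// Minimal sketch of the retry discipline in Heap::AllocateExternalBackingStore.
// collect_once and collect_all model the heap's GC entry points.
void* AllocateWithGCRetries(const std::function<void*(std::size_t)>& allocate,
                            std::size_t byte_length,
                            const std::function<void()>& collect_once,
                            const std::function<void()>& collect_all) {
  if (void* result = allocate(byte_length)) return result;
  // Two targeted attempts: collect, then retry the callback.
  for (int i = 0; i < 2; i++) {
    collect_once();
    if (void* result = allocate(byte_length)) return result;
  }
  // Last resort: collect everything available, then make a final attempt.
  collect_all();
  return allocate(byte_length);  // may still be nullptr
}
```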
diff --git a/deps/v8/src/heap/heap.h b/deps/v8/src/heap/heap.h
index 2b8b963a79..182096f29c 100644
--- a/deps/v8/src/heap/heap.h
+++ b/deps/v8/src/heap/heap.h
@@ -7,6 +7,7 @@
#include <cmath>
#include <map>
+#include <memory>
#include <unordered_map>
#include <unordered_set>
#include <vector>
@@ -45,7 +46,11 @@ class TestMemoryAllocatorScope;
} // namespace heap
class IncrementalMarking;
+class BackingStore;
class JSArrayBuffer;
+class JSPromise;
+class NativeContext;
+
using v8::MemoryPressureLevel;
class AllocationObserver;
@@ -62,6 +67,7 @@ class Isolate;
class JSFinalizationGroup;
class LocalEmbedderHeapTracer;
class MemoryAllocator;
+class MemoryMeasurement;
class MemoryReducer;
class MinorMarkCompactCollector;
class ObjectIterator;
@@ -74,7 +80,6 @@ class ScavengeJob;
class Scavenger;
class ScavengerCollector;
class Space;
-class StoreBuffer;
class StressScavengeObserver;
class TimedHistogram;
class WeakObjectRetainer;
@@ -86,6 +91,8 @@ enum ArrayStorageAllocationMode {
enum class ClearRecordedSlots { kYes, kNo };
+enum class InvalidateRecordedSlots { kYes, kNo };
+
enum class ClearFreedMemoryMode { kClearFreedMemory, kDontClearFreedMemory };
enum ExternalBackingStoreType { kArrayBuffer, kExternalString, kNumTypes };
@@ -560,6 +567,9 @@ class Heap {
void RecordStats(HeapStats* stats, bool take_snapshot = false);
+ Handle<JSPromise> MeasureMemory(Handle<NativeContext> context,
+ v8::MeasureMemoryMode mode);
+
// Check new space expansion criteria and expand semispaces if it was hit.
void CheckNewSpaceExpansionCriteria();
@@ -839,12 +849,13 @@ class Heap {
void SetIsMarkingFlag(uint8_t flag) { is_marking_flag_ = flag; }
- Address* store_buffer_top_address();
+ V8_EXPORT_PRIVATE Address* store_buffer_top_address();
static intptr_t store_buffer_mask_constant();
static Address store_buffer_overflow_function_address();
void ClearRecordedSlot(HeapObject object, ObjectSlot slot);
void ClearRecordedSlotRange(Address start, Address end);
+ static int InsertIntoRememberedSetFromCode(MemoryChunk* chunk, Address slot);
#ifdef DEBUG
void VerifyClearedSlot(HeapObject object, ObjectSlot slot);
@@ -896,8 +907,12 @@
// The runtime uses this function to notify potentially unsafe object layout
// changes that require special synchronization with the concurrent marker.
- // The old size is the size of the object before layout change.
- void NotifyObjectLayoutChange(HeapObject object, int old_size,
- const DisallowHeapAllocation&);
+ // By default, recorded slots in the object are invalidated. Pass
+ // InvalidateRecordedSlots::kNo if this is not necessary, or if the
+ // invalidation is performed manually.
+ void NotifyObjectLayoutChange(
+ HeapObject object, const DisallowHeapAllocation&,
+ InvalidateRecordedSlots invalidate_recorded_slots =
+ InvalidateRecordedSlots::kYes);
#ifdef VERIFY_HEAP
// This function checks that either
@@ -1214,16 +1230,24 @@ class Heap {
AlignWithFiller(HeapObject object, int object_size, int allocation_size,
AllocationAlignment alignment);
+ // Allocate an external backing store with the given allocation callback.
+ // If the callback fails (indicated by a nullptr result) then this function
+ // will re-try the allocation after performing GCs. This is useful for
+ // external backing stores that may be retained by (unreachable) V8 objects
+ // such as ArrayBuffers, ExternalStrings, etc.
+ //
+ // The function may also proactively trigger GCs, even if the allocation
+ // callback does not fail, in order to keep memory usage low.
+ V8_EXPORT_PRIVATE void* AllocateExternalBackingStore(
+ const std::function<void*(size_t)>& allocate, size_t byte_length);
+
// ===========================================================================
// ArrayBuffer tracking. =====================================================
// ===========================================================================
-
- // TODO(gc): API usability: encapsulate mutation of JSArrayBuffer::is_external
- // in the registration/unregistration APIs. Consider dropping the "New" from
- // "RegisterNewArrayBuffer" because one can re-register a previously
- // unregistered buffer, too, and the name is confusing.
- void RegisterNewArrayBuffer(JSArrayBuffer buffer);
- void UnregisterArrayBuffer(JSArrayBuffer buffer);
+ void RegisterBackingStore(JSArrayBuffer buffer,
+ std::shared_ptr<BackingStore> backing_store);
+ std::shared_ptr<BackingStore> UnregisterBackingStore(JSArrayBuffer buffer);
+ std::shared_ptr<BackingStore> LookupBackingStore(JSArrayBuffer buffer);
// ===========================================================================
// Allocation site tracking. =================================================
@@ -1332,9 +1356,7 @@ class Heap {
// per call to mmap(). The page is only reclaimed when the process is
// killed. Confine the hint to a 32-bit section of the virtual address
// space. See crbug.com/700928.
- uintptr_t offset =
- reinterpret_cast<uintptr_t>(v8::internal::GetRandomMmapAddr()) &
- kMmapRegionMask;
+ uintptr_t offset = reinterpret_cast<uintptr_t>(result) & kMmapRegionMask;
result = reinterpret_cast<void*>(mmap_region_base_ + offset);
#endif // V8_OS_MACOSX
#endif // V8_TARGET_ARCH_X64
@@ -1348,8 +1370,6 @@ class Heap {
inline int MaxNumberToStringCacheSize() const;
private:
- class SkipStoreBufferScope;
-
using ExternalStringTableUpdaterCallback = String (*)(Heap* heap,
FullObjectSlot pointer);
@@ -1462,11 +1482,7 @@ class Heap {
ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR
- StoreBuffer* store_buffer() { return store_buffer_.get(); }
-
- void set_current_gc_flags(int flags) {
- current_gc_flags_ = flags;
- }
+ void set_current_gc_flags(int flags) { current_gc_flags_ = flags; }
inline bool ShouldReduceMemory() const {
return (current_gc_flags_ & kReduceMemoryFootprintMask) != 0;
@@ -1732,20 +1748,23 @@ class Heap {
AllocationOrigin origin = AllocationOrigin::kRuntime,
AllocationAlignment alignment = kWordAligned);
+ // This method will try to allocate objects quickly (AllocationType::kYoung);
+ // otherwise it falls back to a slower path indicated by the mode.
+ enum AllocationRetryMode { kLightRetry, kRetryOrFail };
+ template <AllocationRetryMode mode>
+ V8_WARN_UNUSED_RESULT inline HeapObject AllocateRawWith(
+ int size, AllocationType allocation,
+ AllocationOrigin origin = AllocationOrigin::kRuntime,
+ AllocationAlignment alignment = kWordAligned);
+
// This method will try to perform an allocation of a given size of a given
// AllocationType. If the allocation fails, a regular full garbage collection
// is triggered and the allocation is retried. This is performed multiple
// times. If after that retry procedure the allocation still fails nullptr is
// returned.
- HeapObject AllocateRawWithLightRetry(
+ V8_WARN_UNUSED_RESULT HeapObject AllocateRawWithLightRetrySlowPath(
int size, AllocationType allocation, AllocationOrigin origin,
AllocationAlignment alignment = kWordAligned);
- HeapObject AllocateRawWithLightRetry(
- int size, AllocationType allocation,
- AllocationAlignment alignment = kWordAligned) {
- return AllocateRawWithLightRetry(size, allocation,
- AllocationOrigin::kRuntime, alignment);
- }
// This method will try to perform an allocation of a given size of a given
// AllocationType. If the allocation fails, a regular full garbage collection
@@ -1753,17 +1772,11 @@ class Heap {
// times. If after that retry procedure the allocation still fails a "hammer"
// garbage collection is triggered which tries to significantly reduce memory.
// If the allocation still fails after that a fatal error is thrown.
- HeapObject AllocateRawWithRetryOrFail(
+ V8_WARN_UNUSED_RESULT HeapObject AllocateRawWithRetryOrFailSlowPath(
int size, AllocationType allocation, AllocationOrigin origin,
AllocationAlignment alignment = kWordAligned);
- HeapObject AllocateRawWithRetryOrFail(
- int size, AllocationType allocation,
- AllocationAlignment alignment = kWordAligned) {
- return AllocateRawWithRetryOrFail(size, allocation,
- AllocationOrigin::kRuntime, alignment);
- }
- HeapObject AllocateRawCodeInLargeObjectSpace(int size);
+ V8_WARN_UNUSED_RESULT HeapObject AllocateRawCodeInLargeObjectSpace(int size);
// Allocates a heap object based on the map.
V8_WARN_UNUSED_RESULT AllocationResult Allocate(Map map,
@@ -1980,10 +1993,10 @@ class Heap {
std::unique_ptr<ScavengerCollector> scavenger_collector_;
std::unique_ptr<ArrayBufferCollector> array_buffer_collector_;
std::unique_ptr<MemoryAllocator> memory_allocator_;
- std::unique_ptr<StoreBuffer> store_buffer_;
std::unique_ptr<IncrementalMarking> incremental_marking_;
std::unique_ptr<ConcurrentMarking> concurrent_marking_;
std::unique_ptr<GCIdleTimeHandler> gc_idle_time_handler_;
+ std::unique_ptr<MemoryMeasurement> memory_measurement_;
std::unique_ptr<MemoryReducer> memory_reducer_;
std::unique_ptr<ObjectStats> live_object_stats_;
std::unique_ptr<ObjectStats> dead_object_stats_;
@@ -2101,7 +2114,6 @@ class Heap {
friend class Scavenger;
friend class ScavengerCollector;
friend class Space;
- friend class StoreBuffer;
friend class Sweeper;
friend class heap::TestMemoryAllocatorScope;
@@ -2152,7 +2164,6 @@ class HeapStats {
intptr_t* end_marker; // 27
};
-
class AlwaysAllocateScope {
public:
explicit inline AlwaysAllocateScope(Heap* heap);
@@ -2232,7 +2243,6 @@ class VerifyPointersVisitor : public ObjectVisitor, public RootVisitor {
Heap* heap_;
};
-
// Verify that all objects are Smis.
class VerifySmisVisitor : public RootVisitor {
public:
@@ -2263,7 +2273,7 @@ class V8_EXPORT_PRIVATE SpaceIterator : public Malloced {
private:
Heap* heap_;
- int current_space_; // from enum AllocationSpace.
+ int current_space_; // from enum AllocationSpace.
};
// A HeapObjectIterator provides iteration over the entire non-read-only heap.
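
The heap.h side of this change replaces the two overload pairs with a single templated entry point, AllocateRawWith<mode>, that tries a fast inline allocation and only then selects a slow path. A compilable sketch of that dispatch, using illustrative stand-ins rather than the real V8 signatures:

```cpp
#include <cstdlib>

// Sketch of the fast-path/slow-path split behind AllocateRawWith<mode>.
// ModelHeap and its members are illustrative stand-ins only.
enum AllocationRetryMode { kLightRetry, kRetryOrFail };

struct ModelHeap {
  void* TryFast(int size) { return std::malloc(size); }  // inline fast path
  void* LightRetry(int size) { return TryFast(size); }   // GC + retry, may fail
  void* RetryOrFail(int size) {                          // never returns null
    void* result = LightRetry(size);
    if (result == nullptr) std::abort();
    return result;
  }
  template <AllocationRetryMode mode>
  void* AllocateRawWith(int size) {
    if (void* result = TryFast(size)) return result;
    return mode == kLightRetry ? LightRetry(size) : RetryOrFail(size);
  }
};
```

Callers pick the policy at compile time, e.g. heap.AllocateRawWith<kRetryOrFail>(size) at sites where allocation failure must be fatal.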
diff --git a/deps/v8/src/heap/incremental-marking-job.cc b/deps/v8/src/heap/incremental-marking-job.cc
index c6e607c3ea..1f924ff139 100644
--- a/deps/v8/src/heap/incremental-marking-job.cc
+++ b/deps/v8/src/heap/incremental-marking-job.cc
@@ -54,24 +54,24 @@ void IncrementalMarkingJob::ScheduleTask(Heap* heap, TaskType task_type) {
V8::GetCurrentPlatform()->GetForegroundTaskRunner(isolate);
if (task_type == TaskType::kNormal) {
if (taskrunner->NonNestableTasksEnabled()) {
- taskrunner->PostNonNestableTask(base::make_unique<Task>(
+ taskrunner->PostNonNestableTask(std::make_unique<Task>(
heap->isolate(), this,
EmbedderHeapTracer::EmbedderStackState::kEmpty, task_type));
} else {
- taskrunner->PostTask(base::make_unique<Task>(
+ taskrunner->PostTask(std::make_unique<Task>(
heap->isolate(), this,
EmbedderHeapTracer::EmbedderStackState::kUnknown, task_type));
}
} else {
if (taskrunner->NonNestableDelayedTasksEnabled()) {
taskrunner->PostNonNestableDelayedTask(
- base::make_unique<Task>(
+ std::make_unique<Task>(
heap->isolate(), this,
EmbedderHeapTracer::EmbedderStackState::kEmpty, task_type),
kDelayInSeconds);
} else {
taskrunner->PostDelayedTask(
- base::make_unique<Task>(
+ std::make_unique<Task>(
heap->isolate(), this,
EmbedderHeapTracer::EmbedderStackState::kUnknown, task_type),
kDelayInSeconds);
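
The changes in this file are mechanical: with the toolchain now on C++14, base::make_unique gives way to the standard helper. The two are interchangeable, as in this minimal example:

```cpp
#include <memory>

// base::make_unique<T>(args...) and std::make_unique<T>(args...) both
// forward args to T's constructor and wrap the result in std::unique_ptr<T>.
auto task = std::make_unique<int>(42);  // was: base::make_unique<int>(42)
```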
diff --git a/deps/v8/src/heap/invalidated-slots-inl.h b/deps/v8/src/heap/invalidated-slots-inl.h
index 35a08108f6..546667b2b2 100644
--- a/deps/v8/src/heap/invalidated-slots-inl.h
+++ b/deps/v8/src/heap/invalidated-slots-inl.h
@@ -24,42 +24,40 @@ bool InvalidatedSlotsFilter::IsValid(Address slot) {
DCHECK_LE(last_slot_, slot);
last_slot_ = slot;
#endif
- while (slot >= invalidated_end_) {
- ++iterator_;
- if (iterator_ != iterator_end_) {
- // Invalidated ranges must not overlap.
- DCHECK_LE(invalidated_end_, iterator_->first.address());
- invalidated_start_ = iterator_->first.address();
- invalidated_end_ = invalidated_start_ + iterator_->second;
- invalidated_object_ = HeapObject();
- invalidated_object_size_ = 0;
- } else {
- invalidated_start_ = sentinel_;
- invalidated_end_ = sentinel_;
- }
- }
- // Now the invalidated region ends after the slot.
if (slot < invalidated_start_) {
- // The invalidated region starts after the slot.
return true;
}
- // The invalidated region includes the slot.
- // Ask the object if the slot is valid.
- if (invalidated_object_.is_null()) {
- invalidated_object_ = HeapObject::FromAddress(invalidated_start_);
- DCHECK(!invalidated_object_.IsFiller());
- invalidated_object_size_ =
- invalidated_object_.SizeFromMap(invalidated_object_.map());
+
+ while (slot >= next_invalidated_start_) {
+ NextInvalidatedObject();
+ }
+
+ HeapObject invalidated_object = HeapObject::FromAddress(invalidated_start_);
+
+ if (invalidated_size_ == 0) {
+ DCHECK(invalidated_object.map().IsMap());
+ invalidated_size_ = invalidated_object.Size();
}
+
int offset = static_cast<int>(slot - invalidated_start_);
DCHECK_GT(offset, 0);
- DCHECK_LE(invalidated_object_size_,
- static_cast<int>(invalidated_end_ - invalidated_start_));
+ if (offset < invalidated_size_)
+ return invalidated_object.IsValidSlot(invalidated_object.map(), offset);
+
+ NextInvalidatedObject();
+ return true;
+}
+
+void InvalidatedSlotsFilter::NextInvalidatedObject() {
+ invalidated_start_ = next_invalidated_start_;
+ invalidated_size_ = 0;
- if (offset >= invalidated_object_size_) {
- return slots_in_free_space_are_valid_;
+ if (iterator_ == iterator_end_) {
+ next_invalidated_start_ = sentinel_;
+ } else {
+ next_invalidated_start_ = iterator_->address();
+ iterator_++;
}
- return invalidated_object_.IsValidSlot(invalidated_object_.map(), offset);
}
void InvalidatedSlotsCleanup::Free(Address free_start, Address free_end) {
@@ -72,35 +70,25 @@ void InvalidatedSlotsCleanup::Free(Address free_start, Address free_end) {
if (iterator_ == iterator_end_) return;
- // Ignore invalidated objects before free region
- while (free_start >= invalidated_end_) {
+ // Ignore invalidated objects that start before free region
+ while (invalidated_start_ < free_start) {
++iterator_;
NextInvalidatedObject();
}
- // Loop here: Free region might contain multiple invalidated objects
- while (free_end > invalidated_start_) {
- // Case: Free region starts before current invalidated object
- if (free_start <= invalidated_start_) {
- iterator_ = invalidated_slots_->erase(iterator_);
-
- } else {
- // Case: Free region starts within current invalidated object
- // (Can happen for right-trimmed objects)
- iterator_++;
- }
-
+ // Remove all invalidated objects that start within
+ // the free region.
+ while (invalidated_start_ < free_end) {
+ iterator_ = invalidated_slots_->erase(iterator_);
NextInvalidatedObject();
}
}
void InvalidatedSlotsCleanup::NextInvalidatedObject() {
if (iterator_ != iterator_end_) {
- invalidated_start_ = iterator_->first.address();
- invalidated_end_ = invalidated_start_ + iterator_->second;
+ invalidated_start_ = iterator_->address();
} else {
invalidated_start_ = sentinel_;
- invalidated_end_ = sentinel_;
}
}
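
The rewritten InvalidatedSlotsFilter::IsValid stores only object start addresses and recomputes sizes lazily from the object itself. The following self-contained model captures the decision logic; it uses an ordered-set lookup where the real code walks an iterator under the assumption that queried slots increase monotonically, and object_size_at / in_object_check stand in for HeapObject::Size and HeapObject::IsValidSlot:

```cpp
#include <cstdint>
#include <functional>
#include <set>

using Address = std::uintptr_t;

// Model of the slot check: a slot is valid if it lies before every
// invalidated object, or past the end of the covering object (free space);
// slots inside an invalidated object need a per-object layout query.
bool IsValidSlot(const std::set<Address>& invalidated, Address slot,
                 const std::function<int(Address)>& object_size_at,
                 const std::function<bool(Address, int)>& in_object_check) {
  auto it = invalidated.upper_bound(slot);
  if (it == invalidated.begin()) return true;  // slot precedes all objects
  Address start = *--it;                       // covering candidate
  int offset = static_cast<int>(slot - start);
  if (offset >= object_size_at(start)) return true;  // slot is in free space
  return in_object_check(start, offset);  // models object.IsValidSlot(map, offset)
}
```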
diff --git a/deps/v8/src/heap/invalidated-slots.cc b/deps/v8/src/heap/invalidated-slots.cc
index 8fa1518d68..9f29af218b 100644
--- a/deps/v8/src/heap/invalidated-slots.cc
+++ b/deps/v8/src/heap/invalidated-slots.cc
@@ -3,52 +3,35 @@
// found in the LICENSE file.
#include "src/heap/invalidated-slots.h"
+#include "src/heap/invalidated-slots-inl.h"
#include "src/heap/spaces.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
InvalidatedSlotsFilter InvalidatedSlotsFilter::OldToOld(MemoryChunk* chunk) {
- // The sweeper removes invalid slots and makes free space available for
- // allocation. Slots for new objects can be recorded in the free space.
- // Note that we cannot simply check for SweepingDone because pages in large
- // object space are not swept but have SweepingDone() == true.
- bool slots_in_free_space_are_valid =
- chunk->SweepingDone() && chunk->InOldSpace();
- return InvalidatedSlotsFilter(chunk, chunk->invalidated_slots<OLD_TO_OLD>(),
- slots_in_free_space_are_valid);
+ return InvalidatedSlotsFilter(chunk, chunk->invalidated_slots<OLD_TO_OLD>());
}
InvalidatedSlotsFilter InvalidatedSlotsFilter::OldToNew(MemoryChunk* chunk) {
- // Always treat these slots as valid for old-to-new for now. Invalid
- // old-to-new slots are always cleared.
- bool slots_in_free_space_are_valid = true;
- return InvalidatedSlotsFilter(chunk, chunk->invalidated_slots<OLD_TO_NEW>(),
- slots_in_free_space_are_valid);
+ return InvalidatedSlotsFilter(chunk, chunk->invalidated_slots<OLD_TO_NEW>());
}
InvalidatedSlotsFilter::InvalidatedSlotsFilter(
- MemoryChunk* chunk, InvalidatedSlots* invalidated_slots,
- bool slots_in_free_space_are_valid) {
- // Adjust slots_in_free_space_are_valid_ if more spaces are added.
- DCHECK_IMPLIES(invalidated_slots != nullptr,
- chunk->InOldSpace() || chunk->InLargeObjectSpace());
-
- slots_in_free_space_are_valid_ = slots_in_free_space_are_valid;
+ MemoryChunk* chunk, InvalidatedSlots* invalidated_slots) {
invalidated_slots = invalidated_slots ? invalidated_slots : &empty_;
iterator_ = invalidated_slots->begin();
iterator_end_ = invalidated_slots->end();
sentinel_ = chunk->area_end();
- if (iterator_ != iterator_end_) {
- invalidated_start_ = iterator_->first.address();
- invalidated_end_ = invalidated_start_ + iterator_->second;
- } else {
- invalidated_start_ = sentinel_;
- invalidated_end_ = sentinel_;
- }
- // These values will be lazily set when needed.
- invalidated_object_size_ = 0;
+
+ // Invoke NextInvalidatedObject twice, to initialize
+ // invalidated_start_ to the first invalidated object and
+ // next_invalidated_start_ to the second one.
+ NextInvalidatedObject();
+ NextInvalidatedObject();
+
#ifdef DEBUG
last_slot_ = chunk->area_start();
#endif
@@ -69,13 +52,7 @@ InvalidatedSlotsCleanup::InvalidatedSlotsCleanup(
iterator_end_ = invalidated_slots_->end();
sentinel_ = chunk->area_end();
- if (iterator_ != iterator_end_) {
- invalidated_start_ = iterator_->first.address();
- invalidated_end_ = invalidated_start_ + iterator_->second;
- } else {
- invalidated_start_ = sentinel_;
- invalidated_end_ = sentinel_;
- }
+ NextInvalidatedObject();
#ifdef DEBUG
last_free_ = chunk->area_start();
diff --git a/deps/v8/src/heap/invalidated-slots.h b/deps/v8/src/heap/invalidated-slots.h
index 4a72271910..15be3ce44c 100644
--- a/deps/v8/src/heap/invalidated-slots.h
+++ b/deps/v8/src/heap/invalidated-slots.h
@@ -5,7 +5,7 @@
#ifndef V8_HEAP_INVALIDATED_SLOTS_H_
#define V8_HEAP_INVALIDATED_SLOTS_H_
-#include <map>
+#include <set>
#include <stack>
#include "src/base/atomic-utils.h"
@@ -20,7 +20,5 @@
-// that potentially invalidates slots recorded concurrently. The second part
-// of each element is the size of the corresponding object before the layout
-// change.
-using InvalidatedSlots = std::map<HeapObject, int, Object::Comparer>;
+// that potentially invalidates slots recorded concurrently.
+using InvalidatedSlots = std::set<HeapObject, Object::Comparer>;
// This class provides IsValid predicate that takes into account the set
// of invalidated objects in the given memory chunk.
@@ -34,8 +34,7 @@ class V8_EXPORT_PRIVATE InvalidatedSlotsFilter {
static InvalidatedSlotsFilter OldToNew(MemoryChunk* chunk);
explicit InvalidatedSlotsFilter(MemoryChunk* chunk,
- InvalidatedSlots* invalidated_slots,
- bool slots_in_free_space_are_valid);
+ InvalidatedSlots* invalidated_slots);
inline bool IsValid(Address slot);
private:
@@ -43,14 +42,15 @@ class V8_EXPORT_PRIVATE InvalidatedSlotsFilter {
InvalidatedSlots::const_iterator iterator_end_;
Address sentinel_;
Address invalidated_start_;
- Address invalidated_end_;
- HeapObject invalidated_object_;
- int invalidated_object_size_;
- bool slots_in_free_space_are_valid_;
+ Address next_invalidated_start_;
+ int invalidated_size_;
InvalidatedSlots empty_;
#ifdef DEBUG
Address last_slot_;
#endif
+
+ private:
+ inline void NextInvalidatedObject();
};
class V8_EXPORT_PRIVATE InvalidatedSlotsCleanup {
@@ -71,7 +71,6 @@ class V8_EXPORT_PRIVATE InvalidatedSlotsCleanup {
Address sentinel_;
Address invalidated_start_;
- Address invalidated_end_;
inline void NextInvalidatedObject();
#ifdef DEBUG
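
The switch from std::map to std::set is the data-structure half of the same change: the pre-layout-change size is no longer cached next to each invalidated object, so registration shrinks to the object alone. A small illustration of the before/after types, with Obj as a stand-in for HeapObject:

```cpp
#include <map>
#include <set>

// Stand-in for HeapObject: the size can always be recomputed on demand,
// so it no longer needs to be cached at registration time.
struct Obj {
  int Size() const { return size_; }
  int size_;
};
struct ObjPtrLess {
  bool operator()(const Obj* a, const Obj* b) const { return a < b; }
};

using InvalidatedSlotsBefore = std::map<const Obj*, int, ObjPtrLess>;  // object -> old size
using InvalidatedSlotsAfter = std::set<const Obj*, ObjPtrLess>;        // object only
```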
diff --git a/deps/v8/src/heap/mark-compact-inl.h b/deps/v8/src/heap/mark-compact-inl.h
index cf6d96cef8..ed7e251f44 100644
--- a/deps/v8/src/heap/mark-compact-inl.h
+++ b/deps/v8/src/heap/mark-compact-inl.h
@@ -485,7 +485,8 @@ void MarkCompactCollector::RecordSlot(HeapObject object, HeapObjectSlot slot,
MemoryChunk* source_page = MemoryChunk::FromHeapObject(object);
if (target_page->IsEvacuationCandidate<AccessMode::ATOMIC>() &&
!source_page->ShouldSkipEvacuationSlotRecording<AccessMode::ATOMIC>()) {
- RememberedSet<OLD_TO_OLD>::Insert(source_page, slot.address());
+ RememberedSet<OLD_TO_OLD>::Insert<AccessMode::ATOMIC>(source_page,
+ slot.address());
}
}
@@ -493,7 +494,8 @@ void MarkCompactCollector::RecordSlot(MemoryChunk* source_page,
HeapObjectSlot slot, HeapObject target) {
MemoryChunk* target_page = MemoryChunk::FromHeapObject(target);
if (target_page->IsEvacuationCandidate<AccessMode::ATOMIC>()) {
- RememberedSet<OLD_TO_OLD>::Insert(source_page, slot.address());
+ RememberedSet<OLD_TO_OLD>::Insert<AccessMode::ATOMIC>(source_page,
+ slot.address());
}
}
diff --git a/deps/v8/src/heap/mark-compact.cc b/deps/v8/src/heap/mark-compact.cc
index f7067a60ea..c18b2652d7 100644
--- a/deps/v8/src/heap/mark-compact.cc
+++ b/deps/v8/src/heap/mark-compact.cc
@@ -2080,12 +2080,13 @@ void MarkCompactCollector::FlushBytecodeFromSFI(
MemoryChunk* chunk = MemoryChunk::FromAddress(compiled_data_start);
// Clear any recorded slots for the compiled data as being invalid.
+ DCHECK_NULL(chunk->sweeping_slot_set());
RememberedSet<OLD_TO_NEW>::RemoveRange(
chunk, compiled_data_start, compiled_data_start + compiled_data_size,
- SlotSet::PREFREE_EMPTY_BUCKETS);
+ SlotSet::FREE_EMPTY_BUCKETS);
RememberedSet<OLD_TO_OLD>::RemoveRange(
chunk, compiled_data_start, compiled_data_start + compiled_data_size,
- SlotSet::PREFREE_EMPTY_BUCKETS);
+ SlotSet::FREE_EMPTY_BUCKETS);
// Swap the map, using set_map_after_allocation to avoid verify heap checks
// which are not necessary since we are doing this during the GC atomic pause.
@@ -2233,12 +2234,12 @@ void MarkCompactCollector::RightTrimDescriptorArray(DescriptorArray array,
DCHECK_LE(0, new_nof_all_descriptors);
Address start = array.GetDescriptorSlot(new_nof_all_descriptors).address();
Address end = array.GetDescriptorSlot(old_nof_all_descriptors).address();
- RememberedSet<OLD_TO_NEW>::RemoveRange(MemoryChunk::FromHeapObject(array),
- start, end,
- SlotSet::PREFREE_EMPTY_BUCKETS);
- RememberedSet<OLD_TO_OLD>::RemoveRange(MemoryChunk::FromHeapObject(array),
- start, end,
- SlotSet::PREFREE_EMPTY_BUCKETS);
+ MemoryChunk* chunk = MemoryChunk::FromHeapObject(array);
+ DCHECK_NULL(chunk->sweeping_slot_set());
+ RememberedSet<OLD_TO_NEW>::RemoveRange(chunk, start, end,
+ SlotSet::FREE_EMPTY_BUCKETS);
+ RememberedSet<OLD_TO_OLD>::RemoveRange(chunk, start, end,
+ SlotSet::FREE_EMPTY_BUCKETS);
heap()->CreateFillerObjectAt(start, static_cast<int>(end - start),
ClearRecordedSlots::kNo);
array.set_number_of_all_descriptors(new_nof_all_descriptors);
@@ -3411,15 +3412,32 @@ class RememberedSetUpdatingItem : public UpdatingItem {
void UpdateUntypedPointers() {
if (chunk_->slot_set<OLD_TO_NEW, AccessMode::NON_ATOMIC>() != nullptr) {
+ InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToNew(chunk_);
RememberedSet<OLD_TO_NEW>::Iterate(
chunk_,
- [this](MaybeObjectSlot slot) {
+ [this, &filter](MaybeObjectSlot slot) {
+ if (!filter.IsValid(slot.address())) return REMOVE_SLOT;
return CheckAndUpdateOldToNewSlot(slot);
},
- SlotSet::PREFREE_EMPTY_BUCKETS);
+ SlotSet::FREE_EMPTY_BUCKETS);
}
- DCHECK_NULL(chunk_->invalidated_slots<OLD_TO_NEW>());
+ if (chunk_->sweeping_slot_set<AccessMode::NON_ATOMIC>()) {
+ InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToNew(chunk_);
+ RememberedSetSweeping::Iterate(
+ chunk_,
+ [this, &filter](MaybeObjectSlot slot) {
+ if (!filter.IsValid(slot.address())) return REMOVE_SLOT;
+ return CheckAndUpdateOldToNewSlot(slot);
+ },
+ SlotSet::FREE_EMPTY_BUCKETS);
+ }
+
+ if (chunk_->invalidated_slots<OLD_TO_NEW>() != nullptr) {
+ // The invalidated slots are not needed after old-to-new slots were
+ // processed.
+ chunk_->ReleaseInvalidatedSlots<OLD_TO_NEW>();
+ }
if ((updating_mode_ == RememberedSetUpdatingMode::ALL) &&
(chunk_->slot_set<OLD_TO_OLD, AccessMode::NON_ATOMIC>() != nullptr)) {
@@ -3430,17 +3448,11 @@ class RememberedSetUpdatingItem : public UpdatingItem {
if (!filter.IsValid(slot.address())) return REMOVE_SLOT;
return UpdateSlot<AccessMode::NON_ATOMIC>(slot);
},
- SlotSet::PREFREE_EMPTY_BUCKETS);
+ SlotSet::FREE_EMPTY_BUCKETS);
+ chunk_->ReleaseSlotSet<OLD_TO_OLD>();
}
if ((updating_mode_ == RememberedSetUpdatingMode::ALL) &&
chunk_->invalidated_slots<OLD_TO_OLD>() != nullptr) {
-#ifdef DEBUG
- for (auto object_size : *chunk_->invalidated_slots<OLD_TO_OLD>()) {
- HeapObject object = object_size.first;
- int size = object_size.second;
- DCHECK_LE(object.SizeFromMap(object.map()), size);
- }
-#endif
// The invalidated slots are not needed after old-to-old slots were
// processed.
chunk_->ReleaseInvalidatedSlots<OLD_TO_OLD>();
@@ -3557,15 +3569,18 @@ int MarkCompactCollectorBase::CollectRememberedSetUpdatingItems(
const bool contains_old_to_new_slots =
chunk->slot_set<OLD_TO_NEW>() != nullptr ||
chunk->typed_slot_set<OLD_TO_NEW>() != nullptr;
+ const bool contains_old_to_new_sweeping_slots =
+ chunk->sweeping_slot_set() != nullptr;
const bool contains_old_to_old_invalidated_slots =
chunk->invalidated_slots<OLD_TO_OLD>() != nullptr;
const bool contains_old_to_new_invalidated_slots =
chunk->invalidated_slots<OLD_TO_NEW>() != nullptr;
- if (!contains_old_to_new_slots && !contains_old_to_old_slots &&
- !contains_old_to_old_invalidated_slots &&
+ if (!contains_old_to_new_slots && !contains_old_to_new_sweeping_slots &&
+ !contains_old_to_old_slots && !contains_old_to_old_invalidated_slots &&
!contains_old_to_new_invalidated_slots)
continue;
if (mode == RememberedSetUpdatingMode::ALL || contains_old_to_new_slots ||
+ contains_old_to_new_sweeping_slots ||
contains_old_to_old_invalidated_slots ||
contains_old_to_new_invalidated_slots) {
job->AddItem(CreateRememberedSetUpdatingItem(chunk, mode));
@@ -3773,11 +3788,22 @@ void MarkCompactCollector::PostProcessEvacuationCandidates() {
// might not have recorded them in first place.
// Remove outdated slots.
+ RememberedSetSweeping::RemoveRange(page, page->address(),
+ failed_object.address(),
+ SlotSet::FREE_EMPTY_BUCKETS);
RememberedSet<OLD_TO_NEW>::RemoveRange(page, page->address(),
failed_object.address(),
- SlotSet::PREFREE_EMPTY_BUCKETS);
+ SlotSet::FREE_EMPTY_BUCKETS);
RememberedSet<OLD_TO_NEW>::RemoveRangeTyped(page, page->address(),
failed_object.address());
+
+ // Remove invalidated slots.
+ if (failed_object.address() > page->area_start()) {
+ InvalidatedSlotsCleanup old_to_new_cleanup =
+ InvalidatedSlotsCleanup::OldToNew(page);
+ old_to_new_cleanup.Free(page->area_start(), failed_object.address());
+ }
+
// Recompute live bytes.
LiveObjectVisitor::RecomputeLiveBytes(page, non_atomic_marking_state());
// Re-record slots.
@@ -4350,11 +4376,7 @@ void MinorMarkCompactCollector::CollectGarbage() {
RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(
heap(), [](MemoryChunk* chunk) {
- if (chunk->SweepingDone()) {
- RememberedSet<OLD_TO_NEW>::FreeEmptyBuckets(chunk);
- } else {
- RememberedSet<OLD_TO_NEW>::PreFreeEmptyBuckets(chunk);
- }
+ RememberedSet<OLD_TO_NEW>::FreeEmptyBuckets(chunk);
});
heap()->account_external_memory_concurrently_freed();
@@ -4651,7 +4673,15 @@ class PageMarkingItem : public MarkingItem {
if (!filter.IsValid(slot.address())) return REMOVE_SLOT;
return CheckAndMarkObject(task, slot);
},
- SlotSet::PREFREE_EMPTY_BUCKETS);
+ SlotSet::FREE_EMPTY_BUCKETS);
+ filter = InvalidatedSlotsFilter::OldToNew(chunk_);
+ RememberedSetSweeping::Iterate(
+ chunk_,
+ [this, task, &filter](MaybeObjectSlot slot) {
+ if (!filter.IsValid(slot.address())) return REMOVE_SLOT;
+ return CheckAndMarkObject(task, slot);
+ },
+ SlotSet::FREE_EMPTY_BUCKETS);
}
void MarkTypedPointers(YoungGenerationMarkingTask* task) {
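
Both UpdateUntypedPointers and the PageMarkingItem change above follow the same new pattern: the identical filtered visit now runs over two untyped slot sets, the regular OLD_TO_NEW set and the sweeping set. A generic sketch of that pattern, with stand-in types for the slot sets, filter, and per-slot callback:

```cpp
#include <cstdint>
#include <initializer_list>
#include <vector>

using Address = std::uintptr_t;

// Filter and Visit model InvalidatedSlotsFilter::IsValid and the per-slot
// update callback; the vectors model the two per-chunk SlotSet arrays.
template <typename Filter, typename Visit>
void VisitBothSlotSets(const std::vector<Address>* regular,
                       const std::vector<Address>* sweeping,
                       Filter is_valid, Visit visit) {
  for (const std::vector<Address>* set : {regular, sweeping}) {
    if (set == nullptr) continue;  // a chunk may have either set, or neither
    for (Address slot : *set) {
      if (!is_valid(slot)) continue;  // models returning REMOVE_SLOT
      visit(slot);
    }
  }
}
```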
diff --git a/deps/v8/src/heap/memory-measurement.cc b/deps/v8/src/heap/memory-measurement.cc
new file mode 100644
index 0000000000..62cd5dadb9
--- /dev/null
+++ b/deps/v8/src/heap/memory-measurement.cc
@@ -0,0 +1,80 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/memory-measurement.h"
+
+#include "src/execution/isolate-inl.h"
+#include "src/execution/isolate.h"
+#include "src/heap/factory-inl.h"
+#include "src/heap/factory.h"
+#include "src/objects/js-promise.h"
+
+namespace v8 {
+namespace internal {
+
+MemoryMeasurement::MemoryMeasurement(Isolate* isolate) : isolate_(isolate) {}
+
+namespace {
+
+class MemoryMeasurementResultBuilder {
+ public:
+ MemoryMeasurementResultBuilder(Isolate* isolate, Factory* factory)
+ : isolate_(isolate), factory_(factory) {
+ result_ = NewJSObject();
+ }
+
+ void AddTotals(size_t estimate, size_t lower_bound, size_t upper_bound) {
+ Handle<JSObject> total = NewJSObject();
+ Handle<Object> estimate_obj = NewNumber(estimate);
+ AddProperty(total, factory_->jsMemoryEstimate_string(), estimate_obj);
+ Handle<Object> range = NewRange(lower_bound, upper_bound);
+ AddProperty(total, factory_->jsMemoryRange_string(), range);
+ AddProperty(result_, factory_->total_string(), total);
+ }
+
+ Handle<JSObject> Build() { return result_; }
+
+ private:
+ Handle<Object> NewNumber(size_t value) {
+ return factory_->NewNumberFromSize(value);
+ }
+
+ Handle<JSObject> NewJSObject() {
+ return factory_->NewJSObject(isolate_->object_function());
+ }
+
+ Handle<JSArray> NewRange(size_t lower_bound, size_t upper_bound) {
+ Handle<Object> lower = NewNumber(lower_bound);
+ Handle<Object> upper = NewNumber(upper_bound);
+ Handle<FixedArray> elements = factory_->NewFixedArray(2);
+ elements->set(0, *lower);
+ elements->set(1, *upper);
+ return factory_->NewJSArrayWithElements(elements);
+ }
+
+ void AddProperty(Handle<JSObject> object, Handle<String> name,
+ Handle<Object> value) {
+ JSObject::AddProperty(isolate_, object, name, value, NONE);
+ }
+
+ Isolate* isolate_;
+ Factory* factory_;
+ Handle<JSObject> result_;
+};
+
+} // anonymous namespace
+
+Handle<JSPromise> MemoryMeasurement::EnqueueRequest(
+ Handle<NativeContext> context, v8::MeasureMemoryMode mode) {
+ Handle<JSPromise> promise = isolate_->factory()->NewJSPromise();
+ MemoryMeasurementResultBuilder result_builder(isolate_, isolate_->factory());
+ result_builder.AddTotals(isolate_->heap()->SizeOfObjects(), 0,
+ isolate_->heap()->SizeOfObjects());
+ Handle<JSObject> result = result_builder.Build();
+ JSPromise::Resolve(promise, result).ToHandleChecked();
+ return promise;
+}
+
+} // namespace internal
+} // namespace v8
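
The new MemoryMeasurement is a first, synchronous cut: EnqueueRequest resolves the promise immediately with a result of shape { total: { jsMemoryEstimate, jsMemoryRange: [lower, upper] } }, where the estimate is the current heap size and the range is [0, heap size]. A C++ stand-in for that initial result:

```cpp
#include <cstddef>

// Models the value the builder above resolves the promise with; the field
// names mirror the jsMemoryEstimate/jsMemoryRange properties it creates.
struct MemoryMeasurementResultModel {
  std::size_t estimate;
  std::size_t lower_bound;
  std::size_t upper_bound;
};

MemoryMeasurementResultModel InitialResult(std::size_t size_of_objects) {
  return {size_of_objects, 0, size_of_objects};
}
```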
diff --git a/deps/v8/src/heap/memory-measurement.h b/deps/v8/src/heap/memory-measurement.h
new file mode 100644
index 0000000000..6de7c8c970
--- /dev/null
+++ b/deps/v8/src/heap/memory-measurement.h
@@ -0,0 +1,29 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_MEMORY_MEASUREMENT_H_
+#define V8_HEAP_MEMORY_MEASUREMENT_H_
+
+#include "src/common/globals.h"
+#include "src/objects/objects.h"
+
+namespace v8 {
+namespace internal {
+
+class Heap;
+
+class V8_EXPORT_PRIVATE MemoryMeasurement {
+ public:
+ explicit MemoryMeasurement(Isolate* isolate);
+ Handle<JSPromise> EnqueueRequest(Handle<NativeContext> context,
+ v8::MeasureMemoryMode mode);
+
+ private:
+ Isolate* isolate_;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_MEMORY_MEASUREMENT_H_
diff --git a/deps/v8/src/heap/memory-reducer.cc b/deps/v8/src/heap/memory-reducer.cc
index 704e656796..37dca5b99c 100644
--- a/deps/v8/src/heap/memory-reducer.cc
+++ b/deps/v8/src/heap/memory-reducer.cc
@@ -214,9 +214,8 @@ void MemoryReducer::ScheduleTimer(double delay_ms) {
if (heap()->IsTearingDown()) return;
// Leave some room for precision error in task scheduler.
const double kSlackMs = 100;
- taskrunner_->PostDelayedTask(
- base::make_unique<MemoryReducer::TimerTask>(this),
- (delay_ms + kSlackMs) / 1000.0);
+ taskrunner_->PostDelayedTask(std::make_unique<MemoryReducer::TimerTask>(this),
+ (delay_ms + kSlackMs) / 1000.0);
}
void MemoryReducer::TearDown() { state_ = State(kDone, 0, 0, 0.0, 0); }
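
The reformatted call keeps the same arithmetic: MemoryReducer computes delays in milliseconds, PostDelayedTask takes seconds, and a 100 ms slack pads for scheduler precision error. In isolation:

```cpp
// Millisecond delay plus slack, converted to the seconds PostDelayedTask expects.
constexpr double kSlackMs = 100;
constexpr double DelayInSeconds(double delay_ms) {
  return (delay_ms + kSlackMs) / 1000.0;
}
static_assert(DelayInSeconds(900) == 1.0, "900ms + 100ms slack = 1s");
```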
diff --git a/deps/v8/src/heap/object-stats.cc b/deps/v8/src/heap/object-stats.cc
index 2ee88361c9..44798a3928 100644
--- a/deps/v8/src/heap/object-stats.cc
+++ b/deps/v8/src/heap/object-stats.cc
@@ -150,9 +150,8 @@ FieldStatsCollector::GetInobjectFieldStats(Map map) {
JSObjectFieldStats stats;
stats.embedded_fields_count_ = JSObject::GetEmbedderFieldCount(map);
if (!map.is_dictionary_map()) {
- int nof = map.NumberOfOwnDescriptors();
DescriptorArray descriptors = map.instance_descriptors();
- for (int descriptor = 0; descriptor < nof; descriptor++) {
+ for (InternalIndex descriptor : map.IterateOwnDescriptors()) {
PropertyDetails details = descriptors.GetDetails(descriptor);
if (details.location() == kField) {
FieldIndex index = FieldIndex::ForDescriptor(map, descriptor);
@@ -658,8 +657,7 @@ static ObjectStats::VirtualInstanceType GetFeedbackSlotType(
Object obj = maybe_obj->GetHeapObjectOrSmi();
switch (kind) {
case FeedbackSlotKind::kCall:
- if (obj == *isolate->factory()->uninitialized_symbol() ||
- obj == *isolate->factory()->premonomorphic_symbol()) {
+ if (obj == *isolate->factory()->uninitialized_symbol()) {
return ObjectStats::FEEDBACK_VECTOR_SLOT_CALL_UNUSED_TYPE;
}
return ObjectStats::FEEDBACK_VECTOR_SLOT_CALL_TYPE;
@@ -669,8 +667,7 @@ static ObjectStats::VirtualInstanceType GetFeedbackSlotType(
case FeedbackSlotKind::kLoadGlobalNotInsideTypeof:
case FeedbackSlotKind::kLoadKeyed:
case FeedbackSlotKind::kHasKeyed:
- if (obj == *isolate->factory()->uninitialized_symbol() ||
- obj == *isolate->factory()->premonomorphic_symbol()) {
+ if (obj == *isolate->factory()->uninitialized_symbol()) {
return ObjectStats::FEEDBACK_VECTOR_SLOT_LOAD_UNUSED_TYPE;
}
return ObjectStats::FEEDBACK_VECTOR_SLOT_LOAD_TYPE;
@@ -682,8 +679,7 @@ static ObjectStats::VirtualInstanceType GetFeedbackSlotType(
case FeedbackSlotKind::kStoreGlobalStrict:
case FeedbackSlotKind::kStoreKeyedSloppy:
case FeedbackSlotKind::kStoreKeyedStrict:
- if (obj == *isolate->factory()->uninitialized_symbol() ||
- obj == *isolate->factory()->premonomorphic_symbol()) {
+ if (obj == *isolate->factory()->uninitialized_symbol()) {
return ObjectStats::FEEDBACK_VECTOR_SLOT_STORE_UNUSED_TYPE;
}
return ObjectStats::FEEDBACK_VECTOR_SLOT_STORE_TYPE;
@@ -829,10 +825,6 @@ void ObjectStatsCollectorImpl::CollectGlobalStatistics() {
ObjectStats::RETAINED_MAPS_TYPE);
// WeakArrayList.
- RecordSimpleVirtualObjectStats(
- HeapObject(),
- WeakArrayList::cast(heap_->noscript_shared_function_infos()),
- ObjectStats::NOSCRIPT_SHARED_FUNCTION_INFOS_TYPE);
RecordSimpleVirtualObjectStats(HeapObject(),
WeakArrayList::cast(heap_->script_list()),
ObjectStats::SCRIPT_LIST_TYPE);
diff --git a/deps/v8/src/heap/object-stats.h b/deps/v8/src/heap/object-stats.h
index 2a9b9675ef..28ef967c5c 100644
--- a/deps/v8/src/heap/object-stats.h
+++ b/deps/v8/src/heap/object-stats.h
@@ -54,7 +54,6 @@
V(MAP_PROTOTYPE_DICTIONARY_TYPE) \
V(MAP_PROTOTYPE_TYPE) \
V(MAP_STABLE_TYPE) \
- V(NOSCRIPT_SHARED_FUNCTION_INFOS_TYPE) \
V(NUMBER_STRING_CACHE_TYPE) \
V(OBJECT_DICTIONARY_ELEMENTS_TYPE) \
V(OBJECT_ELEMENTS_TYPE) \
diff --git a/deps/v8/src/heap/objects-visiting-inl.h b/deps/v8/src/heap/objects-visiting-inl.h
index ba0bfa2415..d4d6d9375c 100644
--- a/deps/v8/src/heap/objects-visiting-inl.h
+++ b/deps/v8/src/heap/objects-visiting-inl.h
@@ -38,7 +38,7 @@ ResultType HeapVisitor<ResultType, ConcreteVisitor>::Visit(Map map,
HeapObject object) {
ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
switch (map.visitor_id()) {
-#define CASE(TypeName, Type) \
+#define CASE(TypeName) \
case kVisit##TypeName: \
return visitor->Visit##TypeName( \
map, ConcreteVisitor::template Cast<TypeName>(object));
@@ -77,10 +77,10 @@ void HeapVisitor<ResultType, ConcreteVisitor>::VisitMapPointer(
static_cast<ConcreteVisitor*>(this)->VisitPointer(host, host.map_slot());
}
-#define VISIT(TypeName, Type) \
+#define VISIT(TypeName) \
template <typename ResultType, typename ConcreteVisitor> \
ResultType HeapVisitor<ResultType, ConcreteVisitor>::Visit##TypeName( \
- Map map, Type object) { \
+ Map map, TypeName object) { \
ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this); \
if (!visitor->ShouldVisit(object)) return ResultType(); \
if (!visitor->AllowDefaultJSObjectVisit()) { \
diff --git a/deps/v8/src/heap/objects-visiting.h b/deps/v8/src/heap/objects-visiting.h
index a5c291458f..0f972737d2 100644
--- a/deps/v8/src/heap/objects-visiting.h
+++ b/deps/v8/src/heap/objects-visiting.h
@@ -13,59 +13,58 @@
namespace v8 {
namespace internal {
-// TODO(jkummerow): Drop the duplication: V(x, x) -> V(x).
-#define TYPED_VISITOR_ID_LIST(V) \
- V(AllocationSite, AllocationSite) \
- V(BigInt, BigInt) \
- V(ByteArray, ByteArray) \
- V(BytecodeArray, BytecodeArray) \
- V(Cell, Cell) \
- V(Code, Code) \
- V(CodeDataContainer, CodeDataContainer) \
- V(ConsString, ConsString) \
- V(Context, Context) \
- V(DataHandler, DataHandler) \
- V(DescriptorArray, DescriptorArray) \
- V(EmbedderDataArray, EmbedderDataArray) \
- V(EphemeronHashTable, EphemeronHashTable) \
- V(FeedbackCell, FeedbackCell) \
- V(FeedbackVector, FeedbackVector) \
- V(FixedArray, FixedArray) \
- V(FixedDoubleArray, FixedDoubleArray) \
- V(JSArrayBuffer, JSArrayBuffer) \
- V(JSDataView, JSDataView) \
- V(JSFunction, JSFunction) \
- V(JSObject, JSObject) \
- V(JSTypedArray, JSTypedArray) \
- V(WeakCell, WeakCell) \
- V(JSWeakCollection, JSWeakCollection) \
- V(JSWeakRef, JSWeakRef) \
- V(Map, Map) \
- V(NativeContext, NativeContext) \
- V(Oddball, Oddball) \
- V(PreparseData, PreparseData) \
- V(PropertyArray, PropertyArray) \
- V(PropertyCell, PropertyCell) \
- V(PrototypeInfo, PrototypeInfo) \
- V(SeqOneByteString, SeqOneByteString) \
- V(SeqTwoByteString, SeqTwoByteString) \
- V(SharedFunctionInfo, SharedFunctionInfo) \
- V(SlicedString, SlicedString) \
- V(SmallOrderedHashMap, SmallOrderedHashMap) \
- V(SmallOrderedHashSet, SmallOrderedHashSet) \
- V(SmallOrderedNameDictionary, SmallOrderedNameDictionary) \
- V(SourceTextModule, SourceTextModule) \
- V(Symbol, Symbol) \
- V(SyntheticModule, SyntheticModule) \
- V(ThinString, ThinString) \
- V(TransitionArray, TransitionArray) \
- V(UncompiledDataWithoutPreparseData, UncompiledDataWithoutPreparseData) \
- V(UncompiledDataWithPreparseData, UncompiledDataWithPreparseData) \
- V(WasmCapiFunctionData, WasmCapiFunctionData) \
- V(WasmIndirectFunctionTable, WasmIndirectFunctionTable) \
- V(WasmInstanceObject, WasmInstanceObject)
-
-#define FORWARD_DECLARE(TypeName, Type) class Type;
+#define TYPED_VISITOR_ID_LIST(V) \
+ V(AllocationSite) \
+ V(BigInt) \
+ V(ByteArray) \
+ V(BytecodeArray) \
+ V(Cell) \
+ V(Code) \
+ V(CodeDataContainer) \
+ V(ConsString) \
+ V(Context) \
+ V(DataHandler) \
+ V(DescriptorArray) \
+ V(EmbedderDataArray) \
+ V(EphemeronHashTable) \
+ V(FeedbackCell) \
+ V(FeedbackVector) \
+ V(FixedArray) \
+ V(FixedDoubleArray) \
+ V(JSArrayBuffer) \
+ V(JSDataView) \
+ V(JSFunction) \
+ V(JSObject) \
+ V(JSTypedArray) \
+ V(WeakCell) \
+ V(JSWeakCollection) \
+ V(JSWeakRef) \
+ V(Map) \
+ V(NativeContext) \
+ V(Oddball) \
+ V(PreparseData) \
+ V(PropertyArray) \
+ V(PropertyCell) \
+ V(PrototypeInfo) \
+ V(SeqOneByteString) \
+ V(SeqTwoByteString) \
+ V(SharedFunctionInfo) \
+ V(SlicedString) \
+ V(SmallOrderedHashMap) \
+ V(SmallOrderedHashSet) \
+ V(SmallOrderedNameDictionary) \
+ V(SourceTextModule) \
+ V(Symbol) \
+ V(SyntheticModule) \
+ V(ThinString) \
+ V(TransitionArray) \
+ V(UncompiledDataWithoutPreparseData) \
+ V(UncompiledDataWithPreparseData) \
+ V(WasmCapiFunctionData) \
+ V(WasmIndirectFunctionTable) \
+ V(WasmInstanceObject)
+
+#define FORWARD_DECLARE(TypeName) class TypeName;
TYPED_VISITOR_ID_LIST(FORWARD_DECLARE)
#undef FORWARD_DECLARE
@@ -99,8 +98,8 @@ class HeapVisitor : public ObjectVisitor {
// in the default Visit implementation for subclasses of JSObject.
V8_INLINE bool AllowDefaultJSObjectVisit() { return true; }
-#define VISIT(TypeName, Type) \
- V8_INLINE ResultType Visit##TypeName(Map map, Type object);
+#define VISIT(TypeName) \
+ V8_INLINE ResultType Visit##TypeName(Map map, TypeName object);
TYPED_VISITOR_ID_LIST(VISIT)
#undef VISIT
V8_INLINE ResultType VisitShortcutCandidate(Map map, ConsString object);
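
With the duplication dropped, one single-argument X-macro list can drive every per-type expansion (forward declarations, visitor switch cases, Visit##TypeName declarations). A trimmed two-entry demo of the mechanism:

```cpp
// Demo list trimmed to two entries for illustration.
#define DEMO_TYPED_VISITOR_ID_LIST(V) \
  V(AllocationSite)                   \
  V(BigInt)

#define DEMO_FORWARD_DECLARE(TypeName) class TypeName;
DEMO_TYPED_VISITOR_ID_LIST(DEMO_FORWARD_DECLARE)
#undef DEMO_FORWARD_DECLARE
// Expands to: class AllocationSite; class BigInt;
```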
diff --git a/deps/v8/src/heap/remembered-set.h b/deps/v8/src/heap/remembered-set.h
index eefc565e00..3c8984c83a 100644
--- a/deps/v8/src/heap/remembered-set.h
+++ b/deps/v8/src/heap/remembered-set.h
@@ -5,6 +5,8 @@
#ifndef V8_HEAP_REMEMBERED_SET_H_
#define V8_HEAP_REMEMBERED_SET_H_
+#include <memory>
+
#include "src/base/memory.h"
#include "src/codegen/reloc-info.h"
#include "src/heap/heap.h"
@@ -16,54 +18,39 @@ namespace internal {
enum RememberedSetIterationMode { SYNCHRONIZED, NON_SYNCHRONIZED };
-// TODO(ulan): Investigate performance of de-templatizing this class.
-template <RememberedSetType type>
-class RememberedSet : public AllStatic {
+class RememberedSetOperations {
public:
// Given a page and a slot in that page, this function adds the slot to the
// remembered set.
- template <AccessMode access_mode = AccessMode::ATOMIC>
- static void Insert(MemoryChunk* chunk, Address slot_addr) {
+ template <AccessMode access_mode>
+ static void Insert(SlotSet* slot_set, MemoryChunk* chunk, Address slot_addr) {
DCHECK(chunk->Contains(slot_addr));
- SlotSet* slot_set = chunk->slot_set<type, access_mode>();
- if (slot_set == nullptr) {
- slot_set = chunk->AllocateSlotSet<type>();
- }
uintptr_t offset = slot_addr - chunk->address();
slot_set[offset / Page::kPageSize].Insert<access_mode>(offset %
Page::kPageSize);
}
- // Given a page and a slot in that page, this function returns true if
- // the remembered set contains the slot.
- static bool Contains(MemoryChunk* chunk, Address slot_addr) {
- DCHECK(chunk->Contains(slot_addr));
- SlotSet* slot_set = chunk->slot_set<type>();
- if (slot_set == nullptr) {
- return false;
+ template <typename Callback>
+ static void Iterate(SlotSet* slots, MemoryChunk* chunk, Callback callback,
+ SlotSet::EmptyBucketMode mode) {
+ if (slots != nullptr) {
+ size_t pages = (chunk->size() + Page::kPageSize - 1) / Page::kPageSize;
+ for (size_t page = 0; page < pages; page++) {
+ slots[page].Iterate(chunk->address() + page * Page::kPageSize, callback,
+ mode);
+ }
}
- uintptr_t offset = slot_addr - chunk->address();
- return slot_set[offset / Page::kPageSize].Contains(offset %
- Page::kPageSize);
}
- // Given a page and a slot in that page, this function removes the slot from
- // the remembered set.
- // If the slot was never added, then the function does nothing.
- static void Remove(MemoryChunk* chunk, Address slot_addr) {
- DCHECK(chunk->Contains(slot_addr));
- SlotSet* slot_set = chunk->slot_set<type>();
+ static void Remove(SlotSet* slot_set, MemoryChunk* chunk, Address slot_addr) {
if (slot_set != nullptr) {
uintptr_t offset = slot_addr - chunk->address();
slot_set[offset / Page::kPageSize].Remove(offset % Page::kPageSize);
}
}
- // Given a page and a range of slots in that page, this function removes the
- // slots from the remembered set.
- static void RemoveRange(MemoryChunk* chunk, Address start, Address end,
- SlotSet::EmptyBucketMode mode) {
- SlotSet* slot_set = chunk->slot_set<type>();
+ static void RemoveRange(SlotSet* slot_set, MemoryChunk* chunk, Address start,
+ Address end, SlotSet::EmptyBucketMode mode) {
if (slot_set != nullptr) {
uintptr_t start_offset = start - chunk->address();
uintptr_t end_offset = end - chunk->address();
@@ -99,6 +86,53 @@ class RememberedSet : public AllStatic {
}
}
}
+};
+
+// TODO(ulan): Investigate performance of de-templatizing this class.
+template <RememberedSetType type>
+class RememberedSet : public AllStatic {
+ public:
+ // Given a page and a slot in that page, this function adds the slot to the
+ // remembered set.
+ template <AccessMode access_mode>
+ static void Insert(MemoryChunk* chunk, Address slot_addr) {
+ DCHECK(chunk->Contains(slot_addr));
+ SlotSet* slot_set = chunk->slot_set<type, access_mode>();
+ if (slot_set == nullptr) {
+ slot_set = chunk->AllocateSlotSet<type>();
+ }
+ RememberedSetOperations::Insert<access_mode>(slot_set, chunk, slot_addr);
+ }
+
+ // Given a page and a slot in that page, this function returns true if
+ // the remembered set contains the slot.
+ static bool Contains(MemoryChunk* chunk, Address slot_addr) {
+ DCHECK(chunk->Contains(slot_addr));
+ SlotSet* slot_set = chunk->slot_set<type>();
+ if (slot_set == nullptr) {
+ return false;
+ }
+ uintptr_t offset = slot_addr - chunk->address();
+ return slot_set[offset / Page::kPageSize].Contains(offset %
+ Page::kPageSize);
+ }
+
+ // Given a page and a slot in that page, this function removes the slot from
+ // the remembered set.
+ // If the slot was never added, then the function does nothing.
+ static void Remove(MemoryChunk* chunk, Address slot_addr) {
+ DCHECK(chunk->Contains(slot_addr));
+ SlotSet* slot_set = chunk->slot_set<type>();
+ RememberedSetOperations::Remove(slot_set, chunk, slot_addr);
+ }
+
+ // Given a page and a range of slots in that page, this function removes the
+ // slots from the remembered set.
+ static void RemoveRange(MemoryChunk* chunk, Address start, Address end,
+ SlotSet::EmptyBucketMode mode) {
+ SlotSet* slot_set = chunk->slot_set<type>();
+ RememberedSetOperations::RemoveRange(slot_set, chunk, start, end, mode);
+ }
// Iterates and filters the remembered set with the given callback.
// The callback should take (Address slot) and return SlotCallbackResult.
@@ -120,8 +154,11 @@ class RememberedSet : public AllStatic {
MemoryChunk* chunk;
while ((chunk = it.next()) != nullptr) {
SlotSet* slots = chunk->slot_set<type>();
+ SlotSet* sweeping_slots =
+ type == OLD_TO_NEW ? chunk->sweeping_slot_set() : nullptr;
TypedSlotSet* typed_slots = chunk->typed_slot_set<type>();
- if (slots != nullptr || typed_slots != nullptr ||
+ if (slots != nullptr || sweeping_slots != nullptr ||
+ typed_slots != nullptr ||
chunk->invalidated_slots<type>() != nullptr) {
callback(chunk);
}
@@ -138,42 +175,7 @@ class RememberedSet : public AllStatic {
static void Iterate(MemoryChunk* chunk, Callback callback,
SlotSet::EmptyBucketMode mode) {
SlotSet* slots = chunk->slot_set<type>();
- if (slots != nullptr) {
- size_t pages = (chunk->size() + Page::kPageSize - 1) / Page::kPageSize;
- int new_count = 0;
- for (size_t page = 0; page < pages; page++) {
- new_count += slots[page].Iterate(callback, mode);
- }
- // Only old-to-old slot sets are released eagerly. Old-new-slot sets are
- // released by the sweeper threads.
- if (type == OLD_TO_OLD && new_count == 0) {
- chunk->ReleaseSlotSet<OLD_TO_OLD>();
- }
- }
- }
-
- static int NumberOfPreFreedEmptyBuckets(MemoryChunk* chunk) {
- DCHECK(type == OLD_TO_NEW);
- int result = 0;
- SlotSet* slots = chunk->slot_set<type>();
- if (slots != nullptr) {
- size_t pages = (chunk->size() + Page::kPageSize - 1) / Page::kPageSize;
- for (size_t page = 0; page < pages; page++) {
- result += slots[page].NumberOfPreFreedEmptyBuckets();
- }
- }
- return result;
- }
-
- static void PreFreeEmptyBuckets(MemoryChunk* chunk) {
- DCHECK(type == OLD_TO_NEW);
- SlotSet* slots = chunk->slot_set<type>();
- if (slots != nullptr) {
- size_t pages = (chunk->size() + Page::kPageSize - 1) / Page::kPageSize;
- for (size_t page = 0; page < pages; page++) {
- slots[page].PreFreeEmptyBuckets();
- }
- }
+ RememberedSetOperations::Iterate(slots, chunk, callback, mode);
}
static void FreeEmptyBuckets(MemoryChunk* chunk) {
@@ -183,7 +185,6 @@ class RememberedSet : public AllStatic {
size_t pages = (chunk->size() + Page::kPageSize - 1) / Page::kPageSize;
for (size_t page = 0; page < pages; page++) {
slots[page].FreeEmptyBuckets();
- slots[page].FreeToBeFreedBuckets();
}
}
}
@@ -217,7 +218,7 @@ class RememberedSet : public AllStatic {
return start <= slot_addr && slot_addr < end ? REMOVE_SLOT
: KEEP_SLOT;
},
- TypedSlotSet::PREFREE_EMPTY_CHUNKS);
+ TypedSlotSet::FREE_EMPTY_CHUNKS);
}
}
@@ -234,9 +235,9 @@ class RememberedSet : public AllStatic {
});
}
- // Iterates and filters typed old to old pointers in the given memory chunk
- // with the given callback. The callback should take (SlotType slot_type,
- // Address addr) and return SlotCallbackResult.
+ // Iterates and filters typed pointers in the given memory chunk with the
+ // given callback. The callback should take (SlotType slot_type, Address addr)
+ // and return SlotCallbackResult.
template <typename Callback>
static void IterateTyped(MemoryChunk* chunk, Callback callback) {
TypedSlotSet* slots = chunk->typed_slot_set<type>();
@@ -259,9 +260,6 @@ class RememberedSet : public AllStatic {
chunk->ReleaseInvalidatedSlots<OLD_TO_OLD>();
}
}
-
- private:
- static bool IsValidSlot(Heap* heap, MemoryChunk* chunk, ObjectSlot slot);
};
class UpdateTypedSlotHelper {
@@ -347,6 +345,46 @@ class UpdateTypedSlotHelper {
}
};
+class RememberedSetSweeping {
+ public:
+ template <AccessMode access_mode>
+ static void Insert(MemoryChunk* chunk, Address slot_addr) {
+ DCHECK(chunk->Contains(slot_addr));
+ SlotSet* slot_set = chunk->sweeping_slot_set<access_mode>();
+ if (slot_set == nullptr) {
+ slot_set = chunk->AllocateSweepingSlotSet();
+ }
+ RememberedSetOperations::Insert<access_mode>(slot_set, chunk, slot_addr);
+ }
+
+ static void Remove(MemoryChunk* chunk, Address slot_addr) {
+ DCHECK(chunk->Contains(slot_addr));
+ SlotSet* slot_set = chunk->sweeping_slot_set<AccessMode::ATOMIC>();
+ RememberedSetOperations::Remove(slot_set, chunk, slot_addr);
+ }
+
+ // Given a page and a range of slots in that page, this function removes the
+ // slots from the remembered set.
+ static void RemoveRange(MemoryChunk* chunk, Address start, Address end,
+ SlotSet::EmptyBucketMode mode) {
+ SlotSet* slot_set = chunk->sweeping_slot_set();
+ RememberedSetOperations::RemoveRange(slot_set, chunk, start, end, mode);
+ }
+
+ // Iterates and filters the remembered set in the given memory chunk with
+ // the given callback. The callback should take (Address slot) and return
+ // SlotCallbackResult.
+ //
+ // Notice that |mode| can only be FREE_EMPTY_BUCKETS if there are no other
+ // threads concurrently inserting slots.
+ template <typename Callback>
+ static void Iterate(MemoryChunk* chunk, Callback callback,
+ SlotSet::EmptyBucketMode mode) {
+ SlotSet* slots = chunk->sweeping_slot_set();
+ RememberedSetOperations::Iterate(slots, chunk, callback, mode);
+ }
+};
+
inline SlotType SlotTypeForRelocInfoMode(RelocInfo::Mode rmode) {
if (RelocInfo::IsCodeTargetMode(rmode)) {
return CODE_TARGET_SLOT;
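Both Iterate specializations above now delegate to RememberedSetOperations, whose body falls outside this excerpt. A minimal sketch of what that shared helper plausibly looks like, given the per-page SlotSet array allocated in spaces.cc below and the new page_start parameter of SlotSet::Iterate (names and exact signature are inferred, not quoted from the patch):

template <typename Callback>
static int Iterate(SlotSet* slots, MemoryChunk* chunk, Callback callback,
                   SlotSet::EmptyBucketMode mode) {
  int new_count = 0;
  if (slots != nullptr) {
    // Large pages carry one SlotSet per kPageSize-sized piece.
    size_t pages = (chunk->size() + Page::kPageSize - 1) / Page::kPageSize;
    for (size_t page = 0; page < pages; page++) {
      // Pass the page base explicitly; SlotSet no longer stores page_start_.
      new_count += slots[page].Iterate(
          chunk->address() + page * Page::kPageSize, callback, mode);
    }
  }
  return new_count;
}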
diff --git a/deps/v8/src/heap/scavenge-job.cc b/deps/v8/src/heap/scavenge-job.cc
index 273866d5e4..3730bfeecb 100644
--- a/deps/v8/src/heap/scavenge-job.cc
+++ b/deps/v8/src/heap/scavenge-job.cc
@@ -108,7 +108,7 @@ void ScavengeJob::ScheduleIdleTask(Heap* heap) {
v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(heap->isolate());
if (V8::GetCurrentPlatform()->IdleTasksEnabled(isolate)) {
idle_task_pending_ = true;
- auto task = base::make_unique<IdleTask>(heap->isolate(), this);
+ auto task = std::make_unique<IdleTask>(heap->isolate(), this);
V8::GetCurrentPlatform()->GetForegroundTaskRunner(isolate)->PostIdleTask(
std::move(task));
}
diff --git a/deps/v8/src/heap/scavenger.cc b/deps/v8/src/heap/scavenger.cc
index 7d56882953..47c19d4fcc 100644
--- a/deps/v8/src/heap/scavenger.cc
+++ b/deps/v8/src/heap/scavenger.cc
@@ -153,8 +153,17 @@ class IterateAndScavengePromotedObjectsVisitor final : public ObjectVisitor {
if (result == KEEP_SLOT) {
SLOW_DCHECK(target.IsHeapObject());
- RememberedSet<OLD_TO_NEW>::Insert(MemoryChunk::FromHeapObject(host),
- slot.address());
+ MemoryChunk* chunk = MemoryChunk::FromHeapObject(host);
+
+ // The sweeper is stopped during scavenge, so we can insert directly
+ // into its remembered set here.
+ if (chunk->sweeping_slot_set()) {
+ RememberedSetSweeping::Insert<AccessMode::ATOMIC>(chunk,
+ slot.address());
+ } else {
+ RememberedSet<OLD_TO_NEW>::Insert<AccessMode::ATOMIC>(chunk,
+ slot.address());
+ }
}
SLOW_DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(
HeapObject::cast(target)));
@@ -165,8 +174,8 @@ class IterateAndScavengePromotedObjectsVisitor final : public ObjectVisitor {
// We cannot call MarkCompactCollector::RecordSlot because that checks
// that the host page is not in young generation, which does not hold
// for pending large pages.
- RememberedSet<OLD_TO_OLD>::Insert(MemoryChunk::FromHeapObject(host),
- slot.address());
+ RememberedSet<OLD_TO_OLD>::Insert<AccessMode::ATOMIC>(
+ MemoryChunk::FromHeapObject(host), slot.address());
}
}
@@ -239,8 +248,10 @@ void ScavengerCollector::CollectGarbage() {
// access to the slots of a page and can completely avoid any locks on
// the page itself.
Sweeper::FilterSweepingPagesScope filter_scope(sweeper, pause_scope);
- filter_scope.FilterOldSpaceSweepingPages(
- [](Page* page) { return !page->ContainsSlots<OLD_TO_NEW>(); });
+ filter_scope.FilterOldSpaceSweepingPages([](Page* page) {
+ return !page->ContainsSlots<OLD_TO_NEW>() && !page->sweeping_slot_set();
+ });
+
RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(
heap_, [&job](MemoryChunk* chunk) {
job.AddItem(new PageScavengingItem(chunk));
@@ -335,11 +346,7 @@ void ScavengerCollector::CollectGarbage() {
heap_->new_lo_space()->FreeDeadObjects([](HeapObject) { return true; });
RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(heap_, [](MemoryChunk* chunk) {
- if (chunk->SweepingDone()) {
- RememberedSet<OLD_TO_NEW>::FreeEmptyBuckets(chunk);
- } else {
- RememberedSet<OLD_TO_NEW>::PreFreeEmptyBuckets(chunk);
- }
+ RememberedSet<OLD_TO_NEW>::FreeEmptyBuckets(chunk);
});
// Update how much has survived scavenge.
@@ -430,16 +437,45 @@ void Scavenger::AddPageToSweeperIfNecessary(MemoryChunk* page) {
}
}
+// Remove this crash key after chromium:1010312 is fixed.
+class ScopedFullHeapCrashKey {
+ public:
+ explicit ScopedFullHeapCrashKey(Isolate* isolate) : isolate_(isolate) {
+ isolate_->AddCrashKey(v8::CrashKeyId::kDumpType, "heap");
+ }
+ ~ScopedFullHeapCrashKey() {
+ isolate_->AddCrashKey(v8::CrashKeyId::kDumpType, "");
+ }
+
+ private:
+ Isolate* isolate_ = nullptr;
+};
+
void Scavenger::ScavengePage(MemoryChunk* page) {
+ ScopedFullHeapCrashKey collect_full_heap_dump_if_crash(heap_->isolate());
CodePageMemoryModificationScope memory_modification_scope(page);
+ InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToNew(page);
RememberedSet<OLD_TO_NEW>::Iterate(
page,
- [this](MaybeObjectSlot addr) {
- return CheckAndScavengeObject(heap_, addr);
+ [this, &filter](MaybeObjectSlot slot) {
+ if (!filter.IsValid(slot.address())) return REMOVE_SLOT;
+ return CheckAndScavengeObject(heap_, slot);
+ },
+ SlotSet::KEEP_EMPTY_BUCKETS);
+ filter = InvalidatedSlotsFilter::OldToNew(page);
+ RememberedSetSweeping::Iterate(
+ page,
+ [this, &filter](MaybeObjectSlot slot) {
+ if (!filter.IsValid(slot.address())) return REMOVE_SLOT;
+ return CheckAndScavengeObject(heap_, slot);
},
SlotSet::KEEP_EMPTY_BUCKETS);
- DCHECK_NULL(page->invalidated_slots<OLD_TO_NEW>());
+ if (page->invalidated_slots<OLD_TO_NEW>() != nullptr) {
+ // The invalidated slots are not needed once the old-to-new slots have
+ // been processed.
+ page->ReleaseInvalidatedSlots<OLD_TO_NEW>();
+ }
RememberedSet<OLD_TO_NEW>::IterateTyped(
page, [=](SlotType type, Address addr) {
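The two passes over ScavengePage above use the same callback body but construct a fresh InvalidatedSlotsFilter for each traversal: the filter advances over the page's invalidated objects as slots are visited and is evidently single-use per traversal, hence the re-construction. A condensed restatement of the pattern, assuming Scavenger member scope for heap_ and CheckAndScavengeObject (the shared lambda is just an illustration):

InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToNew(page);
auto scavenge_slot = [this, &filter](MaybeObjectSlot slot) {
  if (!filter.IsValid(slot.address())) return REMOVE_SLOT;
  return CheckAndScavengeObject(heap_, slot);
};
RememberedSet<OLD_TO_NEW>::Iterate(page, scavenge_slot,
                                   SlotSet::KEEP_EMPTY_BUCKETS);
filter = InvalidatedSlotsFilter::OldToNew(page);  // fresh cursor for pass two
RememberedSetSweeping::Iterate(page, scavenge_slot,
                               SlotSet::KEEP_EMPTY_BUCKETS);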
diff --git a/deps/v8/src/heap/setup-heap-internal.cc b/deps/v8/src/heap/setup-heap-internal.cc
index 15ca6d7930..9f94029af3 100644
--- a/deps/v8/src/heap/setup-heap-internal.cc
+++ b/deps/v8/src/heap/setup-heap-internal.cc
@@ -7,6 +7,7 @@
#include "src/builtins/accessors.h"
#include "src/codegen/compilation-cache.h"
#include "src/execution/isolate.h"
+#include "src/execution/protectors.h"
#include "src/heap/factory.h"
#include "src/heap/heap-inl.h"
#include "src/ic/handler-configuration.h"
@@ -616,17 +617,17 @@ void Heap::CreateInitialObjects() {
// The -0 value must be set before NewNumber works.
set_minus_zero_value(
- *factory->NewHeapNumber(-0.0, AllocationType::kReadOnly));
+ *factory->NewHeapNumber<AllocationType::kReadOnly>(-0.0));
DCHECK(std::signbit(roots.minus_zero_value().Number()));
- set_nan_value(*factory->NewHeapNumber(
- std::numeric_limits<double>::quiet_NaN(), AllocationType::kReadOnly));
- set_hole_nan_value(*factory->NewHeapNumberFromBits(
- kHoleNanInt64, AllocationType::kReadOnly));
+ set_nan_value(*factory->NewHeapNumber<AllocationType::kReadOnly>(
+ std::numeric_limits<double>::quiet_NaN()));
+ set_hole_nan_value(*factory->NewHeapNumberFromBits<AllocationType::kReadOnly>(
+ kHoleNanInt64));
set_infinity_value(
- *factory->NewHeapNumber(V8_INFINITY, AllocationType::kReadOnly));
+ *factory->NewHeapNumber<AllocationType::kReadOnly>(V8_INFINITY));
set_minus_infinity_value(
- *factory->NewHeapNumber(-V8_INFINITY, AllocationType::kReadOnly));
+ *factory->NewHeapNumber<AllocationType::kReadOnly>(-V8_INFINITY));
set_hash_seed(*factory->NewByteArray(kInt64Size, AllocationType::kReadOnly));
InitializeHashSeed();
@@ -704,8 +705,7 @@ void Heap::CreateInitialObjects() {
Oddball::kStaleRegister));
// Initialize the self-reference marker.
- set_self_reference_marker(
- *factory->NewSelfReferenceMarker(AllocationType::kReadOnly));
+ set_self_reference_marker(*factory->NewSelfReferenceMarker());
set_interpreter_entry_trampoline_for_profiling(roots.undefined_value());
@@ -781,13 +781,13 @@ void Heap::CreateInitialObjects() {
set_feedback_vectors_for_profiling_tools(roots.undefined_value());
set_pending_optimize_for_test_bytecode(roots.undefined_value());
+ set_shared_wasm_memories(roots.empty_weak_array_list());
set_script_list(roots.empty_weak_array_list());
Handle<NumberDictionary> slow_element_dictionary = NumberDictionary::New(
isolate(), 1, AllocationType::kReadOnly, USE_CUSTOM_MINIMUM_CAPACITY);
DCHECK(!slow_element_dictionary->HasSufficientCapacityToAdd(1));
- slow_element_dictionary->set_requires_slow_elements();
set_empty_slow_element_dictionary(*slow_element_dictionary);
set_materialized_objects(*factory->NewFixedArray(0, AllocationType::kOld));
@@ -839,76 +839,122 @@ void Heap::CreateInitialObjects() {
script->set_origin_options(ScriptOriginOptions(true, false));
set_empty_script(*script);
- Handle<Cell> array_constructor_cell = factory->NewCell(
- handle(Smi::FromInt(Isolate::kProtectorValid), isolate()));
- set_array_constructor_protector(*array_constructor_cell);
+ {
+ Handle<PropertyCell> cell = factory->NewPropertyCell(
+ factory->empty_string(), AllocationType::kReadOnly);
+ cell->set_value(roots.the_hole_value());
+ set_empty_property_cell(*cell);
+ }
- Handle<PropertyCell> cell = factory->NewPropertyCell(factory->empty_string());
- cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
- set_no_elements_protector(*cell);
+ // Protectors
+ {
+ Handle<PropertyCell> cell =
+ factory->NewPropertyCell(factory->empty_string());
+ cell->set_value(Smi::FromInt(Protectors::kProtectorValid));
+ set_array_constructor_protector(*cell);
+ }
- cell = factory->NewPropertyCell(factory->empty_string(),
- AllocationType::kReadOnly);
- cell->set_value(roots.the_hole_value());
- set_empty_property_cell(*cell);
+ {
+ Handle<PropertyCell> cell =
+ factory->NewPropertyCell(factory->empty_string());
+ cell->set_value(Smi::FromInt(Protectors::kProtectorValid));
+ set_no_elements_protector(*cell);
+ }
- cell = factory->NewPropertyCell(factory->empty_string());
- cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
- set_array_iterator_protector(*cell);
+ {
+ Handle<PropertyCell> cell =
+ factory->NewPropertyCell(factory->empty_string());
+ cell->set_value(Smi::FromInt(Protectors::kProtectorValid));
+ set_array_iterator_protector(*cell);
+ }
- cell = factory->NewPropertyCell(factory->empty_string());
- cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
- set_map_iterator_protector(*cell);
+ {
+ Handle<PropertyCell> cell =
+ factory->NewPropertyCell(factory->empty_string());
+ cell->set_value(Smi::FromInt(Protectors::kProtectorValid));
+ set_map_iterator_protector(*cell);
+ }
- cell = factory->NewPropertyCell(factory->empty_string());
- cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
- set_set_iterator_protector(*cell);
+ {
+ Handle<PropertyCell> cell =
+ factory->NewPropertyCell(factory->empty_string());
+ cell->set_value(Smi::FromInt(Protectors::kProtectorValid));
+ set_set_iterator_protector(*cell);
+ }
- Handle<Cell> is_concat_spreadable_cell = factory->NewCell(
- handle(Smi::FromInt(Isolate::kProtectorValid), isolate()));
- set_is_concat_spreadable_protector(*is_concat_spreadable_cell);
+ {
+ Handle<PropertyCell> cell =
+ factory->NewPropertyCell(factory->empty_string());
+ cell->set_value(Smi::FromInt(Protectors::kProtectorValid));
+ set_is_concat_spreadable_protector(*cell);
+ }
- cell = factory->NewPropertyCell(factory->empty_string());
- cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
- set_array_species_protector(*cell);
+ {
+ Handle<PropertyCell> cell =
+ factory->NewPropertyCell(factory->empty_string());
+ cell->set_value(Smi::FromInt(Protectors::kProtectorValid));
+ set_array_species_protector(*cell);
+ }
- cell = factory->NewPropertyCell(factory->empty_string());
- cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
- set_typed_array_species_protector(*cell);
+ {
+ Handle<PropertyCell> cell =
+ factory->NewPropertyCell(factory->empty_string());
+ cell->set_value(Smi::FromInt(Protectors::kProtectorValid));
+ set_typed_array_species_protector(*cell);
+ }
- cell = factory->NewPropertyCell(factory->empty_string());
- cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
- set_promise_species_protector(*cell);
+ {
+ Handle<PropertyCell> cell =
+ factory->NewPropertyCell(factory->empty_string());
+ cell->set_value(Smi::FromInt(Protectors::kProtectorValid));
+ set_promise_species_protector(*cell);
+ }
- cell = factory->NewPropertyCell(factory->empty_string());
- cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
- set_string_iterator_protector(*cell);
+ {
+ Handle<PropertyCell> cell =
+ factory->NewPropertyCell(factory->empty_string());
+ cell->set_value(Smi::FromInt(Protectors::kProtectorValid));
+ set_string_iterator_protector(*cell);
+ }
- Handle<Cell> string_length_overflow_cell = factory->NewCell(
- handle(Smi::FromInt(Isolate::kProtectorValid), isolate()));
- set_string_length_protector(*string_length_overflow_cell);
+ {
+ Handle<PropertyCell> cell =
+ factory->NewPropertyCell(factory->empty_string());
+ cell->set_value(Smi::FromInt(Protectors::kProtectorValid));
+ set_string_length_protector(*cell);
+ }
- cell = factory->NewPropertyCell(factory->empty_string());
- cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
- set_array_buffer_detaching_protector(*cell);
+ {
+ Handle<PropertyCell> cell =
+ factory->NewPropertyCell(factory->empty_string());
+ cell->set_value(Smi::FromInt(Protectors::kProtectorValid));
+ set_array_buffer_detaching_protector(*cell);
+ }
- cell = factory->NewPropertyCell(factory->empty_string());
- cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
- set_promise_hook_protector(*cell);
+ {
+ Handle<PropertyCell> cell =
+ factory->NewPropertyCell(factory->empty_string());
+ cell->set_value(Smi::FromInt(Protectors::kProtectorValid));
+ set_promise_hook_protector(*cell);
+ }
- Handle<Cell> promise_resolve_cell = factory->NewCell(
- handle(Smi::FromInt(Isolate::kProtectorValid), isolate()));
- set_promise_resolve_protector(*promise_resolve_cell);
+ {
+ Handle<PropertyCell> cell =
+ factory->NewPropertyCell(factory->empty_string());
+ cell->set_value(Smi::FromInt(Protectors::kProtectorValid));
+ set_promise_resolve_protector(*cell);
+ }
- cell = factory->NewPropertyCell(factory->empty_string());
- cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
- set_promise_then_protector(*cell);
+ {
+ Handle<PropertyCell> cell =
+ factory->NewPropertyCell(factory->empty_string());
+ cell->set_value(Smi::FromInt(Protectors::kProtectorValid));
+ set_promise_then_protector(*cell);
+ }
set_serialized_objects(roots.empty_fixed_array());
set_serialized_global_proxy_sizes(roots.empty_fixed_array());
- set_noscript_shared_function_infos(roots.empty_weak_array_list());
-
/* Canonical off-heap trampoline data */
set_off_heap_trampoline_relocation_info(
*Builtins::GenerateOffHeapTrampolineRelocInfo(isolate_));
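Every protector block above writes the same sentinel, Protectors::kProtectorValid, into a fresh PropertyCell. For context, a hedged sketch of the check side of that contract — the helper name is illustrative; the real accessors come from the Protectors class whose header is included at the top of this file:

// Illustrative only: a protector is "intact" while its cell still holds the
// kProtectorValid sentinel; invalidation stores a different value, letting
// optimized code that embedded the assumption be deoptimized.
bool IsProtectorIntact(PropertyCell cell) {
  return cell.value() == Smi::FromInt(Protectors::kProtectorValid);
}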
diff --git a/deps/v8/src/heap/slot-set.cc b/deps/v8/src/heap/slot-set.cc
index 12cf6bab5a..92540574a0 100644
--- a/deps/v8/src/heap/slot-set.cc
+++ b/deps/v8/src/heap/slot-set.cc
@@ -11,7 +11,6 @@ TypedSlots::~TypedSlots() {
Chunk* chunk = head_;
while (chunk != nullptr) {
Chunk* next = chunk->next;
- delete[] chunk->buffer;
delete chunk;
chunk = next;
}
@@ -22,9 +21,8 @@ TypedSlots::~TypedSlots() {
void TypedSlots::Insert(SlotType type, uint32_t offset) {
TypedSlot slot = {TypeField::encode(type) | OffsetField::encode(offset)};
Chunk* chunk = EnsureChunk();
- DCHECK_LT(chunk->count, chunk->capacity);
- chunk->buffer[chunk->count] = slot;
- ++chunk->count;
+ DCHECK_LT(chunk->buffer.size(), chunk->buffer.capacity());
+ chunk->buffer.push_back(slot);
}
void TypedSlots::Merge(TypedSlots* other) {
@@ -46,37 +44,25 @@ TypedSlots::Chunk* TypedSlots::EnsureChunk() {
if (!head_) {
head_ = tail_ = NewChunk(nullptr, kInitialBufferSize);
}
- if (head_->count == head_->capacity) {
- head_ = NewChunk(head_, NextCapacity(head_->capacity));
+ if (head_->buffer.size() == head_->buffer.capacity()) {
+ head_ = NewChunk(head_, NextCapacity(head_->buffer.capacity()));
}
return head_;
}
-TypedSlots::Chunk* TypedSlots::NewChunk(Chunk* next, int capacity) {
+TypedSlots::Chunk* TypedSlots::NewChunk(Chunk* next, size_t capacity) {
Chunk* chunk = new Chunk;
chunk->next = next;
- chunk->buffer = new TypedSlot[capacity];
- chunk->capacity = capacity;
- chunk->count = 0;
+ chunk->buffer.reserve(capacity);
+ DCHECK_EQ(chunk->buffer.capacity(), capacity);
return chunk;
}
-TypedSlotSet::~TypedSlotSet() { FreeToBeFreedChunks(); }
-
-void TypedSlotSet::FreeToBeFreedChunks() {
- base::MutexGuard guard(&to_be_freed_chunks_mutex_);
- std::stack<std::unique_ptr<Chunk>> empty;
- to_be_freed_chunks_.swap(empty);
-}
-
void TypedSlotSet::ClearInvalidSlots(
const std::map<uint32_t, uint32_t>& invalid_ranges) {
Chunk* chunk = LoadHead();
while (chunk != nullptr) {
- TypedSlot* buffer = chunk->buffer;
- int count = chunk->count;
- for (int i = 0; i < count; i++) {
- TypedSlot slot = LoadTypedSlot(buffer + i);
+ for (TypedSlot& slot : chunk->buffer) {
SlotType type = TypeField::decode(slot.type_and_offset);
if (type == CLEARED_SLOT) continue;
uint32_t offset = OffsetField::decode(slot.type_and_offset);
@@ -88,7 +74,7 @@ void TypedSlotSet::ClearInvalidSlots(
upper_bound--;
DCHECK_LE(upper_bound->first, offset);
if (upper_bound->second > offset) {
- ClearTypedSlot(buffer + i);
+ slot = ClearedTypedSlot();
}
}
chunk = LoadNext(chunk);
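The Chunk rewrite above swaps a manual buffer/capacity/count triple for a std::vector whose capacity is reserved up front, so push_back never reallocates mid-iteration and the old atomic per-slot loads become unnecessary. A standalone model of the growth policy, using plain types rather than the V8 ones:

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <vector>

struct Chunk {
  Chunk* next;
  std::vector<uint32_t> buffer;  // encoded (type, offset) pairs
};

constexpr size_t kInitialBufferSize = 100;  // first chunk's capacity
constexpr size_t kMaxBufferSize = 16 * 1024;

// Capacity doubles per new chunk until it caps at kMaxBufferSize.
size_t NextCapacity(size_t capacity) {
  return std::min(kMaxBufferSize, capacity * 2);
}

Chunk* NewChunk(Chunk* next, size_t capacity) {
  Chunk* chunk = new Chunk;
  chunk->next = next;
  chunk->buffer.reserve(capacity);  // one allocation; size() tracks the count
  return chunk;
}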
diff --git a/deps/v8/src/heap/slot-set.h b/deps/v8/src/heap/slot-set.h
index c71192bfdc..b1321b6fca 100644
--- a/deps/v8/src/heap/slot-set.h
+++ b/deps/v8/src/heap/slot-set.h
@@ -6,6 +6,7 @@
#define V8_HEAP_SLOT_SET_H_
#include <map>
+#include <memory>
#include <stack>
#include "src/base/atomic-utils.h"
@@ -21,19 +22,15 @@ namespace internal {
enum SlotCallbackResult { KEEP_SLOT, REMOVE_SLOT };
// Data structure for maintaining a set of slots in a standard (non-large)
-// page. The base address of the page must be set with SetPageStart before any
-// operation.
+// page.
// The data structure assumes that the slots are pointer size aligned and
// splits the valid slot offset range into kBuckets buckets.
// Each bucket is a bitmap with a bit corresponding to a single slot offset.
class SlotSet : public Malloced {
public:
enum EmptyBucketMode {
- FREE_EMPTY_BUCKETS, // An empty bucket will be deallocated immediately.
- PREFREE_EMPTY_BUCKETS, // An empty bucket will be unlinked from the slot
- // set, but deallocated on demand by a sweeper
- // thread.
- KEEP_EMPTY_BUCKETS // An empty bucket will be kept.
+ FREE_EMPTY_BUCKETS, // An empty bucket will be deallocated immediately.
+ KEEP_EMPTY_BUCKETS // An empty bucket will be kept.
};
SlotSet() {
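The comment above describes the addressing scheme: a page's slot offsets map onto buckets of 32 cells of 32 bits each. A self-contained sketch of that decomposition, with constants hard-coded for a 64-bit build (kTaggedSizeLog2 = 3 is an assumption; the class declares the real constants further down):

#include <cstdint>

constexpr int kTaggedSizeLog2 = 3;      // assumption: 8-byte tagged slots
constexpr int kBitsPerCellLog2 = 5;     // 32-bit cells
constexpr int kCellsPerBucket = 32;
constexpr int kBitsPerBucketLog2 = 10;  // kCellsPerBucketLog2 + kBitsPerCellLog2

// Decompose a byte offset within the page into (bucket, cell, bit).
void SlotToIndices(int slot_offset, int* bucket_index, int* cell_index,
                   int* bit_index) {
  int slot = slot_offset >> kTaggedSizeLog2;   // slot number within the page
  *bucket_index = slot >> kBitsPerBucketLog2;  // which bitmap bucket
  *cell_index = (slot >> kBitsPerCellLog2) & (kCellsPerBucket - 1);
  *bit_index = slot & ((1 << kBitsPerCellLog2) - 1);  // bit within the cell
}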
@@ -46,15 +43,12 @@ class SlotSet : public Malloced {
for (int i = 0; i < kBuckets; i++) {
ReleaseBucket(i);
}
- FreeToBeFreedBuckets();
}
- void SetPageStart(Address page_start) { page_start_ = page_start; }
-
// The slot offset specifies a slot at address page_start_ + slot_offset.
// AccessMode defines whether there can be concurrent access on the buckets
// or not.
- template <AccessMode access_mode = AccessMode::ATOMIC>
+ template <AccessMode access_mode>
void Insert(int slot_offset) {
int bucket_index, cell_index, bit_index;
SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
@@ -138,9 +132,7 @@ class SlotSet : public Malloced {
DCHECK(current_bucket == end_bucket ||
(current_bucket < end_bucket && current_cell == 0));
while (current_bucket < end_bucket) {
- if (mode == PREFREE_EMPTY_BUCKETS) {
- PreFreeEmptyBucket(current_bucket);
- } else if (mode == FREE_EMPTY_BUCKETS) {
+ if (mode == FREE_EMPTY_BUCKETS) {
ReleaseBucket(current_bucket);
} else {
DCHECK(mode == KEEP_EMPTY_BUCKETS);
@@ -152,11 +144,11 @@ class SlotSet : public Malloced {
current_bucket++;
}
// All buckets between start_bucket and end_bucket are cleared.
+ DCHECK(current_bucket == end_bucket);
+ if (current_bucket == kBuckets) return;
bucket = LoadBucket(&buckets_[current_bucket]);
- DCHECK(current_bucket == end_bucket && current_cell <= end_cell);
- if (current_bucket == kBuckets || bucket == nullptr) {
- return;
- }
+ DCHECK(current_cell <= end_cell);
+ if (bucket == nullptr) return;
while (current_cell < end_cell) {
StoreCell(&bucket[current_cell], 0);
current_cell++;
@@ -189,7 +181,7 @@ class SlotSet : public Malloced {
// else return REMOVE_SLOT;
// });
template <typename Callback>
- int Iterate(Callback callback, EmptyBucketMode mode) {
+ int Iterate(Address page_start, Callback callback, EmptyBucketMode mode) {
int new_count = 0;
for (int bucket_index = 0; bucket_index < kBuckets; bucket_index++) {
Bucket bucket = LoadBucket(&buckets_[bucket_index]);
@@ -205,7 +197,7 @@ class SlotSet : public Malloced {
int bit_offset = base::bits::CountTrailingZeros(cell);
uint32_t bit_mask = 1u << bit_offset;
uint32_t slot = (cell_offset + bit_offset) << kTaggedSizeLog2;
- if (callback(MaybeObjectSlot(page_start_ + slot)) == KEEP_SLOT) {
+ if (callback(MaybeObjectSlot(page_start + slot)) == KEEP_SLOT) {
++in_bucket_count;
} else {
mask |= bit_mask;
@@ -218,31 +210,12 @@ class SlotSet : public Malloced {
}
}
}
- if (mode == PREFREE_EMPTY_BUCKETS && in_bucket_count == 0) {
- PreFreeEmptyBucket(bucket_index);
- }
new_count += in_bucket_count;
}
}
return new_count;
}
- int NumberOfPreFreedEmptyBuckets() {
- base::MutexGuard guard(&to_be_freed_buckets_mutex_);
- return static_cast<int>(to_be_freed_buckets_.size());
- }
-
- void PreFreeEmptyBuckets() {
- for (int bucket_index = 0; bucket_index < kBuckets; bucket_index++) {
- Bucket bucket = LoadBucket(&buckets_[bucket_index]);
- if (bucket != nullptr) {
- if (IsEmptyBucket(bucket)) {
- PreFreeEmptyBucket(bucket_index);
- }
- }
- }
- }
-
void FreeEmptyBuckets() {
for (int bucket_index = 0; bucket_index < kBuckets; bucket_index++) {
Bucket bucket = LoadBucket(&buckets_[bucket_index]);
@@ -254,27 +227,22 @@ class SlotSet : public Malloced {
}
}
- void FreeToBeFreedBuckets() {
- base::MutexGuard guard(&to_be_freed_buckets_mutex_);
- while (!to_be_freed_buckets_.empty()) {
- Bucket top = to_be_freed_buckets_.top();
- to_be_freed_buckets_.pop();
- DeleteArray<uint32_t>(top);
- }
- DCHECK_EQ(0u, to_be_freed_buckets_.size());
- }
-
- private:
- using Bucket = uint32_t*;
static const int kMaxSlots = (1 << kPageSizeBits) / kTaggedSize;
static const int kCellsPerBucket = 32;
static const int kCellsPerBucketLog2 = 5;
+ static const int kCellSizeBytesLog2 = 2;
+ static const int kCellSizeBytes = 1 << kCellSizeBytesLog2;
static const int kBitsPerCell = 32;
static const int kBitsPerCellLog2 = 5;
static const int kBitsPerBucket = kCellsPerBucket * kBitsPerCell;
static const int kBitsPerBucketLog2 = kCellsPerBucketLog2 + kBitsPerCellLog2;
static const int kBuckets = kMaxSlots / kCellsPerBucket / kBitsPerCell;
+ static const int kSize = kBuckets * kSystemPointerSize;
+
+ using Bucket = uint32_t*;
+
+ private:
Bucket AllocateBucket() {
Bucket result = NewArray<uint32_t>(kCellsPerBucket);
for (int i = 0; i < kCellsPerBucket; i++) {
@@ -293,15 +261,6 @@ class SlotSet : public Malloced {
}
}
- void PreFreeEmptyBucket(int bucket_index) {
- Bucket bucket = LoadBucket(&buckets_[bucket_index]);
- if (bucket != nullptr) {
- base::MutexGuard guard(&to_be_freed_buckets_mutex_);
- to_be_freed_buckets_.push(bucket);
- StoreBucket(&buckets_[bucket_index], nullptr);
- }
- }
-
void ReleaseBucket(int bucket_index) {
Bucket bucket = LoadBucket(&buckets_[bucket_index]);
StoreBucket(&buckets_[bucket_index], nullptr);
@@ -381,11 +340,11 @@ class SlotSet : public Malloced {
}
Bucket buckets_[kBuckets];
- Address page_start_;
- base::Mutex to_be_freed_buckets_mutex_;
- std::stack<uint32_t*> to_be_freed_buckets_;
};
+STATIC_ASSERT(std::is_standard_layout<SlotSet>::value);
+STATIC_ASSERT(sizeof(SlotSet) == SlotSet::kSize);
+
enum SlotType {
FULL_EMBEDDED_OBJECT_SLOT,
COMPRESSED_EMBEDDED_OBJECT_SLOT,
@@ -396,9 +355,9 @@ enum SlotType {
};
// Data structure for maintaining a list of typed slots in a page.
-// Typed slots can only appear in Code and JSFunction objects, so
+// Typed slots can only appear in Code objects, so
// the maximum possible offset is limited by the LargePage::kMaxCodePageSize.
-// The implementation is a chain of chunks, where each chunks is an array of
+// The implementation is a chain of chunks, where each chunk is an array of
// encoded (slot type, slot offset) pairs.
// There is no duplicate detection and we do not expect many duplicates because
// typed slots contain V8 internal pointers that are not directly exposed to JS.
@@ -418,17 +377,15 @@ class V8_EXPORT_PRIVATE TypedSlots {
};
struct Chunk {
Chunk* next;
- TypedSlot* buffer;
- int32_t capacity;
- int32_t count;
+ std::vector<TypedSlot> buffer;
};
- static const int kInitialBufferSize = 100;
- static const int kMaxBufferSize = 16 * KB;
- static int NextCapacity(int capacity) {
+ static const size_t kInitialBufferSize = 100;
+ static const size_t kMaxBufferSize = 16 * KB;
+ static size_t NextCapacity(size_t capacity) {
return Min(kMaxBufferSize, capacity * 2);
}
Chunk* EnsureChunk();
- Chunk* NewChunk(Chunk* next, int capacity);
+ Chunk* NewChunk(Chunk* next, size_t capacity);
Chunk* head_ = nullptr;
Chunk* tail_ = nullptr;
};
@@ -437,15 +394,10 @@ class V8_EXPORT_PRIVATE TypedSlots {
// clearing of invalid slots.
class V8_EXPORT_PRIVATE TypedSlotSet : public TypedSlots {
public:
- // The PREFREE_EMPTY_CHUNKS indicates that chunks detected as empty
- // during the iteration are queued in to_be_freed_chunks_, which are
- // then freed in FreeToBeFreedChunks.
- enum IterationMode { PREFREE_EMPTY_CHUNKS, KEEP_EMPTY_CHUNKS };
+ enum IterationMode { FREE_EMPTY_CHUNKS, KEEP_EMPTY_CHUNKS };
explicit TypedSlotSet(Address page_start) : page_start_(page_start) {}
- ~TypedSlotSet() override;
-
// Iterate over all slots in the set and for each slot invoke the callback.
// If the callback returns REMOVE_SLOT then the slot is removed from the set.
// Returns the new number of slots.
@@ -463,11 +415,8 @@ class V8_EXPORT_PRIVATE TypedSlotSet : public TypedSlots {
Chunk* previous = nullptr;
int new_count = 0;
while (chunk != nullptr) {
- TypedSlot* buffer = chunk->buffer;
- int count = chunk->count;
bool empty = true;
- for (int i = 0; i < count; i++) {
- TypedSlot slot = LoadTypedSlot(buffer + i);
+ for (TypedSlot& slot : chunk->buffer) {
SlotType type = TypeField::decode(slot.type_and_offset);
if (type != CLEARED_SLOT) {
uint32_t offset = OffsetField::decode(slot.type_and_offset);
@@ -476,12 +425,12 @@ class V8_EXPORT_PRIVATE TypedSlotSet : public TypedSlots {
new_count++;
empty = false;
} else {
- ClearTypedSlot(buffer + i);
+ slot = ClearedTypedSlot();
}
}
}
Chunk* next = chunk->next;
- if (mode == PREFREE_EMPTY_CHUNKS && empty) {
+ if (mode == FREE_EMPTY_CHUNKS && empty) {
// We remove the chunk from the list but let it still point its next
// chunk to allow concurrent iteration.
if (previous) {
@@ -489,8 +438,8 @@ class V8_EXPORT_PRIVATE TypedSlotSet : public TypedSlots {
} else {
StoreHead(next);
}
- base::MutexGuard guard(&to_be_freed_chunks_mutex_);
- to_be_freed_chunks_.push(std::unique_ptr<Chunk>(chunk));
+
+ delete chunk;
} else {
previous = chunk;
}
@@ -518,19 +467,11 @@ class V8_EXPORT_PRIVATE TypedSlotSet : public TypedSlots {
void StoreHead(Chunk* chunk) {
base::AsAtomicPointer::Relaxed_Store(&head_, chunk);
}
- TypedSlot LoadTypedSlot(TypedSlot* slot) {
- return TypedSlot{base::AsAtomic32::Relaxed_Load(&slot->type_and_offset)};
- }
- void ClearTypedSlot(TypedSlot* slot) {
- // Order is important here and should match that of LoadTypedSlot.
- base::AsAtomic32::Relaxed_Store(
- &slot->type_and_offset,
- TypeField::encode(CLEARED_SLOT) | OffsetField::encode(0));
+ static TypedSlot ClearedTypedSlot() {
+ return TypedSlot{TypeField::encode(CLEARED_SLOT) | OffsetField::encode(0)};
}
Address page_start_;
- base::Mutex to_be_freed_chunks_mutex_;
- std::stack<std::unique_ptr<Chunk>> to_be_freed_chunks_;
};
} // namespace internal
diff --git a/deps/v8/src/heap/spaces.cc b/deps/v8/src/heap/spaces.cc
index dd8ba30101..2c5d5c298d 100644
--- a/deps/v8/src/heap/spaces.cc
+++ b/deps/v8/src/heap/spaces.cc
@@ -11,14 +11,14 @@
#include "src/base/lsan.h"
#include "src/base/macros.h"
#include "src/base/platform/semaphore.h"
-#include "src/base/template-utils.h"
#include "src/execution/vm-state-inl.h"
-#include "src/heap/array-buffer-tracker.h"
+#include "src/heap/array-buffer-tracker-inl.h"
#include "src/heap/combined-heap.h"
#include "src/heap/concurrent-marking.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-controller.h"
#include "src/heap/incremental-marking-inl.h"
+#include "src/heap/invalidated-slots-inl.h"
#include "src/heap/mark-compact.h"
#include "src/heap/read-only-heap.h"
#include "src/heap/remembered-set.h"
@@ -220,7 +220,7 @@ void MemoryAllocator::InitializeCodePageAllocator(
requested));
heap_reservation_ = std::move(reservation);
- code_page_allocator_instance_ = base::make_unique<base::BoundedPageAllocator>(
+ code_page_allocator_instance_ = std::make_unique<base::BoundedPageAllocator>(
page_allocator, aligned_base, size,
static_cast<size_t>(MemoryChunk::kAlignment));
code_page_allocator_ = code_page_allocator_instance_.get();
@@ -286,7 +286,7 @@ void MemoryAllocator::Unmapper::FreeQueuedChunks() {
}
return;
}
- auto task = base::make_unique<UnmapFreeMemoryTask>(heap_->isolate(), this);
+ auto task = std::make_unique<UnmapFreeMemoryTask>(heap_->isolate(), this);
if (FLAG_trace_unmapper) {
PrintIsolate(heap_->isolate(),
"Unmapper::FreeQueuedChunks: new task id=%" PRIu64 "\n",
@@ -699,6 +699,7 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
chunk->InitializeReservedMemory();
base::AsAtomicPointer::Release_Store(&chunk->slot_set_[OLD_TO_NEW], nullptr);
base::AsAtomicPointer::Release_Store(&chunk->slot_set_[OLD_TO_OLD], nullptr);
+ base::AsAtomicPointer::Release_Store(&chunk->sweeping_slot_set_, nullptr);
base::AsAtomicPointer::Release_Store(&chunk->typed_slot_set_[OLD_TO_NEW],
nullptr);
base::AsAtomicPointer::Release_Store(&chunk->typed_slot_set_[OLD_TO_OLD],
@@ -856,6 +857,33 @@ Page* Page::ConvertNewToOld(Page* old_page) {
return new_page;
}
+void Page::MoveOldToNewRememberedSetForSweeping() {
+ CHECK_NULL(sweeping_slot_set_);
+ sweeping_slot_set_ = slot_set_[OLD_TO_NEW];
+ slot_set_[OLD_TO_NEW] = nullptr;
+}
+
+void Page::MergeOldToNewRememberedSets() {
+ if (sweeping_slot_set_ == nullptr) return;
+
+ RememberedSet<OLD_TO_NEW>::Iterate(
+ this,
+ [this](MaybeObjectSlot slot) {
+ Address address = slot.address();
+ RememberedSetSweeping::Insert<AccessMode::NON_ATOMIC>(this, address);
+ return KEEP_SLOT;
+ },
+ SlotSet::KEEP_EMPTY_BUCKETS);
+
+ if (slot_set_[OLD_TO_NEW]) {
+ ReleaseSlotSet<OLD_TO_NEW>();
+ }
+
+ CHECK_NULL(slot_set_[OLD_TO_NEW]);
+ slot_set_[OLD_TO_NEW] = sweeping_slot_set_;
+ sweeping_slot_set_ = nullptr;
+}
+
size_t MemoryChunk::CommittedPhysicalMemory() {
if (!base::OS::HasLazyCommits() || owner_identity() == LO_SPACE)
return size();
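Together with the sweeper changes later in this patch, the two helpers above give sweeping_slot_set_ a short, well-defined lifecycle. A rough sketch of the intended call order, inferred from the call sites visible in this diff:

// Sketch (call sites appear elsewhere in this patch):
//   Sweeper::PrepareToBeSweptPage(page)
//     page->MoveOldToNewRememberedSetForSweeping();  // sweeper takes the
//                                                    // current OLD_TO_NEW set
//   ...concurrent sweeping prunes it via RememberedSetSweeping::RemoveRange,
//      while new old-to-new slots land in a fresh slot_set_[OLD_TO_NEW]...
//   PagedSpace::RefillFreeList() / MergeCompactionSpace()
//     page->MergeOldToNewRememberedSets();  // fold both back into one set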
@@ -1376,6 +1404,7 @@ void MemoryChunk::ReleaseAllocatedMemoryNeededForWritableChunk() {
}
ReleaseSlotSet<OLD_TO_NEW>();
+ ReleaseSlotSet(&sweeping_slot_set_);
ReleaseSlotSet<OLD_TO_OLD>();
ReleaseTypedSlotSet<OLD_TO_NEW>();
ReleaseTypedSlotSet<OLD_TO_OLD>();
@@ -1399,11 +1428,7 @@ void MemoryChunk::ReleaseAllAllocatedMemory() {
static SlotSet* AllocateAndInitializeSlotSet(size_t size, Address page_start) {
size_t pages = (size + Page::kPageSize - 1) / Page::kPageSize;
DCHECK_LT(0, pages);
- SlotSet* slot_set = new SlotSet[pages];
- for (size_t i = 0; i < pages; i++) {
- slot_set[i].SetPageStart(page_start + i * Page::kPageSize);
- }
- return slot_set;
+ return new SlotSet[pages];
}
template V8_EXPORT_PRIVATE SlotSet* MemoryChunk::AllocateSlotSet<OLD_TO_NEW>();
@@ -1411,15 +1436,23 @@ template V8_EXPORT_PRIVATE SlotSet* MemoryChunk::AllocateSlotSet<OLD_TO_OLD>();
template <RememberedSetType type>
SlotSet* MemoryChunk::AllocateSlotSet() {
- SlotSet* slot_set = AllocateAndInitializeSlotSet(size(), address());
+ return AllocateSlotSet(&slot_set_[type]);
+}
+
+SlotSet* MemoryChunk::AllocateSweepingSlotSet() {
+ return AllocateSlotSet(&sweeping_slot_set_);
+}
+
+SlotSet* MemoryChunk::AllocateSlotSet(SlotSet** slot_set) {
+ SlotSet* new_slot_set = AllocateAndInitializeSlotSet(size(), address());
SlotSet* old_slot_set = base::AsAtomicPointer::Release_CompareAndSwap(
- &slot_set_[type], nullptr, slot_set);
+ slot_set, nullptr, new_slot_set);
if (old_slot_set != nullptr) {
- delete[] slot_set;
- slot_set = old_slot_set;
+ delete[] new_slot_set;
+ new_slot_set = old_slot_set;
}
- DCHECK(slot_set);
- return slot_set;
+ DCHECK(new_slot_set);
+ return new_slot_set;
}
template void MemoryChunk::ReleaseSlotSet<OLD_TO_NEW>();
@@ -1427,10 +1460,13 @@ template void MemoryChunk::ReleaseSlotSet<OLD_TO_OLD>();
template <RememberedSetType type>
void MemoryChunk::ReleaseSlotSet() {
- SlotSet* slot_set = slot_set_[type];
- if (slot_set) {
- slot_set_[type] = nullptr;
- delete[] slot_set;
+ ReleaseSlotSet(&slot_set_[type]);
+}
+
+void MemoryChunk::ReleaseSlotSet(SlotSet** slot_set) {
+ if (*slot_set) {
+ delete[] * slot_set;
+ *slot_set = nullptr;
}
}
@@ -1484,15 +1520,12 @@ void MemoryChunk::ReleaseInvalidatedSlots() {
}
template V8_EXPORT_PRIVATE void
-MemoryChunk::RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(HeapObject object,
- int size);
+MemoryChunk::RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(HeapObject object);
template V8_EXPORT_PRIVATE void
-MemoryChunk::RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(HeapObject object,
- int size);
+MemoryChunk::RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(HeapObject object);
template <RememberedSetType type>
-void MemoryChunk::RegisterObjectWithInvalidatedSlots(HeapObject object,
- int size) {
+void MemoryChunk::RegisterObjectWithInvalidatedSlots(HeapObject object) {
bool skip_slot_recording;
if (type == OLD_TO_NEW) {
@@ -1509,27 +1542,17 @@ void MemoryChunk::RegisterObjectWithInvalidatedSlots(HeapObject object,
AllocateInvalidatedSlots<type>();
}
- InvalidatedSlots* invalidated_slots = this->invalidated_slots<type>();
- InvalidatedSlots::iterator it = invalidated_slots->lower_bound(object);
+ invalidated_slots<type>()->insert(object);
+}
- if (it != invalidated_slots->end() && it->first == object) {
- // object was already inserted
- CHECK_LE(size, it->second);
- return;
+void MemoryChunk::InvalidateRecordedSlots(HeapObject object) {
+ if (heap()->incremental_marking()->IsCompacting()) {
+ // We cannot check slot_set_[OLD_TO_OLD] here, since the
+ // concurrent markers might insert slots concurrently.
+ RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(object);
}
- it = invalidated_slots->insert(it, std::make_pair(object, size));
-
- // prevent overlapping invalidated objects for old-to-new.
- if (type == OLD_TO_NEW && it != invalidated_slots->begin()) {
- HeapObject pred = (--it)->first;
- int pred_size = it->second;
- DCHECK_LT(pred.address(), object.address());
-
- if (pred.address() + pred_size > object.address()) {
- it->second = static_cast<int>(object.address() - pred.address());
- }
- }
+ RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(object);
}
template bool MemoryChunk::RegisteredObjectWithInvalidatedSlots<OLD_TO_NEW>(
@@ -1546,27 +1569,6 @@ bool MemoryChunk::RegisteredObjectWithInvalidatedSlots(HeapObject object) {
invalidated_slots<type>()->end();
}
-template void MemoryChunk::MoveObjectWithInvalidatedSlots<OLD_TO_OLD>(
- HeapObject old_start, HeapObject new_start);
-
-template <RememberedSetType type>
-void MemoryChunk::MoveObjectWithInvalidatedSlots(HeapObject old_start,
- HeapObject new_start) {
- DCHECK_LT(old_start, new_start);
- DCHECK_EQ(MemoryChunk::FromHeapObject(old_start),
- MemoryChunk::FromHeapObject(new_start));
- static_assert(type == OLD_TO_OLD, "only use this for old-to-old slots");
- if (!ShouldSkipEvacuationSlotRecording() && invalidated_slots<type>()) {
- auto it = invalidated_slots<type>()->find(old_start);
- if (it != invalidated_slots<type>()->end()) {
- int old_size = it->second;
- int delta = static_cast<int>(new_start.address() - old_start.address());
- invalidated_slots<type>()->erase(it);
- (*invalidated_slots<type>())[new_start] = old_size - delta;
- }
- }
-}
-
void MemoryChunk::ReleaseLocalTracker() {
DCHECK_NOT_NULL(local_tracker_);
delete local_tracker_;
@@ -1657,6 +1659,7 @@ void PagedSpace::RefillFreeList() {
DCHECK(!IsDetached());
MarkCompactCollector* collector = heap()->mark_compact_collector();
size_t added = 0;
+
{
Page* p = nullptr;
while ((p = collector->sweeper()->GetSweptPageSafe(this)) != nullptr) {
@@ -1667,6 +1670,15 @@ void PagedSpace::RefillFreeList() {
category->Reset(free_list());
});
}
+
+ // Also merge old-to-new remembered sets outside of collections.
+ // Do not do this during GC, because of races during scavenges.
+ // One thread might iterate the remembered set, while another thread
+ // merges them.
+ if (!is_local()) {
+ p->MergeOldToNewRememberedSets();
+ }
+
// Only during compaction pages can actually change ownership. This is
// safe because there exists no other competing action on the page links
// during compaction.
@@ -1709,6 +1721,9 @@ void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
// Move over pages.
for (auto it = other->begin(); it != other->end();) {
Page* p = *(it++);
+
+ p->MergeOldToNewRememberedSets();
+
// Relinking requires the category to be unlinked.
other->RemovePage(p);
AddPage(p);
@@ -1883,19 +1898,8 @@ Address SpaceWithLinearArea::ComputeLimit(Address start, Address end,
// Generated code may allocate inline from the linear allocation area.
// To make sure we can observe these allocations, we use a lower limit.
size_t step = GetNextInlineAllocationStepSize();
-
- // TODO(ofrobots): there is subtle difference between old space and new
- // space here. Any way to avoid it? `step - 1` makes more sense as we would
- // like to sample the object that straddles the `start + step` boundary.
- // Rounding down further would introduce a small statistical error in
- // sampling. However, presently PagedSpace requires limit to be aligned.
- size_t rounded_step;
- if (identity() == NEW_SPACE) {
- DCHECK_GE(step, 1);
- rounded_step = step - 1;
- } else {
- rounded_step = RoundSizeDownToObjectAlignment(static_cast<int>(step));
- }
+ size_t rounded_step =
+ RoundSizeDownToObjectAlignment(static_cast<int>(step - 1));
return Min(static_cast<Address>(start + min_size + rounded_step), end);
} else {
// The entire node can be used as the linear allocation area.
@@ -2139,7 +2143,7 @@ void PagedSpace::Verify(Isolate* isolate, ObjectVisitor* visitor) {
} else if (object.IsJSArrayBuffer()) {
JSArrayBuffer array_buffer = JSArrayBuffer::cast(object);
if (ArrayBufferTracker::IsTracked(array_buffer)) {
- size_t size = array_buffer.byte_length();
+ size_t size = PerIsolateAccountingLength(array_buffer);
external_page_bytes[ExternalBackingStoreType::kArrayBuffer] += size;
}
}
@@ -2628,7 +2632,7 @@ void NewSpace::Verify(Isolate* isolate) {
} else if (object.IsJSArrayBuffer()) {
JSArrayBuffer array_buffer = JSArrayBuffer::cast(object);
if (ArrayBufferTracker::IsTracked(array_buffer)) {
- size_t size = array_buffer.byte_length();
+ size_t size = PerIsolateAccountingLength(array_buffer);
external_space_bytes[ExternalBackingStoreType::kArrayBuffer] += size;
}
}
@@ -3942,6 +3946,7 @@ Address LargePage::GetAddressToShrink(Address object_address,
}
void LargePage::ClearOutOfLiveRangeSlots(Address free_start) {
+ DCHECK_NULL(this->sweeping_slot_set());
RememberedSet<OLD_TO_NEW>::RemoveRange(this, free_start, area_end(),
SlotSet::FREE_EMPTY_BUCKETS);
RememberedSet<OLD_TO_OLD>::RemoveRange(this, free_start, area_end(),
diff --git a/deps/v8/src/heap/spaces.h b/deps/v8/src/heap/spaces.h
index ebb6876cbe..5652042d20 100644
--- a/deps/v8/src/heap/spaces.h
+++ b/deps/v8/src/heap/spaces.h
@@ -130,12 +130,6 @@ enum FreeMode { kLinkCategory, kDoNotLinkCategory };
enum class SpaceAccountingMode { kSpaceAccounted, kSpaceUnaccounted };
-enum RememberedSetType {
- OLD_TO_NEW,
- OLD_TO_OLD,
- NUMBER_OF_REMEMBERED_SET_TYPES = OLD_TO_OLD + 1
-};
-
// A free list category maintains a linked list of free memory blocks.
class FreeListCategory {
public:
@@ -606,7 +600,7 @@ class MemoryChunk : public BasicMemoryChunk {
+ kSystemPointerSize // Address owner_
+ kSizetSize // size_t progress_bar_
+ kIntptrSize // intptr_t live_byte_count_
- + kSystemPointerSize * NUMBER_OF_REMEMBERED_SET_TYPES // SlotSet* array
+ + kSystemPointerSize // SlotSet* sweeping_slot_set_
+ kSystemPointerSize *
NUMBER_OF_REMEMBERED_SET_TYPES // TypedSlotSet* array
+ kSystemPointerSize *
@@ -706,6 +700,13 @@ class MemoryChunk : public BasicMemoryChunk {
return slot_set_[type];
}
+ template <AccessMode access_mode = AccessMode::ATOMIC>
+ SlotSet* sweeping_slot_set() {
+ if (access_mode == AccessMode::ATOMIC)
+ return base::AsAtomicPointer::Acquire_Load(&sweeping_slot_set_);
+ return sweeping_slot_set_;
+ }
+
template <RememberedSetType type, AccessMode access_mode = AccessMode::ATOMIC>
TypedSlotSet* typed_slot_set() {
if (access_mode == AccessMode::ATOMIC)
@@ -715,9 +716,13 @@ class MemoryChunk : public BasicMemoryChunk {
template <RememberedSetType type>
V8_EXPORT_PRIVATE SlotSet* AllocateSlotSet();
+ SlotSet* AllocateSweepingSlotSet();
+ SlotSet* AllocateSlotSet(SlotSet** slot_set);
+
// Not safe to be called concurrently.
template <RememberedSetType type>
void ReleaseSlotSet();
+ void ReleaseSlotSet(SlotSet** slot_set);
template <RememberedSetType type>
TypedSlotSet* AllocateTypedSlotSet();
// Not safe to be called concurrently.
@@ -729,12 +734,8 @@ class MemoryChunk : public BasicMemoryChunk {
template <RememberedSetType type>
void ReleaseInvalidatedSlots();
template <RememberedSetType type>
- V8_EXPORT_PRIVATE void RegisterObjectWithInvalidatedSlots(HeapObject object,
- int size);
- // Updates invalidated_slots after array left-trimming.
- template <RememberedSetType type>
- void MoveObjectWithInvalidatedSlots(HeapObject old_start,
- HeapObject new_start);
+ V8_EXPORT_PRIVATE void RegisterObjectWithInvalidatedSlots(HeapObject object);
+ void InvalidateRecordedSlots(HeapObject object);
template <RememberedSetType type>
bool RegisteredObjectWithInvalidatedSlots(HeapObject object);
template <RememberedSetType type>
@@ -914,7 +915,7 @@ class MemoryChunk : public BasicMemoryChunk {
// A single slot set for small pages (of size kPageSize) or an array of slot
// set for large pages. In the latter case the number of entries in the array
// is ceil(size() / kPageSize).
- SlotSet* slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES];
+ SlotSet* sweeping_slot_set_;
TypedSlotSet* typed_slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES];
InvalidatedSlots* invalidated_slots_[NUMBER_OF_REMEMBERED_SET_TYPES];
@@ -1097,6 +1098,9 @@ class Page : public MemoryChunk {
void AllocateFreeListCategories();
void ReleaseFreeListCategories();
+ void MoveOldToNewRememberedSetForSweeping();
+ void MergeOldToNewRememberedSets();
+
#ifdef DEBUG
void Print();
#endif // DEBUG
diff --git a/deps/v8/src/heap/store-buffer-inl.h b/deps/v8/src/heap/store-buffer-inl.h
deleted file mode 100644
index b43098bf57..0000000000
--- a/deps/v8/src/heap/store-buffer-inl.h
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_HEAP_STORE_BUFFER_INL_H_
-#define V8_HEAP_STORE_BUFFER_INL_H_
-
-#include "src/heap/store-buffer.h"
-
-#include "src/heap/heap-inl.h"
-
-namespace v8 {
-namespace internal {
-
-void StoreBuffer::InsertIntoStoreBuffer(Address slot) {
- if (top_ + sizeof(Address) > limit_[current_]) {
- StoreBufferOverflow(heap_->isolate());
- }
- *top_ = slot;
- top_++;
-}
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_HEAP_STORE_BUFFER_INL_H_
diff --git a/deps/v8/src/heap/store-buffer.cc b/deps/v8/src/heap/store-buffer.cc
deleted file mode 100644
index 349e787740..0000000000
--- a/deps/v8/src/heap/store-buffer.cc
+++ /dev/null
@@ -1,174 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/heap/store-buffer.h"
-
-#include <algorithm>
-
-#include "src/base/bits.h"
-#include "src/base/macros.h"
-#include "src/base/template-utils.h"
-#include "src/execution/isolate.h"
-#include "src/heap/incremental-marking.h"
-#include "src/heap/store-buffer-inl.h"
-#include "src/init/v8.h"
-#include "src/logging/counters.h"
-#include "src/objects/objects-inl.h"
-
-namespace v8 {
-namespace internal {
-
-StoreBuffer::StoreBuffer(Heap* heap)
- : heap_(heap), top_(nullptr), current_(0), mode_(NOT_IN_GC) {
- for (int i = 0; i < kStoreBuffers; i++) {
- start_[i] = nullptr;
- limit_[i] = nullptr;
- lazy_top_[i] = nullptr;
- }
- task_running_ = false;
- insertion_callback = &InsertDuringRuntime;
-}
-
-void StoreBuffer::SetUp() {
- v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
- // Round up the requested size in order to fulfill the VirtualMemory's
- // requirements on the requested size alignment. This may cause a bit of
- // memory wastage if the actual CommitPageSize() will be bigger than the
- // kMinExpectedOSPageSize value but this is a trade-off for keeping the
- // store buffer overflow check in write barriers cheap.
- const size_t requested_size = RoundUp(kStoreBufferSize * kStoreBuffers,
- page_allocator->CommitPageSize());
- // Allocate buffer memory aligned at least to kStoreBufferSize. This lets us
- // use a bit test to detect the ends of the buffers.
- STATIC_ASSERT(base::bits::IsPowerOfTwo(kStoreBufferSize));
- const size_t alignment =
- std::max<size_t>(kStoreBufferSize, page_allocator->AllocatePageSize());
- void* hint = AlignedAddress(heap_->GetRandomMmapAddr(), alignment);
- VirtualMemory reservation(page_allocator, requested_size, hint, alignment);
- if (!reservation.IsReserved()) {
- heap_->FatalProcessOutOfMemory("StoreBuffer::SetUp");
- }
-
- Address start = reservation.address();
- const size_t allocated_size = reservation.size();
-
- start_[0] = reinterpret_cast<Address*>(start);
- limit_[0] = start_[0] + (kStoreBufferSize / kSystemPointerSize);
- start_[1] = limit_[0];
- limit_[1] = start_[1] + (kStoreBufferSize / kSystemPointerSize);
-
- // Sanity check the buffers.
- Address* vm_limit = reinterpret_cast<Address*>(start + allocated_size);
- USE(vm_limit);
- for (int i = 0; i < kStoreBuffers; i++) {
- DCHECK(reinterpret_cast<Address>(start_[i]) >= reservation.address());
- DCHECK(reinterpret_cast<Address>(limit_[i]) >= reservation.address());
- DCHECK(start_[i] <= vm_limit);
- DCHECK(limit_[i] <= vm_limit);
- DCHECK_EQ(0, reinterpret_cast<Address>(limit_[i]) & kStoreBufferMask);
- }
-
- // Set RW permissions only on the pages we use.
- const size_t used_size = RoundUp(requested_size, CommitPageSize());
- if (!reservation.SetPermissions(start, used_size,
- PageAllocator::kReadWrite)) {
- heap_->FatalProcessOutOfMemory("StoreBuffer::SetUp");
- }
- current_ = 0;
- top_ = start_[current_];
- virtual_memory_ = std::move(reservation);
-}
-
-void StoreBuffer::TearDown() {
- if (virtual_memory_.IsReserved()) virtual_memory_.Free();
- top_ = nullptr;
- for (int i = 0; i < kStoreBuffers; i++) {
- start_[i] = nullptr;
- limit_[i] = nullptr;
- lazy_top_[i] = nullptr;
- }
-}
-
-void StoreBuffer::InsertDuringRuntime(StoreBuffer* store_buffer, Address slot) {
- DCHECK(store_buffer->mode() == StoreBuffer::NOT_IN_GC);
- store_buffer->InsertIntoStoreBuffer(slot);
-}
-
-void StoreBuffer::InsertDuringGarbageCollection(StoreBuffer* store_buffer,
- Address slot) {
- DCHECK(store_buffer->mode() != StoreBuffer::NOT_IN_GC);
- RememberedSet<OLD_TO_NEW>::Insert(Page::FromAddress(slot), slot);
-}
-
-void StoreBuffer::SetMode(StoreBufferMode mode) {
- mode_ = mode;
- if (mode == NOT_IN_GC) {
- insertion_callback = &InsertDuringRuntime;
- } else {
- insertion_callback = &InsertDuringGarbageCollection;
- }
-}
-
-int StoreBuffer::StoreBufferOverflow(Isolate* isolate) {
- isolate->heap()->store_buffer()->FlipStoreBuffers();
- isolate->counters()->store_buffer_overflows()->Increment();
- // Called by RecordWriteCodeStubAssembler, which doesn't accept a void type
- return 0;
-}
-
-void StoreBuffer::FlipStoreBuffers() {
- base::MutexGuard guard(&mutex_);
- int other = (current_ + 1) % kStoreBuffers;
- MoveEntriesToRememberedSet(other);
- lazy_top_[current_] = top_;
- current_ = other;
- top_ = start_[current_];
-
- if (!task_running_ && FLAG_concurrent_store_buffer) {
- task_running_ = true;
- V8::GetCurrentPlatform()->CallOnWorkerThread(
- base::make_unique<Task>(heap_->isolate(), this));
- }
-}
-
-void StoreBuffer::MoveEntriesToRememberedSet(int index) {
- if (!lazy_top_[index]) return;
- DCHECK_GE(index, 0);
- DCHECK_LT(index, kStoreBuffers);
- Address last_inserted_addr = kNullAddress;
- MemoryChunk* chunk = nullptr;
-
- for (Address* current = start_[index]; current < lazy_top_[index];
- current++) {
- Address addr = *current;
- if (chunk == nullptr ||
- MemoryChunk::BaseAddress(addr) != chunk->address()) {
- chunk = MemoryChunk::FromAnyPointerAddress(addr);
- }
- if (addr != last_inserted_addr) {
- RememberedSet<OLD_TO_NEW>::Insert(chunk, addr);
- last_inserted_addr = addr;
- }
- }
- lazy_top_[index] = nullptr;
-}
-
-void StoreBuffer::MoveAllEntriesToRememberedSet() {
- base::MutexGuard guard(&mutex_);
- int other = (current_ + 1) % kStoreBuffers;
- MoveEntriesToRememberedSet(other);
- lazy_top_[current_] = top_;
- MoveEntriesToRememberedSet(current_);
- top_ = start_[current_];
-}
-
-void StoreBuffer::ConcurrentlyProcessStoreBuffer() {
- base::MutexGuard guard(&mutex_);
- int other = (current_ + 1) % kStoreBuffers;
- MoveEntriesToRememberedSet(other);
- task_running_ = false;
-}
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/heap/store-buffer.h b/deps/v8/src/heap/store-buffer.h
deleted file mode 100644
index 025bb6a060..0000000000
--- a/deps/v8/src/heap/store-buffer.h
+++ /dev/null
@@ -1,153 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_HEAP_STORE_BUFFER_H_
-#define V8_HEAP_STORE_BUFFER_H_
-
-#include "src/base/logging.h"
-#include "src/base/platform/platform.h"
-#include "src/common/globals.h"
-#include "src/heap/gc-tracer.h"
-#include "src/heap/remembered-set.h"
-#include "src/heap/slot-set.h"
-#include "src/tasks/cancelable-task.h"
-#include "src/utils/allocation.h"
-
-namespace v8 {
-namespace internal {
-
-// Intermediate buffer that accumulates old-to-new stores from the generated
-// code. Moreover, it stores invalid old-to-new slots with two entries.
-// The first is a tagged address of the start of the invalid range, the second
-// one is the end address of the invalid range or null if there is just one slot
-// that needs to be removed from the remembered set. On buffer overflow the
-// slots are moved to the remembered set.
-// Store buffer entries are always full pointers.
-class StoreBuffer {
- public:
- enum StoreBufferMode { IN_GC, NOT_IN_GC };
-
- static const int kStoreBuffers = 2;
- static const int kStoreBufferSize =
- Max(static_cast<int>(kMinExpectedOSPageSize / kStoreBuffers),
- 1 << (11 + kSystemPointerSizeLog2));
- static const int kStoreBufferMask = kStoreBufferSize - 1;
-
- V8_EXPORT_PRIVATE static int StoreBufferOverflow(Isolate* isolate);
-
- static void InsertDuringGarbageCollection(StoreBuffer* store_buffer,
- Address slot);
- static void InsertDuringRuntime(StoreBuffer* store_buffer, Address slot);
-
- explicit StoreBuffer(Heap* heap);
- void SetUp();
- void TearDown();
-
- // Used to add entries from generated code.
- inline Address* top_address() { return reinterpret_cast<Address*>(&top_); }
-
- // Moves entries from a specific store buffer to the remembered set. This
- // method takes a lock.
- void MoveEntriesToRememberedSet(int index);
-
- // This method ensures that all used store buffer entries are transferred to
- // the remembered set.
- void MoveAllEntriesToRememberedSet();
-
- inline void InsertIntoStoreBuffer(Address slot);
-
- void InsertEntry(Address slot) {
- // Insertions coming from the GC are directly inserted into the remembered
- // set. Insertions coming from the runtime are added to the store buffer to
- // allow concurrent processing.
- insertion_callback(this, slot);
- }
-
- void SetMode(StoreBufferMode mode);
-
- // Used by the concurrent processing thread to transfer entries from the
- // store buffer to the remembered set.
- void ConcurrentlyProcessStoreBuffer();
-
- bool Empty() {
- for (int i = 0; i < kStoreBuffers; i++) {
- if (lazy_top_[i]) {
- return false;
- }
- }
- return top_ == start_[current_];
- }
-
- Heap* heap() { return heap_; }
-
- private:
- // There are two store buffers. If one store buffer fills up, the main thread
- // publishes the top pointer of the store buffer that needs processing in its
- // global lazy_top_ field. After that it starts the concurrent processing
- // thread. The concurrent processing thread uses the pointer in lazy_top_.
- // It will grab the given mutex and transfer its entries to the remembered
- // set. If the concurrent thread does not make progress, the main thread will
- // perform the work.
- // Important: there is an ordering constraint. The store buffer with the
- // older entries has to be processed first.
- class Task : public CancelableTask {
- public:
- Task(Isolate* isolate, StoreBuffer* store_buffer)
- : CancelableTask(isolate),
- store_buffer_(store_buffer),
- tracer_(isolate->heap()->tracer()) {}
- ~Task() override = default;
-
- private:
- void RunInternal() override {
- TRACE_BACKGROUND_GC(tracer_,
- GCTracer::BackgroundScope::BACKGROUND_STORE_BUFFER);
- store_buffer_->ConcurrentlyProcessStoreBuffer();
- }
- StoreBuffer* store_buffer_;
- GCTracer* tracer_;
- DISALLOW_COPY_AND_ASSIGN(Task);
- };
-
- StoreBufferMode mode() const { return mode_; }
-
- void FlipStoreBuffers();
-
- Heap* heap_;
-
- Address* top_;
-
- // The start and the limit of the buffer that contains store slots
- // added from the generated code. We have two chunks of store buffers.
- // Whenever one fills up, we notify a concurrent processing thread and
- // use the other empty one in the meantime.
- Address* start_[kStoreBuffers];
- Address* limit_[kStoreBuffers];
-
- // At most one lazy_top_ pointer is set at any time.
- Address* lazy_top_[kStoreBuffers];
- base::Mutex mutex_;
-
- // We only want to have at most one concurrent processing task running.
- bool task_running_;
-
- // Points to the current buffer in use.
- int current_;
-
- // During GC, entries are directly added to the remembered set without
- // going through the store buffer. This is signaled by a special
- // IN_GC mode.
- StoreBufferMode mode_;
-
- VirtualMemory virtual_memory_;
-
- // Callbacks are more efficient than reading out the gc state for every
- // store buffer operation.
- void (*insertion_callback)(StoreBuffer*, Address);
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_HEAP_STORE_BUFFER_H_
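With the store buffer deleted, runtime old-to-new stores no longer pass through an intermediate buffer that is drained concurrently; they are recorded in the remembered set at the write barrier. A hedged sketch of the direct path that replaces StoreBuffer::InsertEntry — the wrapper name is illustrative, while the Insert call itself is the API used throughout this patch:

// Illustrative replacement for the buffered path: resolve the chunk and
// record the slot directly in the old-to-new remembered set.
void InsertOldToNewSlot(Address slot) {
  MemoryChunk* chunk = MemoryChunk::FromAnyPointerAddress(slot);
  RememberedSet<OLD_TO_NEW>::Insert<AccessMode::ATOMIC>(chunk, slot);
}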
diff --git a/deps/v8/src/heap/sweeper.cc b/deps/v8/src/heap/sweeper.cc
index c3c6b58835..11be775485 100644
--- a/deps/v8/src/heap/sweeper.cc
+++ b/deps/v8/src/heap/sweeper.cc
@@ -4,7 +4,6 @@
#include "src/heap/sweeper.h"
-#include "src/base/template-utils.h"
#include "src/execution/vm-state-inl.h"
#include "src/heap/array-buffer-tracker-inl.h"
#include "src/heap/gc-tracer.h"
@@ -181,7 +180,7 @@ void Sweeper::StartSweeperTasks() {
ForAllSweepingSpaces([this](AllocationSpace space) {
DCHECK(IsValidSweepingSpace(space));
num_sweeping_tasks_++;
- auto task = base::make_unique<SweeperTask>(
+ auto task = std::make_unique<SweeperTask>(
heap_->isolate(), this, &pending_sweeper_tasks_semaphore_,
&num_sweeping_tasks_, space);
DCHECK_LT(num_tasks_, kMaxSweeperTasks);
@@ -321,8 +320,8 @@ int Sweeper::RawSweep(
ClearFreedMemoryMode::kClearFreedMemory);
}
if (should_reduce_memory_) p->DiscardUnusedMemory(free_start, size);
- RememberedSet<OLD_TO_NEW>::RemoveRange(p, free_start, free_end,
- SlotSet::KEEP_EMPTY_BUCKETS);
+ RememberedSetSweeping::RemoveRange(p, free_start, free_end,
+ SlotSet::KEEP_EMPTY_BUCKETS);
RememberedSet<OLD_TO_OLD>::RemoveRange(p, free_start, free_end,
SlotSet::KEEP_EMPTY_BUCKETS);
if (non_empty_typed_slots) {
@@ -355,8 +354,8 @@ int Sweeper::RawSweep(
ClearFreedMemoryMode::kClearFreedMemory);
}
if (should_reduce_memory_) p->DiscardUnusedMemory(free_start, size);
- RememberedSet<OLD_TO_NEW>::RemoveRange(p, free_start, p->area_end(),
- SlotSet::KEEP_EMPTY_BUCKETS);
+ RememberedSetSweeping::RemoveRange(p, free_start, p->area_end(),
+ SlotSet::KEEP_EMPTY_BUCKETS);
RememberedSet<OLD_TO_OLD>::RemoveRange(p, free_start, p->area_end(),
SlotSet::KEEP_EMPTY_BUCKETS);
if (non_empty_typed_slots) {
@@ -404,6 +403,10 @@ void Sweeper::SweepSpaceFromTask(AllocationSpace identity) {
Page* page = nullptr;
while (!stop_sweeper_tasks_ &&
((page = GetSweepingPageSafe(identity)) != nullptr)) {
+ // Typed slot sets are only recorded on code pages. Code pages
+ // are not swept concurrently to the application to ensure W^X.
+ DCHECK(!page->typed_slot_set<OLD_TO_NEW>() &&
+ !page->typed_slot_set<OLD_TO_OLD>());
ParallelSweepPage(page, identity);
}
}
@@ -462,16 +465,6 @@ int Sweeper::ParallelSweepPage(
max_freed = RawSweep(page, REBUILD_FREE_LIST, free_space_mode,
invalidated_slots_in_free_space);
DCHECK(page->SweepingDone());
-
- // After finishing sweeping of a page we clean up its remembered set.
- TypedSlotSet* typed_slot_set = page->typed_slot_set<OLD_TO_NEW>();
- if (typed_slot_set) {
- typed_slot_set->FreeToBeFreedChunks();
- }
- SlotSet* slot_set = page->slot_set<OLD_TO_NEW>();
- if (slot_set) {
- slot_set->FreeToBeFreedBuckets();
- }
}
{
@@ -488,7 +481,7 @@ void Sweeper::ScheduleIncrementalSweepingTask() {
auto taskrunner =
V8::GetCurrentPlatform()->GetForegroundTaskRunner(isolate);
taskrunner->PostTask(
- base::make_unique<IncrementalSweeperTask>(heap_->isolate(), this));
+ std::make_unique<IncrementalSweeperTask>(heap_->isolate(), this));
}
}
@@ -517,6 +510,7 @@ void Sweeper::PrepareToBeSweptPage(AllocationSpace space, Page* page) {
DCHECK(!category->is_linked(page->owner()->free_list()));
});
#endif // DEBUG
+ page->MoveOldToNewRememberedSetForSweeping();
page->set_concurrent_sweeping_state(Page::kSweepingPending);
heap_->paged_space(space)->IncreaseAllocatedBytes(
marking_state_->live_bytes(page), page);
@@ -596,8 +590,8 @@ void Sweeper::StartIterabilityTasks() {
DCHECK(!iterability_task_started_);
if (FLAG_concurrent_sweeping && !iterability_list_.empty()) {
- auto task = base::make_unique<IterabilityTask>(
- heap_->isolate(), this, &iterability_task_semaphore_);
+ auto task = std::make_unique<IterabilityTask>(heap_->isolate(), this,
+ &iterability_task_semaphore_);
iterability_task_id_ = task->id();
iterability_task_started_ = true;
V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task));
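The base::make_unique changes in this file are mechanical: V8 now calls the standard C++14 helper directly. A minimal sketch of the resulting pattern, with the SweeperTask arguments taken from the hunk above:

    #include <memory>  // std::make_unique; src/base/template-utils.h no longer needed

    auto task = std::make_unique<SweeperTask>(
        heap_->isolate(), this, &pending_sweeper_tasks_semaphore_,
        &num_sweeping_tasks_, space);
    V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task));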
diff --git a/deps/v8/src/ic/accessor-assembler.cc b/deps/v8/src/ic/accessor-assembler.cc
index f9efcba05f..99cbd3c3c8 100644
--- a/deps/v8/src/ic/accessor-assembler.cc
+++ b/deps/v8/src/ic/accessor-assembler.cc
@@ -5,6 +5,7 @@
#include "src/ic/accessor-assembler.h"
#include "src/ast/ast.h"
+#include "src/base/optional.h"
#include "src/codegen/code-factory.h"
#include "src/ic/handler-configuration.h"
#include "src/ic/ic.h"
@@ -16,6 +17,7 @@
#include "src/objects/heap-number.h"
#include "src/objects/module.h"
#include "src/objects/objects-inl.h"
+#include "src/objects/property-details.h"
#include "src/objects/smi.h"
namespace v8 {
@@ -23,10 +25,6 @@ namespace internal {
using compiler::CodeAssemblerState;
using compiler::Node;
-template <typename T>
-using TNode = compiler::TNode<T>;
-template <typename T>
-using SloppyTNode = compiler::SloppyTNode<T>;
//////////////////// Private helpers.
@@ -66,27 +64,25 @@ TNode<MaybeObject> AccessorAssembler::LoadHandlerDataField(
}
TNode<MaybeObject> AccessorAssembler::TryMonomorphicCase(
- Node* slot, Node* vector, Node* receiver_map, Label* if_handler,
- TVariable<MaybeObject>* var_handler, Label* if_miss) {
+ TNode<Smi> slot, TNode<FeedbackVector> vector, TNode<Map> receiver_map,
+ Label* if_handler, TVariable<MaybeObject>* var_handler, Label* if_miss) {
Comment("TryMonomorphicCase");
DCHECK_EQ(MachineRepresentation::kTagged, var_handler->rep());
// TODO(ishell): add helper class that hides offset computations for a series
// of loads.
- CSA_ASSERT(this, IsFeedbackVector(vector), vector);
int32_t header_size = FeedbackVector::kFeedbackSlotsOffset - kHeapObjectTag;
// Adding |header_size| with a separate IntPtrAdd rather than passing it
// into ElementOffsetFromIndex() allows it to be folded into a single
// [base, index, offset] indirect memory access on x64.
- TNode<IntPtrT> offset =
- ElementOffsetFromIndex(slot, HOLEY_ELEMENTS, SMI_PARAMETERS);
+ TNode<IntPtrT> offset = ElementOffsetFromIndex(slot, HOLEY_ELEMENTS);
TNode<MaybeObject> feedback = ReinterpretCast<MaybeObject>(
Load(MachineType::AnyTagged(), vector,
IntPtrAdd(offset, IntPtrConstant(header_size))));
// Try to quickly handle the monomorphic case without knowing for sure
// if we have a weak reference in feedback.
- GotoIf(IsNotWeakReferenceTo(feedback, CAST(receiver_map)), if_miss);
+ GotoIfNot(IsWeakReferenceTo(feedback, receiver_map), if_miss);
TNode<MaybeObject> handler = UncheckedCast<MaybeObject>(
Load(MachineType::AnyTagged(), vector,
@@ -98,7 +94,7 @@ TNode<MaybeObject> AccessorAssembler::TryMonomorphicCase(
}
void AccessorAssembler::HandlePolymorphicCase(
- Node* receiver_map, TNode<WeakFixedArray> feedback, Label* if_handler,
+ TNode<Map> receiver_map, TNode<WeakFixedArray> feedback, Label* if_handler,
TVariable<MaybeObject>* var_handler, Label* if_miss) {
Comment("HandlePolymorphicCase");
DCHECK_EQ(MachineRepresentation::kTagged, var_handler->rep());
@@ -121,8 +117,7 @@ void AccessorAssembler::HandlePolymorphicCase(
TNode<MaybeObject> maybe_cached_map =
LoadWeakFixedArrayElement(feedback, var_index.value());
CSA_ASSERT(this, IsWeakOrCleared(maybe_cached_map));
- GotoIf(IsNotWeakReferenceTo(maybe_cached_map, CAST(receiver_map)),
- &loop_next);
+ GotoIfNot(IsWeakReferenceTo(maybe_cached_map, receiver_map), &loop_next);
// Found, now call handler.
TNode<MaybeObject> handler =
@@ -157,7 +152,7 @@ void AccessorAssembler::HandleLoadICHandlerCase(
BIND(&try_proto_handler);
{
GotoIf(IsCodeMap(LoadMap(CAST(handler))), &call_handler);
- HandleLoadICProtoHandler(p, handler, &var_holder, &var_smi_handler,
+ HandleLoadICProtoHandler(p, CAST(handler), &var_holder, &var_smi_handler,
&if_smi_handler, miss, exit_point, ic_mode,
access_mode);
}
@@ -167,8 +162,8 @@ void AccessorAssembler::HandleLoadICHandlerCase(
BIND(&if_smi_handler);
{
HandleLoadICSmiHandlerCase(p, var_holder.value(), var_smi_handler.value(),
- handler, miss, exit_point, on_nonexistent,
- support_elements, access_mode);
+ handler, miss, exit_point, ic_mode,
+ on_nonexistent, support_elements, access_mode);
}
BIND(&call_handler);
@@ -237,9 +232,10 @@ void AccessorAssembler::HandleLoadAccessor(
api_holder.value(), p->receiver()));
}
-void AccessorAssembler::HandleLoadField(Node* holder, Node* handler_word,
+void AccessorAssembler::HandleLoadField(SloppyTNode<JSObject> holder,
+ TNode<WordT> handler_word,
Variable* var_double_value,
- Label* rebox_double,
+ Label* rebox_double, Label* miss,
ExitPoint* exit_point) {
Comment("field_load");
TNode<IntPtrT> index =
@@ -261,8 +257,13 @@ void AccessorAssembler::HandleLoadField(Node* holder, Node* handler_word,
var_double_value->Bind(
LoadObjectField(holder, offset, MachineType::Float64()));
} else {
- TNode<HeapNumber> heap_number = CAST(LoadObjectField(holder, offset));
- var_double_value->Bind(LoadHeapNumberValue(heap_number));
+ TNode<Object> heap_number = LoadObjectField(holder, offset);
+ // This is not an "old" Smi value from before a Smi->Double transition.
+ // Rather, it's possible that since the last update of this IC, the Double
+ // field transitioned to a Tagged field, and was then assigned a Smi.
+ GotoIf(TaggedIsSmi(heap_number), miss);
+ GotoIfNot(IsHeapNumber(CAST(heap_number)), miss);
+ var_double_value->Bind(LoadHeapNumberValue(CAST(heap_number)));
}
Goto(rebox_double);
}
@@ -276,6 +277,13 @@ void AccessorAssembler::HandleLoadField(Node* holder, Node* handler_word,
exit_point->Return(value);
BIND(&is_double);
+ if (!FLAG_unbox_double_fields) {
+ // This is not an "old" Smi value from before a Smi->Double transition.
+ // Rather, it's possible that since the last update of this IC, the Double
+ // field transitioned to a Tagged field, and was then assigned a Smi.
+ GotoIf(TaggedIsSmi(value), miss);
+ GotoIfNot(IsHeapNumber(CAST(value)), miss);
+ }
var_double_value->Bind(LoadHeapNumberValue(CAST(value)));
Goto(rebox_double);
}
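An illustrative timeline (an assumption drawn from the comment above, not text from the patch) of why the new guards are needed before LoadHeapNumberValue:

    // 1. o.x has Double representation; the LoadIC caches a double-field
    //    handler for o's map.
    // 2. The descriptor for o.x is later generalized in place from Double to
    //    Tagged, so the cached handler's map check still passes.
    // 3. A Smi is stored into o.x.
    // 4. The stale handler runs again: without the guards it would read the
    //    Smi as if it were a HeapNumber, so the handler must miss instead.
    GotoIf(TaggedIsSmi(heap_number), miss);
    GotoIfNot(IsHeapNumber(CAST(heap_number)), miss);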
@@ -293,10 +301,10 @@ TNode<MaybeObject> AccessorAssembler::LoadDescriptorValueOrFieldType(
}
void AccessorAssembler::HandleLoadICSmiHandlerCase(
- const LazyLoadICParameters* p, Node* holder, SloppyTNode<Smi> smi_handler,
- SloppyTNode<Object> handler, Label* miss, ExitPoint* exit_point,
- OnNonExistent on_nonexistent, ElementSupport support_elements,
- LoadAccessMode access_mode) {
+ const LazyLoadICParameters* p, SloppyTNode<HeapObject> holder,
+ SloppyTNode<Smi> smi_handler, SloppyTNode<Object> handler, Label* miss,
+ ExitPoint* exit_point, ICMode ic_mode, OnNonExistent on_nonexistent,
+ ElementSupport support_elements, LoadAccessMode access_mode) {
VARIABLE(var_double_value, MachineRepresentation::kFloat64);
Label rebox_double(this, &var_double_value);
@@ -388,10 +396,11 @@ void AccessorAssembler::HandleLoadICSmiHandlerCase(
Label if_oob(this, Label::kDeferred);
Comment("indexed string");
+ TNode<String> string_holder = CAST(holder);
TNode<IntPtrT> intptr_index = TryToIntptr(p->name(), miss);
- TNode<IntPtrT> length = LoadStringLengthAsWord(holder);
+ TNode<IntPtrT> length = LoadStringLengthAsWord(string_holder);
GotoIf(UintPtrGreaterThanOrEqual(intptr_index, length), &if_oob);
- TNode<Int32T> code = StringCharCodeAt(holder, intptr_index);
+ TNode<Int32T> code = StringCharCodeAt(string_holder, intptr_index);
TNode<String> result = StringFromSingleCharCode(code);
Return(result);
@@ -410,23 +419,25 @@ void AccessorAssembler::HandleLoadICSmiHandlerCase(
if (access_mode == LoadAccessMode::kHas) {
HandleLoadICSmiHandlerHasNamedCase(p, holder, handler_kind, miss,
- exit_point);
+ exit_point, ic_mode);
} else {
HandleLoadICSmiHandlerLoadNamedCase(
p, holder, handler_kind, handler_word, &rebox_double, &var_double_value,
- handler, miss, exit_point, on_nonexistent, support_elements);
+ handler, miss, exit_point, ic_mode, on_nonexistent, support_elements);
}
}
void AccessorAssembler::HandleLoadICSmiHandlerLoadNamedCase(
- const LazyLoadICParameters* p, Node* holder, TNode<IntPtrT> handler_kind,
- TNode<WordT> handler_word, Label* rebox_double, Variable* var_double_value,
- SloppyTNode<Object> handler, Label* miss, ExitPoint* exit_point,
- OnNonExistent on_nonexistent, ElementSupport support_elements) {
+ const LazyLoadICParameters* p, TNode<HeapObject> holder,
+ TNode<IntPtrT> handler_kind, TNode<WordT> handler_word, Label* rebox_double,
+ Variable* var_double_value, SloppyTNode<Object> handler, Label* miss,
+ ExitPoint* exit_point, ICMode ic_mode, OnNonExistent on_nonexistent,
+ ElementSupport support_elements) {
Label constant(this), field(this), normal(this, Label::kDeferred),
- interceptor(this, Label::kDeferred), nonexistent(this),
- accessor(this, Label::kDeferred), global(this, Label::kDeferred),
- module_export(this, Label::kDeferred), proxy(this, Label::kDeferred),
+ slow(this, Label::kDeferred), interceptor(this, Label::kDeferred),
+ nonexistent(this), accessor(this, Label::kDeferred),
+ global(this, Label::kDeferred), module_export(this, Label::kDeferred),
+ proxy(this, Label::kDeferred),
native_data_property(this, Label::kDeferred),
api_getter(this, Label::kDeferred);
@@ -459,14 +470,16 @@ void AccessorAssembler::HandleLoadICSmiHandlerLoadNamedCase(
GotoIf(WordEqual(handler_kind, IntPtrConstant(LoadHandler::kGlobal)),
&global);
+ GotoIf(WordEqual(handler_kind, IntPtrConstant(LoadHandler::kSlow)), &slow);
+
GotoIf(WordEqual(handler_kind, IntPtrConstant(LoadHandler::kProxy)), &proxy);
Branch(WordEqual(handler_kind, IntPtrConstant(LoadHandler::kModuleExport)),
&module_export, &interceptor);
BIND(&field);
- HandleLoadField(holder, handler_word, var_double_value, rebox_double,
- exit_point);
+ HandleLoadField(CAST(holder), handler_word, var_double_value, rebox_double,
+ miss, exit_point);
BIND(&nonexistent);
// This is a handler for a load of a non-existent value.
@@ -487,7 +500,7 @@ void AccessorAssembler::HandleLoadICSmiHandlerLoadNamedCase(
BIND(&normal);
{
Comment("load_normal");
- TNode<NameDictionary> properties = CAST(LoadSlowProperties(holder));
+ TNode<NameDictionary> properties = CAST(LoadSlowProperties(CAST(holder)));
TVARIABLE(IntPtrT, var_name_index);
Label found(this, &var_name_index);
NameDictionaryLookup<NameDictionary>(properties, CAST(p->name()), &found,
@@ -529,8 +542,8 @@ void AccessorAssembler::HandleLoadICSmiHandlerLoadNamedCase(
BIND(&proxy);
{
- VARIABLE(var_index, MachineType::PointerRepresentation());
- VARIABLE(var_unique, MachineRepresentation::kTagged);
+ TVARIABLE(IntPtrT, var_index);
+ TVARIABLE(Name, var_unique);
Label if_index(this), if_unique_name(this),
to_name_failed(this, Label::kDeferred);
@@ -586,20 +599,31 @@ void AccessorAssembler::HandleLoadICSmiHandlerLoadNamedCase(
p->context(), p->name(), p->receiver(),
holder, p->slot(), p->vector());
}
+ BIND(&slow);
+ {
+ Comment("load_slow");
+ if (ic_mode == ICMode::kGlobalIC) {
+ exit_point->ReturnCallRuntime(Runtime::kLoadGlobalIC_Slow, p->context(),
+ p->name(), p->slot(), p->vector());
+
+ } else {
+ exit_point->ReturnCallRuntime(Runtime::kGetProperty, p->context(),
+ p->receiver(), p->name());
+ }
+ }
BIND(&module_export);
{
Comment("module export");
TNode<UintPtrT> index =
DecodeWord<LoadHandler::ExportsIndexBits>(handler_word);
- Node* module =
- LoadObjectField(p->receiver(), JSModuleNamespace::kModuleOffset,
- MachineType::TaggedPointer());
- TNode<ObjectHashTable> exports = CAST(LoadObjectField(
- module, Module::kExportsOffset, MachineType::TaggedPointer()));
+ TNode<Module> module =
+ CAST(LoadObjectField(p->receiver(), JSModuleNamespace::kModuleOffset));
+ TNode<ObjectHashTable> exports =
+ LoadObjectField<ObjectHashTable>(module, Module::kExportsOffset);
TNode<Cell> cell = CAST(LoadFixedArrayElement(exports, index));
// The handler is only installed for exports that exist.
- Node* value = LoadCellValue(cell);
+ TNode<Object> value = LoadCellValue(cell);
Label is_the_hole(this, Label::kDeferred);
GotoIf(IsTheHole(value), &is_the_hole);
exit_point->Return(value);
@@ -617,10 +641,11 @@ void AccessorAssembler::HandleLoadICSmiHandlerLoadNamedCase(
}
void AccessorAssembler::HandleLoadICSmiHandlerHasNamedCase(
- const LazyLoadICParameters* p, Node* holder, TNode<IntPtrT> handler_kind,
- Label* miss, ExitPoint* exit_point) {
+ const LazyLoadICParameters* p, TNode<HeapObject> holder,
+ TNode<IntPtrT> handler_kind, Label* miss, ExitPoint* exit_point,
+ ICMode ic_mode) {
Label return_true(this), return_false(this), return_lookup(this),
- normal(this), global(this);
+ normal(this), global(this), slow(this);
GotoIf(WordEqual(handler_kind, IntPtrConstant(LoadHandler::kField)),
&return_true);
@@ -649,6 +674,8 @@ void AccessorAssembler::HandleLoadICSmiHandlerHasNamedCase(
IntPtrConstant(LoadHandler::kApiGetterHolderIsPrototype)),
&return_true);
+ GotoIf(WordEqual(handler_kind, IntPtrConstant(LoadHandler::kSlow)), &slow);
+
Branch(WordEqual(handler_kind, IntPtrConstant(LoadHandler::kGlobal)), &global,
&return_lookup);
@@ -676,7 +703,7 @@ void AccessorAssembler::HandleLoadICSmiHandlerHasNamedCase(
BIND(&normal);
{
Comment("has_normal");
- TNode<NameDictionary> properties = CAST(LoadSlowProperties(holder));
+ TNode<NameDictionary> properties = CAST(LoadSlowProperties(CAST(holder)));
TVARIABLE(IntPtrT, var_name_index);
Label found(this);
NameDictionaryLookup<NameDictionary>(properties, CAST(p->name()), &found,
@@ -695,6 +722,18 @@ void AccessorAssembler::HandleLoadICSmiHandlerHasNamedCase(
exit_point->Return(TrueConstant());
}
+
+ BIND(&slow);
+ {
+ Comment("load_slow");
+ if (ic_mode == ICMode::kGlobalIC) {
+ exit_point->ReturnCallRuntime(Runtime::kLoadGlobalIC_Slow, p->context(),
+ p->name(), p->slot(), p->vector());
+ } else {
+ exit_point->ReturnCallRuntime(Runtime::kHasProperty, p->context(),
+ p->receiver(), p->name());
+ }
+ }
}
// Performs actions common to both load and store handlers:
@@ -715,8 +754,9 @@ void AccessorAssembler::HandleLoadICSmiHandlerHasNamedCase(
// TODO(ishell): Remove templatezation once we move common bits from
// Load/StoreHandler to the base class.
template <typename ICHandler, typename ICParameters>
-Node* AccessorAssembler::HandleProtoHandler(
- const ICParameters* p, Node* handler, const OnCodeHandler& on_code_handler,
+TNode<Object> AccessorAssembler::HandleProtoHandler(
+ const ICParameters* p, TNode<DataHandler> handler,
+ const OnCodeHandler& on_code_handler,
const OnFoundOnReceiver& on_found_on_receiver, Label* miss,
ICMode ic_mode) {
//
@@ -738,8 +778,7 @@ Node* AccessorAssembler::HandleProtoHandler(
Label if_smi_handler(this);
GotoIf(TaggedIsSmi(smi_or_code_handler), &if_smi_handler);
- CSA_ASSERT(this, IsCodeMap(LoadMap(CAST(smi_or_code_handler))));
- on_code_handler(smi_or_code_handler);
+ on_code_handler(CAST(smi_or_code_handler));
BIND(&if_smi_handler);
}
@@ -771,8 +810,8 @@ Node* AccessorAssembler::HandleProtoHandler(
CSA_ASSERT(this, IsWeakOrCleared(data2));
TNode<Context> expected_native_context =
CAST(GetHeapObjectAssumeWeak(data2, miss));
- EmitAccessCheck(expected_native_context, p->context(), p->receiver(),
- &done, miss);
+ EmitAccessCheck(expected_native_context, p->context(),
+ CAST(p->receiver()), &done, miss);
}
// Dictionary lookup on receiver is not necessary for Load/StoreGlobalIC
@@ -807,18 +846,19 @@ Node* AccessorAssembler::HandleProtoHandler(
}
void AccessorAssembler::HandleLoadICProtoHandler(
- const LazyLoadICParameters* p, Node* handler, Variable* var_holder,
- Variable* var_smi_handler, Label* if_smi_handler, Label* miss,
- ExitPoint* exit_point, ICMode ic_mode, LoadAccessMode access_mode) {
+ const LazyLoadICParameters* p, TNode<DataHandler> handler,
+ Variable* var_holder, Variable* var_smi_handler, Label* if_smi_handler,
+ Label* miss, ExitPoint* exit_point, ICMode ic_mode,
+ LoadAccessMode access_mode) {
DCHECK_EQ(MachineRepresentation::kTagged, var_holder->rep());
DCHECK_EQ(MachineRepresentation::kTagged, var_smi_handler->rep());
- Node* smi_handler = HandleProtoHandler<LoadHandler>(
+ TNode<Smi> smi_handler = CAST(HandleProtoHandler<LoadHandler>(
p, handler,
// Code sub-handlers are not expected in LoadICs, so no |on_code_handler|.
nullptr,
// on_found_on_receiver
- [=](Node* properties, Node* name_index) {
+ [=](TNode<NameDictionary> properties, TNode<IntPtrT> name_index) {
if (access_mode == LoadAccessMode::kHas) {
exit_point->Return(TrueConstant());
} else {
@@ -832,7 +872,7 @@ void AccessorAssembler::HandleLoadICProtoHandler(
exit_point->Return(value);
}
},
- miss, ic_mode);
+ miss, ic_mode));
TNode<MaybeObject> maybe_holder_or_constant =
LoadHandlerDataField(handler, 1);
@@ -840,7 +880,7 @@ void AccessorAssembler::HandleLoadICProtoHandler(
Label load_from_cached_holder(this), is_smi(this), done(this);
GotoIf(TaggedIsSmi(maybe_holder_or_constant), &is_smi);
- Branch(IsStrongReferenceTo(maybe_holder_or_constant, NullConstant()), &done,
+ Branch(TaggedEqual(maybe_holder_or_constant, NullConstant()), &done,
&load_from_cached_holder);
BIND(&is_smi);
@@ -878,14 +918,15 @@ void AccessorAssembler::HandleLoadICProtoHandler(
}
void AccessorAssembler::EmitAccessCheck(TNode<Context> expected_native_context,
- TNode<Context> context, Node* receiver,
+ TNode<Context> context,
+ TNode<Object> receiver,
Label* can_access, Label* miss) {
CSA_ASSERT(this, IsNativeContext(expected_native_context));
- TNode<Context> native_context = LoadNativeContext(context);
+ TNode<NativeContext> native_context = LoadNativeContext(context);
GotoIf(TaggedEqual(expected_native_context, native_context), can_access);
// If the receiver is not a JSGlobalProxy then we miss.
- GotoIfNot(IsJSGlobalProxy(receiver), miss);
+ GotoIfNot(IsJSGlobalProxy(CAST(receiver)), miss);
// For JSGlobalProxy receiver try to compare security tokens of current
// and expected native contexts.
TNode<Object> expected_token = LoadContextElement(
@@ -895,8 +936,8 @@ void AccessorAssembler::EmitAccessCheck(TNode<Context> expected_native_context,
Branch(TaggedEqual(expected_token, current_token), can_access, miss);
}
-void AccessorAssembler::JumpIfDataProperty(Node* details, Label* writable,
- Label* readonly) {
+void AccessorAssembler::JumpIfDataProperty(TNode<Uint32T> details,
+ Label* writable, Label* readonly) {
if (readonly) {
// Accessor properties never have the READ_ONLY attribute set.
GotoIf(IsSetWord32(details, PropertyDetails::kAttributesReadOnlyMask),
@@ -911,10 +952,11 @@ void AccessorAssembler::JumpIfDataProperty(Node* details, Label* writable,
}
void AccessorAssembler::HandleStoreICNativeDataProperty(
- const StoreICParameters* p, Node* holder, Node* handler_word) {
+ const StoreICParameters* p, SloppyTNode<HeapObject> holder,
+ TNode<Word32T> handler_word) {
Comment("native_data_property_store");
TNode<IntPtrT> descriptor =
- Signed(DecodeWord<StoreHandler::DescriptorBits>(handler_word));
+ Signed(DecodeWordFromWord32<StoreHandler::DescriptorBits>(handler_word));
TNode<AccessorInfo> accessor_info =
CAST(LoadDescriptorValue(LoadMap(holder), descriptor));
@@ -936,23 +978,30 @@ void AccessorAssembler::HandleStoreICHandlerCase(
BIND(&if_smi_handler);
{
Node* holder = p->receiver();
- TNode<IntPtrT> handler_word = SmiUntag(CAST(handler));
+ TNode<Int32T> handler_word = SmiToInt32(CAST(handler));
- Label if_fast_smi(this), if_proxy(this);
+ Label if_fast_smi(this), if_proxy(this), if_interceptor(this),
+ if_slow(this);
STATIC_ASSERT(StoreHandler::kGlobalProxy + 1 == StoreHandler::kNormal);
- STATIC_ASSERT(StoreHandler::kNormal + 1 == StoreHandler::kProxy);
+ STATIC_ASSERT(StoreHandler::kNormal + 1 == StoreHandler::kInterceptor);
+ STATIC_ASSERT(StoreHandler::kInterceptor + 1 == StoreHandler::kSlow);
+ STATIC_ASSERT(StoreHandler::kSlow + 1 == StoreHandler::kProxy);
STATIC_ASSERT(StoreHandler::kProxy + 1 == StoreHandler::kKindsNumber);
- TNode<UintPtrT> handler_kind =
- DecodeWord<StoreHandler::KindBits>(handler_word);
- GotoIf(IntPtrLessThan(handler_kind,
- IntPtrConstant(StoreHandler::kGlobalProxy)),
- &if_fast_smi);
- GotoIf(WordEqual(handler_kind, IntPtrConstant(StoreHandler::kProxy)),
+ TNode<Uint32T> handler_kind =
+ DecodeWord32<StoreHandler::KindBits>(handler_word);
+ GotoIf(
+ Int32LessThan(handler_kind, Int32Constant(StoreHandler::kGlobalProxy)),
+ &if_fast_smi);
+ GotoIf(Word32Equal(handler_kind, Int32Constant(StoreHandler::kProxy)),
&if_proxy);
+ GotoIf(Word32Equal(handler_kind, Int32Constant(StoreHandler::kInterceptor)),
+ &if_interceptor);
+ GotoIf(Word32Equal(handler_kind, Int32Constant(StoreHandler::kSlow)),
+ &if_slow);
CSA_ASSERT(this,
- WordEqual(handler_kind, IntPtrConstant(StoreHandler::kNormal)));
+ Word32Equal(handler_kind, Int32Constant(StoreHandler::kNormal)));
TNode<NameDictionary> properties = CAST(LoadSlowProperties(holder));
TVARIABLE(IntPtrT, var_name_index);
@@ -976,14 +1025,14 @@ void AccessorAssembler::HandleStoreICHandlerCase(
BIND(&if_fast_smi);
{
- TNode<UintPtrT> handler_kind =
- DecodeWord<StoreHandler::KindBits>(handler_word);
+ TNode<Uint32T> handler_kind =
+ DecodeWord32<StoreHandler::KindBits>(handler_word);
Label data(this), accessor(this), native_data_property(this);
- GotoIf(WordEqual(handler_kind, IntPtrConstant(StoreHandler::kAccessor)),
+ GotoIf(Word32Equal(handler_kind, Int32Constant(StoreHandler::kAccessor)),
&accessor);
- Branch(WordEqual(handler_kind,
- IntPtrConstant(StoreHandler::kNativeDataProperty)),
+ Branch(Word32Equal(handler_kind,
+ Int32Constant(StoreHandler::kNativeDataProperty)),
&native_data_property, &data);
BIND(&accessor);
@@ -999,6 +1048,29 @@ void AccessorAssembler::HandleStoreICHandlerCase(
BIND(&if_proxy);
HandleStoreToProxy(p, holder, miss, support_elements);
+
+ BIND(&if_interceptor);
+ {
+ Comment("store_interceptor");
+ TailCallRuntime(Runtime::kStorePropertyWithInterceptor, p->context(),
+ p->value(), p->slot(), p->vector(), p->receiver(),
+ p->name());
+ }
+
+ BIND(&if_slow);
+ {
+ Comment("store_slow");
+ // The slow case calls into the runtime to complete the store without
+ // triggering an IC miss, which would otherwise cause a transition to the
+ // generic stub.
+ if (ic_mode == ICMode::kGlobalIC) {
+ TailCallRuntime(Runtime::kStoreGlobalIC_Slow, p->context(), p->value(),
+ p->slot(), p->vector(), p->receiver(), p->name());
+ } else {
+ TailCallRuntime(Runtime::kKeyedStoreIC_Slow, p->context(), p->value(),
+ p->receiver(), p->name());
+ }
+ }
}
BIND(&if_nonsmi_handler);
@@ -1111,7 +1183,7 @@ void AccessorAssembler::HandleStoreICTransitionMapHandlerCase(
}
void AccessorAssembler::CheckFieldType(TNode<DescriptorArray> descriptors,
- Node* name_index,
+ TNode<IntPtrT> name_index,
TNode<Word32T> representation,
Node* value, Label* bailout) {
Label r_smi(this), r_double(this), r_heapobject(this), all_fine(this);
@@ -1143,20 +1215,20 @@ void AccessorAssembler::CheckFieldType(TNode<DescriptorArray> descriptors,
BIND(&r_heapobject);
{
GotoIf(TaggedIsSmi(value), bailout);
- TNode<MaybeObject> field_type = LoadFieldTypeByKeyIndex(
- descriptors, UncheckedCast<IntPtrT>(name_index));
+ TNode<MaybeObject> field_type =
+ LoadFieldTypeByKeyIndex(descriptors, name_index);
const Address kNoneType = FieldType::None().ptr();
const Address kAnyType = FieldType::Any().ptr();
DCHECK_NE(static_cast<uint32_t>(kNoneType), kClearedWeakHeapObjectLower32);
DCHECK_NE(static_cast<uint32_t>(kAnyType), kClearedWeakHeapObjectLower32);
// FieldType::None can't hold any value.
- GotoIf(WordEqual(BitcastMaybeObjectToWord(field_type),
- IntPtrConstant(kNoneType)),
- bailout);
+ GotoIf(
+ TaggedEqual(field_type, BitcastWordToTagged(IntPtrConstant(kNoneType))),
+ bailout);
// FieldType::Any can hold any value.
- GotoIf(WordEqual(BitcastMaybeObjectToWord(field_type),
- IntPtrConstant(kAnyType)),
- &all_fine);
+ GotoIf(
+ TaggedEqual(field_type, BitcastWordToTagged(IntPtrConstant(kAnyType))),
+ &all_fine);
// Cleared weak references count as FieldType::None, which can't hold any
// value.
TNode<Map> field_type_map =
@@ -1168,15 +1240,16 @@ void AccessorAssembler::CheckFieldType(TNode<DescriptorArray> descriptors,
BIND(&all_fine);
}
-TNode<BoolT> AccessorAssembler::IsPropertyDetailsConst(Node* details) {
+TNode<BoolT> AccessorAssembler::IsPropertyDetailsConst(TNode<Uint32T> details) {
return Word32Equal(DecodeWord32<PropertyDetails::ConstnessField>(details),
Int32Constant(static_cast<int32_t>(VariableMode::kConst)));
}
void AccessorAssembler::OverwriteExistingFastDataProperty(
- Node* object, Node* object_map, Node* descriptors,
- Node* descriptor_name_index, Node* details, TNode<Object> value,
- Label* slow, bool do_transitioning_store) {
+ SloppyTNode<HeapObject> object, TNode<Map> object_map,
+ TNode<DescriptorArray> descriptors, TNode<IntPtrT> descriptor_name_index,
+ TNode<Uint32T> details, TNode<Object> value, Label* slow,
+ bool do_transitioning_store) {
Label done(this), if_field(this), if_descriptor(this);
CSA_ASSERT(this,
@@ -1192,8 +1265,8 @@ void AccessorAssembler::OverwriteExistingFastDataProperty(
TNode<Uint32T> representation =
DecodeWord32<PropertyDetails::RepresentationField>(details);
- CheckFieldType(CAST(descriptors), descriptor_name_index, representation,
- value, slow);
+ CheckFieldType(descriptors, descriptor_name_index, representation, value,
+ slow);
TNode<UintPtrT> field_index =
DecodeWordFromWord32<PropertyDetails::FieldIndexField>(details);
@@ -1224,7 +1297,7 @@ void AccessorAssembler::OverwriteExistingFastDataProperty(
Label if_mutable(this);
GotoIfNot(IsPropertyDetailsConst(details), &if_mutable);
TNode<Float64T> current_value =
- LoadObjectField<Float64T>(CAST(object), field_offset);
+ LoadObjectField<Float64T>(object, field_offset);
BranchIfSameNumberValue(current_value, double_value, &done, slow);
BIND(&if_mutable);
}
@@ -1257,8 +1330,7 @@ void AccessorAssembler::OverwriteExistingFastDataProperty(
} else {
Label if_mutable(this);
GotoIfNot(IsPropertyDetailsConst(details), &if_mutable);
- TNode<Object> current_value =
- LoadObjectField(CAST(object), field_offset);
+ TNode<Object> current_value = LoadObjectField(object, field_offset);
BranchIfSameValue(current_value, value, &done, slow,
SameValueMode::kNumbersOnly);
BIND(&if_mutable);
@@ -1302,7 +1374,8 @@ void AccessorAssembler::OverwriteExistingFastDataProperty(
} else {
Label tagged_rep(this), double_rep(this);
- TNode<PropertyArray> properties = CAST(LoadFastProperties(object));
+ TNode<PropertyArray> properties =
+ CAST(LoadFastProperties(CAST(object)));
Branch(
Word32Equal(representation, Int32Constant(Representation::kDouble)),
&double_rep, &tagged_rep);
@@ -1342,7 +1415,7 @@ void AccessorAssembler::OverwriteExistingFastDataProperty(
{
// Check that constant matches value.
TNode<Object> constant = LoadValueByKeyIndex(
- CAST(descriptors), UncheckedCast<IntPtrT>(descriptor_name_index));
+ descriptors, UncheckedCast<IntPtrT>(descriptor_name_index));
GotoIf(TaggedNotEqual(value, constant), slow);
if (do_transitioning_store) {
@@ -1370,10 +1443,11 @@ void AccessorAssembler::CheckPrototypeValidityCell(
}
void AccessorAssembler::HandleStoreAccessor(const StoreICParameters* p,
- Node* holder, Node* handler_word) {
+ SloppyTNode<HeapObject> holder,
+ TNode<Word32T> handler_word) {
Comment("accessor_store");
TNode<IntPtrT> descriptor =
- Signed(DecodeWord<StoreHandler::DescriptorBits>(handler_word));
+ Signed(DecodeWordFromWord32<StoreHandler::DescriptorBits>(handler_word));
TNode<HeapObject> accessor_pair =
CAST(LoadDescriptorValue(LoadMap(holder), descriptor));
CSA_ASSERT(this, IsAccessorPair(accessor_pair));
@@ -1393,7 +1467,7 @@ void AccessorAssembler::HandleStoreICProtoHandler(
OnCodeHandler on_code_handler;
if (support_elements == kSupportElements) {
// Code sub-handlers are expected only in KeyedStoreICs.
- on_code_handler = [=](Node* code_handler) {
+ on_code_handler = [=](TNode<Code> code_handler) {
// This is either element store or transitioning element store.
Label if_element_store(this), if_transitioning_element_store(this);
Branch(IsStoreHandler0Map(LoadMap(handler)), &if_element_store,
@@ -1421,10 +1495,10 @@ void AccessorAssembler::HandleStoreICProtoHandler(
};
}
- Node* smi_handler = HandleProtoHandler<StoreHandler>(
+ TNode<Object> smi_handler = HandleProtoHandler<StoreHandler>(
p, handler, on_code_handler,
// on_found_on_receiver
- [=](Node* properties, Node* name_index) {
+ [=](TNode<NameDictionary> properties, TNode<IntPtrT> name_index) {
TNode<Uint32T> details =
LoadDetailsByKeyIndex<NameDictionary>(properties, name_index);
// Check that the property is a writable data property (no accessor).
@@ -1434,49 +1508,80 @@ void AccessorAssembler::HandleStoreICProtoHandler(
STATIC_ASSERT(kData == 0);
GotoIf(IsSetWord32(details, kTypeAndReadOnlyMask), miss);
- StoreValueByKeyIndex<NameDictionary>(
- CAST(properties), UncheckedCast<IntPtrT>(name_index), p->value());
+ StoreValueByKeyIndex<NameDictionary>(properties, name_index,
+ p->value());
Return(p->value());
},
miss, ic_mode);
{
Label if_add_normal(this), if_store_global_proxy(this), if_api_setter(this),
- if_accessor(this), if_native_data_property(this);
+ if_accessor(this), if_native_data_property(this), if_slow(this),
+ if_interceptor(this);
CSA_ASSERT(this, TaggedIsSmi(smi_handler));
- TNode<IntPtrT> handler_word = SmiUntag(smi_handler);
+ TNode<Int32T> handler_word = SmiToInt32(CAST(smi_handler));
- TNode<UintPtrT> handler_kind =
- DecodeWord<StoreHandler::KindBits>(handler_word);
- GotoIf(WordEqual(handler_kind, IntPtrConstant(StoreHandler::kNormal)),
+ TNode<Uint32T> handler_kind =
+ DecodeWord32<StoreHandler::KindBits>(handler_word);
+ GotoIf(Word32Equal(handler_kind, Int32Constant(StoreHandler::kNormal)),
&if_add_normal);
TNode<MaybeObject> maybe_holder = LoadHandlerDataField(handler, 1);
CSA_ASSERT(this, IsWeakOrCleared(maybe_holder));
TNode<HeapObject> holder = GetHeapObjectAssumeWeak(maybe_holder, miss);
- GotoIf(WordEqual(handler_kind, IntPtrConstant(StoreHandler::kGlobalProxy)),
+ GotoIf(Word32Equal(handler_kind, Int32Constant(StoreHandler::kGlobalProxy)),
&if_store_global_proxy);
- GotoIf(WordEqual(handler_kind, IntPtrConstant(StoreHandler::kAccessor)),
+ GotoIf(Word32Equal(handler_kind, Int32Constant(StoreHandler::kAccessor)),
&if_accessor);
- GotoIf(WordEqual(handler_kind,
- IntPtrConstant(StoreHandler::kNativeDataProperty)),
+ GotoIf(Word32Equal(handler_kind,
+ Int32Constant(StoreHandler::kNativeDataProperty)),
&if_native_data_property);
- GotoIf(WordEqual(handler_kind, IntPtrConstant(StoreHandler::kApiSetter)),
+ GotoIf(Word32Equal(handler_kind, Int32Constant(StoreHandler::kApiSetter)),
&if_api_setter);
- GotoIf(WordEqual(handler_kind,
- IntPtrConstant(StoreHandler::kApiSetterHolderIsPrototype)),
- &if_api_setter);
+ GotoIf(Word32Equal(handler_kind, Int32Constant(StoreHandler::kSlow)),
+ &if_slow);
+
+ GotoIf(Word32Equal(handler_kind, Int32Constant(StoreHandler::kInterceptor)),
+ &if_interceptor);
+
+ GotoIf(
+ Word32Equal(handler_kind,
+ Int32Constant(StoreHandler::kApiSetterHolderIsPrototype)),
+ &if_api_setter);
CSA_ASSERT(this,
- WordEqual(handler_kind, IntPtrConstant(StoreHandler::kProxy)));
+ Word32Equal(handler_kind, Int32Constant(StoreHandler::kProxy)));
HandleStoreToProxy(p, holder, miss, support_elements);
+ BIND(&if_slow);
+ {
+ Comment("store_slow");
+ // The slow case calls into the runtime to complete the store without
+ // triggering an IC miss, which would otherwise cause a transition to the
+ // generic stub.
+ if (ic_mode == ICMode::kGlobalIC) {
+ TailCallRuntime(Runtime::kStoreGlobalIC_Slow, p->context(), p->value(),
+ p->slot(), p->vector(), p->receiver(), p->name());
+ } else {
+ TailCallRuntime(Runtime::kKeyedStoreIC_Slow, p->context(), p->value(),
+ p->receiver(), p->name());
+ }
+ }
+
+ BIND(&if_interceptor);
+ {
+ Comment("store_interceptor");
+ TailCallRuntime(Runtime::kStorePropertyWithInterceptor, p->context(),
+ p->value(), p->slot(), p->vector(), p->receiver(),
+ p->name());
+ }
+
BIND(&if_add_normal);
{
// This is a case of "transitioning store" to a dictionary mode object
@@ -1512,7 +1617,7 @@ void AccessorAssembler::HandleStoreICProtoHandler(
// Context is stored either in data2 or data3 field depending on whether
// the access check is enabled for this handler or not.
TNode<MaybeObject> maybe_context = Select<MaybeObject>(
- IsSetWord<LoadHandler::DoAccessCheckOnReceiverBits>(handler_word),
+ IsSetWord32<LoadHandler::DoAccessCheckOnReceiverBits>(handler_word),
[=] { return LoadHandlerDataField(handler, 3); },
[=] { return LoadHandlerDataField(handler, 2); });
@@ -1530,13 +1635,13 @@ void AccessorAssembler::HandleStoreICProtoHandler(
VARIABLE(api_holder, MachineRepresentation::kTagged, p->receiver());
Label store(this);
- GotoIf(WordEqual(handler_kind, IntPtrConstant(StoreHandler::kApiSetter)),
+ GotoIf(Word32Equal(handler_kind, Int32Constant(StoreHandler::kApiSetter)),
&store);
- CSA_ASSERT(
- this,
- WordEqual(handler_kind,
- IntPtrConstant(StoreHandler::kApiSetterHolderIsPrototype)));
+ CSA_ASSERT(this,
+ Word32Equal(
+ handler_kind,
+ Int32Constant(StoreHandler::kApiSetterHolderIsPrototype)));
api_holder.Bind(LoadMapPrototype(LoadMap(p->receiver())));
Goto(&store);
@@ -1559,8 +1664,8 @@ void AccessorAssembler::HandleStoreICProtoHandler(
void AccessorAssembler::HandleStoreToProxy(const StoreICParameters* p,
Node* proxy, Label* miss,
ElementSupport support_elements) {
- VARIABLE(var_index, MachineType::PointerRepresentation());
- VARIABLE(var_unique, MachineRepresentation::kTagged);
+ TVARIABLE(IntPtrT, var_index);
+ TVARIABLE(Name, var_unique);
Label if_index(this), if_unique_name(this),
to_name_failed(this, Label::kDeferred);
@@ -1591,128 +1696,200 @@ void AccessorAssembler::HandleStoreToProxy(const StoreICParameters* p,
}
}
-void AccessorAssembler::HandleStoreICSmiHandlerCase(Node* handler_word,
- Node* holder, Node* value,
- Label* miss) {
+void AccessorAssembler::HandleStoreICSmiHandlerCase(
+ SloppyTNode<Word32T> handler_word, SloppyTNode<JSObject> holder,
+ SloppyTNode<Object> value, Label* miss) {
Comment("field store");
#ifdef DEBUG
- TNode<UintPtrT> handler_kind =
- DecodeWord<StoreHandler::KindBits>(handler_word);
+ TNode<Uint32T> handler_kind =
+ DecodeWord32<StoreHandler::KindBits>(handler_word);
CSA_ASSERT(
this,
Word32Or(
- WordEqual(handler_kind, IntPtrConstant(StoreHandler::kField)),
- WordEqual(handler_kind, IntPtrConstant(StoreHandler::kConstField))));
+ Word32Equal(handler_kind, Int32Constant(StoreHandler::kField)),
+ Word32Equal(handler_kind, Int32Constant(StoreHandler::kConstField))));
#endif
- TNode<UintPtrT> field_representation =
- DecodeWord<StoreHandler::FieldRepresentationBits>(handler_word);
+ TNode<Uint32T> field_representation =
+ DecodeWord32<StoreHandler::RepresentationBits>(handler_word);
Label if_smi_field(this), if_double_field(this), if_heap_object_field(this),
if_tagged_field(this);
- GotoIf(WordEqual(field_representation, IntPtrConstant(StoreHandler::kTagged)),
- &if_tagged_field);
- GotoIf(WordEqual(field_representation,
- IntPtrConstant(StoreHandler::kHeapObject)),
- &if_heap_object_field);
- GotoIf(WordEqual(field_representation, IntPtrConstant(StoreHandler::kDouble)),
- &if_double_field);
- CSA_ASSERT(this, WordEqual(field_representation,
- IntPtrConstant(StoreHandler::kSmi)));
- Goto(&if_smi_field);
+ int32_t case_values[] = {Representation::kTagged, Representation::kHeapObject,
+ Representation::kSmi};
+ Label* case_labels[] = {&if_tagged_field, &if_heap_object_field,
+ &if_smi_field};
+
+ Switch(field_representation, &if_double_field, case_values, case_labels, 3);
BIND(&if_tagged_field);
{
Comment("store tagged field");
- HandleStoreFieldAndReturn(handler_word, holder, Representation::Tagged(),
- value, miss);
- }
-
- BIND(&if_double_field);
- {
- Comment("store double field");
- HandleStoreFieldAndReturn(handler_word, holder, Representation::Double(),
- value, miss);
+ HandleStoreFieldAndReturn(handler_word, holder, value, base::nullopt,
+ Representation::Tagged(), miss);
}
BIND(&if_heap_object_field);
{
+ Comment("heap object field checks");
+ CheckHeapObjectTypeMatchesDescriptor(handler_word, holder, value, miss);
+
Comment("store heap object field");
- HandleStoreFieldAndReturn(handler_word, holder,
- Representation::HeapObject(), value, miss);
+ HandleStoreFieldAndReturn(handler_word, holder, value, base::nullopt,
+ Representation::HeapObject(), miss);
}
BIND(&if_smi_field);
{
+ Comment("smi field checks");
+ GotoIfNot(TaggedIsSmi(value), miss);
+
Comment("store smi field");
- HandleStoreFieldAndReturn(handler_word, holder, Representation::Smi(),
- value, miss);
+ HandleStoreFieldAndReturn(handler_word, holder, value, base::nullopt,
+ Representation::Smi(), miss);
+ }
+
+ BIND(&if_double_field);
+ {
+ CSA_ASSERT(this, Word32Equal(field_representation,
+ Int32Constant(Representation::kDouble)));
+ Comment("double field checks");
+ TNode<Float64T> double_value = TryTaggedToFloat64(value, miss);
+ CheckDescriptorConsidersNumbersMutable(handler_word, holder, miss);
+
+ Comment("store double field");
+ HandleStoreFieldAndReturn(handler_word, holder, value, double_value,
+ Representation::Double(), miss);
}
}
-void AccessorAssembler::HandleStoreFieldAndReturn(Node* handler_word,
- Node* holder,
- Representation representation,
- Node* value, Label* miss) {
- Node* prepared_value =
- PrepareValueForStore(handler_word, holder, representation, value, miss);
+void AccessorAssembler::CheckHeapObjectTypeMatchesDescriptor(
+ TNode<Word32T> handler_word, TNode<JSObject> holder, TNode<Object> value,
+ Label* bailout) {
+ GotoIf(TaggedIsSmi(value), bailout);
- Label if_inobject(this), if_out_of_object(this);
- Branch(IsSetWord<StoreHandler::IsInobjectBits>(handler_word), &if_inobject,
- &if_out_of_object);
+ Label done(this);
+ // Skip field type check in favor of constant value check when storing
+ // to constant field.
+ GotoIf(Word32Equal(DecodeWord32<StoreHandler::KindBits>(handler_word),
+ Int32Constant(StoreHandler::kConstField)),
+ &done);
+ TNode<IntPtrT> descriptor =
+ Signed(DecodeWordFromWord32<StoreHandler::DescriptorBits>(handler_word));
+ TNode<MaybeObject> maybe_field_type =
+ LoadDescriptorValueOrFieldType(LoadMap(holder), descriptor);
- BIND(&if_inobject);
+ GotoIf(TaggedIsSmi(maybe_field_type), &done);
+ // Check that value type matches the field type.
{
- StoreNamedField(handler_word, holder, true, representation, prepared_value,
- miss);
- Return(value);
+ TNode<HeapObject> field_type =
+ GetHeapObjectAssumeWeak(maybe_field_type, bailout);
+ Branch(TaggedEqual(LoadMap(CAST(value)), field_type), &done, bailout);
}
+ BIND(&done);
+}
- BIND(&if_out_of_object);
- {
- StoreNamedField(handler_word, holder, false, representation, prepared_value,
- miss);
- Return(value);
- }
+void AccessorAssembler::CheckDescriptorConsidersNumbersMutable(
+ TNode<Word32T> handler_word, TNode<JSObject> holder, Label* bailout) {
+ // We have to check that the representation is Double. Checking the value
+ // (either in the field or being assigned) is not enough, as we could have
+ // transitioned to Tagged but still be holding a HeapNumber, which would no
+ // longer be allowed to be mutable.
+
+ // TODO(leszeks): We could skip the representation check in favor of a
+ // constant value check in HandleStoreFieldAndReturn here, but then
+ // HandleStoreFieldAndReturn would need an IsHeapNumber check in case both the
+ // representation changed and the value is no longer a HeapNumber.
+ TNode<IntPtrT> descriptor_entry =
+ Signed(DecodeWordFromWord32<StoreHandler::DescriptorBits>(handler_word));
+ TNode<DescriptorArray> descriptors = LoadMapDescriptors(LoadMap(holder));
+ TNode<Uint32T> details =
+ LoadDetailsByDescriptorEntry(descriptors, descriptor_entry);
+
+ GotoIfNot(IsEqualInWord32<PropertyDetails::RepresentationField>(
+ details, Representation::kDouble),
+ bailout);
}
-Node* AccessorAssembler::PrepareValueForStore(Node* handler_word, Node* holder,
- Representation representation,
- Node* value, Label* bailout) {
- if (representation.IsDouble()) {
- value = TryTaggedToFloat64(value, bailout);
+void AccessorAssembler::HandleStoreFieldAndReturn(
+ TNode<Word32T> handler_word, TNode<JSObject> holder, TNode<Object> value,
+ base::Optional<TNode<Float64T>> double_value, Representation representation,
+ Label* miss) {
+ Label done(this);
- } else if (representation.IsHeapObject()) {
- GotoIf(TaggedIsSmi(value), bailout);
+ bool store_value_as_double = representation.IsDouble();
- Label done(this);
- // Skip field type check in favor of constant value check when storing
- // to constant field.
- GotoIf(WordEqual(DecodeWord<StoreHandler::KindBits>(handler_word),
- IntPtrConstant(StoreHandler::kConstField)),
- &done);
- TNode<IntPtrT> descriptor =
- Signed(DecodeWord<StoreHandler::DescriptorBits>(handler_word));
- TNode<MaybeObject> maybe_field_type =
- LoadDescriptorValueOrFieldType(LoadMap(holder), descriptor);
+ TNode<BoolT> is_inobject =
+ IsSetWord32<StoreHandler::IsInobjectBits>(handler_word);
+ TNode<HeapObject> property_storage = Select<HeapObject>(
+ is_inobject, [&]() { return holder; },
+ [&]() { return LoadFastProperties(holder); });
- GotoIf(TaggedIsSmi(maybe_field_type), &done);
- // Check that value type matches the field type.
- {
- TNode<HeapObject> field_type =
- GetHeapObjectAssumeWeak(maybe_field_type, bailout);
- Branch(TaggedEqual(LoadMap(CAST(value)), field_type), &done, bailout);
+ TNode<UintPtrT> index =
+ DecodeWordFromWord32<StoreHandler::FieldIndexBits>(handler_word);
+ TNode<IntPtrT> offset = Signed(TimesTaggedSize(index));
+
+ // For Double fields, we want to mutate the current double-value
+ // field rather than changing it to point at a new HeapNumber.
+ if (store_value_as_double) {
+ TVARIABLE(HeapObject, actual_property_storage, property_storage);
+ TVARIABLE(IntPtrT, actual_offset, offset);
+
+ Label property_and_offset_ready(this);
+
+ // If we are unboxing double fields, and this is an in-object field, the
+ // property_storage and offset are already pointing to the double-valued
+ // field.
+ if (FLAG_unbox_double_fields) {
+ GotoIf(is_inobject, &property_and_offset_ready);
}
- BIND(&done);
- } else if (representation.IsSmi()) {
- GotoIfNot(TaggedIsSmi(value), bailout);
+ // Store the double value directly into the mutable HeapNumber.
+ TNode<Object> field = LoadObjectField(property_storage, offset);
+ CSA_ASSERT(this, IsHeapNumber(CAST(field)));
+ actual_property_storage = CAST(field);
+ actual_offset = IntPtrConstant(HeapNumber::kValueOffset);
+ Goto(&property_and_offset_ready);
+
+ BIND(&property_and_offset_ready);
+ property_storage = actual_property_storage.value();
+ offset = actual_offset.value();
+ }
+
+ // Do constant value check if necessary.
+ Label do_store(this);
+ GotoIfNot(Word32Equal(DecodeWord32<StoreHandler::KindBits>(handler_word),
+ Int32Constant(StoreHandler::kConstField)),
+ &do_store);
+ {
+ if (store_value_as_double) {
+ Label done(this);
+ TNode<Float64T> current_value =
+ LoadObjectField<Float64T>(property_storage, offset);
+ BranchIfSameNumberValue(current_value, *double_value, &done, miss);
+ BIND(&done);
+ Return(value);
+ } else {
+ TNode<Object> current_value = LoadObjectField(property_storage, offset);
+ GotoIfNot(TaggedEqual(current_value, value), miss);
+ Return(value);
+ }
+ }
+ BIND(&do_store);
+ // Do the store.
+ if (store_value_as_double) {
+ StoreObjectFieldNoWriteBarrier(property_storage, offset, *double_value,
+ MachineRepresentation::kFloat64);
+ } else if (representation.IsSmi()) {
+ TNode<Smi> value_smi = CAST(value);
+ StoreObjectFieldNoWriteBarrier(property_storage, offset, value_smi);
} else {
- DCHECK(representation.IsTagged());
+ StoreObjectField(property_storage, offset, value);
}
- return value;
+
+ Return(value);
}
Node* AccessorAssembler::ExtendPropertiesBackingStore(Node* object,
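A condensed sketch of the boxed-double store path that HandleStoreFieldAndReturn now implements (ignoring the FLAG_unbox_double_fields fast path shown in the hunk; names and CSA types simplified from the code above):

    if (store_value_as_double) {
      // Redirect the write from the field slot to the HeapNumber payload so
      // the existing box is mutated rather than replaced.
      TNode<Object> field = LoadObjectField(property_storage, offset);
      property_storage = CAST(field);                     // the HeapNumber
      offset = IntPtrConstant(HeapNumber::kValueOffset);  // its float64 slot
      StoreObjectFieldNoWriteBarrier(property_storage, offset, *double_value,
                                     MachineRepresentation::kFloat64);
    }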
@@ -1737,7 +1914,7 @@ Node* AccessorAssembler::ExtendPropertiesBackingStore(Node* object,
BIND(&if_smi_hash);
{
TNode<Int32T> hash = SmiToInt32(CAST(properties));
- TNode<Word32T> encoded_hash =
+ TNode<Int32T> encoded_hash =
Word32Shl(hash, Int32Constant(PropertyArray::HashField::kShift));
var_encoded_hash.Bind(encoded_hash);
var_length.Bind(IntPtrOrSmiConstant(0, mode));
@@ -1813,59 +1990,6 @@ Node* AccessorAssembler::ExtendPropertiesBackingStore(Node* object,
}
}
-void AccessorAssembler::StoreNamedField(Node* handler_word, Node* object,
- bool is_inobject,
- Representation representation,
- Node* value, Label* bailout) {
- bool store_value_as_double = representation.IsDouble();
- Node* property_storage = object;
- if (!is_inobject) {
- property_storage = LoadFastProperties(object);
- }
-
- TNode<UintPtrT> index =
- DecodeWord<StoreHandler::FieldIndexBits>(handler_word);
- TNode<IntPtrT> offset = Signed(TimesTaggedSize(index));
- if (representation.IsDouble()) {
- if (!FLAG_unbox_double_fields || !is_inobject) {
- // Load the mutable heap number.
- property_storage = LoadObjectField(property_storage, offset);
- // Store the double value into it.
- offset = IntPtrConstant(HeapNumber::kValueOffset);
- }
- }
-
- // Do constant value check if necessary.
- Label const_checked(this);
- GotoIfNot(WordEqual(DecodeWord<StoreHandler::KindBits>(handler_word),
- IntPtrConstant(StoreHandler::kConstField)),
- &const_checked);
- {
- if (store_value_as_double) {
- TNode<Float64T> current_value =
- LoadObjectField<Float64T>(CAST(property_storage), offset);
- BranchIfSameNumberValue(current_value, UncheckedCast<Float64T>(value),
- &const_checked, bailout);
- } else {
- TNode<Object> current_value = LoadObjectField(property_storage, offset);
- Branch(TaggedEqual(current_value, UncheckedCast<Object>(value)),
- &const_checked, bailout);
- }
- }
-
- BIND(&const_checked);
- // Do the store.
- if (store_value_as_double) {
- StoreObjectFieldNoWriteBarrier(property_storage, offset, value,
- MachineRepresentation::kFloat64);
- } else if (representation.IsSmi()) {
- TNode<Smi> value_smi = CAST(value);
- StoreObjectFieldNoWriteBarrier(property_storage, offset, value_smi);
- } else {
- StoreObjectField(property_storage, offset, value);
- }
-}
-
void AccessorAssembler::EmitFastElementsBoundsCheck(Node* object,
Node* elements,
Node* intptr_index,
@@ -2012,8 +2136,7 @@ void AccessorAssembler::EmitElementLoad(
if (access_mode == LoadAccessMode::kHas) {
exit_point->Return(TrueConstant());
} else {
- TNode<RawPtrT> backing_store =
- LoadJSTypedArrayBackingStore(CAST(object));
+ TNode<RawPtrT> data_ptr = LoadJSTypedArrayDataPtr(CAST(object));
Label uint8_elements(this), int8_elements(this), uint16_elements(this),
int16_elements(this), uint32_elements(this), int32_elements(this),
@@ -2039,50 +2162,48 @@ void AccessorAssembler::EmitElementLoad(
BIND(&uint8_elements);
{
Comment("UINT8_ELEMENTS"); // Handles UINT8_CLAMPED_ELEMENTS too.
- Node* element =
- Load(MachineType::Uint8(), backing_store, intptr_index);
+ Node* element = Load(MachineType::Uint8(), data_ptr, intptr_index);
exit_point->Return(SmiFromInt32(element));
}
BIND(&int8_elements);
{
Comment("INT8_ELEMENTS");
- Node* element =
- Load(MachineType::Int8(), backing_store, intptr_index);
+ Node* element = Load(MachineType::Int8(), data_ptr, intptr_index);
exit_point->Return(SmiFromInt32(element));
}
BIND(&uint16_elements);
{
Comment("UINT16_ELEMENTS");
TNode<IntPtrT> index = WordShl(intptr_index, IntPtrConstant(1));
- Node* element = Load(MachineType::Uint16(), backing_store, index);
+ Node* element = Load(MachineType::Uint16(), data_ptr, index);
exit_point->Return(SmiFromInt32(element));
}
BIND(&int16_elements);
{
Comment("INT16_ELEMENTS");
TNode<IntPtrT> index = WordShl(intptr_index, IntPtrConstant(1));
- Node* element = Load(MachineType::Int16(), backing_store, index);
+ Node* element = Load(MachineType::Int16(), data_ptr, index);
exit_point->Return(SmiFromInt32(element));
}
BIND(&uint32_elements);
{
Comment("UINT32_ELEMENTS");
TNode<IntPtrT> index = WordShl(intptr_index, IntPtrConstant(2));
- Node* element = Load(MachineType::Uint32(), backing_store, index);
+ Node* element = Load(MachineType::Uint32(), data_ptr, index);
exit_point->Return(ChangeUint32ToTagged(element));
}
BIND(&int32_elements);
{
Comment("INT32_ELEMENTS");
TNode<IntPtrT> index = WordShl(intptr_index, IntPtrConstant(2));
- Node* element = Load(MachineType::Int32(), backing_store, index);
+ Node* element = Load(MachineType::Int32(), data_ptr, index);
exit_point->Return(ChangeInt32ToTagged(element));
}
BIND(&float32_elements);
{
Comment("FLOAT32_ELEMENTS");
TNode<IntPtrT> index = WordShl(intptr_index, IntPtrConstant(2));
- Node* element = Load(MachineType::Float32(), backing_store, index);
+ Node* element = Load(MachineType::Float32(), data_ptr, index);
var_double_value->Bind(ChangeFloat32ToFloat64(element));
Goto(rebox_double);
}
@@ -2090,7 +2211,7 @@ void AccessorAssembler::EmitElementLoad(
{
Comment("FLOAT64_ELEMENTS");
TNode<IntPtrT> index = WordShl(intptr_index, IntPtrConstant(3));
- Node* element = Load(MachineType::Float64(), backing_store, index);
+ Node* element = Load(MachineType::Float64(), data_ptr, index);
var_double_value->Bind(element);
Goto(rebox_double);
}
@@ -2098,15 +2219,13 @@ void AccessorAssembler::EmitElementLoad(
{
Comment("BIGINT64_ELEMENTS");
exit_point->Return(LoadFixedTypedArrayElementAsTagged(
- backing_store, intptr_index, BIGINT64_ELEMENTS,
- INTPTR_PARAMETERS));
+ data_ptr, intptr_index, BIGINT64_ELEMENTS, INTPTR_PARAMETERS));
}
BIND(&biguint64_elements);
{
Comment("BIGUINT64_ELEMENTS");
exit_point->Return(LoadFixedTypedArrayElementAsTagged(
- backing_store, intptr_index, BIGUINT64_ELEMENTS,
- INTPTR_PARAMETERS));
+ data_ptr, intptr_index, BIGUINT64_ELEMENTS, INTPTR_PARAMETERS));
}
}
}
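The index math in the renamed data_ptr loads above, summarized (the element size determines the shift applied to intptr_index):

    // UINT8/INT8:            byte offset = index          (1-byte elements)
    // UINT16/INT16:          WordShl(index, 1)            (2-byte elements)
    // UINT32/INT32/FLOAT32:  WordShl(index, 2)            (4-byte elements)
    // FLOAT64:               WordShl(index, 3)            (8-byte elements)
    // BIGINT64/BIGUINT64:    handled by LoadFixedTypedArrayElementAsTagged.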
@@ -2152,7 +2271,8 @@ void AccessorAssembler::InvalidateValidityCellIfPrototype(Node* map,
BIND(&cont);
}
-void AccessorAssembler::GenericElementLoad(Node* receiver, Node* receiver_map,
+void AccessorAssembler::GenericElementLoad(Node* receiver,
+ TNode<Map> receiver_map,
SloppyTNode<Int32T> instance_type,
Node* index, Label* slow) {
Comment("integer index");
@@ -2213,11 +2333,9 @@ void AccessorAssembler::GenericElementLoad(Node* receiver, Node* receiver_map,
}
}
-void AccessorAssembler::GenericPropertyLoad(Node* receiver, Node* receiver_map,
- SloppyTNode<Int32T> instance_type,
- const LoadICParameters* p,
- Label* slow,
- UseStubCache use_stub_cache) {
+void AccessorAssembler::GenericPropertyLoad(
+ Node* receiver, TNode<Map> receiver_map, SloppyTNode<Int32T> instance_type,
+ const LoadICParameters* p, Label* slow, UseStubCache use_stub_cache) {
ExitPoint direct_exit(this);
Comment("key is unique name");
@@ -2317,13 +2435,13 @@ void AccessorAssembler::GenericPropertyLoad(Node* receiver, Node* receiver_map,
BIND(&lookup_prototype_chain);
{
- VARIABLE(var_holder_map, MachineRepresentation::kTagged);
+ TVARIABLE(Map, var_holder_map);
VARIABLE(var_holder_instance_type, MachineRepresentation::kWord32);
Label return_undefined(this), is_private_symbol(this);
Variable* merged_variables[] = {&var_holder_map, &var_holder_instance_type};
Label loop(this, arraysize(merged_variables), merged_variables);
- var_holder_map.Bind(receiver_map);
+ var_holder_map = receiver_map;
var_holder_instance_type.Bind(instance_type);
GotoIf(IsPrivateSymbol(name), &is_private_symbol);
@@ -2338,7 +2456,7 @@ void AccessorAssembler::GenericPropertyLoad(Node* receiver, Node* receiver_map,
GotoIf(TaggedEqual(proto, NullConstant()), &return_undefined);
TNode<Map> proto_map = LoadMap(proto);
TNode<Uint16T> proto_instance_type = LoadMapInstanceType(proto_map);
- var_holder_map.Bind(proto_map);
+ var_holder_map = proto_map;
var_holder_instance_type.Bind(proto_instance_type);
Label next_proto(this), return_value(this, &var_value), goto_slow(this);
TryGetOwnProperty(p->context(), receiver, proto, proto_map,
@@ -2394,8 +2512,6 @@ enum AccessorAssembler::StubCacheTable : int {
};
Node* AccessorAssembler::StubCachePrimaryOffset(Node* name, Node* map) {
- // See v8::internal::StubCache::PrimaryOffset().
- STATIC_ASSERT(StubCache::kCacheIndexShift == Name::kHashShift);
// Compute the hash of the name (use entire hash field).
TNode<Uint32T> hash_field = LoadNameHashField(name);
CSA_ASSERT(this,
@@ -2422,7 +2538,7 @@ Node* AccessorAssembler::StubCacheSecondaryOffset(Node* name, Node* seed) {
// Use the seed from the primary cache in the secondary cache.
TNode<Int32T> name32 = TruncateIntPtrToInt32(BitcastTaggedToWord(name));
- TNode<Word32T> hash = Int32Sub(TruncateIntPtrToInt32(seed), name32);
+ TNode<Int32T> hash = Int32Sub(TruncateIntPtrToInt32(seed), name32);
hash = Int32Add(hash, Int32Constant(StubCache::kSecondaryMagic));
int32_t mask = (StubCache::kSecondaryTableSize - 1)
<< StubCache::kCacheIndexShift;
@@ -2436,7 +2552,8 @@ void AccessorAssembler::TryProbeStubCacheTable(
StubCache::Table table = static_cast<StubCache::Table>(table_id);
// The {table_offset} holds the entry offset times four (due to masking
// and shifting optimizations).
- const int kMultiplier = sizeof(StubCache::Entry) >> Name::kHashShift;
+ const int kMultiplier =
+ sizeof(StubCache::Entry) >> StubCache::kCacheIndexShift;
entry_offset = IntPtrMul(entry_offset, IntPtrConstant(kMultiplier));
TNode<ExternalReference> key_base = ExternalConstant(
@@ -2527,7 +2644,7 @@ void AccessorAssembler::LoadIC_BytecodeHandler(const LazyLoadICParameters* p,
Label try_polymorphic(this), if_handler(this, &var_handler);
TNode<MaybeObject> feedback =
- TryMonomorphicCase(p->slot(), p->vector(), recv_map, &if_handler,
+ TryMonomorphicCase(p->slot(), CAST(p->vector()), recv_map, &if_handler,
&var_handler, &try_polymorphic);
BIND(&if_handler);
@@ -2589,8 +2706,8 @@ void AccessorAssembler::LoadIC(const LoadICParameters* p) {
// Check monomorphic case.
TNode<MaybeObject> feedback =
- TryMonomorphicCase(p->slot(), p->vector(), receiver_map, &if_handler,
- &var_handler, &try_polymorphic);
+ TryMonomorphicCase(p->slot(), CAST(p->vector()), receiver_map,
+ &if_handler, &var_handler, &try_polymorphic);
BIND(&if_handler);
{
LazyLoadICParameters lazy_p(p);
@@ -2673,21 +2790,25 @@ void AccessorAssembler::LoadIC_NoFeedback(const LoadICParameters* p) {
}
}
-void AccessorAssembler::LoadGlobalIC(Node* vector, Node* slot,
+void AccessorAssembler::LoadGlobalIC(TNode<HeapObject> maybe_feedback_vector,
+ const LazyNode<Smi>& lazy_smi_slot,
+ const LazyNode<UintPtrT>& lazy_slot,
const LazyNode<Context>& lazy_context,
const LazyNode<Name>& lazy_name,
TypeofMode typeof_mode,
- ExitPoint* exit_point,
- ParameterMode slot_mode) {
+ ExitPoint* exit_point) {
Label try_handler(this, Label::kDeferred), miss(this, Label::kDeferred);
- GotoIf(IsUndefined(vector), &miss);
-
- LoadGlobalIC_TryPropertyCellCase(CAST(vector), slot, lazy_context, exit_point,
- &try_handler, &miss, slot_mode);
+ GotoIf(IsUndefined(maybe_feedback_vector), &miss);
+ {
+ TNode<FeedbackVector> vector = CAST(maybe_feedback_vector);
+ TNode<UintPtrT> slot = lazy_slot();
+ LoadGlobalIC_TryPropertyCellCase(vector, slot, lazy_context, exit_point,
+ &try_handler, &miss);
- BIND(&try_handler);
- LoadGlobalIC_TryHandlerCase(CAST(vector), slot, lazy_context, lazy_name,
- typeof_mode, exit_point, &miss, slot_mode);
+ BIND(&try_handler);
+ LoadGlobalIC_TryHandlerCase(vector, slot, lazy_smi_slot, lazy_context,
+ lazy_name, typeof_mode, exit_point, &miss);
+ }
BIND(&miss);
{
@@ -2695,20 +2816,19 @@ void AccessorAssembler::LoadGlobalIC(Node* vector, Node* slot,
TNode<Context> context = lazy_context();
TNode<Name> name = lazy_name();
exit_point->ReturnCallRuntime(Runtime::kLoadGlobalIC_Miss, context, name,
- ParameterToTagged(slot, slot_mode), vector,
+ lazy_smi_slot(), maybe_feedback_vector,
SmiConstant(typeof_mode));
}
}
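Review note: LoadGlobalIC now takes the feedback vector as a typed TNode<HeapObject> plus two lazy slot thunks, so the Smi-tagged slot is only materialized on the miss path. A CSA LazyNode<T> is, to my understanding, just a std::function<TNode<T>()>; a standalone sketch of the deferral pattern, with plain ints standing in for nodes:

#include <cstdio>
#include <functional>

// Stand-in for LazyNode<T>: a thunk that produces the value on demand.
template <typename T>
using Lazy = std::function<T()>;

int SmiTag(int raw) {
  std::printf("materializing Smi for %d\n", raw);  // happens at most once
  return raw << 1;
}

void LoadGlobal(int raw_slot, bool fast_path_hits) {
  Lazy<int> lazy_smi_slot = [=] { return SmiTag(raw_slot); };
  if (fast_path_hits) return;   // fast path never tags the slot
  int smi = lazy_smi_slot();    // miss path pays for the conversion
  std::printf("runtime miss with slot %d\n", smi);
}

int main() {
  LoadGlobal(7, /*fast_path_hits=*/true);   // no Smi materialized
  LoadGlobal(7, /*fast_path_hits=*/false);  // Smi materialized exactly once
}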
void AccessorAssembler::LoadGlobalIC_TryPropertyCellCase(
- TNode<FeedbackVector> vector, Node* slot,
+ TNode<FeedbackVector> vector, TNode<UintPtrT> slot,
const LazyNode<Context>& lazy_context, ExitPoint* exit_point,
- Label* try_handler, Label* miss, ParameterMode slot_mode) {
+ Label* try_handler, Label* miss) {
Comment("LoadGlobalIC_TryPropertyCellCase");
Label if_lexical_var(this), if_property_cell(this);
- TNode<MaybeObject> maybe_weak_ref =
- LoadFeedbackVectorSlot(vector, slot, 0, slot_mode);
+ TNode<MaybeObject> maybe_weak_ref = LoadFeedbackVectorSlot(vector, slot);
Branch(TaggedIsSmi(maybe_weak_ref), &if_lexical_var, &if_property_cell);
BIND(&if_property_cell);
@@ -2739,16 +2859,16 @@ void AccessorAssembler::LoadGlobalIC_TryPropertyCellCase(
}
void AccessorAssembler::LoadGlobalIC_TryHandlerCase(
- TNode<FeedbackVector> vector, Node* slot,
- const LazyNode<Context>& lazy_context, const LazyNode<Name>& lazy_name,
- TypeofMode typeof_mode, ExitPoint* exit_point, Label* miss,
- ParameterMode slot_mode) {
+ TNode<FeedbackVector> vector, TNode<UintPtrT> slot,
+ const LazyNode<Smi>& lazy_smi_slot, const LazyNode<Context>& lazy_context,
+ const LazyNode<Name>& lazy_name, TypeofMode typeof_mode,
+ ExitPoint* exit_point, Label* miss) {
Comment("LoadGlobalIC_TryHandlerCase");
Label call_handler(this), non_smi(this);
TNode<MaybeObject> feedback_element =
- LoadFeedbackVectorSlot(vector, slot, kTaggedSize, slot_mode);
+ LoadFeedbackVectorSlot(vector, slot, kTaggedSize);
TNode<Object> handler = CAST(feedback_element);
GotoIf(TaggedEqual(handler, UninitializedSymbolConstant()), miss);
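Review note: both helpers now take the slot as TNode<UintPtrT> and drop the ParameterMode plumbing. They rely on the two consecutive feedback slots a LoadGlobalIC reserves; a hedged model of that layout as this diff uses it (kTaggedSize value assumed):

#include <cstdint>

constexpr int kTaggedSize = 8;  // assumed 64-bit tagged size

// Hedged model: slot n holds either a Smi-encoded lexical-variable location
// or a weak PropertyCell reference; slot n + 1 holds the handler (or the
// uninitialized sentinel) and is read at byte offset kTaggedSize — which is
// why the property-cell probe passes offset 0 and the handler probe passes
// kTaggedSize.
uintptr_t FeedbackSlotAddress(uintptr_t vector_base, uintptr_t slot,
                              int additional_offset) {
  return vector_base + slot * kTaggedSize + additional_offset;
}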
@@ -2757,14 +2877,14 @@ void AccessorAssembler::LoadGlobalIC_TryHandlerCase(
: OnNonExistent::kReturnUndefined;
TNode<Context> context = lazy_context();
- TNode<Context> native_context = LoadNativeContext(context);
+ TNode<NativeContext> native_context = LoadNativeContext(context);
TNode<JSGlobalProxy> receiver =
CAST(LoadContextElement(native_context, Context::GLOBAL_PROXY_INDEX));
TNode<Object> holder =
LoadContextElement(native_context, Context::EXTENSION_INDEX);
LazyLoadICParameters p([=] { return context; }, receiver, lazy_name,
- ParameterToTagged(slot, slot_mode), vector, holder);
+ lazy_smi_slot, vector, holder);
HandleLoadICHandlerCase(&p, handler, miss, exit_point, ICMode::kGlobalIC,
on_nonexistent);
@@ -2788,8 +2908,8 @@ void AccessorAssembler::KeyedLoadIC(const LoadICParameters* p,
// Check monomorphic case.
TNode<MaybeObject> feedback =
- TryMonomorphicCase(p->slot(), p->vector(), receiver_map, &if_handler,
- &var_handler, &try_polymorphic);
+ TryMonomorphicCase(p->slot(), CAST(p->vector()), receiver_map,
+ &if_handler, &var_handler, &try_polymorphic);
BIND(&if_handler);
{
LazyLoadICParameters lazy_p(p);
@@ -2840,13 +2960,13 @@ void AccessorAssembler::KeyedLoadIC(const LoadICParameters* p,
// We might have a name in feedback, and a weak fixed array in the next
// slot.
Comment("KeyedLoadIC_try_polymorphic_name");
- TVARIABLE(Object, var_name, p->name());
+ TVARIABLE(Name, var_name);
TVARIABLE(IntPtrT, var_index);
- Label if_polymorphic_name(this, &var_name), if_internalized(this),
- if_notinternalized(this, Label::kDeferred);
+ Label if_polymorphic_name(this), feedback_matches(this),
+ if_internalized(this), if_notinternalized(this, Label::kDeferred);
// Fast-case: The recorded {feedback} matches the {name}.
- GotoIf(TaggedEqual(strong_feedback, p->name()), &if_polymorphic_name);
+ GotoIf(TaggedEqual(strong_feedback, p->name()), &feedback_matches);
// Try to internalize the {name} if it isn't already.
TryToName(p->name(), &miss, &var_index, &if_internalized, &var_name, &miss,
@@ -2861,16 +2981,15 @@ void AccessorAssembler::KeyedLoadIC(const LoadICParameters* p,
BIND(&if_notinternalized);
{
- // Try to internalize the {name}.
- TNode<ExternalReference> function = ExternalConstant(
- ExternalReference::try_internalize_string_function());
- TNode<ExternalReference> const isolate_ptr =
- ExternalConstant(ExternalReference::isolate_address(isolate()));
- var_name = CAST(
- CallCFunction(function, MachineType::AnyTagged(),
- std::make_pair(MachineType::Pointer(), isolate_ptr),
- std::make_pair(MachineType::AnyTagged(), p->name())));
- Goto(&if_internalized);
+ TVARIABLE(IntPtrT, var_index);
+ TryInternalizeString(CAST(p->name()), &miss, &var_index, &if_internalized,
+ &var_name, &miss, &miss);
+ }
+
+ BIND(&feedback_matches);
+ {
+ var_name = CAST(p->name());
+ Goto(&if_polymorphic_name);
}
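Side note: the hand-rolled CallCFunction into try_internalize_string_function is replaced with the existing TryInternalizeString helper, and the feedback-match fast case gets its own label so var_name is always a Name before the polymorphic path. A toy model of the internalization lookup, with std::unordered_set standing in for the string table (V8's helper can also yield array indices, omitted here):

#include <string>
#include <unordered_set>

// Toy string table: internalization means finding the one canonical copy.
// Only an internalized name can match recorded feedback by identity.
const std::string* TryInternalize(
    const std::unordered_set<std::string>& table, const std::string& key) {
  auto it = table.find(key);
  return it == table.end() ? nullptr : &*it;  // lookup only, no insertion
}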
BIND(&if_polymorphic_name);
@@ -2896,71 +3015,74 @@ void AccessorAssembler::KeyedLoadIC(const LoadICParameters* p,
}
void AccessorAssembler::KeyedLoadICGeneric(const LoadICParameters* p) {
- TVARIABLE(IntPtrT, var_index);
- TVARIABLE(Object, var_unique, p->name());
- Label if_index(this), if_unique_name(this), if_notunique(this),
- if_other(this, Label::kDeferred), if_runtime(this, Label::kDeferred);
+ TVARIABLE(Object, var_name, p->name());
+ Label if_runtime(this, Label::kDeferred);
Node* receiver = p->receiver();
GotoIf(TaggedIsSmi(receiver), &if_runtime);
GotoIf(IsNullOrUndefined(receiver), &if_runtime);
- TryToName(p->name(), &if_index, &var_index, &if_unique_name, &var_unique,
- &if_other, &if_notunique);
-
- BIND(&if_other);
{
- TNode<Name> name =
- CAST(CallBuiltin(Builtins::kToName, p->context(), p->name()));
- var_unique = name;
- TryToName(name, &if_index, &var_index, &if_unique_name, &var_unique,
- &if_runtime, &if_notunique);
- }
+ TVARIABLE(IntPtrT, var_index);
+ TVARIABLE(Name, var_unique);
+ Label if_index(this), if_unique_name(this, &var_name), if_notunique(this),
+ if_other(this, Label::kDeferred);
- BIND(&if_index);
- {
- TNode<Map> receiver_map = LoadMap(receiver);
- TNode<Uint16T> instance_type = LoadMapInstanceType(receiver_map);
- GenericElementLoad(receiver, receiver_map, instance_type, var_index.value(),
- &if_runtime);
- }
+ TryToName(var_name.value(), &if_index, &var_index, &if_unique_name,
+ &var_unique, &if_other, &if_notunique);
- BIND(&if_unique_name);
- {
- LoadICParameters pp(p, var_unique.value());
- TNode<Map> receiver_map = LoadMap(receiver);
- TNode<Uint16T> instance_type = LoadMapInstanceType(receiver_map);
- GenericPropertyLoad(receiver, receiver_map, instance_type, &pp,
- &if_runtime);
- }
+ BIND(&if_unique_name);
+ {
+ LoadICParameters pp(p, var_unique.value());
+ TNode<Map> receiver_map = LoadMap(receiver);
+ TNode<Uint16T> instance_type = LoadMapInstanceType(receiver_map);
+ GenericPropertyLoad(receiver, receiver_map, instance_type, &pp,
+ &if_runtime);
+ }
- BIND(&if_notunique);
- {
- if (FLAG_internalize_on_the_fly) {
- // Ideally we could return undefined directly here if the name is not
- // found in the string table, i.e. it was never internalized, but that
- // invariant doesn't hold with named property interceptors (at this
- // point), so we take the {if_runtime} path instead.
- Label if_in_string_table(this);
- TryInternalizeString(var_unique.value(), &if_index, &var_index,
- &if_in_string_table, &var_unique, &if_runtime,
- &if_runtime);
+ BIND(&if_other);
+ {
+ var_name = CallBuiltin(Builtins::kToName, p->context(), var_name.value());
+ TryToName(var_name.value(), &if_index, &var_index, &if_unique_name,
+ &var_unique, &if_runtime, &if_notunique);
+ }
- BIND(&if_in_string_table);
- {
- // TODO(bmeurer): We currently use a version of GenericPropertyLoad
- // here, where we don't try to probe the megamorphic stub cache after
- // successfully internalizing the incoming string. Past experiments
- // with this have shown that it causes too much traffic on the stub
- // cache. We may want to re-evaluate that in the future.
- LoadICParameters pp(p, var_unique.value());
- TNode<Map> receiver_map = LoadMap(receiver);
- TNode<Uint16T> instance_type = LoadMapInstanceType(receiver_map);
- GenericPropertyLoad(receiver, receiver_map, instance_type, &pp,
- &if_runtime, kDontUseStubCache);
+ BIND(&if_notunique);
+ {
+ if (FLAG_internalize_on_the_fly) {
+ // Ideally we could return undefined directly here if the name is not
+ // found in the string table, i.e. it was never internalized, but that
+ // invariant doesn't hold with named property interceptors (at this
+ // point), so we take the {if_runtime} path instead.
+ Label if_in_string_table(this);
+ TryInternalizeString(CAST(var_name.value()), &if_index, &var_index,
+ &if_in_string_table, &var_unique, &if_runtime,
+ &if_runtime);
+
+ BIND(&if_in_string_table);
+ {
+ // TODO(bmeurer): We currently use a version of GenericPropertyLoad
+ // here, where we don't try to probe the megamorphic stub cache
+ // after successfully internalizing the incoming string. Past
+ // experiments with this have shown that it causes too much traffic
+ // on the stub cache. We may want to re-evaluate that in the future.
+ LoadICParameters pp(p, var_unique.value());
+ TNode<Map> receiver_map = LoadMap(receiver);
+ TNode<Uint16T> instance_type = LoadMapInstanceType(receiver_map);
+ GenericPropertyLoad(receiver, receiver_map, instance_type, &pp,
+ &if_runtime, kDontUseStubCache);
+ }
+ } else {
+ Goto(&if_runtime);
}
- } else {
- Goto(&if_runtime);
+ }
+
+ BIND(&if_index);
+ {
+ TNode<Map> receiver_map = LoadMap(receiver);
+ TNode<Uint16T> instance_type = LoadMapInstanceType(receiver_map);
+ GenericElementLoad(receiver, receiver_map, instance_type,
+ var_index.value(), &if_runtime);
}
}
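Review note: KeyedLoadICGeneric now keeps a single var_name, scopes the classification labels inside a block, and binds if_index last; behavior is unchanged, but every path funnels through one TryToName classification with at most one ToName normalization. A stand-in model of that routing (simple C++ types, not V8's):

#include <cstdint>
#include <string>
#include <variant>

enum class Route { kElement, kProperty, kRuntime };
using Key = std::variant<uint32_t, std::string, double>;  // double ~ "other"

Route Classify(Key key, bool normalized = false) {
  if (std::holds_alternative<uint32_t>(key)) return Route::kElement;
  if (std::holds_alternative<std::string>(key)) return Route::kProperty;
  if (normalized) return Route::kRuntime;  // ToName already ran once
  // Normalize at most once (standing in for Builtins::kToName), re-classify.
  return Classify(std::to_string(std::get<double>(key)), true);
}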
@@ -2970,7 +3092,7 @@ void AccessorAssembler::KeyedLoadICGeneric(const LoadICParameters* p) {
IncrementCounter(isolate()->counters()->ic_keyed_load_generic_slow(), 1);
// TODO(jkummerow): Should we use the GetProperty TF stub instead?
TailCallRuntime(Runtime::kGetProperty, p->context(), p->receiver(),
- var_unique.value());
+ var_name.value());
}
}
@@ -2982,22 +3104,20 @@ void AccessorAssembler::KeyedLoadICPolymorphicName(const LoadICParameters* p,
Node* receiver = p->receiver();
TNode<Map> receiver_map = LoadReceiverMap(receiver);
TNode<Name> name = CAST(p->name());
- Node* vector = p->vector();
- Node* slot = p->slot();
+ TNode<FeedbackVector> vector = CAST(p->vector());
+ TNode<Smi> slot = p->slot();
TNode<Context> context = p->context();
// When we get here, we know that the {name} matches the recorded
// feedback name in the {vector} and can safely be used for the
// LoadIC handler logic below.
CSA_ASSERT(this, Word32BinaryNot(IsDeprecatedMap(receiver_map)));
- CSA_ASSERT(this,
- TaggedEqual(
- name, LoadFeedbackVectorSlot(vector, slot, 0, SMI_PARAMETERS)),
+ CSA_ASSERT(this, TaggedEqual(name, LoadFeedbackVectorSlot(vector, slot)),
name, vector);
// Check if we have a matching handler for the {receiver_map}.
TNode<MaybeObject> feedback_element =
- LoadFeedbackVectorSlot(vector, slot, kTaggedSize, SMI_PARAMETERS);
+ LoadFeedbackVectorSlot(vector, slot, kTaggedSize);
TNode<WeakFixedArray> array = CAST(feedback_element);
HandlePolymorphicCase(receiver_map, array, &if_handler, &var_handler, &miss);
@@ -3038,8 +3158,8 @@ void AccessorAssembler::StoreIC(const StoreICParameters* p) {
// Check monomorphic case.
TNode<MaybeObject> feedback =
- TryMonomorphicCase(p->slot(), p->vector(), receiver_map, &if_handler,
- &var_handler, &try_polymorphic);
+ TryMonomorphicCase(p->slot(), CAST(p->vector()), receiver_map,
+ &if_handler, &var_handler, &try_polymorphic);
BIND(&if_handler);
{
Comment("StoreIC_if_handler");
@@ -3082,17 +3202,12 @@ void AccessorAssembler::StoreIC(const StoreICParameters* p) {
void AccessorAssembler::StoreGlobalIC(const StoreICParameters* pp) {
Label if_lexical_var(this), if_heapobject(this);
TNode<MaybeObject> maybe_weak_ref =
- LoadFeedbackVectorSlot(pp->vector(), pp->slot(), 0, SMI_PARAMETERS);
+ LoadFeedbackVectorSlot(CAST(pp->vector()), pp->slot());
Branch(TaggedIsSmi(maybe_weak_ref), &if_lexical_var, &if_heapobject);
BIND(&if_heapobject);
{
Label try_handler(this), miss(this, Label::kDeferred);
- // We use pre-monomorphic state for global stores that run into
- // interceptors because the property doesn't exist yet. Using
- // pre-monomorphic state gives it a chance to find more information the
- // second time.
- GotoIf(TaggedEqual(maybe_weak_ref, PremonomorphicSymbolConstant()), &miss);
CSA_ASSERT(this, IsWeakOrCleared(maybe_weak_ref));
TNode<PropertyCell> property_cell =
@@ -3105,13 +3220,13 @@ void AccessorAssembler::StoreGlobalIC(const StoreICParameters* pp) {
BIND(&try_handler);
{
Comment("StoreGlobalIC_try_handler");
- TNode<MaybeObject> handler = LoadFeedbackVectorSlot(
- pp->vector(), pp->slot(), kTaggedSize, SMI_PARAMETERS);
+ TNode<MaybeObject> handler =
+ LoadFeedbackVectorSlot(CAST(pp->vector()), pp->slot(), kTaggedSize);
GotoIf(TaggedEqual(handler, UninitializedSymbolConstant()), &miss);
DCHECK_NULL(pp->receiver());
- TNode<Context> native_context = LoadNativeContext(pp->context());
+ TNode<NativeContext> native_context = LoadNativeContext(pp->context());
StoreICParameters p(
pp->context(),
LoadContextElement(native_context, Context::GLOBAL_PROXY_INDEX),
@@ -3225,8 +3340,8 @@ void AccessorAssembler::KeyedStoreIC(const StoreICParameters* p) {
// Check monomorphic case.
TNode<MaybeObject> feedback =
- TryMonomorphicCase(p->slot(), p->vector(), receiver_map, &if_handler,
- &var_handler, &try_polymorphic);
+ TryMonomorphicCase(p->slot(), CAST(p->vector()), receiver_map,
+ &if_handler, &var_handler, &try_polymorphic);
BIND(&if_handler);
{
Comment("KeyedStoreIC_if_handler");
@@ -3266,8 +3381,8 @@ void AccessorAssembler::KeyedStoreIC(const StoreICParameters* p) {
GotoIfNot(TaggedEqual(strong_feedback, p->name()), &miss);
// If the name comparison succeeded, we know we have a feedback vector
// with at least one map/handler pair.
- TNode<MaybeObject> feedback_element = LoadFeedbackVectorSlot(
- p->vector(), p->slot(), kTaggedSize, SMI_PARAMETERS);
+ TNode<MaybeObject> feedback_element =
+ LoadFeedbackVectorSlot(CAST(p->vector()), p->slot(), kTaggedSize);
TNode<WeakFixedArray> array = CAST(feedback_element);
HandlePolymorphicCase(receiver_map, array, &if_handler, &var_handler,
&miss);
@@ -3296,16 +3411,20 @@ void AccessorAssembler::StoreInArrayLiteralIC(const StoreICParameters* p) {
GotoIf(IsUndefined(p->vector()), &miss);
TNode<MaybeObject> feedback =
- TryMonomorphicCase(p->slot(), p->vector(), array_map, &if_handler,
+ TryMonomorphicCase(p->slot(), CAST(p->vector()), array_map, &if_handler,
&var_handler, &try_polymorphic);
BIND(&if_handler);
{
Comment("StoreInArrayLiteralIC_if_handler");
// This is a stripped-down version of HandleStoreICHandlerCase.
+ Label if_transitioning_element_store(this), if_smi_handler(this);
+
+      // A Smi handler identifies the Slow case: currently the Slow case
+      // is the only one that uses a Smi handler.
+ GotoIf(TaggedIsSmi(var_handler.value()), &if_smi_handler);
TNode<HeapObject> handler = CAST(var_handler.value());
- Label if_transitioning_element_store(this);
GotoIfNot(IsCode(handler), &if_transitioning_element_store);
TailCallStub(StoreWithVectorDescriptor{}, CAST(handler), p->context(),
p->receiver(), p->name(), p->value(), p->slot(),
@@ -3324,6 +3443,22 @@ void AccessorAssembler::StoreInArrayLiteralIC(const StoreICParameters* p) {
p->receiver(), p->name(), transition_map, p->value(),
p->slot(), p->vector());
}
+
+ BIND(&if_smi_handler);
+ {
+#ifdef DEBUG
+ // A check to ensure that no other Smi handler uses this path.
+ TNode<Int32T> handler_word = SmiToInt32(CAST(var_handler.value()));
+ TNode<Uint32T> handler_kind =
+ DecodeWord32<StoreHandler::KindBits>(handler_word);
+ CSA_ASSERT(this, Word32Equal(handler_kind,
+ Int32Constant(StoreHandler::kSlow)));
+#endif
+
+ Comment("StoreInArrayLiteralIC_Slow");
+ TailCallRuntime(Runtime::kStoreInArrayLiteralIC_Slow, p->context(),
+ p->value(), p->receiver(), p->name());
+ }
}
BIND(&try_polymorphic);
@@ -3366,7 +3501,7 @@ void AccessorAssembler::GenerateLoadIC() {
Node* receiver = Parameter(Descriptor::kReceiver);
TNode<Object> name = CAST(Parameter(Descriptor::kName));
- Node* slot = Parameter(Descriptor::kSlot);
+ TNode<Smi> slot = CAST(Parameter(Descriptor::kSlot));
Node* vector = Parameter(Descriptor::kVector);
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
@@ -3379,7 +3514,7 @@ void AccessorAssembler::GenerateLoadIC_Megamorphic() {
Node* receiver = Parameter(Descriptor::kReceiver);
TNode<Object> name = CAST(Parameter(Descriptor::kName));
- Node* slot = Parameter(Descriptor::kSlot);
+ TNode<Smi> slot = CAST(Parameter(Descriptor::kSlot));
Node* vector = Parameter(Descriptor::kVector);
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
@@ -3392,7 +3527,7 @@ void AccessorAssembler::GenerateLoadIC_Megamorphic() {
BIND(&if_handler);
LazyLoadICParameters p([=] { return context; }, receiver,
- [=] { return name; }, slot, vector);
+ [=] { return name; }, [=] { return slot; }, vector);
HandleLoadICHandlerCase(&p, CAST(var_handler.value()), &miss, &direct_exit);
BIND(&miss);
@@ -3405,8 +3540,8 @@ void AccessorAssembler::GenerateLoadIC_Noninlined() {
Node* receiver = Parameter(Descriptor::kReceiver);
TNode<Object> name = CAST(Parameter(Descriptor::kName));
- Node* slot = Parameter(Descriptor::kSlot);
- Node* vector = Parameter(Descriptor::kVector);
+ TNode<Smi> slot = CAST(Parameter(Descriptor::kSlot));
+ TNode<FeedbackVector> vector = CAST(Parameter(Descriptor::kVector));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
ExitPoint direct_exit(this);
@@ -3414,8 +3549,7 @@ void AccessorAssembler::GenerateLoadIC_Noninlined() {
Label if_handler(this, &var_handler), miss(this, Label::kDeferred);
TNode<Map> receiver_map = LoadReceiverMap(receiver);
- TNode<MaybeObject> feedback_element =
- LoadFeedbackVectorSlot(vector, slot, 0, SMI_PARAMETERS);
+ TNode<MaybeObject> feedback_element = LoadFeedbackVectorSlot(vector, slot);
TNode<HeapObject> feedback = CAST(feedback_element);
LoadICParameters p(context, receiver, name, slot, vector);
@@ -3439,7 +3573,7 @@ void AccessorAssembler::GenerateLoadIC_NoFeedback() {
Node* receiver = Parameter(Descriptor::kReceiver);
TNode<Object> name = CAST(Parameter(Descriptor::kName));
- Node* slot = Parameter(Descriptor::kSlot);
+ TNode<Smi> slot = CAST(Parameter(Descriptor::kSlot));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
LoadICParameters p(context, receiver, name, slot, UndefinedConstant());
@@ -3475,13 +3609,17 @@ void AccessorAssembler::GenerateLoadGlobalIC(TypeofMode typeof_mode) {
using Descriptor = LoadGlobalWithVectorDescriptor;
TNode<Name> name = CAST(Parameter(Descriptor::kName));
- Node* slot = Parameter(Descriptor::kSlot);
- Node* vector = Parameter(Descriptor::kVector);
+ TNode<Smi> slot = CAST(Parameter(Descriptor::kSlot));
+ TNode<HeapObject> vector = CAST(Parameter(Descriptor::kVector));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
ExitPoint direct_exit(this);
LoadGlobalIC(
- vector, slot,
+ vector,
+ // lazy_smi_slot
+ [=] { return slot; },
+ // lazy_slot
+ [=] { return Unsigned(SmiUntag(slot)); },
// lazy_context
[=] { return context; },
// lazy_name
@@ -3506,7 +3644,7 @@ void AccessorAssembler::GenerateKeyedLoadIC() {
Node* receiver = Parameter(Descriptor::kReceiver);
TNode<Object> name = CAST(Parameter(Descriptor::kName));
- Node* slot = Parameter(Descriptor::kSlot);
+ TNode<Smi> slot = CAST(Parameter(Descriptor::kSlot));
Node* vector = Parameter(Descriptor::kVector);
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
@@ -3519,7 +3657,7 @@ void AccessorAssembler::GenerateKeyedLoadIC_Megamorphic() {
Node* receiver = Parameter(Descriptor::kReceiver);
TNode<Object> name = CAST(Parameter(Descriptor::kName));
- Node* slot = Parameter(Descriptor::kSlot);
+ TNode<Smi> slot = CAST(Parameter(Descriptor::kSlot));
Node* vector = Parameter(Descriptor::kVector);
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
@@ -3532,7 +3670,7 @@ void AccessorAssembler::GenerateKeyedLoadICTrampoline() {
Node* receiver = Parameter(Descriptor::kReceiver);
TNode<Object> name = CAST(Parameter(Descriptor::kName));
- Node* slot = Parameter(Descriptor::kSlot);
+ TNode<Smi> slot = CAST(Parameter(Descriptor::kSlot));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TNode<FeedbackVector> vector = LoadFeedbackVectorForStub();
@@ -3545,7 +3683,7 @@ void AccessorAssembler::GenerateKeyedLoadICTrampoline_Megamorphic() {
Node* receiver = Parameter(Descriptor::kReceiver);
TNode<Object> name = CAST(Parameter(Descriptor::kName));
- Node* slot = Parameter(Descriptor::kSlot);
+ TNode<Smi> slot = CAST(Parameter(Descriptor::kSlot));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TNode<FeedbackVector> vector = LoadFeedbackVectorForStub();
@@ -3558,7 +3696,7 @@ void AccessorAssembler::GenerateKeyedLoadIC_PolymorphicName() {
Node* receiver = Parameter(Descriptor::kReceiver);
TNode<Object> name = CAST(Parameter(Descriptor::kName));
- Node* slot = Parameter(Descriptor::kSlot);
+ TNode<Smi> slot = CAST(Parameter(Descriptor::kSlot));
Node* vector = Parameter(Descriptor::kVector);
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
@@ -3571,7 +3709,7 @@ void AccessorAssembler::GenerateStoreGlobalIC() {
TNode<Object> name = CAST(Parameter(Descriptor::kName));
Node* value = Parameter(Descriptor::kValue);
- Node* slot = Parameter(Descriptor::kSlot);
+ TNode<Smi> slot = CAST(Parameter(Descriptor::kSlot));
Node* vector = Parameter(Descriptor::kVector);
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
@@ -3584,7 +3722,7 @@ void AccessorAssembler::GenerateStoreGlobalICTrampoline() {
TNode<Object> name = CAST(Parameter(Descriptor::kName));
Node* value = Parameter(Descriptor::kValue);
- Node* slot = Parameter(Descriptor::kSlot);
+ TNode<Smi> slot = CAST(Parameter(Descriptor::kSlot));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TNode<FeedbackVector> vector = LoadFeedbackVectorForStub();
@@ -3597,7 +3735,7 @@ void AccessorAssembler::GenerateStoreIC() {
Node* receiver = Parameter(Descriptor::kReceiver);
TNode<Object> name = CAST(Parameter(Descriptor::kName));
Node* value = Parameter(Descriptor::kValue);
- Node* slot = Parameter(Descriptor::kSlot);
+ TNode<Smi> slot = CAST(Parameter(Descriptor::kSlot));
Node* vector = Parameter(Descriptor::kVector);
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
@@ -3611,7 +3749,7 @@ void AccessorAssembler::GenerateStoreICTrampoline() {
Node* receiver = Parameter(Descriptor::kReceiver);
TNode<Object> name = CAST(Parameter(Descriptor::kName));
Node* value = Parameter(Descriptor::kValue);
- Node* slot = Parameter(Descriptor::kSlot);
+ TNode<Smi> slot = CAST(Parameter(Descriptor::kSlot));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TNode<FeedbackVector> vector = LoadFeedbackVectorForStub();
@@ -3625,7 +3763,7 @@ void AccessorAssembler::GenerateKeyedStoreIC() {
Node* receiver = Parameter(Descriptor::kReceiver);
TNode<Object> name = CAST(Parameter(Descriptor::kName));
Node* value = Parameter(Descriptor::kValue);
- Node* slot = Parameter(Descriptor::kSlot);
+ TNode<Smi> slot = CAST(Parameter(Descriptor::kSlot));
Node* vector = Parameter(Descriptor::kVector);
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
@@ -3639,7 +3777,7 @@ void AccessorAssembler::GenerateKeyedStoreICTrampoline() {
Node* receiver = Parameter(Descriptor::kReceiver);
TNode<Object> name = CAST(Parameter(Descriptor::kName));
Node* value = Parameter(Descriptor::kValue);
- Node* slot = Parameter(Descriptor::kSlot);
+ TNode<Smi> slot = CAST(Parameter(Descriptor::kSlot));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TNode<FeedbackVector> vector = LoadFeedbackVectorForStub();
@@ -3653,7 +3791,7 @@ void AccessorAssembler::GenerateStoreInArrayLiteralIC() {
Node* array = Parameter(Descriptor::kReceiver);
TNode<Object> index = CAST(Parameter(Descriptor::kName));
Node* value = Parameter(Descriptor::kValue);
- Node* slot = Parameter(Descriptor::kSlot);
+ TNode<Smi> slot = CAST(Parameter(Descriptor::kSlot));
Node* vector = Parameter(Descriptor::kVector);
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
@@ -3671,7 +3809,7 @@ void AccessorAssembler::GenerateCloneObjectIC_Slow() {
// can be tail called from it. However, the feedback slot and vector are not
// used.
- TNode<Context> native_context = LoadNativeContext(context);
+ TNode<NativeContext> native_context = LoadNativeContext(context);
TNode<JSFunction> object_fn =
CAST(LoadContextElement(native_context, Context::OBJECT_FUNCTION_INDEX));
TNode<Map> initial_map = CAST(
@@ -3724,7 +3862,7 @@ void AccessorAssembler::GenerateCloneObjectIC() {
TNode<Object> source = CAST(Parameter(Descriptor::kSource));
TNode<Smi> flags = CAST(Parameter(Descriptor::kFlags));
TNode<Smi> slot = CAST(Parameter(Descriptor::kSlot));
- TNode<HeapObject> vector = CAST(Parameter(Descriptor::kVector));
+ TNode<HeapObject> maybe_vector = CAST(Parameter(Descriptor::kVector));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TVARIABLE(MaybeObject, var_handler);
Label if_handler(this, &var_handler), miss(this, Label::kDeferred),
@@ -3734,10 +3872,11 @@ void AccessorAssembler::GenerateCloneObjectIC() {
TNode<Map> source_map = LoadReceiverMap(source);
GotoIf(IsDeprecatedMap(source_map), &miss);
- GotoIf(IsUndefined(vector), &slow);
+ GotoIf(IsUndefined(maybe_vector), &slow);
- TNode<MaybeObject> feedback = TryMonomorphicCase(
- slot, vector, source_map, &if_handler, &var_handler, &try_polymorphic);
+ TNode<MaybeObject> feedback =
+ TryMonomorphicCase(slot, CAST(maybe_vector), source_map, &if_handler,
+ &var_handler, &try_polymorphic);
BIND(&if_handler);
{
@@ -3801,30 +3940,28 @@ void AccessorAssembler::GenerateCloneObjectIC() {
// Just copy the fields as raw data (pretending that there are no mutable
// HeapNumbers). This doesn't need write barriers.
- BuildFastLoop(
+ BuildFastLoop<IntPtrT>(
source_start, source_size,
- [=](Node* field_index) {
- TNode<IntPtrT> field_offset =
- TimesTaggedSize(UncheckedCast<IntPtrT>(field_index));
+ [=](TNode<IntPtrT> field_index) {
+ TNode<IntPtrT> field_offset = TimesTaggedSize(field_index);
TNode<TaggedT> field =
LoadObjectField<TaggedT>(CAST(source), field_offset);
TNode<IntPtrT> result_offset =
IntPtrAdd(field_offset, field_offset_difference);
StoreObjectFieldNoWriteBarrier(object, result_offset, field);
},
- 1, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
+ 1, IndexAdvanceMode::kPost);
// If mutable HeapNumbers can occur, we need to go through the {object}
// again here and properly clone them. We use a second loop here to
// ensure that the GC (and heap verifier) always sees properly initialized
// objects, i.e. never hits undefined values in double fields.
if (!FLAG_unbox_double_fields) {
- BuildFastLoop(
+ BuildFastLoop<IntPtrT>(
source_start, source_size,
- [=](Node* field_index) {
- TNode<IntPtrT> result_offset =
- IntPtrAdd(TimesTaggedSize(UncheckedCast<IntPtrT>(field_index)),
- field_offset_difference);
+ [=](TNode<IntPtrT> field_index) {
+ TNode<IntPtrT> result_offset = IntPtrAdd(
+ TimesTaggedSize(field_index), field_offset_difference);
TNode<Object> field = LoadObjectField(object, result_offset);
Label if_done(this), if_mutableheapnumber(this, Label::kDeferred);
GotoIf(TaggedIsSmi(field), &if_done);
@@ -3838,7 +3975,7 @@ void AccessorAssembler::GenerateCloneObjectIC() {
}
BIND(&if_done);
},
- 1, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
+ 1, IndexAdvanceMode::kPost);
}
Return(object);
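Review note: the clone loops move to the typed BuildFastLoop<IntPtrT>, dropping the UncheckedCast and the INTPTR_PARAMETERS argument. The two-pass shape itself is unchanged; a standalone model of why two passes (raw word copy first, then re-boxing mutable doubles so the GC never observes a half-initialized or shared box):

#include <cstdint>
#include <variant>
#include <vector>

struct Box { double value; };                 // ~ a mutable HeapNumber
using Field = std::variant<int64_t, Box*>;

void CloneFields(const std::vector<Field>& src, std::vector<Field>* dst) {
  *dst = src;  // pass 1: copy every field as raw data (boxes are shared)
  for (Field& f : *dst) {
    if (Box** box = std::get_if<Box*>(&f)) {
      *box = new Box{(*box)->value};  // pass 2: give the clone its own box
    }
  }
}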
@@ -3867,14 +4004,15 @@ void AccessorAssembler::GenerateCloneObjectIC() {
BIND(&slow);
{
TailCallBuiltin(Builtins::kCloneObjectIC_Slow, context, source, flags, slot,
- vector);
+ maybe_vector);
}
BIND(&miss);
{
Comment("CloneObjectIC_miss");
- TNode<HeapObject> map_or_result = CAST(CallRuntime(
- Runtime::kCloneObjectIC_Miss, context, source, flags, slot, vector));
+ TNode<HeapObject> map_or_result =
+ CAST(CallRuntime(Runtime::kCloneObjectIC_Miss, context, source, flags,
+ slot, maybe_vector));
var_handler = UncheckedCast<MaybeObject>(map_or_result);
GotoIf(IsMap(map_or_result), &if_handler);
CSA_ASSERT(this, IsJSObject(map_or_result));
@@ -3887,7 +4025,7 @@ void AccessorAssembler::GenerateKeyedHasIC() {
Node* receiver = Parameter(Descriptor::kReceiver);
TNode<Object> name = CAST(Parameter(Descriptor::kName));
- Node* slot = Parameter(Descriptor::kSlot);
+ TNode<Smi> slot = CAST(Parameter(Descriptor::kSlot));
Node* vector = Parameter(Descriptor::kVector);
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
@@ -3911,7 +4049,7 @@ void AccessorAssembler::GenerateKeyedHasIC_PolymorphicName() {
Node* receiver = Parameter(Descriptor::kReceiver);
TNode<Object> name = CAST(Parameter(Descriptor::kName));
- Node* slot = Parameter(Descriptor::kSlot);
+ TNode<Smi> slot = CAST(Parameter(Descriptor::kSlot));
Node* vector = Parameter(Descriptor::kVector);
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
@@ -3919,5 +4057,54 @@ void AccessorAssembler::GenerateKeyedHasIC_PolymorphicName() {
KeyedLoadICPolymorphicName(&p, LoadAccessMode::kHas);
}
+void AccessorAssembler::BranchIfPrototypesHaveNoElements(
+ TNode<Map> receiver_map, Label* definitely_no_elements,
+ Label* possibly_elements) {
+ TVARIABLE(Map, var_map, receiver_map);
+ Label loop_body(this, &var_map);
+ TNode<FixedArray> empty_fixed_array = EmptyFixedArrayConstant();
+ TNode<NumberDictionary> empty_slow_element_dictionary =
+ EmptySlowElementDictionaryConstant();
+ Goto(&loop_body);
+
+ BIND(&loop_body);
+ {
+ TNode<Map> map = var_map.value();
+ TNode<HeapObject> prototype = LoadMapPrototype(map);
+ GotoIf(IsNull(prototype), definitely_no_elements);
+ TNode<Map> prototype_map = LoadMap(prototype);
+ TNode<Uint16T> prototype_instance_type = LoadMapInstanceType(prototype_map);
+
+    // Pessimistically assume elements if a Proxy, Special API Object, or
+    // JSPrimitiveWrapper is found on the prototype chain. After this
+    // instance type check, it's not necessary to check for interceptors
+    // or access checks.
+ Label if_custom(this, Label::kDeferred), if_notcustom(this);
+ Branch(IsCustomElementsReceiverInstanceType(prototype_instance_type),
+ &if_custom, &if_notcustom);
+
+ BIND(&if_custom);
+ {
+      // For JSPrimitiveWrapper objects wrapping strings we still support
+      // the checks as long as they wrap the empty string.
+ GotoIfNot(
+ InstanceTypeEqual(prototype_instance_type, JS_PRIMITIVE_WRAPPER_TYPE),
+ possibly_elements);
+ TNode<Object> prototype_value =
+ LoadJSPrimitiveWrapperValue(CAST(prototype));
+ Branch(IsEmptyString(prototype_value), &if_notcustom, possibly_elements);
+ }
+
+ BIND(&if_notcustom);
+ {
+ TNode<FixedArrayBase> prototype_elements = LoadElements(CAST(prototype));
+ var_map = prototype_map;
+ GotoIf(TaggedEqual(prototype_elements, empty_fixed_array), &loop_body);
+ Branch(TaggedEqual(prototype_elements, empty_slow_element_dictionary),
+ &loop_body, possibly_elements);
+ }
+ }
+}
+
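Review note: BranchIfPrototypesHaveNoElements becomes an AccessorAssembler member (declared in the header hunk below). A minimal model of the walk it performs — only an unbroken chain of ordinary prototypes with empty elements counts as "definitely no elements":

struct Proto {
  bool custom_receiver;   // Proxy / special API object / non-empty wrapper
  bool empty_elements;    // elements are one of the known-empty sentinels
  const Proto* next;      // nullptr terminates the chain
};

bool PrototypesHaveNoElements(const Proto* p) {
  for (; p != nullptr; p = p->next) {
    if (p->custom_receiver || !p->empty_elements) return false;
  }
  return true;  // reached null without finding any elements
}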
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/ic/accessor-assembler.h b/deps/v8/src/ic/accessor-assembler.h
index 0de2292fd6..ccc2de9323 100644
--- a/deps/v8/src/ic/accessor-assembler.h
+++ b/deps/v8/src/ic/accessor-assembler.h
@@ -5,6 +5,7 @@
#ifndef V8_IC_ACCESSOR_ASSEMBLER_H_
#define V8_IC_ACCESSOR_ASSEMBLER_H_
+#include "src/base/optional.h"
#include "src/codegen/code-stub-assembler.h"
namespace v8 {
@@ -19,10 +20,6 @@ class ExitPoint;
class V8_EXPORT_PRIVATE AccessorAssembler : public CodeStubAssembler {
public:
using Node = compiler::Node;
- template <class T>
- using TNode = compiler::TNode<T>;
- template <class T>
- using SloppyTNode = compiler::SloppyTNode<T>;
explicit AccessorAssembler(compiler::CodeAssemblerState* state)
: CodeStubAssembler(state) {}
@@ -69,7 +66,7 @@ class V8_EXPORT_PRIVATE AccessorAssembler : public CodeStubAssembler {
struct LoadICParameters {
LoadICParameters(TNode<Context> context, Node* receiver, TNode<Object> name,
- Node* slot, Node* vector, Node* holder = nullptr)
+ TNode<Smi> slot, Node* vector, Node* holder = nullptr)
: context_(context),
receiver_(receiver),
name_(name),
@@ -88,7 +85,7 @@ class V8_EXPORT_PRIVATE AccessorAssembler : public CodeStubAssembler {
TNode<Context> context() const { return context_; }
Node* receiver() const { return receiver_; }
TNode<Object> name() const { return name_; }
- Node* slot() const { return slot_; }
+ TNode<Smi> slot() const { return slot_; }
Node* vector() const { return vector_; }
Node* holder() const { return holder_; }
@@ -96,15 +93,15 @@ class V8_EXPORT_PRIVATE AccessorAssembler : public CodeStubAssembler {
TNode<Context> context_;
Node* receiver_;
TNode<Object> name_;
- Node* slot_;
+ TNode<Smi> slot_;
Node* vector_;
Node* holder_;
};
struct LazyLoadICParameters {
LazyLoadICParameters(LazyNode<Context> context, Node* receiver,
- LazyNode<Object> name, Node* slot, Node* vector,
- Node* holder = nullptr)
+ LazyNode<Object> name, LazyNode<Smi> slot,
+ Node* vector, Node* holder = nullptr)
: context_(context),
receiver_(receiver),
name_(name),
@@ -114,19 +111,17 @@ class V8_EXPORT_PRIVATE AccessorAssembler : public CodeStubAssembler {
explicit LazyLoadICParameters(const LoadICParameters* p)
: receiver_(p->receiver()),
- slot_(p->slot()),
vector_(p->vector()),
holder_(p->holder()) {
- TNode<Context> p_context = p->context();
- context_ = [=] { return p_context; };
- TNode<Object> p_name = p->name();
- name_ = [=] { return p_name; };
+ slot_ = [=] { return p->slot(); };
+ context_ = [=] { return p->context(); };
+ name_ = [=] { return p->name(); };
}
TNode<Context> context() const { return context_(); }
Node* receiver() const { return receiver_; }
TNode<Object> name() const { return name_(); }
- Node* slot() const { return slot_; }
+ TNode<Smi> slot() const { return slot_(); }
Node* vector() const { return vector_; }
Node* holder() const { return holder_; }
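Review note: the delegating constructor now captures the LoadICParameters pointer p inside the thunks instead of copying each value out first, which is only safe because p outlives every lazy parameter. A two-line illustration of that lifetime assumption:

#include <functional>

struct Params { int slot; };

// The thunk stores the pointer, not the value: *p must outlive the thunk.
std::function<int()> MakeLazySlot(const Params* p) {
  return [=] { return p->slot; };  // dangles if *p is destroyed first
}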
@@ -134,16 +129,17 @@ class V8_EXPORT_PRIVATE AccessorAssembler : public CodeStubAssembler {
LazyNode<Context> context_;
Node* receiver_;
LazyNode<Object> name_;
- Node* slot_;
+ LazyNode<Smi> slot_;
Node* vector_;
Node* holder_;
};
- void LoadGlobalIC(Node* vector, Node* slot,
+ void LoadGlobalIC(TNode<HeapObject> maybe_feedback_vector,
+ const LazyNode<Smi>& lazy_smi_slot,
+ const LazyNode<UintPtrT>& lazy_slot,
const LazyNode<Context>& lazy_context,
const LazyNode<Name>& lazy_name, TypeofMode typeof_mode,
- ExitPoint* exit_point,
- ParameterMode slot_mode = SMI_PARAMETERS);
+ ExitPoint* exit_point);
// Specialized LoadIC for inlined bytecode handler, hand-tuned to omit frame
// construction on common paths.
@@ -157,8 +153,8 @@ class V8_EXPORT_PRIVATE AccessorAssembler : public CodeStubAssembler {
protected:
struct StoreICParameters : public LoadICParameters {
StoreICParameters(TNode<Context> context, Node* receiver,
- TNode<Object> name, SloppyTNode<Object> value, Node* slot,
- Node* vector)
+ TNode<Object> name, SloppyTNode<Object> value,
+ TNode<Smi> slot, Node* vector)
: LoadICParameters(context, receiver, name, slot, vector),
value_(value) {}
@@ -185,20 +181,22 @@ class V8_EXPORT_PRIVATE AccessorAssembler : public CodeStubAssembler {
Label* miss,
StoreTransitionMapFlags flags);
- void JumpIfDataProperty(Node* details, Label* writable, Label* readonly);
+ void JumpIfDataProperty(TNode<Uint32T> details, Label* writable,
+ Label* readonly);
void InvalidateValidityCellIfPrototype(Node* map, Node* bitfield3 = nullptr);
- void OverwriteExistingFastDataProperty(Node* object, Node* object_map,
- Node* descriptors,
- Node* descriptor_name_index,
- Node* details, TNode<Object> value,
- Label* slow,
+ void OverwriteExistingFastDataProperty(SloppyTNode<HeapObject> object,
+ TNode<Map> object_map,
+ TNode<DescriptorArray> descriptors,
+ TNode<IntPtrT> descriptor_name_index,
+ TNode<Uint32T> details,
+ TNode<Object> value, Label* slow,
bool do_transitioning_store);
- void CheckFieldType(TNode<DescriptorArray> descriptors, Node* name_index,
- TNode<Word32T> representation, Node* value,
- Label* bailout);
+ void CheckFieldType(TNode<DescriptorArray> descriptors,
+ TNode<IntPtrT> name_index, TNode<Word32T> representation,
+ Node* value, Label* bailout);
private:
// Stub generation entry points.
@@ -232,12 +230,11 @@ class V8_EXPORT_PRIVATE AccessorAssembler : public CodeStubAssembler {
// IC dispatcher behavior.
// Checks monomorphic case. Returns {feedback} entry of the vector.
- TNode<MaybeObject> TryMonomorphicCase(Node* slot, Node* vector,
- Node* receiver_map, Label* if_handler,
- TVariable<MaybeObject>* var_handler,
- Label* if_miss);
- void HandlePolymorphicCase(Node* receiver_map, TNode<WeakFixedArray> feedback,
- Label* if_handler,
+ TNode<MaybeObject> TryMonomorphicCase(
+ TNode<Smi> slot, TNode<FeedbackVector> vector, TNode<Map> receiver_map,
+ Label* if_handler, TVariable<MaybeObject>* var_handler, Label* if_miss);
+ void HandlePolymorphicCase(TNode<Map> receiver_map,
+ TNode<WeakFixedArray> feedback, Label* if_handler,
TVariable<MaybeObject>* var_handler,
Label* if_miss);
@@ -249,15 +246,14 @@ class V8_EXPORT_PRIVATE AccessorAssembler : public CodeStubAssembler {
ElementSupport support_elements = kOnlyProperties,
LoadAccessMode access_mode = LoadAccessMode::kLoad);
- void HandleLoadICSmiHandlerCase(const LazyLoadICParameters* p, Node* holder,
- SloppyTNode<Smi> smi_handler,
- SloppyTNode<Object> handler, Label* miss,
- ExitPoint* exit_point,
- OnNonExistent on_nonexistent,
- ElementSupport support_elements,
- LoadAccessMode access_mode);
+ void HandleLoadICSmiHandlerCase(
+ const LazyLoadICParameters* p, SloppyTNode<HeapObject> holder,
+ SloppyTNode<Smi> smi_handler, SloppyTNode<Object> handler, Label* miss,
+ ExitPoint* exit_point, ICMode ic_mode, OnNonExistent on_nonexistent,
+ ElementSupport support_elements, LoadAccessMode access_mode);
- void HandleLoadICProtoHandler(const LazyLoadICParameters* p, Node* handler,
+ void HandleLoadICProtoHandler(const LazyLoadICParameters* p,
+ TNode<DataHandler> handler,
Variable* var_holder, Variable* var_smi_handler,
Label* if_smi_handler, Label* miss,
ExitPoint* exit_point, ICMode ic_mode,
@@ -273,40 +269,43 @@ class V8_EXPORT_PRIVATE AccessorAssembler : public CodeStubAssembler {
TNode<WordT> handler_word, TNode<DataHandler> handler,
TNode<IntPtrT> handler_kind, ExitPoint* exit_point);
- void HandleLoadField(Node* holder, Node* handler_word,
+ void HandleLoadField(SloppyTNode<JSObject> holder, TNode<WordT> handler_word,
Variable* var_double_value, Label* rebox_double,
- ExitPoint* exit_point);
+ Label* miss, ExitPoint* exit_point);
void EmitAccessCheck(TNode<Context> expected_native_context,
- TNode<Context> context, Node* receiver,
+ TNode<Context> context, TNode<Object> receiver,
Label* can_access, Label* miss);
void HandleLoadICSmiHandlerLoadNamedCase(
- const LazyLoadICParameters* p, Node* holder, TNode<IntPtrT> handler_kind,
- TNode<WordT> handler_word, Label* rebox_double,
- Variable* var_double_value, SloppyTNode<Object> handler, Label* miss,
- ExitPoint* exit_point, OnNonExistent on_nonexistent,
+ const LazyLoadICParameters* p, TNode<HeapObject> holder,
+ TNode<IntPtrT> handler_kind, TNode<WordT> handler_word,
+ Label* rebox_double, Variable* var_double_value,
+ SloppyTNode<Object> handler, Label* miss, ExitPoint* exit_point,
+ ICMode ic_mode, OnNonExistent on_nonexistent,
ElementSupport support_elements);
void HandleLoadICSmiHandlerHasNamedCase(const LazyLoadICParameters* p,
- Node* holder,
+ TNode<HeapObject> holder,
TNode<IntPtrT> handler_kind,
- Label* miss, ExitPoint* exit_point);
+ Label* miss, ExitPoint* exit_point,
+ ICMode ic_mode);
// LoadGlobalIC implementation.
- void LoadGlobalIC_TryPropertyCellCase(
- TNode<FeedbackVector> vector, Node* slot,
- const LazyNode<Context>& lazy_context, ExitPoint* exit_point,
- Label* try_handler, Label* miss,
- ParameterMode slot_mode = SMI_PARAMETERS);
+ void LoadGlobalIC_TryPropertyCellCase(TNode<FeedbackVector> vector,
+ TNode<UintPtrT> slot,
+ const LazyNode<Context>& lazy_context,
+ ExitPoint* exit_point,
+ Label* try_handler, Label* miss);
- void LoadGlobalIC_TryHandlerCase(TNode<FeedbackVector> vector, Node* slot,
+ void LoadGlobalIC_TryHandlerCase(TNode<FeedbackVector> vector,
+ TNode<UintPtrT> slot,
+ const LazyNode<Smi>& lazy_smi_slot,
const LazyNode<Context>& lazy_context,
const LazyNode<Name>& lazy_name,
TypeofMode typeof_mode,
- ExitPoint* exit_point, Label* miss,
- ParameterMode slot_mode);
+ ExitPoint* exit_point, Label* miss);
// StoreIC implementation.
@@ -314,59 +313,66 @@ class V8_EXPORT_PRIVATE AccessorAssembler : public CodeStubAssembler {
TNode<StoreHandler> handler, Label* miss,
ICMode ic_mode,
ElementSupport support_elements);
- void HandleStoreICSmiHandlerCase(Node* handler_word, Node* holder,
- Node* value, Label* miss);
- void HandleStoreFieldAndReturn(Node* handler_word, Node* holder,
- Representation representation, Node* value,
- Label* miss);
+ void HandleStoreICSmiHandlerCase(SloppyTNode<Word32T> handler_word,
+ SloppyTNode<JSObject> holder,
+ SloppyTNode<Object> value, Label* miss);
+ void HandleStoreFieldAndReturn(TNode<Word32T> handler_word,
+ TNode<JSObject> holder, TNode<Object> value,
+ base::Optional<TNode<Float64T>> double_value,
+ Representation representation, Label* miss);
void CheckPrototypeValidityCell(TNode<Object> maybe_validity_cell,
Label* miss);
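Review note: HandleStoreFieldAndReturn above now receives base::Optional<TNode<Float64T>>, so the caller converts the value to a double once, and only when the field representation requires it — hence the new src/base/optional.h include at the top of this header. A sketch of the parameter shape, with std::optional standing in:

#include <cstdio>
#include <optional>

void StoreField(bool double_representation, long tagged_value,
                std::optional<double> double_value) {
  if (double_representation) {
    // Assumed invariant: the caller populated double_value on this path.
    std::printf("double store: %f\n", *double_value);
  } else {
    std::printf("tagged store: %ld\n", tagged_value);
  }
}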
- void HandleStoreICNativeDataProperty(const StoreICParameters* p, Node* holder,
- Node* handler_word);
+ void HandleStoreICNativeDataProperty(const StoreICParameters* p,
+ SloppyTNode<HeapObject> holder,
+ TNode<Word32T> handler_word);
void HandleStoreToProxy(const StoreICParameters* p, Node* proxy, Label* miss,
ElementSupport support_elements);
- void HandleStoreAccessor(const StoreICParameters* p, Node* holder,
- Node* handler_word);
+ void HandleStoreAccessor(const StoreICParameters* p,
+ SloppyTNode<HeapObject> holder,
+ TNode<Word32T> handler_word);
// KeyedLoadIC_Generic implementation.
- void GenericElementLoad(Node* receiver, Node* receiver_map,
+ void GenericElementLoad(Node* receiver, TNode<Map> receiver_map,
SloppyTNode<Int32T> instance_type, Node* index,
Label* slow);
enum UseStubCache { kUseStubCache, kDontUseStubCache };
- void GenericPropertyLoad(Node* receiver, Node* receiver_map,
+ void GenericPropertyLoad(Node* receiver, TNode<Map> receiver_map,
SloppyTNode<Int32T> instance_type,
const LoadICParameters* p, Label* slow,
UseStubCache use_stub_cache = kUseStubCache);
// Low-level helpers.
- using OnCodeHandler = std::function<void(Node* code_handler)>;
- using OnFoundOnReceiver =
- std::function<void(Node* properties, Node* name_index)>;
+ using OnCodeHandler = std::function<void(TNode<Code> code_handler)>;
+ using OnFoundOnReceiver = std::function<void(TNode<NameDictionary> properties,
+ TNode<IntPtrT> name_index)>;
template <typename ICHandler, typename ICParameters>
- Node* HandleProtoHandler(const ICParameters* p, Node* handler,
- const OnCodeHandler& on_code_handler,
- const OnFoundOnReceiver& on_found_on_receiver,
- Label* miss, ICMode ic_mode);
-
- Node* PrepareValueForStore(Node* handler_word, Node* holder,
- Representation representation, Node* value,
- Label* bailout);
+ TNode<Object> HandleProtoHandler(
+ const ICParameters* p, TNode<DataHandler> handler,
+ const OnCodeHandler& on_code_handler,
+ const OnFoundOnReceiver& on_found_on_receiver, Label* miss,
+ ICMode ic_mode);
+
+ void CheckHeapObjectTypeMatchesDescriptor(TNode<Word32T> handler_word,
+ TNode<JSObject> holder,
+ TNode<Object> value,
+ Label* bailout);
+  // Double fields store their values in a mutable box; stores write into
+  // this box rather than assigning a new HeapNumber.
+ void CheckDescriptorConsidersNumbersMutable(TNode<Word32T> handler_word,
+ TNode<JSObject> holder,
+ Label* bailout);
// Extends properties backing store by JSObject::kFieldsAdded elements,
// returns updated properties backing store.
Node* ExtendPropertiesBackingStore(Node* object, Node* index);
- void StoreNamedField(Node* handler_word, Node* object, bool is_inobject,
- Representation representation, Node* value,
- Label* bailout);
-
void EmitFastElementsBoundsCheck(Node* object, Node* elements,
Node* intptr_index,
Node* is_jsarray_condition, Label* miss);
@@ -379,7 +385,7 @@ class V8_EXPORT_PRIVATE AccessorAssembler : public CodeStubAssembler {
LoadAccessMode access_mode = LoadAccessMode::kLoad);
void NameDictionaryNegativeLookup(Node* object, SloppyTNode<Name> name,
Label* miss);
- TNode<BoolT> IsPropertyDetailsConst(Node* details);
+ TNode<BoolT> IsPropertyDetailsConst(TNode<Uint32T> details);
// Stub cache access helpers.
@@ -395,6 +401,10 @@ class V8_EXPORT_PRIVATE AccessorAssembler : public CodeStubAssembler {
TNode<Map> map, Label* if_handler,
TVariable<MaybeObject>* var_handler,
Label* if_miss);
+
+ void BranchIfPrototypesHaveNoElements(TNode<Map> receiver_map,
+ Label* definitely_no_elements,
+ Label* possibly_elements);
};
// Abstraction over direct and indirect exit points. Direct exits correspond to
diff --git a/deps/v8/src/ic/binary-op-assembler.cc b/deps/v8/src/ic/binary-op-assembler.cc
index f6bec6eab9..ee488100e9 100644
--- a/deps/v8/src/ic/binary-op-assembler.cc
+++ b/deps/v8/src/ic/binary-op-assembler.cc
@@ -9,21 +9,19 @@
namespace v8 {
namespace internal {
-using compiler::Node;
-
-Node* BinaryOpAssembler::Generate_AddWithFeedback(Node* context, Node* lhs,
- Node* rhs, Node* slot_id,
- Node* feedback_vector,
- bool rhs_is_smi) {
+TNode<Object> BinaryOpAssembler::Generate_AddWithFeedback(
+ TNode<Context> context, TNode<Object> lhs, TNode<Object> rhs,
+ TNode<UintPtrT> slot_id, TNode<HeapObject> maybe_feedback_vector,
+ bool rhs_known_smi) {
// Shared entry for floating point addition.
Label do_fadd(this), if_lhsisnotnumber(this, Label::kDeferred),
check_rhsisoddball(this, Label::kDeferred),
call_with_oddball_feedback(this), call_with_any_feedback(this),
call_add_stub(this), end(this), bigint(this, Label::kDeferred);
- VARIABLE(var_fadd_lhs, MachineRepresentation::kFloat64);
- VARIABLE(var_fadd_rhs, MachineRepresentation::kFloat64);
- VARIABLE(var_type_feedback, MachineRepresentation::kTaggedSigned);
- VARIABLE(var_result, MachineRepresentation::kTagged);
+ TVARIABLE(Float64T, var_fadd_lhs);
+ TVARIABLE(Float64T, var_fadd_rhs);
+ TVARIABLE(Smi, var_type_feedback);
+ TVARIABLE(Object, var_result);
// Check if the {lhs} is a Smi or a HeapObject.
Label if_lhsissmi(this);
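Review note: this function's rewrite swaps every untyped VARIABLE(..., MachineRepresentation::k...) plus Bind() for a typed TVARIABLE(T, ...) plus operator=. A minimal stand-in showing what the typed form buys — assignments are checked at compile time instead of trusting a runtime representation tag:

#include <cassert>

template <typename T>
class TVar {  // toy analogue of a typed CSA variable
 public:
  TVar& operator=(T v) { value_ = v; bound_ = true; return *this; }
  T value() const { assert(bound_); return value_; }
 private:
  T value_{};
  bool bound_ = false;
};

int main() {
  TVar<double> var_fadd_lhs;
  var_fadd_lhs = 1.5;         // OK: double is the declared type
  // var_fadd_lhs = "oops";   // would not compile, unlike untyped Bind()
  return var_fadd_lhs.value() == 1.5 ? 0 : 1;
}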
@@ -32,13 +30,14 @@ Node* BinaryOpAssembler::Generate_AddWithFeedback(Node* context, Node* lhs,
// both Smi and Number operations, so this path should not be marked as
// Deferred.
Label if_lhsisnotsmi(this,
- rhs_is_smi ? Label::kDeferred : Label::kNonDeferred);
+ rhs_known_smi ? Label::kDeferred : Label::kNonDeferred);
Branch(TaggedIsNotSmi(lhs), &if_lhsisnotsmi, &if_lhsissmi);
BIND(&if_lhsissmi);
{
Comment("lhs is Smi");
- if (!rhs_is_smi) {
+ TNode<Smi> lhs_smi = CAST(lhs);
+ if (!rhs_known_smi) {
// Check if the {rhs} is also a Smi.
Label if_rhsissmi(this), if_rhsisnotsmi(this);
Branch(TaggedIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);
@@ -46,10 +45,11 @@ Node* BinaryOpAssembler::Generate_AddWithFeedback(Node* context, Node* lhs,
BIND(&if_rhsisnotsmi);
{
// Check if the {rhs} is a HeapNumber.
- GotoIfNot(IsHeapNumber(rhs), &check_rhsisoddball);
+ TNode<HeapObject> rhs_heap_object = CAST(rhs);
+ GotoIfNot(IsHeapNumber(rhs_heap_object), &check_rhsisoddball);
- var_fadd_lhs.Bind(SmiToFloat64(lhs));
- var_fadd_rhs.Bind(LoadHeapNumberValue(rhs));
+ var_fadd_lhs = SmiToFloat64(lhs_smi);
+ var_fadd_rhs = LoadHeapNumberValue(rhs_heap_object);
Goto(&do_fadd);
}
@@ -62,21 +62,21 @@ Node* BinaryOpAssembler::Generate_AddWithFeedback(Node* context, Node* lhs,
// is for AddSmi operation. For the normal Add operation, we want to fast
// path both Smi and Number operations, so this path should not be marked
// as Deferred.
+ TNode<Smi> rhs_smi = CAST(rhs);
Label if_overflow(this,
- rhs_is_smi ? Label::kDeferred : Label::kNonDeferred);
- TNode<Smi> smi_result = TrySmiAdd(CAST(lhs), CAST(rhs), &if_overflow);
+ rhs_known_smi ? Label::kDeferred : Label::kNonDeferred);
+ TNode<Smi> smi_result = TrySmiAdd(lhs_smi, rhs_smi, &if_overflow);
// Not overflowed.
{
- var_type_feedback.Bind(
- SmiConstant(BinaryOperationFeedback::kSignedSmall));
- var_result.Bind(smi_result);
+ var_type_feedback = SmiConstant(BinaryOperationFeedback::kSignedSmall);
+ var_result = smi_result;
Goto(&end);
}
BIND(&if_overflow);
{
- var_fadd_lhs.Bind(SmiToFloat64(lhs));
- var_fadd_rhs.Bind(SmiToFloat64(rhs));
+ var_fadd_lhs = SmiToFloat64(lhs_smi);
+ var_fadd_rhs = SmiToFloat64(rhs_smi);
Goto(&do_fadd);
}
}
@@ -85,9 +85,10 @@ Node* BinaryOpAssembler::Generate_AddWithFeedback(Node* context, Node* lhs,
BIND(&if_lhsisnotsmi);
{
// Check if {lhs} is a HeapNumber.
- GotoIfNot(IsHeapNumber(lhs), &if_lhsisnotnumber);
+ TNode<HeapObject> lhs_heap_object = CAST(lhs);
+ GotoIfNot(IsHeapNumber(lhs_heap_object), &if_lhsisnotnumber);
- if (!rhs_is_smi) {
+ if (!rhs_known_smi) {
// Check if the {rhs} is Smi.
Label if_rhsissmi(this), if_rhsisnotsmi(this);
Branch(TaggedIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);
@@ -95,29 +96,30 @@ Node* BinaryOpAssembler::Generate_AddWithFeedback(Node* context, Node* lhs,
BIND(&if_rhsisnotsmi);
{
// Check if the {rhs} is a HeapNumber.
- GotoIfNot(IsHeapNumber(rhs), &check_rhsisoddball);
+ TNode<HeapObject> rhs_heap_object = CAST(rhs);
+ GotoIfNot(IsHeapNumber(rhs_heap_object), &check_rhsisoddball);
- var_fadd_lhs.Bind(LoadHeapNumberValue(lhs));
- var_fadd_rhs.Bind(LoadHeapNumberValue(rhs));
+ var_fadd_lhs = LoadHeapNumberValue(lhs_heap_object);
+ var_fadd_rhs = LoadHeapNumberValue(rhs_heap_object);
Goto(&do_fadd);
}
BIND(&if_rhsissmi);
}
{
- var_fadd_lhs.Bind(LoadHeapNumberValue(lhs));
- var_fadd_rhs.Bind(SmiToFloat64(rhs));
+ var_fadd_lhs = LoadHeapNumberValue(lhs_heap_object);
+ var_fadd_rhs = SmiToFloat64(CAST(rhs));
Goto(&do_fadd);
}
}
BIND(&do_fadd);
{
- var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kNumber));
+ var_type_feedback = SmiConstant(BinaryOperationFeedback::kNumber);
TNode<Float64T> value =
Float64Add(var_fadd_lhs.value(), var_fadd_rhs.value());
TNode<HeapNumber> result = AllocateHeapNumberWithValue(value);
- var_result.Bind(result);
+ var_result = result;
Goto(&end);
}
@@ -125,7 +127,7 @@ Node* BinaryOpAssembler::Generate_AddWithFeedback(Node* context, Node* lhs,
{
// No checks on rhs are done yet. We just know lhs is not a number or Smi.
Label if_lhsisoddball(this), if_lhsisnotoddball(this);
- TNode<Uint16T> lhs_instance_type = LoadInstanceType(lhs);
+ TNode<Uint16T> lhs_instance_type = LoadInstanceType(CAST(lhs));
TNode<BoolT> lhs_is_oddball =
InstanceTypeEqual(lhs_instance_type, ODDBALL_TYPE);
Branch(lhs_is_oddball, &if_lhsisoddball, &if_lhsisnotoddball);
@@ -135,39 +137,40 @@ Node* BinaryOpAssembler::Generate_AddWithFeedback(Node* context, Node* lhs,
GotoIf(TaggedIsSmi(rhs), &call_with_oddball_feedback);
// Check if {rhs} is a HeapNumber.
- Branch(IsHeapNumber(rhs), &call_with_oddball_feedback,
+ Branch(IsHeapNumber(CAST(rhs)), &call_with_oddball_feedback,
&check_rhsisoddball);
}
BIND(&if_lhsisnotoddball);
{
+    // Check if the {rhs} is a Smi, and exit the string and BigInt checks
+    // early if it is.
+ GotoIf(TaggedIsSmi(rhs), &call_with_any_feedback);
+ TNode<HeapObject> rhs_heap_object = CAST(rhs);
+
Label lhs_is_string(this), lhs_is_bigint(this);
GotoIf(IsStringInstanceType(lhs_instance_type), &lhs_is_string);
GotoIf(IsBigIntInstanceType(lhs_instance_type), &lhs_is_bigint);
Goto(&call_with_any_feedback);
BIND(&lhs_is_bigint);
- {
- GotoIf(TaggedIsSmi(rhs), &call_with_any_feedback);
- Branch(IsBigInt(rhs), &bigint, &call_with_any_feedback);
- }
+ Branch(IsBigInt(rhs_heap_object), &bigint, &call_with_any_feedback);
BIND(&lhs_is_string);
- // Check if the {rhs} is a smi, and exit the string check early if it is.
- GotoIf(TaggedIsSmi(rhs), &call_with_any_feedback);
-
- TNode<Uint16T> rhs_instance_type = LoadInstanceType(rhs);
+ {
+ TNode<Uint16T> rhs_instance_type = LoadInstanceType(rhs_heap_object);
- // Exit unless {rhs} is a string. Since {lhs} is a string we no longer
- // need an Oddball check.
- GotoIfNot(IsStringInstanceType(rhs_instance_type),
- &call_with_any_feedback);
+ // Exit unless {rhs} is a string. Since {lhs} is a string we no longer
+ // need an Oddball check.
+ GotoIfNot(IsStringInstanceType(rhs_instance_type),
+ &call_with_any_feedback);
- var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kString));
- var_result.Bind(
- CallBuiltin(Builtins::kStringAdd_CheckNone, context, lhs, rhs));
+ var_type_feedback = SmiConstant(BinaryOperationFeedback::kString);
+ var_result =
+ CallBuiltin(Builtins::kStringAdd_CheckNone, context, lhs, rhs);
- Goto(&end);
+ Goto(&end);
+ }
}
}
@@ -175,7 +178,7 @@ Node* BinaryOpAssembler::Generate_AddWithFeedback(Node* context, Node* lhs,
{
// Check if rhs is an oddball. At this point we know lhs is either a
// Smi or number or oddball and rhs is not a number or Smi.
- TNode<Uint16T> rhs_instance_type = LoadInstanceType(rhs);
+ TNode<Uint16T> rhs_instance_type = LoadInstanceType(CAST(rhs));
TNode<BoolT> rhs_is_oddball =
InstanceTypeEqual(rhs_instance_type, ODDBALL_TYPE);
GotoIf(rhs_is_oddball, &call_with_oddball_feedback);
@@ -186,59 +189,58 @@ Node* BinaryOpAssembler::Generate_AddWithFeedback(Node* context, Node* lhs,
{
// Both {lhs} and {rhs} are of BigInt type.
Label bigint_too_big(this);
- var_result.Bind(
- CallBuiltin(Builtins::kBigIntAddNoThrow, context, lhs, rhs));
+ var_result = CallBuiltin(Builtins::kBigIntAddNoThrow, context, lhs, rhs);
// Check for sentinel that signals BigIntTooBig exception.
GotoIf(TaggedIsSmi(var_result.value()), &bigint_too_big);
- var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kBigInt));
+ var_type_feedback = SmiConstant(BinaryOperationFeedback::kBigInt);
Goto(&end);
BIND(&bigint_too_big);
{
// Update feedback to prevent deopt loop.
UpdateFeedback(SmiConstant(BinaryOperationFeedback::kAny),
- feedback_vector, slot_id);
+ maybe_feedback_vector, slot_id);
ThrowRangeError(context, MessageTemplate::kBigIntTooBig);
}
}
BIND(&call_with_oddball_feedback);
{
- var_type_feedback.Bind(
- SmiConstant(BinaryOperationFeedback::kNumberOrOddball));
+ var_type_feedback = SmiConstant(BinaryOperationFeedback::kNumberOrOddball);
Goto(&call_add_stub);
}
BIND(&call_with_any_feedback);
{
- var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kAny));
+ var_type_feedback = SmiConstant(BinaryOperationFeedback::kAny);
Goto(&call_add_stub);
}
BIND(&call_add_stub);
{
- var_result.Bind(CallBuiltin(Builtins::kAdd, context, lhs, rhs));
+ var_result = CallBuiltin(Builtins::kAdd, context, lhs, rhs);
Goto(&end);
}
BIND(&end);
- UpdateFeedback(var_type_feedback.value(), feedback_vector, slot_id);
+ UpdateFeedback(var_type_feedback.value(), maybe_feedback_vector, slot_id);
return var_result.value();
}
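Review note: every UpdateFeedback call in this function now records into maybe_feedback_vector, and the rhs_is_smi -> rhs_known_smi rename better matches the meaning (the Smi-suffixed bytecodes guarantee it). The feedback values themselves behave like a small lattice that only widens; a hedged model (bit encodings illustrative, not V8's BinaryOperationFeedback values):

#include <cstdint>

enum Feedback : uint8_t {  // illustrative encodings only
  kNone = 0x00, kSignedSmall = 0x01, kNumber = 0x03,
  kNumberOrOddball = 0x07, kString = 0x08, kBigInt = 0x10, kAny = 0x1f,
};

// Feedback only ever widens: joining with a previous observation can add
// bits but never remove them, which is what prevents deopt loops.
inline Feedback Join(Feedback a, Feedback b) {
  return static_cast<Feedback>(a | b);
}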
-Node* BinaryOpAssembler::Generate_BinaryOperationWithFeedback(
- Node* context, Node* lhs, Node* rhs, Node* slot_id, Node* feedback_vector,
+TNode<Object> BinaryOpAssembler::Generate_BinaryOperationWithFeedback(
+ TNode<Context> context, TNode<Object> lhs, TNode<Object> rhs,
+ TNode<UintPtrT> slot_id, TNode<HeapObject> maybe_feedback_vector,
const SmiOperation& smiOperation, const FloatOperation& floatOperation,
- Operation op, bool rhs_is_smi) {
+ Operation op, bool rhs_known_smi) {
Label do_float_operation(this), end(this), call_stub(this),
check_rhsisoddball(this, Label::kDeferred), call_with_any_feedback(this),
if_lhsisnotnumber(this, Label::kDeferred),
if_bigint(this, Label::kDeferred);
- VARIABLE(var_float_lhs, MachineRepresentation::kFloat64);
- VARIABLE(var_float_rhs, MachineRepresentation::kFloat64);
- VARIABLE(var_type_feedback, MachineRepresentation::kTaggedSigned);
- VARIABLE(var_result, MachineRepresentation::kTagged);
+ TVARIABLE(Float64T, var_float_lhs);
+ TVARIABLE(Float64T, var_float_rhs);
+ TVARIABLE(Smi, var_type_feedback);
+ TVARIABLE(Object, var_result);
Label if_lhsissmi(this);
// If rhs is known to be an Smi (in the SubSmi, MulSmi, DivSmi, ModSmi
@@ -246,25 +248,28 @@ Node* BinaryOpAssembler::Generate_BinaryOperationWithFeedback(
// operation, we want to fast path both Smi and Number operations, so this
// path should not be marked as Deferred.
Label if_lhsisnotsmi(this,
- rhs_is_smi ? Label::kDeferred : Label::kNonDeferred);
+ rhs_known_smi ? Label::kDeferred : Label::kNonDeferred);
Branch(TaggedIsNotSmi(lhs), &if_lhsisnotsmi, &if_lhsissmi);
// Check if the {lhs} is a Smi or a HeapObject.
BIND(&if_lhsissmi);
{
Comment("lhs is Smi");
- if (!rhs_is_smi) {
+ TNode<Smi> lhs_smi = CAST(lhs);
+ if (!rhs_known_smi) {
// Check if the {rhs} is also a Smi.
Label if_rhsissmi(this), if_rhsisnotsmi(this);
Branch(TaggedIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);
+
BIND(&if_rhsisnotsmi);
{
// Check if {rhs} is a HeapNumber.
- GotoIfNot(IsHeapNumber(rhs), &check_rhsisoddball);
+ TNode<HeapObject> rhs_heap_object = CAST(rhs);
+ GotoIfNot(IsHeapNumber(rhs_heap_object), &check_rhsisoddball);
// Perform a floating point operation.
- var_float_lhs.Bind(SmiToFloat64(lhs));
- var_float_rhs.Bind(LoadHeapNumberValue(rhs));
+ var_float_lhs = SmiToFloat64(lhs_smi);
+ var_float_rhs = LoadHeapNumberValue(rhs_heap_object);
Goto(&do_float_operation);
}
@@ -273,7 +278,7 @@ Node* BinaryOpAssembler::Generate_BinaryOperationWithFeedback(
{
Comment("perform smi operation");
- var_result.Bind(smiOperation(lhs, rhs, &var_type_feedback));
+ var_result = smiOperation(lhs_smi, CAST(rhs), &var_type_feedback);
Goto(&end);
}
}
@@ -282,9 +287,10 @@ Node* BinaryOpAssembler::Generate_BinaryOperationWithFeedback(
{
Comment("lhs is not Smi");
// Check if the {lhs} is a HeapNumber.
- GotoIfNot(IsHeapNumber(lhs), &if_lhsisnotnumber);
+ TNode<HeapObject> lhs_heap_object = CAST(lhs);
+ GotoIfNot(IsHeapNumber(lhs_heap_object), &if_lhsisnotnumber);
- if (!rhs_is_smi) {
+ if (!rhs_known_smi) {
// Check if the {rhs} is a Smi.
Label if_rhsissmi(this), if_rhsisnotsmi(this);
Branch(TaggedIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);
@@ -292,11 +298,12 @@ Node* BinaryOpAssembler::Generate_BinaryOperationWithFeedback(
BIND(&if_rhsisnotsmi);
{
// Check if the {rhs} is a HeapNumber.
- GotoIfNot(IsHeapNumber(rhs), &check_rhsisoddball);
+ TNode<HeapObject> rhs_heap_object = CAST(rhs);
+ GotoIfNot(IsHeapNumber(rhs_heap_object), &check_rhsisoddball);
// Perform a floating point operation.
- var_float_lhs.Bind(LoadHeapNumberValue(lhs));
- var_float_rhs.Bind(LoadHeapNumberValue(rhs));
+ var_float_lhs = LoadHeapNumberValue(lhs_heap_object);
+ var_float_rhs = LoadHeapNumberValue(rhs_heap_object);
Goto(&do_float_operation);
}
@@ -305,19 +312,19 @@ Node* BinaryOpAssembler::Generate_BinaryOperationWithFeedback(
{
      // Perform a floating point operation.
- var_float_lhs.Bind(LoadHeapNumberValue(lhs));
- var_float_rhs.Bind(SmiToFloat64(rhs));
+ var_float_lhs = LoadHeapNumberValue(lhs_heap_object);
+ var_float_rhs = SmiToFloat64(CAST(rhs));
Goto(&do_float_operation);
}
}
BIND(&do_float_operation);
{
- var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kNumber));
- Node* lhs_value = var_float_lhs.value();
- Node* rhs_value = var_float_rhs.value();
- Node* value = floatOperation(lhs_value, rhs_value);
- var_result.Bind(AllocateHeapNumberWithValue(value));
+ var_type_feedback = SmiConstant(BinaryOperationFeedback::kNumber);
+ TNode<Float64T> lhs_value = var_float_lhs.value();
+ TNode<Float64T> rhs_value = var_float_rhs.value();
+ TNode<Float64T> value = floatOperation(lhs_value, rhs_value);
+ var_result = AllocateHeapNumberWithValue(value);
Goto(&end);
}
@@ -325,7 +332,7 @@ Node* BinaryOpAssembler::Generate_BinaryOperationWithFeedback(
{
// No checks on rhs are done yet. We just know lhs is not a number or Smi.
Label if_left_bigint(this), if_left_oddball(this);
- TNode<Uint16T> lhs_instance_type = LoadInstanceType(lhs);
+ TNode<Uint16T> lhs_instance_type = LoadInstanceType(CAST(lhs));
GotoIf(IsBigIntInstanceType(lhs_instance_type), &if_left_bigint);
TNode<BoolT> lhs_is_oddball =
InstanceTypeEqual(lhs_instance_type, ODDBALL_TYPE);
@@ -338,18 +345,18 @@ Node* BinaryOpAssembler::Generate_BinaryOperationWithFeedback(
BIND(&if_rhsissmi);
{
- var_type_feedback.Bind(
- SmiConstant(BinaryOperationFeedback::kNumberOrOddball));
+ var_type_feedback =
+ SmiConstant(BinaryOperationFeedback::kNumberOrOddball);
Goto(&call_stub);
}
BIND(&if_rhsisnotsmi);
{
// Check if {rhs} is a HeapNumber.
- GotoIfNot(IsHeapNumber(rhs), &check_rhsisoddball);
+ GotoIfNot(IsHeapNumber(CAST(rhs)), &check_rhsisoddball);
- var_type_feedback.Bind(
- SmiConstant(BinaryOperationFeedback::kNumberOrOddball));
+ var_type_feedback =
+ SmiConstant(BinaryOperationFeedback::kNumberOrOddball);
Goto(&call_stub);
}
}
@@ -357,7 +364,7 @@ Node* BinaryOpAssembler::Generate_BinaryOperationWithFeedback(
BIND(&if_left_bigint);
{
GotoIf(TaggedIsSmi(rhs), &call_with_any_feedback);
- Branch(IsBigInt(rhs), &if_bigint, &call_with_any_feedback);
+ Branch(IsBigInt(CAST(rhs)), &if_bigint, &call_with_any_feedback);
}
}
@@ -365,39 +372,38 @@ Node* BinaryOpAssembler::Generate_BinaryOperationWithFeedback(
{
// Check if rhs is an oddball. At this point we know lhs is either a
// Smi or number or oddball and rhs is not a number or Smi.
- TNode<Uint16T> rhs_instance_type = LoadInstanceType(rhs);
+ TNode<Uint16T> rhs_instance_type = LoadInstanceType(CAST(rhs));
GotoIf(IsBigIntInstanceType(rhs_instance_type), &if_bigint);
TNode<BoolT> rhs_is_oddball =
InstanceTypeEqual(rhs_instance_type, ODDBALL_TYPE);
GotoIfNot(rhs_is_oddball, &call_with_any_feedback);
- var_type_feedback.Bind(
- SmiConstant(BinaryOperationFeedback::kNumberOrOddball));
+ var_type_feedback = SmiConstant(BinaryOperationFeedback::kNumberOrOddball);
Goto(&call_stub);
}
// This handles the case where at least one input is a BigInt.
BIND(&if_bigint);
{
- var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kBigInt));
+ var_type_feedback = SmiConstant(BinaryOperationFeedback::kBigInt);
if (op == Operation::kAdd) {
- var_result.Bind(CallBuiltin(Builtins::kBigIntAdd, context, lhs, rhs));
+ var_result = CallBuiltin(Builtins::kBigIntAdd, context, lhs, rhs);
} else {
- var_result.Bind(CallRuntime(Runtime::kBigIntBinaryOp, context, lhs, rhs,
- SmiConstant(op)));
+ var_result = CallRuntime(Runtime::kBigIntBinaryOp, context, lhs, rhs,
+ SmiConstant(op));
}
Goto(&end);
}
BIND(&call_with_any_feedback);
{
- var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kAny));
+ var_type_feedback = SmiConstant(BinaryOperationFeedback::kAny);
Goto(&call_stub);
}
BIND(&call_stub);
{
- Node* result;
+ TNode<Object> result;
switch (op) {
case Operation::kSubtract:
result = CallBuiltin(Builtins::kSubtract, context, lhs, rhs);
@@ -414,34 +420,35 @@ Node* BinaryOpAssembler::Generate_BinaryOperationWithFeedback(
default:
UNREACHABLE();
}
- var_result.Bind(result);
+ var_result = result;
Goto(&end);
}
BIND(&end);
- UpdateFeedback(var_type_feedback.value(), feedback_vector, slot_id);
+ UpdateFeedback(var_type_feedback.value(), maybe_feedback_vector, slot_id);
return var_result.value();
}
-Node* BinaryOpAssembler::Generate_SubtractWithFeedback(Node* context, Node* lhs,
- Node* rhs, Node* slot_id,
- Node* feedback_vector,
- bool rhs_is_smi) {
- auto smiFunction = [=](Node* lhs, Node* rhs, Variable* var_type_feedback) {
+TNode<Object> BinaryOpAssembler::Generate_SubtractWithFeedback(
+ TNode<Context> context, TNode<Object> lhs, TNode<Object> rhs,
+ TNode<UintPtrT> slot_id, TNode<HeapObject> maybe_feedback_vector,
+ bool rhs_known_smi) {
+ auto smiFunction = [=](TNode<Smi> lhs, TNode<Smi> rhs,
+ TVariable<Smi>* var_type_feedback) {
Label end(this);
TVARIABLE(Number, var_result);
// If rhs is known to be an Smi (for SubSmi) we want to fast path Smi
// operation. For the normal Sub operation, we want to fast path both
// Smi and Number operations, so this path should not be marked as Deferred.
Label if_overflow(this,
- rhs_is_smi ? Label::kDeferred : Label::kNonDeferred);
- var_result = TrySmiSub(CAST(lhs), CAST(rhs), &if_overflow);
- var_type_feedback->Bind(SmiConstant(BinaryOperationFeedback::kSignedSmall));
+ rhs_known_smi ? Label::kDeferred : Label::kNonDeferred);
+ var_result = TrySmiSub(lhs, rhs, &if_overflow);
+ *var_type_feedback = SmiConstant(BinaryOperationFeedback::kSignedSmall);
Goto(&end);
BIND(&if_overflow);
{
- var_type_feedback->Bind(SmiConstant(BinaryOperationFeedback::kNumber));
+ *var_type_feedback = SmiConstant(BinaryOperationFeedback::kNumber);
TNode<Float64T> value = Float64Sub(SmiToFloat64(lhs), SmiToFloat64(rhs));
var_result = AllocateHeapNumberWithValue(value);
Goto(&end);
@@ -450,91 +457,97 @@ Node* BinaryOpAssembler::Generate_SubtractWithFeedback(Node* context, Node* lhs,
BIND(&end);
return var_result.value();
};
- auto floatFunction = [=](Node* lhs, Node* rhs) {
+ auto floatFunction = [=](TNode<Float64T> lhs, TNode<Float64T> rhs) {
return Float64Sub(lhs, rhs);
};
return Generate_BinaryOperationWithFeedback(
- context, lhs, rhs, slot_id, feedback_vector, smiFunction, floatFunction,
- Operation::kSubtract, rhs_is_smi);
+ context, lhs, rhs, slot_id, maybe_feedback_vector, smiFunction,
+ floatFunction, Operation::kSubtract, rhs_known_smi);
}
-Node* BinaryOpAssembler::Generate_MultiplyWithFeedback(Node* context, Node* lhs,
- Node* rhs, Node* slot_id,
- Node* feedback_vector,
- bool rhs_is_smi) {
- auto smiFunction = [=](Node* lhs, Node* rhs, Variable* var_type_feedback) {
- TNode<Number> result = SmiMul(CAST(lhs), CAST(rhs));
- var_type_feedback->Bind(SelectSmiConstant(
+TNode<Object> BinaryOpAssembler::Generate_MultiplyWithFeedback(
+ TNode<Context> context, TNode<Object> lhs, TNode<Object> rhs,
+ TNode<UintPtrT> slot_id, TNode<HeapObject> maybe_feedback_vector,
+ bool rhs_known_smi) {
+ auto smiFunction = [=](TNode<Smi> lhs, TNode<Smi> rhs,
+ TVariable<Smi>* var_type_feedback) {
+ TNode<Number> result = SmiMul(lhs, rhs);
+ *var_type_feedback = SelectSmiConstant(
TaggedIsSmi(result), BinaryOperationFeedback::kSignedSmall,
- BinaryOperationFeedback::kNumber));
+ BinaryOperationFeedback::kNumber);
return result;
};
- auto floatFunction = [=](Node* lhs, Node* rhs) {
+ auto floatFunction = [=](TNode<Float64T> lhs, TNode<Float64T> rhs) {
return Float64Mul(lhs, rhs);
};
return Generate_BinaryOperationWithFeedback(
- context, lhs, rhs, slot_id, feedback_vector, smiFunction, floatFunction,
- Operation::kMultiply, rhs_is_smi);
+ context, lhs, rhs, slot_id, maybe_feedback_vector, smiFunction,
+ floatFunction, Operation::kMultiply, rhs_known_smi);
}
-Node* BinaryOpAssembler::Generate_DivideWithFeedback(
- Node* context, Node* dividend, Node* divisor, Node* slot_id,
- Node* feedback_vector, bool rhs_is_smi) {
- auto smiFunction = [=](Node* lhs, Node* rhs, Variable* var_type_feedback) {
- VARIABLE(var_result, MachineRepresentation::kTagged);
+TNode<Object> BinaryOpAssembler::Generate_DivideWithFeedback(
+ TNode<Context> context, TNode<Object> dividend, TNode<Object> divisor,
+ TNode<UintPtrT> slot_id, TNode<HeapObject> maybe_feedback_vector,
+ bool rhs_known_smi) {
+ auto smiFunction = [=](TNode<Smi> lhs, TNode<Smi> rhs,
+ TVariable<Smi>* var_type_feedback) {
+ TVARIABLE(Object, var_result);
// If rhs is known to be an Smi (for DivSmi) we want to fast path Smi
// operation. For the normal Div operation, we want to fast path both
// Smi and Number operations, so this path should not be marked as Deferred.
- Label bailout(this, rhs_is_smi ? Label::kDeferred : Label::kNonDeferred),
+ Label bailout(this, rhs_known_smi ? Label::kDeferred : Label::kNonDeferred),
end(this);
- var_result.Bind(TrySmiDiv(CAST(lhs), CAST(rhs), &bailout));
- var_type_feedback->Bind(SmiConstant(BinaryOperationFeedback::kSignedSmall));
+ var_result = TrySmiDiv(lhs, rhs, &bailout);
+ *var_type_feedback = SmiConstant(BinaryOperationFeedback::kSignedSmall);
Goto(&end);
BIND(&bailout);
{
- var_type_feedback->Bind(
- SmiConstant(BinaryOperationFeedback::kSignedSmallInputs));
+ *var_type_feedback =
+ SmiConstant(BinaryOperationFeedback::kSignedSmallInputs);
TNode<Float64T> value = Float64Div(SmiToFloat64(lhs), SmiToFloat64(rhs));
- var_result.Bind(AllocateHeapNumberWithValue(value));
+ var_result = AllocateHeapNumberWithValue(value);
Goto(&end);
}
BIND(&end);
return var_result.value();
};
- auto floatFunction = [=](Node* lhs, Node* rhs) {
+ auto floatFunction = [=](TNode<Float64T> lhs, TNode<Float64T> rhs) {
return Float64Div(lhs, rhs);
};
return Generate_BinaryOperationWithFeedback(
- context, dividend, divisor, slot_id, feedback_vector, smiFunction,
- floatFunction, Operation::kDivide, rhs_is_smi);
+ context, dividend, divisor, slot_id, maybe_feedback_vector, smiFunction,
+ floatFunction, Operation::kDivide, rhs_known_smi);
}
-Node* BinaryOpAssembler::Generate_ModulusWithFeedback(
- Node* context, Node* dividend, Node* divisor, Node* slot_id,
- Node* feedback_vector, bool rhs_is_smi) {
- auto smiFunction = [=](Node* lhs, Node* rhs, Variable* var_type_feedback) {
- TNode<Number> result = SmiMod(CAST(lhs), CAST(rhs));
- var_type_feedback->Bind(SelectSmiConstant(
+TNode<Object> BinaryOpAssembler::Generate_ModulusWithFeedback(
+ TNode<Context> context, TNode<Object> dividend, TNode<Object> divisor,
+ TNode<UintPtrT> slot_id, TNode<HeapObject> maybe_feedback_vector,
+ bool rhs_known_smi) {
+ auto smiFunction = [=](TNode<Smi> lhs, TNode<Smi> rhs,
+ TVariable<Smi>* var_type_feedback) {
+ TNode<Number> result = SmiMod(lhs, rhs);
+ *var_type_feedback = SelectSmiConstant(
TaggedIsSmi(result), BinaryOperationFeedback::kSignedSmall,
- BinaryOperationFeedback::kNumber));
+ BinaryOperationFeedback::kNumber);
return result;
};
- auto floatFunction = [=](Node* lhs, Node* rhs) {
+ auto floatFunction = [=](TNode<Float64T> lhs, TNode<Float64T> rhs) {
return Float64Mod(lhs, rhs);
};
return Generate_BinaryOperationWithFeedback(
- context, dividend, divisor, slot_id, feedback_vector, smiFunction,
- floatFunction, Operation::kModulus, rhs_is_smi);
+ context, dividend, divisor, slot_id, maybe_feedback_vector, smiFunction,
+ floatFunction, Operation::kModulus, rhs_known_smi);
}
-Node* BinaryOpAssembler::Generate_ExponentiateWithFeedback(
- Node* context, Node* base, Node* exponent, Node* slot_id,
- Node* feedback_vector, bool rhs_is_smi) {
+TNode<Object> BinaryOpAssembler::Generate_ExponentiateWithFeedback(
+ TNode<Context> context, TNode<Object> base, TNode<Object> exponent,
+ TNode<UintPtrT> slot_id, TNode<HeapObject> maybe_feedback_vector,
+ bool rhs_known_smi) {
// We currently don't optimize exponentiation based on feedback.
TNode<Smi> dummy_feedback = SmiConstant(BinaryOperationFeedback::kAny);
- UpdateFeedback(dummy_feedback, feedback_vector, slot_id);
+ UpdateFeedback(dummy_feedback, maybe_feedback_vector, slot_id);
return CallBuiltin(Builtins::kExponentiate, context, base, exponent);
}
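
The hunks above are the bulk of this file's change: a mechanical migration from untyped compiler::Node* values with VARIABLE(...).Bind(...) pairs to TNode<T> and TVARIABLE, so that type mismatches in the assembler graph fail at compile time instead of surfacing later, with the added CAST(...) calls performing the checked down-casts the stricter signatures now require. As a rough, self-contained sketch of the wrapper idea (Node, TNode, and TVariable below are simplified stand-ins for illustration, not V8's real definitions):

    #include <cassert>

    // Illustrative stand-in; V8's real graph node carries far more state.
    struct Node { int opcode; };

    // A phantom-typed handle to an untyped graph node. The tag type T
    // exists purely at compile time and is never instantiated.
    template <typename T>
    class TNode {
     public:
      explicit TNode(Node* node) : node_(node) {}
      Node* raw() const { return node_; }
     private:
      Node* node_;
    };

    // A typed variable: plain assignment replaces the old var.Bind(node)
    // call, so binding a value with the wrong tag no longer compiles.
    template <typename T>
    class TVariable {
     public:
      TVariable& operator=(TNode<T> value) {
        node_ = value.raw();
        return *this;
      }
      TNode<T> value() const { return TNode<T>(node_); }
     private:
      Node* node_ = nullptr;
    };

    struct Smi {};       // Compile-time tags only.
    struct Float64T {};

    int main() {
      Node n{0};
      TVariable<Smi> var_result;
      var_result = TNode<Smi>(&n);          // OK: tags match.
      // var_result = TNode<Float64T>(&n);  // Error: tags differ.
      assert(var_result.value().raw() == &n);
    }

Because the tags are phantom types, refactors like the rhs_is_smi to rhs_known_smi rename and the parameter tightening in the header diff below become compile-checked rather than convention-checked.
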
diff --git a/deps/v8/src/ic/binary-op-assembler.h b/deps/v8/src/ic/binary-op-assembler.h
index 26324660c8..37484909d4 100644
--- a/deps/v8/src/ic/binary-op-assembler.h
+++ b/deps/v8/src/ic/binary-op-assembler.h
@@ -17,44 +17,50 @@ class CodeAssemblerState;
class BinaryOpAssembler : public CodeStubAssembler {
public:
- using Node = compiler::Node;
-
explicit BinaryOpAssembler(compiler::CodeAssemblerState* state)
: CodeStubAssembler(state) {}
- Node* Generate_AddWithFeedback(Node* context, Node* lhs, Node* rhs,
- Node* slot_id, Node* feedback_vector,
- bool rhs_is_smi);
+ TNode<Object> Generate_AddWithFeedback(
+ TNode<Context> context, TNode<Object> left, TNode<Object> right,
+ TNode<UintPtrT> slot, TNode<HeapObject> maybe_feedback_vector,
+ bool rhs_known_smi);
- Node* Generate_SubtractWithFeedback(Node* context, Node* lhs, Node* rhs,
- Node* slot_id, Node* feedback_vector,
- bool rhs_is_smi);
+ TNode<Object> Generate_SubtractWithFeedback(
+ TNode<Context> context, TNode<Object> left, TNode<Object> right,
+ TNode<UintPtrT> slot, TNode<HeapObject> maybe_feedback_vector,
+ bool rhs_known_smi);
- Node* Generate_MultiplyWithFeedback(Node* context, Node* lhs, Node* rhs,
- Node* slot_id, Node* feedback_vector,
- bool rhs_is_smi);
+ TNode<Object> Generate_MultiplyWithFeedback(
+ TNode<Context> context, TNode<Object> left, TNode<Object> right,
+ TNode<UintPtrT> slot, TNode<HeapObject> maybe_feedback_vector,
+ bool rhs_known_smi);
- Node* Generate_DivideWithFeedback(Node* context, Node* dividend,
- Node* divisor, Node* slot_id,
- Node* feedback_vector, bool rhs_is_smi);
+ TNode<Object> Generate_DivideWithFeedback(
+ TNode<Context> context, TNode<Object> dividend, TNode<Object> divisor,
+ TNode<UintPtrT> slot, TNode<HeapObject> maybe_feedback_vector,
+ bool rhs_known_smi);
- Node* Generate_ModulusWithFeedback(Node* context, Node* dividend,
- Node* divisor, Node* slot_id,
- Node* feedback_vector, bool rhs_is_smi);
+ TNode<Object> Generate_ModulusWithFeedback(
+ TNode<Context> context, TNode<Object> dividend, TNode<Object> divisor,
+ TNode<UintPtrT> slot, TNode<HeapObject> maybe_feedback_vector,
+ bool rhs_known_smi);
- Node* Generate_ExponentiateWithFeedback(Node* context, Node* dividend,
- Node* divisor, Node* slot_id,
- Node* feedback_vector,
- bool rhs_is_smi);
+ TNode<Object> Generate_ExponentiateWithFeedback(
+ TNode<Context> context, TNode<Object> base, TNode<Object> exponent,
+ TNode<UintPtrT> slot, TNode<HeapObject> maybe_feedback_vector,
+ bool rhs_known_smi);
private:
- using SmiOperation = std::function<Node*(Node*, Node*, Variable*)>;
- using FloatOperation = std::function<Node*(Node*, Node*)>;
-
- Node* Generate_BinaryOperationWithFeedback(
- Node* context, Node* lhs, Node* rhs, Node* slot_id, Node* feedback_vector,
+ using SmiOperation =
+ std::function<TNode<Object>(TNode<Smi>, TNode<Smi>, TVariable<Smi>*)>;
+ using FloatOperation =
+ std::function<TNode<Float64T>(TNode<Float64T>, TNode<Float64T>)>;
+
+ TNode<Object> Generate_BinaryOperationWithFeedback(
+ TNode<Context> context, TNode<Object> left, TNode<Object> right,
+ TNode<UintPtrT> slot, TNode<HeapObject> maybe_feedback_vector,
const SmiOperation& smiOperation, const FloatOperation& floatOperation,
- Operation op, bool rhs_is_smi);
+ Operation op, bool rhs_known_smi);
};
} // namespace internal
diff --git a/deps/v8/src/ic/handler-configuration-inl.h b/deps/v8/src/ic/handler-configuration-inl.h
index c0ff8a4c9b..95ef353277 100644
--- a/deps/v8/src/ic/handler-configuration-inl.h
+++ b/deps/v8/src/ic/handler-configuration-inl.h
@@ -43,6 +43,11 @@ Handle<Smi> LoadHandler::LoadInterceptor(Isolate* isolate) {
return handle(Smi::FromInt(config), isolate);
}
+Handle<Smi> LoadHandler::LoadSlow(Isolate* isolate) {
+ int config = KindBits::encode(kSlow);
+ return handle(Smi::FromInt(config), isolate);
+}
+
Handle<Smi> LoadHandler::LoadField(Isolate* isolate, FieldIndex field_index) {
int config = KindBits::encode(kField) |
IsInobjectBits::encode(field_index.is_inobject()) |
@@ -127,6 +132,16 @@ Handle<Smi> StoreHandler::StoreNormal(Isolate* isolate) {
return handle(Smi::FromInt(config), isolate);
}
+Handle<Smi> StoreHandler::StoreInterceptor(Isolate* isolate) {
+ int config = KindBits::encode(kInterceptor);
+ return handle(Smi::FromInt(config), isolate);
+}
+
+Handle<Smi> StoreHandler::StoreSlow(Isolate* isolate) {
+ int config = KindBits::encode(kSlow);
+ return handle(Smi::FromInt(config), isolate);
+}
+
Handle<Smi> StoreHandler::StoreProxy(Isolate* isolate) {
int config = KindBits::encode(kProxy);
return handle(Smi::FromInt(config), isolate);
@@ -135,29 +150,12 @@ Handle<Smi> StoreHandler::StoreProxy(Isolate* isolate) {
Handle<Smi> StoreHandler::StoreField(Isolate* isolate, Kind kind,
int descriptor, FieldIndex field_index,
Representation representation) {
- FieldRepresentation field_rep;
- switch (representation.kind()) {
- case Representation::kSmi:
- field_rep = kSmi;
- break;
- case Representation::kDouble:
- field_rep = kDouble;
- break;
- case Representation::kHeapObject:
- field_rep = kHeapObject;
- break;
- case Representation::kTagged:
- field_rep = kTagged;
- break;
- default:
- UNREACHABLE();
- }
-
+ DCHECK(!representation.IsNone());
DCHECK(kind == kField || kind == kConstField);
int config = KindBits::encode(kind) |
IsInobjectBits::encode(field_index.is_inobject()) |
- FieldRepresentationBits::encode(field_rep) |
+ RepresentationBits::encode(representation.kind()) |
DescriptorBits::encode(descriptor) |
FieldIndexBits::encode(field_index.index());
return handle(Smi::FromInt(config), isolate);
diff --git a/deps/v8/src/ic/handler-configuration.cc b/deps/v8/src/ic/handler-configuration.cc
index 814935c6eb..3af5fe4953 100644
--- a/deps/v8/src/ic/handler-configuration.cc
+++ b/deps/v8/src/ic/handler-configuration.cc
@@ -196,7 +196,7 @@ MaybeObjectHandle StoreHandler::StoreTransition(Isolate* isolate,
bool is_dictionary_map = transition_map->is_dictionary_map();
#ifdef DEBUG
if (!is_dictionary_map) {
- int descriptor = transition_map->LastAdded();
+ InternalIndex descriptor = transition_map->LastAdded();
Handle<DescriptorArray> descriptors(transition_map->instance_descriptors(),
isolate);
PropertyDetails details = descriptors->GetDetails(descriptor);
diff --git a/deps/v8/src/ic/handler-configuration.h b/deps/v8/src/ic/handler-configuration.h
index 80d19d73ec..fd0cee2920 100644
--- a/deps/v8/src/ic/handler-configuration.h
+++ b/deps/v8/src/ic/handler-configuration.h
@@ -43,6 +43,7 @@ class LoadHandler final : public DataHandler {
kApiGetter,
kApiGetterHolderIsPrototype,
kInterceptor,
+ kSlow,
kProxy,
kNonExistent,
kModuleExport
@@ -113,6 +114,9 @@ class LoadHandler final : public DataHandler {
// interceptor.
static inline Handle<Smi> LoadInterceptor(Isolate* isolate);
+  // Creates a Smi-handler for loading a property from an object.
+ static inline Handle<Smi> LoadSlow(Isolate* isolate);
+
  // Creates a Smi-handler for loading a field from a fast object.
static inline Handle<Smi> LoadField(Isolate* isolate, FieldIndex field_index);
@@ -197,13 +201,13 @@ class StoreHandler final : public DataHandler {
kApiSetterHolderIsPrototype,
kGlobalProxy,
kNormal,
+ kInterceptor,
+ kSlow,
kProxy,
kKindsNumber // Keep last
};
using KindBits = BitField<Kind, 0, 4>;
- enum FieldRepresentation { kSmi, kDouble, kHeapObject, kTagged };
-
// Applicable to kGlobalProxy, kProxy kinds.
// Defines whether access rights check should be done on receiver object.
@@ -231,10 +235,10 @@ class StoreHandler final : public DataHandler {
// Encoding when KindBits contains kField or kTransitionToField.
//
using IsInobjectBits = DescriptorBits::Next<bool, 1>;
- using FieldRepresentationBits = IsInobjectBits::Next<FieldRepresentation, 2>;
+ using RepresentationBits = IsInobjectBits::Next<Representation::Kind, 3>;
// +1 here is to cover all possible JSObject header sizes.
using FieldIndexBits =
- FieldRepresentationBits::Next<unsigned, kDescriptorIndexBitCount + 1>;
+ RepresentationBits::Next<unsigned, kDescriptorIndexBitCount + 1>;
// Make sure we don't overflow the smi.
STATIC_ASSERT(FieldIndexBits::kLastUsedBit < kSmiValueSize);
@@ -283,6 +287,12 @@ class StoreHandler final : public DataHandler {
// Creates a Smi-handler for storing a property to a slow object.
static inline Handle<Smi> StoreNormal(Isolate* isolate);
+ // Creates a Smi-handler for storing a property to an interceptor.
+ static inline Handle<Smi> StoreInterceptor(Isolate* isolate);
+
+  // Creates a Smi-handler for storing a property via the slow path.
+ static inline Handle<Smi> StoreSlow(Isolate* isolate);
+
// Creates a Smi-handler for storing a property on a proxy.
static inline Handle<Smi> StoreProxy(Isolate* isolate);
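
The new LoadSlow, StoreInterceptor, and StoreSlow handlers above all follow one scheme: the handler is not a code object but a Smi whose payload packs a handler description through chained bit fields (KindBits, IsInobjectBits, and now RepresentationBits in place of the removed FieldRepresentationBits). A minimal sketch of that encode/decode pattern, with made-up field widths rather than the real layout from handler-configuration.h:

    #include <cassert>
    #include <cstdint>

    // Minimal bit-field packer in the spirit of V8's BitField; widths
    // below are illustrative only.
    template <typename T, int kShift, int kSize>
    struct BitField {
      static constexpr uint32_t kMask =
          ((uint32_t{1} << kSize) - 1) << kShift;
      static constexpr uint32_t encode(T value) {
        // V8's real encoder also checks that |value| fits in kSize bits.
        return static_cast<uint32_t>(value) << kShift;
      }
      static constexpr T decode(uint32_t packed) {
        return static_cast<T>((packed & kMask) >> kShift);
      }
      // Chain the next field directly after this one, as the Next<>
      // aliases in handler-configuration.h do.
      template <typename U, int kUSize>
      using Next = BitField<U, kShift + kSize, kUSize>;
    };

    enum Kind : uint32_t { kField = 0, kInterceptor = 1, kSlow = 2 };
    using KindBits = BitField<Kind, 0, 4>;
    using IsInobjectBits = KindBits::Next<bool, 1>;

    int main() {
      uint32_t config =
          KindBits::encode(kSlow) | IsInobjectBits::encode(true);
      assert(KindBits::decode(config) == kSlow);
      assert(IsInobjectBits::decode(config));
    }

Widening one field shifts every later one, which is why the diff re-derives FieldIndexBits via Next<> after growing RepresentationBits to 3 bits and re-checks the total against kSmiValueSize.
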
diff --git a/deps/v8/src/ic/ic-stats.cc b/deps/v8/src/ic/ic-stats.cc
index f387239aee..54d4856631 100644
--- a/deps/v8/src/ic/ic-stats.cc
+++ b/deps/v8/src/ic/ic-stats.cc
@@ -94,6 +94,7 @@ ICInfo::ICInfo()
script_offset(0),
script_name(nullptr),
line_num(-1),
+ column_num(-1),
is_constructor(false),
is_optimized(false),
map(nullptr),
@@ -106,6 +107,7 @@ void ICInfo::Reset() {
script_offset = 0;
script_name = nullptr;
line_num = -1;
+ column_num = -1;
is_constructor = false;
is_optimized = false;
state.clear();
@@ -127,6 +129,7 @@ void ICInfo::AppendToTracedValue(v8::tracing::TracedValue* value) const {
if (script_offset) value->SetInteger("offset", script_offset);
if (script_name) value->SetString("scriptName", script_name);
if (line_num != -1) value->SetInteger("lineNum", line_num);
+ if (column_num != -1) value->SetInteger("columnNum", column_num);
if (is_constructor) value->SetInteger("constructor", is_constructor);
if (!state.empty()) value->SetString("state", state);
if (map) {
diff --git a/deps/v8/src/ic/ic-stats.h b/deps/v8/src/ic/ic-stats.h
index 76c65c3862..44b968c6c0 100644
--- a/deps/v8/src/ic/ic-stats.h
+++ b/deps/v8/src/ic/ic-stats.h
@@ -34,6 +34,7 @@ struct ICInfo {
int script_offset;
const char* script_name;
int line_num;
+ int column_num;
bool is_constructor;
bool is_optimized;
std::string state;
diff --git a/deps/v8/src/ic/ic.cc b/deps/v8/src/ic/ic.cc
index 54f4be7a22..4ac5fd7abe 100644
--- a/deps/v8/src/ic/ic.cc
+++ b/deps/v8/src/ic/ic.cc
@@ -15,6 +15,7 @@
#include "src/execution/execution.h"
#include "src/execution/frames-inl.h"
#include "src/execution/isolate-inl.h"
+#include "src/execution/protectors-inl.h"
#include "src/execution/runtime-profiler.h"
#include "src/handles/handles-inl.h"
#include "src/ic/call-optimization.h"
@@ -47,8 +48,6 @@ char IC::TransitionMarkFromState(IC::State state) {
return 'X';
case UNINITIALIZED:
return '0';
- case PREMONOMORPHIC:
- return '.';
case MONOMORPHIC:
return '1';
case RECOMPUTE_HANDLER:
@@ -343,11 +342,6 @@ bool IC::ConfigureVectorState(IC::State new_state, Handle<Object> key) {
return changed;
}
-void IC::ConfigureVectorState(Handle<Map> map) {
- nexus()->ConfigurePremonomorphic(map);
- OnFeedbackChanged("Premonomorphic");
-}
-
void IC::ConfigureVectorState(Handle<Name> name, Handle<Map> map,
Handle<Object> handler) {
ConfigureVectorState(name, map, MaybeObjectHandle(handler));
@@ -383,11 +377,11 @@ MaybeHandle<Object> LoadIC::Load(Handle<Object> object, Handle<Name> name) {
// of its properties; throw a TypeError in that case.
if (IsAnyHas() ? !object->IsJSReceiver()
: object->IsNullOrUndefined(isolate())) {
- if (use_ic && state() != PREMONOMORPHIC) {
+ if (use_ic) {
// Ensure the IC state progresses.
TRACE_HANDLER_STATS(isolate(), LoadIC_NonReceiver);
update_receiver_map(object);
- PatchCache(name, slow_stub());
+ SetCache(name, LoadHandler::LoadSlow(isolate()));
TraceIC("LoadIC", name);
}
@@ -490,7 +484,7 @@ MaybeHandle<Object> LoadGlobalIC::Load(Handle<Name> name) {
} else {
// Given combination of indices can't be encoded, so use slow stub.
TRACE_HANDLER_STATS(isolate(), LoadGlobalIC_SlowStub);
- PatchCache(name, slow_stub());
+ SetCache(name, LoadHandler::LoadSlow(isolate()));
}
TraceIC("LoadGlobalIC", name);
}
@@ -613,11 +607,11 @@ bool IC::IsTransitionOfMonomorphicTarget(Map source_map, Map target_map) {
return transitioned_map == target_map;
}
-void IC::PatchCache(Handle<Name> name, Handle<Object> handler) {
- PatchCache(name, MaybeObjectHandle(handler));
+void IC::SetCache(Handle<Name> name, Handle<Object> handler) {
+ SetCache(name, MaybeObjectHandle(handler));
}
-void IC::PatchCache(Handle<Name> name, const MaybeObjectHandle& handler) {
+void IC::SetCache(Handle<Name> name, const MaybeObjectHandle& handler) {
DCHECK(IsHandler(*handler));
// Currently only load and store ICs support non-code handlers.
DCHECK(IsAnyLoad() || IsAnyStore() || IsAnyHas());
@@ -625,7 +619,6 @@ void IC::PatchCache(Handle<Name> name, const MaybeObjectHandle& handler) {
case NO_FEEDBACK:
UNREACHABLE();
case UNINITIALIZED:
- case PREMONOMORPHIC:
UpdateMonomorphicIC(handler, name);
break;
case RECOMPUTE_HANDLER:
@@ -659,7 +652,7 @@ __attribute__((__aligned__(32)))
void LoadIC::UpdateCaches(LookupIterator* lookup) {
Handle<Object> code;
if (lookup->state() == LookupIterator::ACCESS_CHECK) {
- code = slow_stub();
+ code = LoadHandler::LoadSlow(isolate());
} else if (!lookup->IsFound()) {
TRACE_HANDLER_STATS(isolate(), LoadIC_LoadNonexistentDH);
Handle<Smi> smi_handler = LoadHandler::LoadNonExistent(isolate());
@@ -683,7 +676,7 @@ void LoadIC::UpdateCaches(LookupIterator* lookup) {
code = ComputeHandler(lookup);
}
- PatchCache(lookup->name(), code);
+ SetCache(lookup->name(), code);
TraceIC("LoadIC", lookup->name());
}
@@ -798,7 +791,7 @@ Handle<Object> LoadIC::ComputeHandler(LookupIterator* lookup) {
isolate());
if (!getter->IsJSFunction() && !getter->IsFunctionTemplateInfo()) {
TRACE_HANDLER_STATS(isolate(), LoadIC_SlowStub);
- return slow_stub();
+ return LoadHandler::LoadSlow(isolate());
}
if ((getter->IsFunctionTemplateInfo() &&
@@ -807,7 +800,7 @@ Handle<Object> LoadIC::ComputeHandler(LookupIterator* lookup) {
JSFunction::cast(*getter).shared().BreakAtEntry())) {
// Do not install an IC if the api function has a breakpoint.
TRACE_HANDLER_STATS(isolate(), LoadIC_SlowStub);
- return slow_stub();
+ return LoadHandler::LoadSlow(isolate());
}
Handle<Smi> smi_handler;
@@ -817,7 +810,7 @@ Handle<Object> LoadIC::ComputeHandler(LookupIterator* lookup) {
if (!call_optimization.IsCompatibleReceiverMap(map, holder) ||
!holder->HasFastProperties()) {
TRACE_HANDLER_STATS(isolate(), LoadIC_SlowStub);
- return slow_stub();
+ return LoadHandler::LoadSlow(isolate());
}
CallOptimization::HolderLookup holder_lookup;
@@ -868,7 +861,7 @@ Handle<Object> LoadIC::ComputeHandler(LookupIterator* lookup) {
!holder->HasFastProperties() ||
(info->is_sloppy() && !receiver->IsJSReceiver())) {
TRACE_HANDLER_STATS(isolate(), LoadIC_SlowStub);
- return slow_stub();
+ return LoadHandler::LoadSlow(isolate());
}
Handle<Smi> smi_handler = LoadHandler::LoadNativeDataProperty(
@@ -1076,7 +1069,7 @@ bool AllowConvertHoleElementToUndefined(Isolate* isolate,
}
// For other {receiver}s we need to check the "no elements" protector.
- if (isolate->IsNoElementsProtectorIntact()) {
+ if (Protectors::IsNoElementsIntact(isolate)) {
if (receiver_map->IsStringMap()) {
return true;
}
@@ -1315,12 +1308,11 @@ bool StoreIC::LookupForWrite(LookupIterator* it, Handle<Object> value,
case LookupIterator::INTERCEPTOR: {
Handle<JSObject> holder = it->GetHolder<JSObject>();
InterceptorInfo info = holder->GetNamedInterceptor();
- if (it->HolderIsReceiverOrHiddenPrototype()) {
- return !info.non_masking() && receiver.is_identical_to(holder) &&
- !info.setter().IsUndefined(isolate());
- } else if (!info.getter().IsUndefined(isolate()) ||
- !info.query().IsUndefined(isolate())) {
- return false;
+ if ((it->HolderIsReceiverOrHiddenPrototype() &&
+ !info.non_masking()) ||
+ !info.getter().IsUndefined(isolate()) ||
+ !info.query().IsUndefined(isolate())) {
+ return true;
}
break;
}
@@ -1403,7 +1395,7 @@ MaybeHandle<Object> StoreGlobalIC::Store(Handle<Name> name,
} else {
// Given combination of indices can't be encoded, so use slow stub.
TRACE_HANDLER_STATS(isolate(), StoreGlobalIC_SlowStub);
- PatchCache(name, slow_stub());
+ SetCache(name, StoreHandler::StoreSlow(isolate()));
}
TraceIC("StoreGlobalIC", name);
}
@@ -1432,11 +1424,11 @@ MaybeHandle<Object> StoreIC::Store(Handle<Object> object, Handle<Name> name,
// If the object is undefined or null it's illegal to try to set any
// properties on it; throw a TypeError in that case.
if (object->IsNullOrUndefined(isolate())) {
- if (use_ic && state() != PREMONOMORPHIC) {
+ if (use_ic) {
// Ensure the IC state progresses.
TRACE_HANDLER_STATS(isolate(), StoreIC_NonReceiver);
update_receiver_map(object);
- PatchCache(name, slow_stub());
+ SetCache(name, StoreHandler::StoreSlow(isolate()));
TraceIC("StoreIC", name);
}
return TypeError(MessageTemplate::kNonObjectPropertyStore, object, name);
@@ -1481,30 +1473,11 @@ void StoreIC::UpdateCaches(LookupIterator* lookup, Handle<Object> value,
}
handler = ComputeHandler(lookup);
} else {
- if (state() == UNINITIALIZED && IsStoreGlobalIC() &&
- lookup->state() == LookupIterator::INTERCEPTOR) {
- InterceptorInfo info =
- lookup->GetHolder<JSObject>()->GetNamedInterceptor();
- if (!lookup->HolderIsReceiverOrHiddenPrototype() &&
- !info.getter().IsUndefined(isolate())) {
- // Utilize premonomorphic state for global store ics that run into
- // an interceptor because the property doesn't exist yet.
- // After we actually set the property, we'll have more information.
- // Premonomorphism gives us a chance to find more information the
- // second time.
- TRACE_HANDLER_STATS(isolate(), StoreGlobalIC_Premonomorphic);
- ConfigureVectorState(receiver_map());
- TraceIC("StoreGlobalIC", lookup->name());
- return;
- }
- }
-
set_slow_stub_reason("LookupForWrite said 'false'");
- // TODO(marja): change slow_stub to return MaybeObjectHandle.
- handler = MaybeObjectHandle(slow_stub());
+ handler = MaybeObjectHandle(StoreHandler::StoreSlow(isolate()));
}
- PatchCache(lookup->name(), handler);
+ SetCache(lookup->name(), handler);
TraceIC("StoreIC", lookup->name());
}
@@ -1542,12 +1515,27 @@ MaybeObjectHandle StoreIC::ComputeHandler(LookupIterator* lookup) {
case LookupIterator::INTERCEPTOR: {
Handle<JSObject> holder = lookup->GetHolder<JSObject>();
- USE(holder);
+ InterceptorInfo info = holder->GetNamedInterceptor();
+
+      // If the interceptor is on the receiver,
+ if (lookup->HolderIsReceiverOrHiddenPrototype() && !info.non_masking()) {
+ // return a store interceptor smi handler if there is one,
+ if (!info.setter().IsUndefined(isolate())) {
+ return MaybeObjectHandle(StoreHandler::StoreInterceptor(isolate()));
+ }
+ // otherwise return a slow-case smi handler.
+ return MaybeObjectHandle(StoreHandler::StoreSlow(isolate()));
+ }
- DCHECK(!holder->GetNamedInterceptor().setter().IsUndefined(isolate()));
- // TODO(jgruber): Update counter name.
- TRACE_HANDLER_STATS(isolate(), StoreIC_StoreInterceptorStub);
- return MaybeObjectHandle(BUILTIN_CODE(isolate(), StoreInterceptorIC));
+ // If the interceptor is a getter/query interceptor on the prototype
+ // chain, return an invalidatable slow handler so it can turn fast if the
+ // interceptor is masked by a regular property later.
+ DCHECK(!info.getter().IsUndefined(isolate()) ||
+ !info.query().IsUndefined(isolate()));
+ Handle<Object> handler = StoreHandler::StoreThroughPrototype(
+ isolate(), receiver_map(), holder,
+ StoreHandler::StoreSlow(isolate()));
+ return MaybeObjectHandle(handler);
}
case LookupIterator::ACCESSOR: {
@@ -1559,7 +1547,9 @@ MaybeObjectHandle StoreIC::ComputeHandler(LookupIterator* lookup) {
if (!holder->HasFastProperties()) {
set_slow_stub_reason("accessor on slow map");
TRACE_HANDLER_STATS(isolate(), StoreIC_SlowStub);
- return MaybeObjectHandle(slow_stub());
+ MaybeObjectHandle handler =
+ MaybeObjectHandle(StoreHandler::StoreSlow(isolate()));
+ return handler;
}
Handle<Object> accessors = lookup->GetAccessors();
if (accessors->IsAccessorInfo()) {
@@ -1567,18 +1557,18 @@ MaybeObjectHandle StoreIC::ComputeHandler(LookupIterator* lookup) {
if (v8::ToCData<Address>(info->setter()) == kNullAddress) {
set_slow_stub_reason("setter == kNullAddress");
TRACE_HANDLER_STATS(isolate(), StoreIC_SlowStub);
- return MaybeObjectHandle(slow_stub());
+ return MaybeObjectHandle(StoreHandler::StoreSlow(isolate()));
}
if (AccessorInfo::cast(*accessors).is_special_data_property() &&
!lookup->HolderIsReceiverOrHiddenPrototype()) {
set_slow_stub_reason("special data property in prototype chain");
TRACE_HANDLER_STATS(isolate(), StoreIC_SlowStub);
- return MaybeObjectHandle(slow_stub());
+ return MaybeObjectHandle(StoreHandler::StoreSlow(isolate()));
}
if (!AccessorInfo::IsCompatibleReceiverMap(info, receiver_map())) {
set_slow_stub_reason("incompatible receiver type");
TRACE_HANDLER_STATS(isolate(), StoreIC_SlowStub);
- return MaybeObjectHandle(slow_stub());
+ return MaybeObjectHandle(StoreHandler::StoreSlow(isolate()));
}
Handle<Smi> smi_handler = StoreHandler::StoreNativeDataProperty(
@@ -1598,7 +1588,7 @@ MaybeObjectHandle StoreIC::ComputeHandler(LookupIterator* lookup) {
if (!setter->IsJSFunction() && !setter->IsFunctionTemplateInfo()) {
set_slow_stub_reason("setter not a function");
TRACE_HANDLER_STATS(isolate(), StoreIC_SlowStub);
- return MaybeObjectHandle(slow_stub());
+ return MaybeObjectHandle(StoreHandler::StoreSlow(isolate()));
}
if ((setter->IsFunctionTemplateInfo() &&
@@ -1607,7 +1597,7 @@ MaybeObjectHandle StoreIC::ComputeHandler(LookupIterator* lookup) {
JSFunction::cast(*setter).shared().BreakAtEntry())) {
// Do not install an IC if the api function has a breakpoint.
TRACE_HANDLER_STATS(isolate(), StoreIC_SlowStub);
- return MaybeObjectHandle(slow_stub());
+ return MaybeObjectHandle(StoreHandler::StoreSlow(isolate()));
}
CallOptimization call_optimization(isolate(), setter);
@@ -1631,11 +1621,11 @@ MaybeObjectHandle StoreIC::ComputeHandler(LookupIterator* lookup) {
}
set_slow_stub_reason("incompatible receiver");
TRACE_HANDLER_STATS(isolate(), StoreIC_SlowStub);
- return MaybeObjectHandle(slow_stub());
+ return MaybeObjectHandle(StoreHandler::StoreSlow(isolate()));
} else if (setter->IsFunctionTemplateInfo()) {
set_slow_stub_reason("setter non-simple template");
TRACE_HANDLER_STATS(isolate(), StoreIC_SlowStub);
- return MaybeObjectHandle(slow_stub());
+ return MaybeObjectHandle(StoreHandler::StoreSlow(isolate()));
}
Handle<Smi> smi_handler =
@@ -1651,7 +1641,7 @@ MaybeObjectHandle StoreIC::ComputeHandler(LookupIterator* lookup) {
isolate(), receiver_map(), holder, smi_handler));
}
TRACE_HANDLER_STATS(isolate(), StoreIC_SlowStub);
- return MaybeObjectHandle(slow_stub());
+ return MaybeObjectHandle(StoreHandler::StoreSlow(isolate()));
}
case LookupIterator::DATA: {
@@ -1694,7 +1684,7 @@ MaybeObjectHandle StoreIC::ComputeHandler(LookupIterator* lookup) {
DCHECK_EQ(kDescriptor, lookup->property_details().location());
set_slow_stub_reason("constant property");
TRACE_HANDLER_STATS(isolate(), StoreIC_SlowStub);
- return MaybeObjectHandle(slow_stub());
+ return MaybeObjectHandle(StoreHandler::StoreSlow(isolate()));
}
case LookupIterator::JSPROXY: {
Handle<JSReceiver> receiver =
@@ -1905,7 +1895,7 @@ void KeyedStoreIC::StoreElementPolymorphicHandlers(
// TODO(mvstanton): Consider embedding store_mode in the state of the slow
// keyed store ic for uniformity.
TRACE_HANDLER_STATS(isolate(), KeyedStoreIC_SlowStub);
- handler = slow_stub();
+ handler = StoreHandler::StoreSlow(isolate());
} else {
{
@@ -2532,7 +2522,7 @@ static bool CanFastCloneObject(Handle<Map> map) {
}
DescriptorArray descriptors = map->instance_descriptors();
- for (int i = 0; i < map->NumberOfOwnDescriptors(); i++) {
+ for (InternalIndex i : map->IterateOwnDescriptors()) {
PropertyDetails details = descriptors.GetDetails(i);
Name key = descriptors.GetKey(i);
if (details.kind() != kData || !details.IsEnumerable() ||
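
A smaller change threads through the ic.cc hunks above: descriptor indices move from raw ints to InternalIndex (see the LastAdded() hunk earlier and the IterateOwnDescriptors() loop in CanFastCloneObject), giving them a dedicated type that can be iterated directly. A self-contained sketch of that self-iterating index idiom, with assumed names and shapes rather than V8's actual classes:

    #include <cassert>
    #include <cstddef>

    // Illustrative stand-in for an index type that keeps descriptor
    // indices from mixing with arbitrary ints and acts as its own
    // iterator in range-based for loops.
    class InternalIndex {
     public:
      explicit InternalIndex(std::size_t value) : value_(value) {}
      std::size_t value() const { return value_; }
      bool operator!=(const InternalIndex& other) const {
        return value_ != other.value_;
      }
      InternalIndex& operator++() {
        ++value_;
        return *this;
      }
      InternalIndex operator*() const { return *this; }  // Self-iterating.
     private:
      std::size_t value_;
    };

    // A half-open range [0, size), as something like
    // IterateOwnDescriptors() might return.
    struct InternalIndexRange {
      std::size_t size;
      InternalIndex begin() const { return InternalIndex(0); }
      InternalIndex end() const { return InternalIndex(size); }
    };

    int main() {
      std::size_t sum = 0;
      for (InternalIndex i : InternalIndexRange{3}) sum += i.value();
      assert(sum == 0 + 1 + 2);
    }

Because the index type is its own iterator, the range-based loop needs no separate iterator class, and accidental arithmetic mixing with plain ints becomes a compile error.
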
diff --git a/deps/v8/src/ic/ic.h b/deps/v8/src/ic/ic.h
index 29f3b4a60a..a3c68f4fbf 100644
--- a/deps/v8/src/ic/ic.h
+++ b/deps/v8/src/ic/ic.h
@@ -74,8 +74,6 @@ class IC {
// Configure for most states.
bool ConfigureVectorState(IC::State new_state, Handle<Object> key);
- // Configure the vector for PREMONOMORPHIC.
- void ConfigureVectorState(Handle<Map> map);
// Configure the vector for MONOMORPHIC.
void ConfigureVectorState(Handle<Name> name, Handle<Map> map,
Handle<Object> handler);
@@ -103,8 +101,8 @@ class IC {
void CopyICToMegamorphicCache(Handle<Name> name);
bool IsTransitionOfMonomorphicTarget(Map source_map, Map target_map);
- void PatchCache(Handle<Name> name, Handle<Object> handler);
- void PatchCache(Handle<Name> name, const MaybeObjectHandle& handler);
+ void SetCache(Handle<Name> name, Handle<Object> handler);
+ void SetCache(Handle<Name> name, const MaybeObjectHandle& handler);
FeedbackSlotKind kind() const { return kind_; }
bool IsGlobalIC() const { return IsLoadGlobalIC() || IsStoreGlobalIC(); }
bool IsLoadIC() const { return IsLoadICKind(kind_); }
@@ -188,11 +186,6 @@ class LoadIC : public IC {
Handle<Name> name);
protected:
- virtual Handle<Code> slow_stub() const {
- return IsAnyHas() ? BUILTIN_CODE(isolate(), HasIC_Slow)
- : BUILTIN_CODE(isolate(), LoadIC_Slow);
- }
-
// Update the inline cache and the global stub cache based on the
// lookup result.
void UpdateCaches(LookupIterator* lookup);
@@ -211,11 +204,6 @@ class LoadGlobalIC : public LoadIC {
: LoadIC(isolate, vector, slot, kind) {}
V8_WARN_UNUSED_RESULT MaybeHandle<Object> Load(Handle<Name> name);
-
- protected:
- Handle<Code> slow_stub() const override {
- return BUILTIN_CODE(isolate(), LoadGlobalIC_Slow);
- }
};
class KeyedLoadIC : public LoadIC {
@@ -268,11 +256,6 @@ class StoreIC : public IC {
protected:
// Stub accessors.
- virtual Handle<Code> slow_stub() const {
- // All StoreICs share the same slow stub.
- return BUILTIN_CODE(isolate(), KeyedStoreIC_Slow);
- }
-
// Update the inline cache and the global stub cache based on the
// lookup result.
void UpdateCaches(LookupIterator* lookup, Handle<Object> value,
@@ -292,11 +275,6 @@ class StoreGlobalIC : public StoreIC {
V8_WARN_UNUSED_RESULT MaybeHandle<Object> Store(Handle<Name> name,
Handle<Object> value);
-
- protected:
- Handle<Code> slow_stub() const override {
- return BUILTIN_CODE(isolate(), StoreGlobalIC_Slow);
- }
};
enum KeyedStoreCheckMap { kDontCheckMap, kCheckMap };
@@ -328,10 +306,6 @@ class KeyedStoreIC : public StoreIC {
KeyedAccessStoreMode store_mode,
Handle<Map> new_receiver_map);
- Handle<Code> slow_stub() const override {
- return BUILTIN_CODE(isolate(), KeyedStoreIC_Slow);
- }
-
private:
Handle<Map> ComputeTransitionedMap(Handle<Map> map,
TransitionMode transition_mode);
@@ -356,11 +330,6 @@ class StoreInArrayLiteralIC : public KeyedStoreIC {
}
void Store(Handle<JSArray> array, Handle<Object> index, Handle<Object> value);
-
- private:
- Handle<Code> slow_stub() const override {
- return BUILTIN_CODE(isolate(), StoreInArrayLiteralIC_Slow);
- }
};
} // namespace internal
diff --git a/deps/v8/src/ic/keyed-store-generic.cc b/deps/v8/src/ic/keyed-store-generic.cc
index bb4e6cb427..ff830a022e 100644
--- a/deps/v8/src/ic/keyed-store-generic.cc
+++ b/deps/v8/src/ic/keyed-store-generic.cc
@@ -16,10 +16,6 @@
namespace v8 {
namespace internal {
-using Node = compiler::Node;
-template <class T>
-using TNode = compiler::TNode<T>;
-
enum class StoreMode { kOrdinary, kInLiteral };
class KeyedStoreGenericAssembler : public AccessorAssembler {
@@ -62,9 +58,11 @@ class KeyedStoreGenericAssembler : public AccessorAssembler {
TNode<Object> key, TNode<Object> value,
Maybe<LanguageMode> language_mode);
- void EmitGenericElementStore(Node* receiver, TNode<Map> receiver_map,
- Node* instance_type, TNode<IntPtrT> index,
- Node* value, Node* context, Label* slow);
+ void EmitGenericElementStore(TNode<JSObject> receiver,
+ TNode<Map> receiver_map,
+ TNode<Uint16T> instance_type,
+ TNode<IntPtrT> index, TNode<Object> value,
+ TNode<Context> context, Label* slow);
// If language mode is not provided it is deduced from the feedback slot's
// kind.
@@ -86,38 +84,46 @@ class KeyedStoreGenericAssembler : public AccessorAssembler {
Label* non_fast_elements,
Label* only_fast_elements);
- void TryRewriteElements(Node* receiver, TNode<Map> receiver_map,
- Node* elements, Node* native_context,
+ void TryRewriteElements(TNode<JSObject> receiver, TNode<Map> receiver_map,
+ TNode<FixedArrayBase> elements,
+ TNode<NativeContext> native_context,
ElementsKind from_kind, ElementsKind to_kind,
Label* bailout);
- void StoreElementWithCapacity(Node* receiver, TNode<Map> receiver_map,
+ void StoreElementWithCapacity(TNode<JSObject> receiver,
+ TNode<Map> receiver_map,
SloppyTNode<FixedArrayBase> elements,
TNode<Word32T> elements_kind,
- TNode<IntPtrT> index, Node* value,
- Node* context, Label* slow,
+ TNode<IntPtrT> index, SloppyTNode<Object> value,
+ TNode<Context> context, Label* slow,
UpdateLength update_length);
- void MaybeUpdateLengthAndReturn(Node* receiver, Node* index, Node* value,
+ void MaybeUpdateLengthAndReturn(TNode<JSObject> receiver,
+ TNode<IntPtrT> index, TNode<Object> value,
UpdateLength update_length);
- void TryChangeToHoleyMapHelper(Node* receiver, TNode<Map> receiver_map,
- Node* native_context, ElementsKind packed_kind,
+ void TryChangeToHoleyMapHelper(TNode<JSObject> receiver,
+ TNode<Map> receiver_map,
+ TNode<NativeContext> native_context,
+ ElementsKind packed_kind,
ElementsKind holey_kind, Label* done,
Label* map_mismatch, Label* bailout);
- void TryChangeToHoleyMap(Node* receiver, TNode<Map> receiver_map,
- TNode<Word32T> current_elements_kind, Node* context,
- ElementsKind packed_kind, Label* bailout);
- void TryChangeToHoleyMapMulti(Node* receiver, TNode<Map> receiver_map,
+ void TryChangeToHoleyMap(TNode<JSObject> receiver, TNode<Map> receiver_map,
+ TNode<Word32T> current_elements_kind,
+ TNode<Context> context, ElementsKind packed_kind,
+ Label* bailout);
+ void TryChangeToHoleyMapMulti(TNode<JSObject> receiver,
+ TNode<Map> receiver_map,
TNode<Word32T> current_elements_kind,
- Node* context, ElementsKind packed_kind,
+ TNode<Context> context,
+ ElementsKind packed_kind,
ElementsKind packed_kind_2, Label* bailout);
- void LookupPropertyOnPrototypeChain(TNode<Map> receiver_map, Node* name,
- Label* accessor,
- Variable* var_accessor_pair,
- Variable* var_accessor_holder,
- Label* readonly, Label* bailout);
+ void LookupPropertyOnPrototypeChain(
+ TNode<Map> receiver_map, TNode<Name> name, Label* accessor,
+ TVariable<Object>* var_accessor_pair,
+ TVariable<HeapObject>* var_accessor_holder, Label* readonly,
+ Label* bailout);
TNode<Map> FindCandidateStoreICTransitionMapHandler(TNode<Map> map,
TNode<Name> name,
@@ -173,18 +179,18 @@ void KeyedStoreGenericGenerator::SetPropertyInLiteral(
void KeyedStoreGenericAssembler::BranchIfPrototypesHaveNonFastElements(
TNode<Map> receiver_map, Label* non_fast_elements,
Label* only_fast_elements) {
- VARIABLE(var_map, MachineRepresentation::kTagged);
- var_map.Bind(receiver_map);
+ TVARIABLE(Map, var_map);
+ var_map = receiver_map;
Label loop_body(this, &var_map);
Goto(&loop_body);
BIND(&loop_body);
{
- Node* map = var_map.value();
+ TNode<Map> map = var_map.value();
TNode<HeapObject> prototype = LoadMapPrototype(map);
GotoIf(IsNull(prototype), only_fast_elements);
TNode<Map> prototype_map = LoadMap(prototype);
- var_map.Bind(prototype_map);
+ var_map = prototype_map;
TNode<Uint16T> instance_type = LoadMapInstanceType(prototype_map);
GotoIf(IsCustomElementsReceiverInstanceType(instance_type),
non_fast_elements);
@@ -196,9 +202,9 @@ void KeyedStoreGenericAssembler::BranchIfPrototypesHaveNonFastElements(
}
void KeyedStoreGenericAssembler::TryRewriteElements(
- Node* receiver, TNode<Map> receiver_map, Node* elements,
- Node* native_context, ElementsKind from_kind, ElementsKind to_kind,
- Label* bailout) {
+ TNode<JSObject> receiver, TNode<Map> receiver_map,
+ TNode<FixedArrayBase> elements, TNode<NativeContext> native_context,
+ ElementsKind from_kind, ElementsKind to_kind, Label* bailout) {
DCHECK(IsFastPackedElementsKind(from_kind));
ElementsKind holey_from_kind = GetHoleyElementsKind(from_kind);
ElementsKind holey_to_kind = GetHoleyElementsKind(to_kind);
@@ -206,12 +212,12 @@ void KeyedStoreGenericAssembler::TryRewriteElements(
TrapAllocationMemento(receiver, bailout);
}
Label perform_transition(this), check_holey_map(this);
- VARIABLE(var_target_map, MachineRepresentation::kTagged);
+ TVARIABLE(Map, var_target_map);
// Check if the receiver has the default |from_kind| map.
{
TNode<Map> packed_map = LoadJSArrayElementsMap(from_kind, native_context);
GotoIf(TaggedNotEqual(receiver_map, packed_map), &check_holey_map);
- var_target_map.Bind(
+ var_target_map = CAST(
LoadContextElement(native_context, Context::ArrayMapIndex(to_kind)));
Goto(&perform_transition);
}
@@ -222,7 +228,7 @@ void KeyedStoreGenericAssembler::TryRewriteElements(
TNode<Object> holey_map = LoadContextElement(
native_context, Context::ArrayMapIndex(holey_from_kind));
GotoIf(TaggedNotEqual(receiver_map, holey_map), bailout);
- var_target_map.Bind(LoadContextElement(
+ var_target_map = CAST(LoadContextElement(
native_context, Context::ArrayMapIndex(holey_to_kind)));
Goto(&perform_transition);
}
@@ -240,9 +246,9 @@ void KeyedStoreGenericAssembler::TryRewriteElements(
}
void KeyedStoreGenericAssembler::TryChangeToHoleyMapHelper(
- Node* receiver, TNode<Map> receiver_map, Node* native_context,
- ElementsKind packed_kind, ElementsKind holey_kind, Label* done,
- Label* map_mismatch, Label* bailout) {
+ TNode<JSObject> receiver, TNode<Map> receiver_map,
+ TNode<NativeContext> native_context, ElementsKind packed_kind,
+ ElementsKind holey_kind, Label* done, Label* map_mismatch, Label* bailout) {
TNode<Map> packed_map = LoadJSArrayElementsMap(packed_kind, native_context);
GotoIf(TaggedNotEqual(receiver_map, packed_map), map_mismatch);
if (AllocationSite::ShouldTrack(packed_kind, holey_kind)) {
@@ -255,23 +261,23 @@ void KeyedStoreGenericAssembler::TryChangeToHoleyMapHelper(
}
void KeyedStoreGenericAssembler::TryChangeToHoleyMap(
- Node* receiver, TNode<Map> receiver_map,
- TNode<Word32T> current_elements_kind, Node* context,
+ TNode<JSObject> receiver, TNode<Map> receiver_map,
+ TNode<Word32T> current_elements_kind, TNode<Context> context,
ElementsKind packed_kind, Label* bailout) {
ElementsKind holey_kind = GetHoleyElementsKind(packed_kind);
Label already_holey(this);
GotoIf(Word32Equal(current_elements_kind, Int32Constant(holey_kind)),
&already_holey);
- TNode<Context> native_context = LoadNativeContext(context);
+ TNode<NativeContext> native_context = LoadNativeContext(context);
TryChangeToHoleyMapHelper(receiver, receiver_map, native_context, packed_kind,
holey_kind, &already_holey, bailout, bailout);
BIND(&already_holey);
}
void KeyedStoreGenericAssembler::TryChangeToHoleyMapMulti(
- Node* receiver, TNode<Map> receiver_map,
- TNode<Word32T> current_elements_kind, Node* context,
+ TNode<JSObject> receiver, TNode<Map> receiver_map,
+ TNode<Word32T> current_elements_kind, TNode<Context> context,
ElementsKind packed_kind, ElementsKind packed_kind_2, Label* bailout) {
ElementsKind holey_kind = GetHoleyElementsKind(packed_kind);
ElementsKind holey_kind_2 = GetHoleyElementsKind(packed_kind_2);
@@ -282,7 +288,7 @@ void KeyedStoreGenericAssembler::TryChangeToHoleyMapMulti(
GotoIf(Word32Equal(current_elements_kind, Int32Constant(holey_kind_2)),
&already_holey);
- TNode<Context> native_context = LoadNativeContext(context);
+ TNode<NativeContext> native_context = LoadNativeContext(context);
TryChangeToHoleyMapHelper(receiver, receiver_map, native_context, packed_kind,
holey_kind, &already_holey, &check_other_kind,
bailout);
@@ -294,7 +300,8 @@ void KeyedStoreGenericAssembler::TryChangeToHoleyMapMulti(
}
void KeyedStoreGenericAssembler::MaybeUpdateLengthAndReturn(
- Node* receiver, Node* index, Node* value, UpdateLength update_length) {
+ TNode<JSObject> receiver, TNode<IntPtrT> index, TNode<Object> value,
+ UpdateLength update_length) {
if (update_length != kDontChangeLength) {
TNode<Smi> new_length = SmiTag(Signed(IntPtrAdd(index, IntPtrConstant(1))));
StoreObjectFieldNoWriteBarrier(receiver, JSArray::kLengthOffset, new_length,
@@ -304,10 +311,10 @@ void KeyedStoreGenericAssembler::MaybeUpdateLengthAndReturn(
}
void KeyedStoreGenericAssembler::StoreElementWithCapacity(
- Node* receiver, TNode<Map> receiver_map,
+ TNode<JSObject> receiver, TNode<Map> receiver_map,
SloppyTNode<FixedArrayBase> elements, TNode<Word32T> elements_kind,
- TNode<IntPtrT> index, Node* value, Node* context, Label* slow,
- UpdateLength update_length) {
+ TNode<IntPtrT> index, SloppyTNode<Object> value, TNode<Context> context,
+ Label* slow, UpdateLength update_length) {
if (update_length != kDontChangeLength) {
CSA_ASSERT(this, InstanceTypeEqual(LoadMapInstanceType(receiver_map),
JS_ARRAY_TYPE));
@@ -331,8 +338,8 @@ void KeyedStoreGenericAssembler::StoreElementWithCapacity(
// FixedArray backing store -> Smi or object elements.
{
- TNode<IntPtrT> offset = ElementOffsetFromIndex(
- index, PACKED_ELEMENTS, INTPTR_PARAMETERS, kHeaderSize);
+ TNode<IntPtrT> offset =
+ ElementOffsetFromIndex(index, PACKED_ELEMENTS, kHeaderSize);
// Check if we're about to overwrite the hole. We can safely do that
// only if there can be no setters on the prototype chain.
// If we know that we're storing beyond the previous array length, we
@@ -387,8 +394,8 @@ void KeyedStoreGenericAssembler::StoreElementWithCapacity(
// Transition to the required ElementsKind.
{
Label transition_to_double(this), transition_to_object(this);
- TNode<Context> native_context = LoadNativeContext(context);
- Branch(TaggedEqual(LoadMap(value), HeapNumberMapConstant()),
+ TNode<NativeContext> native_context = LoadNativeContext(context);
+ Branch(TaggedEqual(LoadMap(CAST(value)), HeapNumberMapConstant()),
&transition_to_double, &transition_to_object);
BIND(&transition_to_double);
{
@@ -401,11 +408,11 @@ void KeyedStoreGenericAssembler::StoreElementWithCapacity(
PACKED_SMI_ELEMENTS, target_kind, slow);
// Reload migrated elements.
TNode<FixedArrayBase> double_elements = LoadElements(receiver);
- TNode<IntPtrT> double_offset = ElementOffsetFromIndex(
- index, PACKED_DOUBLE_ELEMENTS, INTPTR_PARAMETERS, kHeaderSize);
+ TNode<IntPtrT> double_offset =
+ ElementOffsetFromIndex(index, PACKED_DOUBLE_ELEMENTS, kHeaderSize);
// Make sure we do not store signalling NaNs into double arrays.
TNode<Float64T> double_value =
- Float64SilenceNaN(LoadHeapNumberValue(value));
+ Float64SilenceNaN(LoadHeapNumberValue(CAST(value)));
StoreNoWriteBarrier(MachineRepresentation::kFloat64, double_elements,
double_offset, double_value);
MaybeUpdateLengthAndReturn(receiver, index, value, update_length);
@@ -434,8 +441,8 @@ void KeyedStoreGenericAssembler::StoreElementWithCapacity(
&check_cow_elements);
// FixedDoubleArray backing store -> double elements.
{
- TNode<IntPtrT> offset = ElementOffsetFromIndex(
- index, PACKED_DOUBLE_ELEMENTS, INTPTR_PARAMETERS, kHeaderSize);
+ TNode<IntPtrT> offset =
+ ElementOffsetFromIndex(index, PACKED_DOUBLE_ELEMENTS, kHeaderSize);
// Check if we're about to overwrite the hole. We can safely do that
// only if there can be no setters on the prototype chain.
{
@@ -457,7 +464,8 @@ void KeyedStoreGenericAssembler::StoreElementWithCapacity(
// Try to store the value as a double.
{
Label non_number_value(this);
- Node* double_value = TryTaggedToFloat64(value, &non_number_value);
+ TNode<Float64T> double_value =
+ TryTaggedToFloat64(value, &non_number_value);
// Make sure we do not store signalling NaNs into double arrays.
double_value = Float64SilenceNaN(double_value);
@@ -475,7 +483,7 @@ void KeyedStoreGenericAssembler::StoreElementWithCapacity(
// Transition to object elements.
{
- TNode<Context> native_context = LoadNativeContext(context);
+ TNode<NativeContext> native_context = LoadNativeContext(context);
ElementsKind target_kind = update_length == kBumpLengthWithGap
? HOLEY_ELEMENTS
: PACKED_ELEMENTS;
@@ -483,8 +491,8 @@ void KeyedStoreGenericAssembler::StoreElementWithCapacity(
PACKED_DOUBLE_ELEMENTS, target_kind, slow);
// Reload migrated elements.
TNode<FixedArrayBase> fast_elements = LoadElements(receiver);
- TNode<IntPtrT> fast_offset = ElementOffsetFromIndex(
- index, PACKED_ELEMENTS, INTPTR_PARAMETERS, kHeaderSize);
+ TNode<IntPtrT> fast_offset =
+ ElementOffsetFromIndex(index, PACKED_ELEMENTS, kHeaderSize);
Store(fast_elements, fast_offset, value);
MaybeUpdateLengthAndReturn(receiver, index, value, update_length);
}
@@ -498,8 +506,9 @@ void KeyedStoreGenericAssembler::StoreElementWithCapacity(
}
void KeyedStoreGenericAssembler::EmitGenericElementStore(
- Node* receiver, TNode<Map> receiver_map, Node* instance_type,
- TNode<IntPtrT> index, Node* value, Node* context, Label* slow) {
+ TNode<JSObject> receiver, TNode<Map> receiver_map,
+ TNode<Uint16T> instance_type, TNode<IntPtrT> index, TNode<Object> value,
+ TNode<Context> context, Label* slow) {
Label if_fast(this), if_in_bounds(this), if_out_of_bounds(this),
if_increment_length_by_one(this), if_bump_length_with_gap(this),
if_grow(this), if_nonfast(this), if_typed_array(this),
@@ -517,7 +526,7 @@ void KeyedStoreGenericAssembler::EmitGenericElementStore(
}
BIND(&if_array);
{
- TNode<IntPtrT> length = SmiUntag(LoadFastJSArrayLength(receiver));
+ TNode<IntPtrT> length = SmiUntag(LoadFastJSArrayLength(CAST(receiver)));
GotoIf(UintPtrLessThan(index, length), &if_in_bounds);
TNode<IntPtrT> capacity = SmiUntag(LoadFixedArrayBaseLength(elements));
GotoIf(UintPtrGreaterThanOrEqual(index, capacity), &if_grow);
@@ -595,32 +604,32 @@ void KeyedStoreGenericAssembler::EmitGenericElementStore(
}
void KeyedStoreGenericAssembler::LookupPropertyOnPrototypeChain(
- TNode<Map> receiver_map, Node* name, Label* accessor,
- Variable* var_accessor_pair, Variable* var_accessor_holder, Label* readonly,
+ TNode<Map> receiver_map, TNode<Name> name, Label* accessor,
+ TVariable<Object>* var_accessor_pair,
+ TVariable<HeapObject>* var_accessor_holder, Label* readonly,
Label* bailout) {
Label ok_to_write(this);
- VARIABLE(var_holder, MachineRepresentation::kTagged);
- var_holder.Bind(LoadMapPrototype(receiver_map));
- VARIABLE(var_holder_map, MachineRepresentation::kTagged);
- var_holder_map.Bind(LoadMap(var_holder.value()));
+ TVARIABLE(HeapObject, var_holder);
+ TVARIABLE(Map, var_holder_map);
+ var_holder = LoadMapPrototype(receiver_map);
+ var_holder_map = LoadMap(var_holder.value());
- Variable* merged_variables[] = {&var_holder, &var_holder_map};
- Label loop(this, arraysize(merged_variables), merged_variables);
+ Label loop(this, {&var_holder, &var_holder_map});
Goto(&loop);
BIND(&loop);
{
- Node* holder = var_holder.value();
+ TNode<HeapObject> holder = var_holder.value();
GotoIf(IsNull(holder), &ok_to_write);
- Node* holder_map = var_holder_map.value();
+ TNode<Map> holder_map = var_holder_map.value();
TNode<Uint16T> instance_type = LoadMapInstanceType(holder_map);
Label next_proto(this);
{
Label found(this), found_fast(this), found_dict(this), found_global(this);
TVARIABLE(HeapObject, var_meta_storage);
TVARIABLE(IntPtrT, var_entry);
- TryLookupProperty(holder, holder_map, instance_type, name, &found_fast,
- &found_dict, &found_global, &var_meta_storage,
- &var_entry, &next_proto, bailout);
+ TryLookupProperty(CAST(holder), holder_map, instance_type, name,
+ &found_fast, &found_dict, &found_global,
+ &var_meta_storage, &var_entry, &next_proto, bailout);
BIND(&found_fast);
{
TNode<DescriptorArray> descriptors = CAST(var_meta_storage.value());
@@ -631,10 +640,10 @@ void KeyedStoreGenericAssembler::LookupPropertyOnPrototypeChain(
// Accessor case.
// TODO(jkummerow): Implement a trimmed-down
// LoadAccessorFromFastObject.
- VARIABLE(var_details, MachineRepresentation::kWord32);
+ TVARIABLE(Uint32T, var_details);
LoadPropertyFromFastObject(holder, holder_map, descriptors, name_index,
&var_details, var_accessor_pair);
- var_accessor_holder->Bind(holder);
+ *var_accessor_holder = holder;
Goto(accessor);
}
@@ -648,9 +657,9 @@ void KeyedStoreGenericAssembler::LookupPropertyOnPrototypeChain(
if (accessor != nullptr) {
// Accessor case.
- var_accessor_pair->Bind(
- LoadValueByKeyIndex<NameDictionary>(dictionary, entry));
- var_accessor_holder->Bind(holder);
+ *var_accessor_pair =
+ LoadValueByKeyIndex<NameDictionary>(dictionary, entry);
+ *var_accessor_holder = holder;
Goto(accessor);
} else {
Goto(&ok_to_write);
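The recurring change in this file — VARIABLE(var, MachineRepresentation::kTagged) plus var.Bind(x) becoming TVARIABLE(Type, var) plus var = x — is the CodeStubAssembler typed-variable migration: the machine-representation tag is replaced by a static C++ type, and Bind() by an assignment operator. A minimal standalone analogue of that mechanism; UntypedVariable/TVariable here are illustrative, not V8's classes:

#include <cassert>

class UntypedVariable {  // stands in for the old VARIABLE(...)
 public:
  void Bind(void* value) { value_ = value; }
  void* value() const { return value_; }

 private:
  void* value_ = nullptr;
};

template <typename T>
class TVariable {  // stands in for TVARIABLE(T, ...)
 public:
  TVariable& operator=(T* value) {  // replaces var.Bind(value)
    untyped_.Bind(value);
    return *this;
  }
  T* value() const { return static_cast<T*>(untyped_.value()); }

 private:
  UntypedVariable untyped_;
};

int main() {
  int forty_two = 42;
  TVariable<int> var;
  var = &forty_two;  // typed assignment; `var = &some_double;` won't compile
  assert(*var.value() == 42);
}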
@@ -666,14 +675,14 @@ void KeyedStoreGenericAssembler::LookupPropertyOnPrototypeChain(
TNode<Object> value =
LoadObjectField(property_cell, PropertyCell::kValueOffset);
GotoIf(TaggedEqual(value, TheHoleConstant()), &next_proto);
- TNode<Int32T> details = LoadAndUntagToWord32ObjectField(
- property_cell, PropertyCell::kPropertyDetailsRawOffset);
+ TNode<Uint32T> details = Unsigned(LoadAndUntagToWord32ObjectField(
+ property_cell, PropertyCell::kPropertyDetailsRawOffset));
JumpIfDataProperty(details, &ok_to_write, readonly);
if (accessor != nullptr) {
// Accessor case.
- var_accessor_pair->Bind(value);
- var_accessor_holder->Bind(holder);
+ *var_accessor_pair = value;
+ *var_accessor_holder = holder;
Goto(accessor);
} else {
Goto(&ok_to_write);
@@ -686,8 +695,8 @@ void KeyedStoreGenericAssembler::LookupPropertyOnPrototypeChain(
GotoIf(InstanceTypeEqual(instance_type, JS_TYPED_ARRAY_TYPE), bailout);
TNode<HeapObject> proto = LoadMapPrototype(holder_map);
GotoIf(IsNull(proto), &ok_to_write);
- var_holder.Bind(proto);
- var_holder_map.Bind(LoadMap(proto));
+ var_holder = proto;
+ var_holder_map = LoadMap(proto);
Goto(&loop);
}
BIND(&ok_to_write);
@@ -763,8 +772,10 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
const StoreICParameters* p, ExitPoint* exit_point, Label* slow,
Maybe<LanguageMode> maybe_language_mode) {
CSA_ASSERT(this, IsSimpleObjectMap(receiver_map));
- VARIABLE(var_accessor_pair, MachineRepresentation::kTagged);
- VARIABLE(var_accessor_holder, MachineRepresentation::kTagged);
+ // TODO(rmcilroy): Type as Struct once we use a trimmed-down
+ // LoadAccessorFromFastObject instead of LoadPropertyFromFastObject.
+ TVARIABLE(Object, var_accessor_pair);
+ TVARIABLE(HeapObject, var_accessor_holder);
Label fast_properties(this), dictionary_properties(this), accessor(this),
readonly(this);
TNode<Uint32T> bitfield3 = LoadMapBitField3(receiver_map);
@@ -792,11 +803,11 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
if (ShouldCallSetter()) {
// Accessor case.
// TODO(jkummerow): Implement a trimmed-down LoadAccessorFromFastObject.
- VARIABLE(var_details, MachineRepresentation::kWord32);
+ TVARIABLE(Uint32T, var_details);
LoadPropertyFromFastObject(receiver, receiver_map, descriptors,
name_index, &var_details,
&var_accessor_pair);
- var_accessor_holder.Bind(receiver);
+ var_accessor_holder = receiver;
Goto(&accessor);
} else {
// Handle accessor to data property reconfiguration in runtime.
@@ -836,7 +847,7 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
TVARIABLE(IntPtrT, var_name_index);
Label dictionary_found(this, &var_name_index), not_found(this);
- TNode<NameDictionary> properties = CAST(LoadSlowProperties(CAST(receiver)));
+ TNode<NameDictionary> properties = CAST(LoadSlowProperties(receiver));
NameDictionaryLookup<NameDictionary>(properties, name, &dictionary_found,
&var_name_index, &not_found);
BIND(&dictionary_found);
@@ -849,9 +860,9 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
if (ShouldCallSetter()) {
// Accessor case.
- var_accessor_pair.Bind(LoadValueByKeyIndex<NameDictionary>(
- properties, var_name_index.value()));
- var_accessor_holder.Bind(receiver);
+ var_accessor_pair = LoadValueByKeyIndex<NameDictionary>(
+ properties, var_name_index.value());
+ var_accessor_holder = receiver;
Goto(&accessor);
} else {
// We must reconfigure an accessor property to a data property
@@ -870,6 +881,11 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
BIND(&not_found);
{
+ // TODO(jkummerow): Also add support to correctly handle integer exotic
+ // cases for typed arrays and remove this check here.
+ GotoIf(InstanceTypeEqual(LoadMapInstanceType(receiver_map),
+ JS_TYPED_ARRAY_TYPE),
+ slow);
CheckForAssociatedProtector(name, slow);
Label extensible(this), is_private_symbol(this);
TNode<Uint32T> bitfield3 = LoadMapBitField3(receiver_map);
@@ -909,7 +925,7 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
BIND(&accessor);
{
Label not_callable(this);
- Node* accessor_pair = var_accessor_pair.value();
+ TNode<Struct> accessor_pair = CAST(var_accessor_pair.value());
GotoIf(IsAccessorInfoMap(LoadMap(accessor_pair)), slow);
CSA_ASSERT(this, HasInstanceType(accessor_pair, ACCESSOR_PAIR_TYPE));
TNode<HeapObject> setter =
@@ -951,7 +967,7 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
LanguageMode language_mode;
if (maybe_language_mode.To(&language_mode)) {
if (language_mode == LanguageMode::kStrict) {
- Node* type = Typeof(p->receiver());
+ TNode<String> type = Typeof(p->receiver());
ThrowTypeError(p->context(), MessageTemplate::kStrictReadOnlyProperty,
name, type, p->receiver());
} else {
@@ -969,15 +985,16 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
// Helper that is used by the public KeyedStoreGeneric and by SetProperty.
void KeyedStoreGenericAssembler::KeyedStoreGeneric(
- TNode<Context> context, TNode<Object> receiver, TNode<Object> key,
+ TNode<Context> context, TNode<Object> receiver_maybe_smi, TNode<Object> key,
TNode<Object> value, Maybe<LanguageMode> language_mode) {
TVARIABLE(IntPtrT, var_index);
- TVARIABLE(Object, var_unique, key);
+ TVARIABLE(Name, var_unique);
Label if_index(this), if_unique_name(this), not_internalized(this),
slow(this);
- GotoIf(TaggedIsSmi(receiver), &slow);
- TNode<Map> receiver_map = LoadMap(CAST(receiver));
+ GotoIf(TaggedIsSmi(receiver_maybe_smi), &slow);
+ TNode<HeapObject> receiver = CAST(receiver_maybe_smi);
+ TNode<Map> receiver_map = LoadMap(receiver);
TNode<Uint16T> instance_type = LoadMapInstanceType(receiver_map);
// Receivers requiring non-standard element accesses (interceptors, access
// checks, strings and string wrappers, proxies) are handled in the runtime.
@@ -989,14 +1006,14 @@ void KeyedStoreGenericAssembler::KeyedStoreGeneric(
BIND(&if_index);
{
Comment("integer index");
- EmitGenericElementStore(receiver, receiver_map, instance_type,
+ EmitGenericElementStore(CAST(receiver), receiver_map, instance_type,
var_index.value(), value, context, &slow);
}
BIND(&if_unique_name);
{
Comment("key is unique name");
- StoreICParameters p(context, receiver, var_unique.value(), value, nullptr,
+ StoreICParameters p(context, receiver, var_unique.value(), value, {},
nullptr);
ExitPoint direct_exit(this);
EmitGenericPropertyStore(CAST(receiver), receiver_map, &p, &direct_exit,
@@ -1006,7 +1023,7 @@ void KeyedStoreGenericAssembler::KeyedStoreGeneric(
BIND(&not_internalized);
{
if (FLAG_internalize_on_the_fly) {
- TryInternalizeString(key, &if_index, &var_index, &if_unique_name,
+ TryInternalizeString(CAST(key), &if_index, &var_index, &if_unique_name,
&var_unique, &slow, &slow);
} else {
Goto(&slow);
@@ -1049,30 +1066,34 @@ void KeyedStoreGenericAssembler::SetProperty(TNode<Context> context,
void KeyedStoreGenericAssembler::StoreIC_NoFeedback() {
using Descriptor = StoreDescriptor;
- Node* receiver = Parameter(Descriptor::kReceiver);
+ TNode<Object> receiver_maybe_smi = CAST(Parameter(Descriptor::kReceiver));
TNode<Object> name = CAST(Parameter(Descriptor::kName));
- Node* value = Parameter(Descriptor::kValue);
- Node* slot = Parameter(Descriptor::kSlot);
- Node* context = Parameter(Descriptor::kContext);
+ TNode<Object> value = CAST(Parameter(Descriptor::kValue));
+ TNode<Smi> slot = CAST(Parameter(Descriptor::kSlot));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
Label miss(this, Label::kDeferred), store_property(this);
- GotoIf(TaggedIsSmi(receiver), &miss);
- TNode<Map> receiver_map = LoadMap(receiver);
- TNode<Uint16T> instance_type = LoadMapInstanceType(receiver_map);
- // Receivers requiring non-standard element accesses (interceptors, access
- // checks, strings and string wrappers, proxies) are handled in the runtime.
- GotoIf(IsSpecialReceiverInstanceType(instance_type), &miss);
+ GotoIf(TaggedIsSmi(receiver_maybe_smi), &miss);
+
{
- StoreICParameters p(CAST(context), receiver, name, value, slot,
- UndefinedConstant());
- EmitGenericPropertyStore(receiver, receiver_map, &p, &miss);
+ TNode<HeapObject> receiver = CAST(receiver_maybe_smi);
+ TNode<Map> receiver_map = LoadMap(receiver);
+ TNode<Uint16T> instance_type = LoadMapInstanceType(receiver_map);
+ // Receivers requiring non-standard element accesses (interceptors, access
+ // checks, strings and string wrappers, proxies) are handled in the runtime.
+ GotoIf(IsSpecialReceiverInstanceType(instance_type), &miss);
+ {
+ StoreICParameters p(context, receiver, name, value, slot,
+ UndefinedConstant());
+ EmitGenericPropertyStore(CAST(receiver), receiver_map, &p, &miss);
+ }
}
BIND(&miss);
{
TailCallRuntime(Runtime::kStoreIC_Miss, context, value, slot,
- UndefinedConstant(), receiver, name);
+ UndefinedConstant(), receiver_maybe_smi, name);
}
}
@@ -1082,7 +1103,7 @@ void KeyedStoreGenericAssembler::SetProperty(TNode<Context> context,
TNode<Name> unique_name,
TNode<Object> value,
LanguageMode language_mode) {
- StoreICParameters p(context, receiver, unique_name, value, nullptr, nullptr);
+ StoreICParameters p(context, receiver, unique_name, value, {}, nullptr);
Label done(this), slow(this, Label::kDeferred);
ExitPoint exit_point(this, [&](Node* result) { Goto(&done); });
diff --git a/deps/v8/src/ic/keyed-store-generic.h b/deps/v8/src/ic/keyed-store-generic.h
index efee0da80e..8047fe6493 100644
--- a/deps/v8/src/ic/keyed-store-generic.h
+++ b/deps/v8/src/ic/keyed-store-generic.h
@@ -13,9 +13,6 @@ namespace internal {
class KeyedStoreGenericGenerator {
public:
- template <class T>
- using TNode = compiler::TNode<T>;
-
static void Generate(compiler::CodeAssemblerState* state);
// Building block for fast path of Object.assign implementation.
diff --git a/deps/v8/src/ic/stub-cache.cc b/deps/v8/src/ic/stub-cache.cc
index 04381bf693..c1d9aea374 100644
--- a/deps/v8/src/ic/stub-cache.cc
+++ b/deps/v8/src/ic/stub-cache.cc
@@ -26,11 +26,10 @@ void StubCache::Initialize() {
Clear();
}
-// Hash algorithm for the primary table. This algorithm is replicated in
-// assembler for every architecture. Returns an index into the table that
+// Hash algorithm for the primary table. This algorithm is replicated in
+// the AccessorAssembler. Returns an index into the table that
// is scaled by 1 << kCacheIndexShift.
int StubCache::PrimaryOffset(Name name, Map map) {
- STATIC_ASSERT(kCacheIndexShift == Name::kHashShift);
// Compute the hash of the name (use entire hash field).
DCHECK(name.HasHashCode());
uint32_t field = name.hash_field();
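For reference, the shape of the computation this comment describes: the name's hash field is combined with map bits, then masked so the result indexes kPrimaryTableSize entries, pre-scaled by 1 << kCacheIndexShift. A standalone sketch using the constants visible in this patch; the hash mixing itself is a stand-in, not V8's exact combination:

#include <cassert>
#include <cstdint>
#include <cstdio>

constexpr int kCacheIndexShift = 2;  // see stub-cache.h in this patch
constexpr int kPrimaryTableBits = 11;
constexpr int kPrimaryTableSize = 1 << kPrimaryTableBits;

int PrimaryOffsetSketch(uint32_t name_hash_field, uint32_t map_word) {
  uint32_t key = name_hash_field + map_word;  // illustrative combination
  return static_cast<int>(key & ((kPrimaryTableSize - 1) << kCacheIndexShift));
}

int main() {
  int offset = PrimaryOffsetSketch(0xDEADBEEFu, 0x12345678u);
  printf("offset=%d\n", offset);
  // The offset is always a multiple of the entry granularity and stays
  // within the scaled table size.
  assert(offset % (1 << kCacheIndexShift) == 0);
  assert(offset < (kPrimaryTableSize << kCacheIndexShift));
}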
diff --git a/deps/v8/src/ic/stub-cache.h b/deps/v8/src/ic/stub-cache.h
index 87acc0e007..dc3317588d 100644
--- a/deps/v8/src/ic/stub-cache.h
+++ b/deps/v8/src/ic/stub-cache.h
@@ -78,10 +78,15 @@ class V8_EXPORT_PRIVATE StubCache {
Isolate* isolate() { return isolate_; }
- // Setting the entry size such that the index is shifted by Name::kHashShift
- // is convenient; shifting down the length field (to extract the hash code)
- // automatically discards the hash bit field.
- static const int kCacheIndexShift = Name::kHashShift;
+ // Ideally we would set kCacheIndexShift to Name::kHashShift, such that
+ // the bit field inside the hash field gets shifted out implicitly. However,
+ // sizeof(Entry) needs to be a multiple of 1 << kCacheIndexShift, and it
+ // isn't clear whether letting one bit of the bit field leak into the index
+ // computation is bad enough to warrant an additional shift to get rid of it.
+ static const int kCacheIndexShift = 2;
+ // The purpose of the static assert is to make us reconsider this choice
+ // if the bit field ever grows even more.
+ STATIC_ASSERT(kCacheIndexShift == Name::kHashShift - 1);
static const int kPrimaryTableBits = 11;
static const int kPrimaryTableSize = (1 << kPrimaryTableBits);
@@ -125,7 +130,10 @@ class V8_EXPORT_PRIVATE StubCache {
// of sizeof(Entry). This makes it easier to avoid making mistakes
// in the hashed offset computations.
static Entry* entry(Entry* table, int offset) {
- const int multiplier = sizeof(*table) >> Name::kHashShift;
+ // The size of {Entry} must be a multiple of 1 << kCacheIndexShift.
+ STATIC_ASSERT((sizeof(*table) >> kCacheIndexShift) << kCacheIndexShift ==
+ sizeof(*table));
+ const int multiplier = sizeof(*table) >> kCacheIndexShift;
return reinterpret_cast<Entry*>(reinterpret_cast<Address>(table) +
offset * multiplier);
}
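The new multiplier is easy to sanity-check: offsets arrive pre-scaled by 1 << kCacheIndexShift, and sizeof(Entry) is a multiple of that granularity (the STATIC_ASSERT above), so offset * (sizeof(Entry) >> kCacheIndexShift) equals (offset >> kCacheIndexShift) * sizeof(Entry). A standalone check with a hypothetical three-word Entry layout:

#include <cassert>
#include <cstdint>

struct Entry { uintptr_t key, value, map; };  // hypothetical layout
constexpr int kCacheIndexShift = 2;
static_assert(sizeof(Entry) % (1 << kCacheIndexShift) == 0,
              "Entry size must be a multiple of the index granularity");

int main() {
  const int multiplier = sizeof(Entry) >> kCacheIndexShift;
  for (int index = 0; index < (1 << 11); index++) {  // kPrimaryTableBits == 11
    int offset = index << kCacheIndexShift;          // pre-scaled offset
    assert(offset * multiplier == index * static_cast<int>(sizeof(Entry)));
  }
}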
diff --git a/deps/v8/src/init/bootstrapper.cc b/deps/v8/src/init/bootstrapper.cc
index f7e25ca0bb..148c60d89d 100644
--- a/deps/v8/src/init/bootstrapper.cc
+++ b/deps/v8/src/init/bootstrapper.cc
@@ -12,6 +12,7 @@
#include "src/debug/debug.h"
#include "src/execution/isolate-inl.h"
#include "src/execution/microtask-queue.h"
+#include "src/execution/protectors.h"
#include "src/extensions/cputracemark-extension.h"
#include "src/extensions/externalize-string-extension.h"
#include "src/extensions/free-buffer-extension.h"
@@ -130,15 +131,15 @@ static bool isValidCpuTraceMarkFunctionName() {
}
void Bootstrapper::InitializeOncePerProcess() {
- v8::RegisterExtension(v8::base::make_unique<FreeBufferExtension>());
- v8::RegisterExtension(v8::base::make_unique<GCExtension>(GCFunctionName()));
- v8::RegisterExtension(v8::base::make_unique<ExternalizeStringExtension>());
- v8::RegisterExtension(v8::base::make_unique<StatisticsExtension>());
- v8::RegisterExtension(v8::base::make_unique<TriggerFailureExtension>());
- v8::RegisterExtension(v8::base::make_unique<IgnitionStatisticsExtension>());
+ v8::RegisterExtension(std::make_unique<FreeBufferExtension>());
+ v8::RegisterExtension(std::make_unique<GCExtension>(GCFunctionName()));
+ v8::RegisterExtension(std::make_unique<ExternalizeStringExtension>());
+ v8::RegisterExtension(std::make_unique<StatisticsExtension>());
+ v8::RegisterExtension(std::make_unique<TriggerFailureExtension>());
+ v8::RegisterExtension(std::make_unique<IgnitionStatisticsExtension>());
if (isValidCpuTraceMarkFunctionName()) {
- v8::RegisterExtension(v8::base::make_unique<CpuTraceMarkExtension>(
- FLAG_expose_cputracemark_as));
+ v8::RegisterExtension(
+ std::make_unique<CpuTraceMarkExtension>(FLAG_expose_cputracemark_as));
}
}
@@ -284,6 +285,9 @@ class Genesis {
void TransferNamedProperties(Handle<JSObject> from, Handle<JSObject> to);
void TransferIndexedProperties(Handle<JSObject> from, Handle<JSObject> to);
+ Handle<Map> CreateInitialMapForArraySubclass(int size,
+ int inobject_properties);
+
static bool CompileExtension(Isolate* isolate, v8::Extension* extension);
Isolate* isolate_;
@@ -867,6 +871,29 @@ void Genesis::CreateIteratorMaps(Handle<JSFunction> empty) {
generator_next_internal->shared().set_native(false);
native_context()->set_generator_next_internal(*generator_next_internal);
+ // Internal version of async module functions, flagged as non-native such
+ // that they don't show up in Error traces.
+ {
+ Handle<JSFunction> async_module_evaluate_internal =
+ SimpleCreateFunction(isolate(), factory()->next_string(),
+ Builtins::kAsyncModuleEvaluate, 1, false);
+ async_module_evaluate_internal->shared().set_native(false);
+ native_context()->set_async_module_evaluate_internal(
+ *async_module_evaluate_internal);
+
+ Handle<JSFunction> call_async_module_fulfilled =
+ SimpleCreateFunction(isolate(), factory()->empty_string(),
+ Builtins::kCallAsyncModuleFulfilled, 1, false);
+ native_context()->set_call_async_module_fulfilled(
+ *call_async_module_fulfilled);
+
+ Handle<JSFunction> call_async_module_rejected =
+ SimpleCreateFunction(isolate(), factory()->empty_string(),
+ Builtins::kCallAsyncModuleRejected, 1, false);
+ native_context()->set_call_async_module_rejected(
+ *call_async_module_rejected);
+ }
+
// Create maps for generator functions and their prototypes. Store those
// maps in the native context. The "prototype" property descriptor is
// writable, non-enumerable, and non-configurable (as per ES6 draft
@@ -1098,9 +1125,9 @@ void ReplaceAccessors(Isolate* isolate, Handle<Map> map, Handle<String> name,
PropertyAttributes attributes,
Handle<AccessorPair> accessor_pair) {
DescriptorArray descriptors = map->instance_descriptors();
- int idx = descriptors.SearchWithCache(isolate, *name, *map);
+ InternalIndex entry = descriptors.SearchWithCache(isolate, *name, *map);
Descriptor d = Descriptor::AccessorConstant(name, accessor_pair, attributes);
- descriptors.Replace(idx, &d);
+ descriptors.Replace(entry, &d);
}
} // namespace
@@ -1274,8 +1301,8 @@ Handle<JSGlobalObject> Genesis::CreateNewGlobals(
DCHECK(native_context()
->get(Context::GLOBAL_PROXY_INDEX)
.IsUndefined(isolate()) ||
- native_context()->global_proxy() == *global_proxy);
- native_context()->set_global_proxy(*global_proxy);
+ native_context()->global_proxy_object() == *global_proxy);
+ native_context()->set_global_proxy_object(*global_proxy);
return global_object;
}
@@ -2432,7 +2459,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
{ // -- R e g E x p
// Builtin functions for RegExp.prototype.
Handle<JSFunction> regexp_fun = InstallFunction(
- isolate_, global, "RegExp", JS_REGEXP_TYPE,
+ isolate_, global, "RegExp", JS_REG_EXP_TYPE,
JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kTaggedSize,
JSRegExp::kInObjectFieldCount, factory->the_hole_value(),
Builtins::kRegExpConstructor);
@@ -2455,7 +2482,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Builtins::kRegExpPrototypeExec, 1, true);
native_context()->set_regexp_exec_function(*fun);
DCHECK_EQ(JSRegExp::kExecFunctionDescriptorIndex,
- prototype->map().LastAdded());
+ prototype->map().LastAdded().as_int());
}
SimpleInstallGetter(isolate_, prototype, factory->dotAll_string(),
@@ -2488,7 +2515,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Builtins::kRegExpPrototypeMatch, 1, true);
native_context()->set_regexp_match_function(*fun);
DCHECK_EQ(JSRegExp::kSymbolMatchFunctionDescriptorIndex,
- prototype->map().LastAdded());
+ prototype->map().LastAdded().as_int());
}
{
@@ -2497,7 +2524,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
"[Symbol.matchAll]", Builtins::kRegExpPrototypeMatchAll, 1, true);
native_context()->set_regexp_match_all_function(*fun);
DCHECK_EQ(JSRegExp::kSymbolMatchAllFunctionDescriptorIndex,
- prototype->map().LastAdded());
+ prototype->map().LastAdded().as_int());
}
{
@@ -2506,7 +2533,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Builtins::kRegExpPrototypeReplace, 2, false);
native_context()->set_regexp_replace_function(*fun);
DCHECK_EQ(JSRegExp::kSymbolReplaceFunctionDescriptorIndex,
- prototype->map().LastAdded());
+ prototype->map().LastAdded().as_int());
}
{
@@ -2515,7 +2542,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Builtins::kRegExpPrototypeSearch, 1, true);
native_context()->set_regexp_search_function(*fun);
DCHECK_EQ(JSRegExp::kSymbolSearchFunctionDescriptorIndex,
- prototype->map().LastAdded());
+ prototype->map().LastAdded().as_int());
}
{
@@ -2524,7 +2551,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Builtins::kRegExpPrototypeSplit, 2, false);
native_context()->set_regexp_split_function(*fun);
DCHECK_EQ(JSRegExp::kSymbolSplitFunctionDescriptorIndex,
- prototype->map().LastAdded());
+ prototype->map().LastAdded().as_int());
}
Handle<Map> prototype_map(prototype->map(), isolate());
@@ -2616,7 +2643,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
{
Handle<PropertyCell> cell =
factory->NewPropertyCell(factory->empty_string());
- cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
+ cell->set_value(Smi::FromInt(Protectors::kProtectorValid));
native_context()->set_regexp_species_protector(*cell);
}
@@ -2647,7 +2674,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
true);
Handle<JSFunction> regexp_string_iterator_function = CreateFunction(
- isolate(), "RegExpStringIterator", JS_REGEXP_STRING_ITERATOR_TYPE,
+ isolate(), "RegExpStringIterator", JS_REG_EXP_STRING_ITERATOR_TYPE,
JSRegExpStringIterator::kSize, 0, regexp_string_iterator_prototype,
Builtins::kIllegal);
regexp_string_iterator_function->shared().set_native(false);
@@ -2886,7 +2913,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
{ // -- D a t e T i m e F o r m a t
Handle<JSFunction> date_time_format_constructor = InstallFunction(
- isolate_, intl, "DateTimeFormat", JS_INTL_DATE_TIME_FORMAT_TYPE,
+ isolate_, intl, "DateTimeFormat", JS_DATE_TIME_FORMAT_TYPE,
JSDateTimeFormat::kSize, 0, factory->the_hole_value(),
Builtins::kDateTimeFormatConstructor);
date_time_format_constructor->shared().set_length(0);
@@ -2914,13 +2941,20 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
SimpleInstallGetter(isolate_, prototype, factory->format_string(),
Builtins::kDateTimeFormatPrototypeFormat, false);
+
+ SimpleInstallFunction(isolate_, prototype, "formatRange",
+ Builtins::kDateTimeFormatPrototypeFormatRange, 2,
+ false);
+ SimpleInstallFunction(
+ isolate_, prototype, "formatRangeToParts",
+ Builtins::kDateTimeFormatPrototypeFormatRangeToParts, 2, false);
}
{ // -- N u m b e r F o r m a t
- Handle<JSFunction> number_format_constructor = InstallFunction(
- isolate_, intl, "NumberFormat", JS_INTL_NUMBER_FORMAT_TYPE,
- JSNumberFormat::kSize, 0, factory->the_hole_value(),
- Builtins::kNumberFormatConstructor);
+ Handle<JSFunction> number_format_constructor =
+ InstallFunction(isolate_, intl, "NumberFormat", JS_NUMBER_FORMAT_TYPE,
+ JSNumberFormat::kSize, 0, factory->the_hole_value(),
+ Builtins::kNumberFormatConstructor);
number_format_constructor->shared().set_length(0);
number_format_constructor->shared().DontAdaptArguments();
InstallWithIntrinsicDefaultProto(
@@ -2949,8 +2983,8 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
{ // -- C o l l a t o r
Handle<JSFunction> collator_constructor = InstallFunction(
- isolate_, intl, "Collator", JS_INTL_COLLATOR_TYPE, JSCollator::kSize,
- 0, factory->the_hole_value(), Builtins::kCollatorConstructor);
+ isolate_, intl, "Collator", JS_COLLATOR_TYPE, JSCollator::kSize, 0,
+ factory->the_hole_value(), Builtins::kCollatorConstructor);
collator_constructor->shared().DontAdaptArguments();
InstallWithIntrinsicDefaultProto(isolate_, collator_constructor,
Context::INTL_COLLATOR_FUNCTION_INDEX);
@@ -2974,7 +3008,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
{ // -- V 8 B r e a k I t e r a t o r
Handle<JSFunction> v8_break_iterator_constructor = InstallFunction(
- isolate_, intl, "v8BreakIterator", JS_INTL_V8_BREAK_ITERATOR_TYPE,
+ isolate_, intl, "v8BreakIterator", JS_V8_BREAK_ITERATOR_TYPE,
JSV8BreakIterator::kSize, 0, factory->the_hole_value(),
Builtins::kV8BreakIteratorConstructor);
v8_break_iterator_constructor->shared().DontAdaptArguments();
@@ -3009,11 +3043,14 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
}
{ // -- P l u r a l R u l e s
- Handle<JSFunction> plural_rules_constructor = InstallFunction(
- isolate_, intl, "PluralRules", JS_INTL_PLURAL_RULES_TYPE,
- JSPluralRules::kSize, 0, factory->the_hole_value(),
- Builtins::kPluralRulesConstructor);
+ Handle<JSFunction> plural_rules_constructor =
+ InstallFunction(isolate_, intl, "PluralRules", JS_PLURAL_RULES_TYPE,
+ JSPluralRules::kSize, 0, factory->the_hole_value(),
+ Builtins::kPluralRulesConstructor);
plural_rules_constructor->shared().DontAdaptArguments();
+ InstallWithIntrinsicDefaultProto(
+ isolate_, plural_rules_constructor,
+ Context::INTL_PLURAL_RULES_FUNCTION_INDEX);
SimpleInstallFunction(isolate(), plural_rules_constructor,
"supportedLocalesOf",
@@ -3032,13 +3069,16 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Builtins::kPluralRulesPrototypeSelect, 1, false);
}
- { // -- R e l a t i v e T i m e F o r m a t e
+ { // -- R e l a t i v e T i m e F o r m a t
Handle<JSFunction> relative_time_format_fun = InstallFunction(
- isolate(), intl, "RelativeTimeFormat",
- JS_INTL_RELATIVE_TIME_FORMAT_TYPE, JSRelativeTimeFormat::kSize, 0,
- factory->the_hole_value(), Builtins::kRelativeTimeFormatConstructor);
+ isolate(), intl, "RelativeTimeFormat", JS_RELATIVE_TIME_FORMAT_TYPE,
+ JSRelativeTimeFormat::kSize, 0, factory->the_hole_value(),
+ Builtins::kRelativeTimeFormatConstructor);
relative_time_format_fun->shared().set_length(0);
relative_time_format_fun->shared().DontAdaptArguments();
+ InstallWithIntrinsicDefaultProto(
+ isolate_, relative_time_format_fun,
+ Context::INTL_RELATIVE_TIME_FORMAT_FUNCTION_INDEX);
SimpleInstallFunction(
isolate(), relative_time_format_fun, "supportedLocalesOf",
@@ -3063,12 +3103,14 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
}
{ // -- L i s t F o r m a t
- Handle<JSFunction> list_format_fun = InstallFunction(
- isolate(), intl, "ListFormat", JS_INTL_LIST_FORMAT_TYPE,
- JSListFormat::kSize, 0, factory->the_hole_value(),
- Builtins::kListFormatConstructor);
+ Handle<JSFunction> list_format_fun =
+ InstallFunction(isolate(), intl, "ListFormat", JS_LIST_FORMAT_TYPE,
+ JSListFormat::kSize, 0, factory->the_hole_value(),
+ Builtins::kListFormatConstructor);
list_format_fun->shared().set_length(0);
list_format_fun->shared().DontAdaptArguments();
+ InstallWithIntrinsicDefaultProto(
+ isolate_, list_format_fun, Context::INTL_LIST_FORMAT_FUNCTION_INDEX);
SimpleInstallFunction(isolate(), list_format_fun, "supportedLocalesOf",
Builtins::kListFormatSupportedLocalesOf, 1, false);
@@ -3091,7 +3133,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
{ // -- L o c a l e
Handle<JSFunction> locale_fun = InstallFunction(
- isolate(), intl, "Locale", JS_INTL_LOCALE_TYPE, JSLocale::kSize, 0,
+ isolate(), intl, "Locale", JS_LOCALE_TYPE, JSLocale::kSize, 0,
factory->the_hole_value(), Builtins::kLocaleConstructor);
InstallWithIntrinsicDefaultProto(isolate(), locale_fun,
Context::INTL_LOCALE_FUNCTION_INDEX);
@@ -3394,7 +3436,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
isolate_, prototype, "set", Builtins::kMapPrototypeSet, 2, true);
// Check that index of "set" function in JSCollection is correct.
DCHECK_EQ(JSCollection::kAddFunctionDescriptorIndex,
- prototype->map().LastAdded());
+ prototype->map().LastAdded().as_int());
native_context()->set_map_set(*map_set);
Handle<JSFunction> map_has = SimpleInstallFunction(
@@ -3490,7 +3532,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
isolate_, prototype, "add", Builtins::kSetPrototypeAdd, 1, true);
// Check that index of "add" function in JSCollection is correct.
DCHECK_EQ(JSCollection::kAddFunctionDescriptorIndex,
- prototype->map().LastAdded());
+ prototype->map().LastAdded().as_int());
native_context()->set_set_add(*set_add);
Handle<JSFunction> set_delete = SimpleInstallFunction(
@@ -3523,6 +3565,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<Map> map = factory->NewMap(
JS_MODULE_NAMESPACE_TYPE, JSModuleNamespace::kSize,
TERMINAL_FAST_ELEMENTS_KIND, JSModuleNamespace::kInObjectFieldCount);
+ map->SetConstructor(native_context()->object_function());
Map::SetPrototype(isolate(), map, isolate_->factory()->null_value());
Map::EnsureDescriptorSlack(isolate_, map, 1);
native_context()->set_js_module_namespace_map(*map);
@@ -3593,7 +3636,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
isolate_, prototype, "set", Builtins::kWeakMapPrototypeSet, 2, true);
// Check that index of "set" function in JSWeakCollection is correct.
DCHECK_EQ(JSWeakCollection::kAddFunctionDescriptorIndex,
- prototype->map().LastAdded());
+ prototype->map().LastAdded().as_int());
native_context()->set_weakmap_set(*weakmap_set);
SimpleInstallFunction(isolate_, prototype, "has",
@@ -3628,7 +3671,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
isolate_, prototype, "add", Builtins::kWeakSetPrototypeAdd, 1, true);
// Check that index of "add" function in JSWeakCollection is correct.
DCHECK_EQ(JSWeakCollection::kAddFunctionDescriptorIndex,
- prototype->map().LastAdded());
+ prototype->map().LastAdded().as_int());
native_context()->set_weakset_add(*weakset_add);
@@ -3748,7 +3791,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<String> arguments_string = factory->Arguments_string();
NewFunctionArgs args = NewFunctionArgs::ForBuiltinWithPrototype(
arguments_string, isolate_->initial_object_prototype(),
- JS_ARGUMENTS_TYPE, JSSloppyArgumentsObject::kSize, 2,
+ JS_ARGUMENTS_OBJECT_TYPE, JSSloppyArgumentsObject::kSize, 2,
Builtins::kIllegal, MUTABLE);
Handle<JSFunction> function = factory->NewFunction(args);
Handle<Map> map(function->initial_map(), isolate());
@@ -3805,8 +3848,9 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
callee->set_setter(*poison);
// Create the map. Allocate one in-object field for length.
- Handle<Map> map = factory->NewMap(
- JS_ARGUMENTS_TYPE, JSStrictArgumentsObject::kSize, PACKED_ELEMENTS, 1);
+ Handle<Map> map =
+ factory->NewMap(JS_ARGUMENTS_OBJECT_TYPE,
+ JSStrictArgumentsObject::kSize, PACKED_ELEMENTS, 1);
// Create the descriptor array for the arguments object.
Map::EnsureDescriptorSlack(isolate_, map, 2);
@@ -4265,16 +4309,14 @@ EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_import_meta)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_regexp_sequence)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_optional_chaining)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_nullish)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_top_level_await)
#ifdef V8_INTL_SUPPORT
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_intl_add_calendar_numbering_system)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_intl_bigint)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_intl_dateformat_day_period)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(
harmony_intl_dateformat_fractional_second_digits)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_intl_dateformat_quarter)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_intl_datetime_style)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_intl_numberformat_unified)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_intl_other_calendars)
#endif // V8_INTL_SUPPORT
#undef EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE
@@ -4419,34 +4461,20 @@ void Genesis::InitializeGlobal_harmony_promise_all_settled() {
}
}
-#ifdef V8_INTL_SUPPORT
-
-void Genesis::InitializeGlobal_harmony_intl_date_format_range() {
- if (!FLAG_harmony_intl_date_format_range) return;
-
- Handle<JSObject> intl = Handle<JSObject>::cast(
- JSReceiver::GetProperty(
- isolate(),
- Handle<JSReceiver>(native_context()->global_object(), isolate()),
- factory()->InternalizeUtf8String("Intl"))
- .ToHandleChecked());
-
- Handle<JSFunction> date_time_format_constructor = Handle<JSFunction>::cast(
- JSReceiver::GetProperty(
- isolate(), intl, factory()->InternalizeUtf8String("DateTimeFormat"))
- .ToHandleChecked());
+void Genesis::InitializeGlobal_harmony_regexp_match_indices() {
+ if (!FLAG_harmony_regexp_match_indices) return;
- Handle<JSObject> prototype(
- JSObject::cast(date_time_format_constructor->prototype()), isolate_);
-
- SimpleInstallFunction(isolate_, prototype, "formatRange",
- Builtins::kDateTimeFormatPrototypeFormatRange, 2,
- false);
- SimpleInstallFunction(isolate_, prototype, "formatRangeToParts",
- Builtins::kDateTimeFormatPrototypeFormatRangeToParts, 2,
- false);
+ // Add indices accessor to JSRegExpResult's initial map.
+ Handle<Map> initial_map(native_context()->regexp_result_map(), isolate());
+ Descriptor d = Descriptor::AccessorConstant(
+ factory()->indices_string(), factory()->regexp_result_indices_accessor(),
+ NONE);
+ Map::EnsureDescriptorSlack(isolate(), initial_map, 1);
+ initial_map->AppendDescriptor(isolate(), &d);
}
+#ifdef V8_INTL_SUPPORT
+
void Genesis::InitializeGlobal_harmony_intl_segmenter() {
if (!FLAG_harmony_intl_segmenter) return;
Handle<JSObject> intl = Handle<JSObject>::cast(
@@ -4457,10 +4485,12 @@ void Genesis::InitializeGlobal_harmony_intl_segmenter() {
.ToHandleChecked());
Handle<JSFunction> segmenter_fun = InstallFunction(
- isolate(), intl, "Segmenter", JS_INTL_SEGMENTER_TYPE, JSSegmenter::kSize,
- 0, factory()->the_hole_value(), Builtins::kSegmenterConstructor);
+ isolate(), intl, "Segmenter", JS_SEGMENTER_TYPE, JSSegmenter::kSize, 0,
+ factory()->the_hole_value(), Builtins::kSegmenterConstructor);
segmenter_fun->shared().set_length(0);
segmenter_fun->shared().DontAdaptArguments();
+ InstallWithIntrinsicDefaultProto(isolate_, segmenter_fun,
+ Context::INTL_SEGMENTER_FUNCTION_INDEX);
SimpleInstallFunction(isolate(), segmenter_fun, "supportedLocalesOf",
Builtins::kSegmenterSupportedLocalesOf, 1, false);
@@ -4515,7 +4545,7 @@ void Genesis::InitializeGlobal_harmony_intl_segmenter() {
isolate()->factory()->SegmentIterator_string())
.ToHandleChecked();
Handle<JSFunction> segment_iterator_fun = CreateFunction(
- isolate(), name_string, JS_INTL_SEGMENT_ITERATOR_TYPE,
+ isolate(), name_string, JS_SEGMENT_ITERATOR_TYPE,
JSSegmentIterator::kSize, 0, prototype, Builtins::kIllegal);
segment_iterator_fun->shared().set_native(false);
@@ -4900,42 +4930,10 @@ bool Genesis::InstallNatives() {
// predefines the properties index, input, and groups).
{
// JSRegExpResult initial map.
-
- // Find global.Array.prototype to inherit from.
- Handle<JSFunction> array_constructor(native_context()->array_function(),
- isolate());
- Handle<JSObject> array_prototype(
- JSObject::cast(array_constructor->instance_prototype()), isolate());
-
- // Add initial map.
- Handle<Map> initial_map = factory()->NewMap(
- JS_ARRAY_TYPE, JSRegExpResult::kSize, TERMINAL_FAST_ELEMENTS_KIND,
- JSRegExpResult::kInObjectPropertyCount);
- initial_map->SetConstructor(*array_constructor);
-
- // Set prototype on map.
- initial_map->set_has_non_instance_prototype(false);
- Map::SetPrototype(isolate(), initial_map, array_prototype);
-
- // Update map with length accessor from Array and add "index", "input" and
- // "groups".
- Map::EnsureDescriptorSlack(isolate(), initial_map,
- JSRegExpResult::kInObjectPropertyCount + 1);
-
- // length descriptor.
- {
- JSFunction array_function = native_context()->array_function();
- Handle<DescriptorArray> array_descriptors(
- array_function.initial_map().instance_descriptors(), isolate());
- Handle<String> length = factory()->length_string();
- int old = array_descriptors->SearchWithCache(
- isolate(), *length, array_function.initial_map());
- DCHECK_NE(old, DescriptorArray::kNotFound);
- Descriptor d = Descriptor::AccessorConstant(
- length, handle(array_descriptors->GetStrongValue(old), isolate()),
- array_descriptors->GetDetails(old).attributes());
- initial_map->AppendDescriptor(isolate(), &d);
- }
+ // Add additional slack to the initial map in case regexp_match_indices
+ // is enabled, to account for the additional descriptor.
+ Handle<Map> initial_map = CreateInitialMapForArraySubclass(
+ JSRegExpResult::kSize, JSRegExpResult::kInObjectPropertyCount);
// index descriptor.
{
@@ -4961,9 +4959,53 @@ bool Genesis::InstallNatives() {
initial_map->AppendDescriptor(isolate(), &d);
}
+ // Private, internal-only fields. All of the remaining fields have special
+ // symbols to prevent their use in JavaScript.
+ // cached_indices_or_match_info descriptor.
+ {
+ PropertyAttributes attribs = DONT_ENUM;
+ {
+ Descriptor d = Descriptor::DataField(
+ isolate(),
+ factory()->regexp_result_cached_indices_or_match_info_symbol(),
+ JSRegExpResult::kCachedIndicesOrMatchInfoIndex, attribs,
+ Representation::Tagged());
+ initial_map->AppendDescriptor(isolate(), &d);
+ }
+
+ // names descriptor.
+ {
+ Descriptor d = Descriptor::DataField(
+ isolate(), factory()->regexp_result_names_symbol(),
+ JSRegExpResult::kNamesIndex, attribs, Representation::Tagged());
+ initial_map->AppendDescriptor(isolate(), &d);
+ }
+ }
+
native_context()->set_regexp_result_map(*initial_map);
}
+ // Create a constructor for JSRegExpResultIndices (a variant of Array that
+ // predefines the groups property).
+ {
+ // JSRegExpResultIndices initial map.
+ Handle<Map> initial_map = CreateInitialMapForArraySubclass(
+ JSRegExpResultIndices::kSize,
+ JSRegExpResultIndices::kInObjectPropertyCount);
+
+ // groups descriptor.
+ {
+ Descriptor d = Descriptor::DataField(
+ isolate(), factory()->groups_string(),
+ JSRegExpResultIndices::kGroupsIndex, NONE, Representation::Tagged());
+ initial_map->AppendDescriptor(isolate(), &d);
+ DCHECK_EQ(initial_map->LastAdded().as_int(),
+ JSRegExpResultIndices::kGroupsDescriptorIndex);
+ }
+
+ native_context()->set_regexp_result_indices_map(*initial_map);
+ }
+
// Add @@iterator method to the arguments object maps.
{
PropertyAttributes attribs = DONT_ENUM;
@@ -5263,7 +5305,7 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
if (from->HasFastProperties()) {
Handle<DescriptorArray> descs =
Handle<DescriptorArray>(from->map().instance_descriptors(), isolate());
- for (int i = 0; i < from->map().NumberOfOwnDescriptors(); i++) {
+ for (InternalIndex i : from->map().IterateOwnDescriptors()) {
PropertyDetails details = descs->GetDetails(i);
if (details.location() == kField) {
if (details.kind() == kData) {
@@ -5365,6 +5407,45 @@ void Genesis::TransferObject(Handle<JSObject> from, Handle<JSObject> to) {
JSObject::ForceSetPrototype(to, proto);
}
+Handle<Map> Genesis::CreateInitialMapForArraySubclass(int size,
+ int inobject_properties) {
+ // Find global.Array.prototype to inherit from.
+ Handle<JSFunction> array_constructor(native_context()->array_function(),
+ isolate());
+ Handle<JSObject> array_prototype(native_context()->initial_array_prototype(),
+ isolate());
+
+ // Add initial map.
+ Handle<Map> initial_map = factory()->NewMap(
+ JS_ARRAY_TYPE, size, TERMINAL_FAST_ELEMENTS_KIND, inobject_properties);
+ initial_map->SetConstructor(*array_constructor);
+
+ // Set prototype on map.
+ initial_map->set_has_non_instance_prototype(false);
+ Map::SetPrototype(isolate(), initial_map, array_prototype);
+
+ // Update map with length accessor from Array.
+ static constexpr int kTheLengthAccessor = 1;
+ Map::EnsureDescriptorSlack(isolate(), initial_map,
+ inobject_properties + kTheLengthAccessor);
+
+ // length descriptor.
+ {
+ JSFunction array_function = native_context()->array_function();
+ Handle<DescriptorArray> array_descriptors(
+ array_function.initial_map().instance_descriptors(), isolate());
+ Handle<String> length = factory()->length_string();
+ InternalIndex old = array_descriptors->SearchWithCache(
+ isolate(), *length, array_function.initial_map());
+ DCHECK(old.is_found());
+ Descriptor d = Descriptor::AccessorConstant(
+ length, handle(array_descriptors->GetStrongValue(old), isolate()),
+ array_descriptors->GetDetails(old).attributes());
+ initial_map->AppendDescriptor(isolate(), &d);
+ }
+ return initial_map;
+}
+
Genesis::Genesis(
Isolate* isolate, MaybeHandle<JSGlobalProxy> maybe_global_proxy,
v8::Local<v8::ObjectTemplate> global_proxy_template,
diff --git a/deps/v8/src/init/heap-symbols.h b/deps/v8/src/init/heap-symbols.h
index ce5a4f1a8b..9d630f547b 100644
--- a/deps/v8/src/init/heap-symbols.h
+++ b/deps/v8/src/init/heap-symbols.h
@@ -82,6 +82,7 @@
V(_, plusSign_string, "plusSign") \
V(_, quarter_string, "quarter") \
V(_, region_string, "region") \
+ V(_, relatedYear_string, "relatedYear") \
V(_, scientific_string, "scientific") \
V(_, second_string, "second") \
V(_, segment_string, "segment") \
@@ -107,7 +108,8 @@
V(_, unit_string, "unit") \
V(_, unitDisplay_string, "unitDisplay") \
V(_, weekday_string, "weekday") \
- V(_, year_string, "year")
+ V(_, year_string, "year") \
+ V(_, yearName_string, "yearName")
#else // V8_INTL_SUPPORT
#define INTERNALIZED_STRING_LIST_GENERATOR_INTL(V, _)
#endif // V8_INTL_SUPPORT
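These heap-symbols lists are classic X-macros: each V(_, name, "literal") row is expanded several times with different definitions of V (for example into root enums and string tables). A minimal standalone example of the expansion technique; the list contents and names below are illustrative, not V8's real lists:

#include <cstdio>

#define DEMO_STRING_LIST(V, _)            \
  V(_, relatedYear_string, "relatedYear") \
  V(_, yearName_string, "yearName")

#define DEMO_AS_ENUM(_, name, literal) k_##name,
enum DemoStringIndex { DEMO_STRING_LIST(DEMO_AS_ENUM, 0) kDemoStringCount };

#define DEMO_AS_LITERAL(_, name, literal) literal,
static const char* kDemoLiterals[] = {DEMO_STRING_LIST(DEMO_AS_LITERAL, 0)};

int main() {
  // The enum indices and the literal table stay in sync automatically.
  for (int i = 0; i < kDemoStringCount; i++) {
    printf("%d -> %s\n", i, kDemoLiterals[i]);
  }
}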
@@ -202,6 +204,7 @@
V(_, illegal_access_string, "illegal access") \
V(_, illegal_argument_string, "illegal argument") \
V(_, index_string, "index") \
+ V(_, indices_string, "indices") \
V(_, Infinity_string, "Infinity") \
V(_, infinity_string, "infinity") \
V(_, input_string, "input") \
@@ -209,6 +212,8 @@
V(_, Int32Array_string, "Int32Array") \
V(_, Int8Array_string, "Int8Array") \
V(_, isExtensible_string, "isExtensible") \
+ V(_, jsMemoryEstimate_string, "jsMemoryEstimate") \
+ V(_, jsMemoryRange_string, "jsMemoryRange") \
V(_, keys_string, "keys") \
V(_, lastIndex_string, "lastIndex") \
V(_, length_string, "length") \
@@ -299,6 +304,7 @@
V(_, toJSON_string, "toJSON") \
V(_, toString_string, "toString") \
V(_, true_string, "true") \
+ V(_, total_string, "total") \
V(_, TypeError_string, "TypeError") \
V(_, Uint16Array_string, "Uint16Array") \
V(_, Uint32Array_string, "Uint32Array") \
@@ -318,35 +324,36 @@
V(_, writable_string, "writable") \
V(_, zero_string, "0")
-#define PRIVATE_SYMBOL_LIST_GENERATOR(V, _) \
- V(_, call_site_frame_array_symbol) \
- V(_, call_site_frame_index_symbol) \
- V(_, console_context_id_symbol) \
- V(_, console_context_name_symbol) \
- V(_, class_fields_symbol) \
- V(_, class_positions_symbol) \
- V(_, detailed_stack_trace_symbol) \
- V(_, elements_transition_symbol) \
- V(_, error_end_pos_symbol) \
- V(_, error_script_symbol) \
- V(_, error_start_pos_symbol) \
- V(_, frozen_symbol) \
- V(_, generic_symbol) \
- V(_, home_object_symbol) \
- V(_, interpreter_trampoline_symbol) \
- V(_, megamorphic_symbol) \
- V(_, native_context_index_symbol) \
- V(_, nonextensible_symbol) \
- V(_, not_mapped_symbol) \
- V(_, premonomorphic_symbol) \
- V(_, promise_debug_marker_symbol) \
- V(_, promise_forwarding_handler_symbol) \
- V(_, promise_handled_by_symbol) \
- V(_, sealed_symbol) \
- V(_, stack_trace_symbol) \
- V(_, strict_function_transition_symbol) \
- V(_, wasm_exception_tag_symbol) \
- V(_, wasm_exception_values_symbol) \
+#define PRIVATE_SYMBOL_LIST_GENERATOR(V, _) \
+ V(_, call_site_frame_array_symbol) \
+ V(_, call_site_frame_index_symbol) \
+ V(_, console_context_id_symbol) \
+ V(_, console_context_name_symbol) \
+ V(_, class_fields_symbol) \
+ V(_, class_positions_symbol) \
+ V(_, detailed_stack_trace_symbol) \
+ V(_, elements_transition_symbol) \
+ V(_, error_end_pos_symbol) \
+ V(_, error_script_symbol) \
+ V(_, error_start_pos_symbol) \
+ V(_, frozen_symbol) \
+ V(_, generic_symbol) \
+ V(_, home_object_symbol) \
+ V(_, interpreter_trampoline_symbol) \
+ V(_, megamorphic_symbol) \
+ V(_, native_context_index_symbol) \
+ V(_, nonextensible_symbol) \
+ V(_, not_mapped_symbol) \
+ V(_, promise_debug_marker_symbol) \
+ V(_, promise_forwarding_handler_symbol) \
+ V(_, promise_handled_by_symbol) \
+ V(_, regexp_result_cached_indices_or_match_info_symbol) \
+ V(_, regexp_result_names_symbol) \
+ V(_, sealed_symbol) \
+ V(_, stack_trace_symbol) \
+ V(_, strict_function_transition_symbol) \
+ V(_, wasm_exception_tag_symbol) \
+ V(_, wasm_exception_values_symbol) \
V(_, uninitialized_symbol)
#define PUBLIC_SYMBOL_LIST_GENERATOR(V, _) \
diff --git a/deps/v8/src/init/icu_util.cc b/deps/v8/src/init/icu_util.cc
index 81c66e6a20..22ea3837cd 100644
--- a/deps/v8/src/init/icu_util.cc
+++ b/deps/v8/src/init/icu_util.cc
@@ -40,26 +40,23 @@ bool InitializeICUDefaultLocation(const char* exec_path,
const char* icu_data_file) {
#if !defined(V8_INTL_SUPPORT)
return true;
-#else
-#if ICU_UTIL_DATA_IMPL == ICU_UTIL_DATA_FILE
+#elif ICU_UTIL_DATA_IMPL == ICU_UTIL_DATA_FILE
if (icu_data_file) {
return InitializeICU(icu_data_file);
}
- char* icu_data_file_default;
#if defined(V8_TARGET_LITTLE_ENDIAN)
- base::RelativePath(&icu_data_file_default, exec_path, "icudtl.dat");
+ std::unique_ptr<char[]> icu_data_file_default =
+ base::RelativePath(exec_path, "icudtl.dat");
#elif defined(V8_TARGET_BIG_ENDIAN)
- base::RelativePath(&icu_data_file_default, exec_path, "icudtb.dat");
+ std::unique_ptr<char[]> icu_data_file_default =
+ base::RelativePath(exec_path, "icudtb.dat");
#else
#error Unknown byte ordering
#endif
- bool result = InitializeICU(icu_data_file_default);
- free(icu_data_file_default);
- return result;
+ return InitializeICU(icu_data_file_default.get());
#else
return InitializeICU(nullptr);
#endif
-#endif
}
bool InitializeICU(const char* icu_data_file) {
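The pattern in this hunk (also used in startup-data-util.cc below) is base::RelativePath changing from a malloc-and-out-parameter API that callers had to free() into one returning std::unique_ptr<char[]>. A sketch of that shape; the path-joining details are an assumption, not copied from src/base/file-utils.cc:

#include <cstdio>
#include <cstring>
#include <memory>

// Assumed behavior: keep the directory part of exec_path (up to and
// including the last '/') and append name. Ownership is returned.
std::unique_ptr<char[]> RelativePathSketch(const char* exec_path,
                                           const char* name) {
  const char* last_slash = strrchr(exec_path, '/');
  size_t dir_len = last_slash ? (last_slash - exec_path + 1) : 0;
  size_t name_len = strlen(name);
  std::unique_ptr<char[]> buffer(new char[dir_len + name_len + 1]);
  memcpy(buffer.get(), exec_path, dir_len);
  memcpy(buffer.get() + dir_len, name, name_len + 1);  // copies the '\0'
  return buffer;  // freed automatically; no free() at call sites
}

int main() {
  auto path = RelativePathSketch("/usr/local/bin/d8", "icudtl.dat");
  printf("%s\n", path.get());  // /usr/local/bin/icudtl.dat
}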
diff --git a/deps/v8/src/init/isolate-allocator.cc b/deps/v8/src/init/isolate-allocator.cc
index 6a9b4c33cd..b9ec6c3f43 100644
--- a/deps/v8/src/init/isolate-allocator.cc
+++ b/deps/v8/src/init/isolate-allocator.cc
@@ -6,6 +6,7 @@
#include "src/base/bounded-page-allocator.h"
#include "src/common/ptr-compr.h"
#include "src/execution/isolate.h"
+#include "src/utils/memcopy.h"
#include "src/utils/utils.h"
namespace v8 {
@@ -38,21 +39,39 @@ IsolateAllocator::~IsolateAllocator() {
}
#if V8_TARGET_ARCH_64_BIT
+
+namespace {
+
+// "IsolateRootBiasPage" is an optional region before the 4Gb aligned
+// reservation. This "IsolateRootBiasPage" page is supposed to be used for
+// storing part of the Isolate object when Isolate::isolate_root_bias() is
+// not zero.
+inline size_t GetIsolateRootBiasPageSize(
+ v8::PageAllocator* platform_page_allocator) {
+ return RoundUp(Isolate::isolate_root_bias(),
+ platform_page_allocator->AllocatePageSize());
+}
+
+} // namespace
+
Address IsolateAllocator::InitReservation() {
v8::PageAllocator* platform_page_allocator = GetPlatformPageAllocator();
- // Reserve a 4Gb region so that the middle is 4Gb aligned.
- // The VirtualMemory API does not support such an constraint so we have to
- // implement it manually here.
- size_t reservation_size = kPtrComprHeapReservationSize;
- size_t base_alignment = kPtrComprIsolateRootAlignment;
+ const size_t kIsolateRootBiasPageSize =
+ GetIsolateRootBiasPageSize(platform_page_allocator);
+
+ // Reserve a |4Gb + kIsolateRootBiasPageSize| region such that the
+ // reservation address plus |kIsolateRootBiasPageSize| is 4Gb-aligned.
+ const size_t reservation_size =
+ kPtrComprHeapReservationSize + kIsolateRootBiasPageSize;
+ const size_t base_alignment = kPtrComprIsolateRootAlignment;
const int kMaxAttempts = 4;
for (int attempt = 0; attempt < kMaxAttempts; ++attempt) {
Address hint = RoundDown(reinterpret_cast<Address>(
platform_page_allocator->GetRandomMmapAddr()),
- base_alignment) +
- kPtrComprIsolateRootBias;
+ base_alignment) -
+ kIsolateRootBiasPageSize;
// Within this reservation there will be a sub-region with proper alignment.
VirtualMemory padded_reservation(platform_page_allocator,
@@ -60,12 +79,11 @@ Address IsolateAllocator::InitReservation() {
reinterpret_cast<void*>(hint));
if (!padded_reservation.IsReserved()) break;
- // Find such a sub-region inside the reservation that it's middle is
- // |base_alignment|-aligned.
+ // Find properly aligned sub-region inside the reservation.
Address address =
- RoundUp(padded_reservation.address() + kPtrComprIsolateRootBias,
+ RoundUp(padded_reservation.address() + kIsolateRootBiasPageSize,
base_alignment) -
- kPtrComprIsolateRootBias;
+ kIsolateRootBiasPageSize;
CHECK(padded_reservation.InVM(address, reservation_size));
#if defined(V8_OS_FUCHSIA)
@@ -98,16 +116,16 @@ Address IsolateAllocator::InitReservation() {
if (!reservation.IsReserved()) break;
// The reservation could still be somewhere else but we can accept it
- // if the reservation has the required alignment.
- Address aligned_address =
- RoundUp(reservation.address() + kPtrComprIsolateRootBias,
+ // if it has the required alignment.
+ Address address =
+ RoundUp(reservation.address() + kIsolateRootBiasPageSize,
base_alignment) -
- kPtrComprIsolateRootBias;
+ kIsolateRootBiasPageSize;
- if (reservation.address() == aligned_address) {
+ if (reservation.address() == address) {
reservation_ = std::move(reservation);
CHECK_EQ(reservation_.size(), reservation_size);
- return aligned_address;
+ return address;
}
}
}
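The reservation logic above is pure address arithmetic: reserve |4Gb + bias| bytes, then choose the lowest address inside the reservation such that address + bias is 4Gb-aligned. A standalone check of that arithmetic; all constants are illustrative, and RoundUp/RoundDown assume power-of-two alignment as in V8:

#include <cassert>
#include <cstdint>
#include <initializer_list>

using Address = uint64_t;

constexpr Address RoundDown(Address x, Address alignment) {
  return x & ~(alignment - 1);
}
constexpr Address RoundUp(Address x, Address alignment) {
  return RoundDown(x + alignment - 1, alignment);
}

int main() {
  const Address kAlignment = Address{4} * 1024 * 1024 * 1024;  // 4Gb
  const Address kBiasPageSize = 64 * 1024;  // hypothetical bias page size
  // Any reservation start the OS might hand back:
  for (Address base : {Address{0x1234567000}, Address{0x7f0000abc000}}) {
    Address address =
        RoundUp(base + kBiasPageSize, kAlignment) - kBiasPageSize;
    assert((address + kBiasPageSize) % kAlignment == 0);
    assert(address >= base);  // still inside the padded reservation
  }
}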
@@ -116,21 +134,26 @@ Address IsolateAllocator::InitReservation() {
return kNullAddress;
}
-void IsolateAllocator::CommitPagesForIsolate(Address heap_address) {
- CHECK(reservation_.InVM(heap_address, kPtrComprHeapReservationSize));
+void IsolateAllocator::CommitPagesForIsolate(Address heap_reservation_address) {
+ v8::PageAllocator* platform_page_allocator = GetPlatformPageAllocator();
+
+ const size_t kIsolateRootBiasPageSize =
+ GetIsolateRootBiasPageSize(platform_page_allocator);
- Address isolate_root = heap_address + kPtrComprIsolateRootBias;
+ Address isolate_root = heap_reservation_address + kIsolateRootBiasPageSize;
CHECK(IsAligned(isolate_root, kPtrComprIsolateRootAlignment));
- v8::PageAllocator* platform_page_allocator = GetPlatformPageAllocator();
+ CHECK(reservation_.InVM(
+ heap_reservation_address,
+ kPtrComprHeapReservationSize + kIsolateRootBiasPageSize));
// Simplify BoundedPageAllocator's life by configuring it to use the same
// page size as the Heap will use (MemoryChunk::kPageSize).
size_t page_size = RoundUp(size_t{1} << kPageSizeBits,
platform_page_allocator->AllocatePageSize());
- page_allocator_instance_ = base::make_unique<base::BoundedPageAllocator>(
- platform_page_allocator, heap_address, kPtrComprHeapReservationSize,
+ page_allocator_instance_ = std::make_unique<base::BoundedPageAllocator>(
+ platform_page_allocator, isolate_root, kPtrComprHeapReservationSize,
page_size);
page_allocator_ = page_allocator_instance_.get();
@@ -139,7 +162,7 @@ void IsolateAllocator::CommitPagesForIsolate(Address heap_address) {
// Inform the bounded page allocator about reserved pages.
{
- Address reserved_region_address = RoundDown(isolate_address, page_size);
+ Address reserved_region_address = isolate_root;
size_t reserved_region_size =
RoundUp(isolate_end, page_size) - reserved_region_address;
@@ -163,10 +186,8 @@ void IsolateAllocator::CommitPagesForIsolate(Address heap_address) {
PageAllocator::kReadWrite));
if (Heap::ShouldZapGarbage()) {
- for (Address address = committed_region_address;
- address < committed_region_size; address += kSystemPointerSize) {
- base::Memory<Address>(address) = static_cast<Address>(kZapValue);
- }
+ MemsetPointer(reinterpret_cast<Address*>(committed_region_address),
+ kZapValue, committed_region_size / kSystemPointerSize);
}
}
isolate_memory_ = reinterpret_cast<void*>(isolate_address);
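Besides being shorter, the MemsetPointer form fixes the removed loop's bound, which compared a running address against committed_region_size (a byte count) rather than an end address. A standalone equivalent of the new word-granular zap; Address and kZapValue here are illustrative stand-ins:

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <vector>

using Address = uintptr_t;
constexpr Address kZapValue = 0xdeadbeef;  // stand-in zap pattern

// Fills |counter| pointer-sized slots starting at |start| with |value|.
void MemsetPointerSketch(Address* start, Address value, size_t counter) {
  std::fill_n(start, counter, value);
}

int main() {
  std::vector<Address> region(16, 0);
  const size_t region_size_in_bytes = region.size() * sizeof(Address);
  MemsetPointerSketch(region.data(), kZapValue,
                      region_size_in_bytes / sizeof(Address));
  for (Address word : region) assert(word == kZapValue);
}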
diff --git a/deps/v8/src/init/isolate-allocator.h b/deps/v8/src/init/isolate-allocator.h
index cd0e102d40..5f8b48ef3a 100644
--- a/deps/v8/src/init/isolate-allocator.h
+++ b/deps/v8/src/init/isolate-allocator.h
@@ -5,6 +5,8 @@
#ifndef V8_INIT_ISOLATE_ALLOCATOR_H_
#define V8_INIT_ISOLATE_ALLOCATOR_H_
+#include <memory>
+
#include "src/base/bounded-page-allocator.h"
#include "src/base/page-allocator.h"
#include "src/common/globals.h"
@@ -46,7 +48,7 @@ class V8_EXPORT_PRIVATE IsolateAllocator final {
private:
Address InitReservation();
- void CommitPagesForIsolate(Address heap_address);
+ void CommitPagesForIsolate(Address heap_reservation_address);
// The allocated memory for Isolate instance.
void* isolate_memory_ = nullptr;
diff --git a/deps/v8/src/init/setup-isolate-deserialize.cc b/deps/v8/src/init/setup-isolate-deserialize.cc
index 8a73ff0c8a..ff0268d3c8 100644
--- a/deps/v8/src/init/setup-isolate-deserialize.cc
+++ b/deps/v8/src/init/setup-isolate-deserialize.cc
@@ -7,7 +7,6 @@
#include "src/base/logging.h"
#include "src/execution/isolate.h"
#include "src/interpreter/interpreter.h"
-#include "src/objects/objects-inl.h"
#include "src/utils/ostreams.h"
namespace v8 {
diff --git a/deps/v8/src/init/startup-data-util.cc b/deps/v8/src/init/startup-data-util.cc
index 54d697c591..d234c152f8 100644
--- a/deps/v8/src/init/startup-data-util.cc
+++ b/deps/v8/src/init/startup-data-util.cc
@@ -38,6 +38,10 @@ void FreeStartupData() {
DeleteStartupData(&g_snapshot);
}
+// TODO(jgruber): Rename to FreeStartupData once natives support has been
+// removed (https://crbug.com/v8/7624).
+void FreeStartupDataSnapshotOnly() { DeleteStartupData(&g_snapshot); }
+
void Load(const char* blob_file, v8::StartupData* startup_data,
void (*setter_fn)(v8::StartupData*)) {
ClearStartupData(startup_data);
@@ -67,7 +71,7 @@ void Load(const char* blob_file, v8::StartupData* startup_data,
}
void LoadFromFiles(const char* natives_blob, const char* snapshot_blob) {
- Load(natives_blob, &g_natives, v8::V8::SetNativesDataBlob);
+ Load(natives_blob, &g_natives, i::V8::SetNativesBlob);
Load(snapshot_blob, &g_snapshot, v8::V8::SetSnapshotDataBlob);
atexit(&FreeStartupData);
@@ -78,19 +82,17 @@ void LoadFromFiles(const char* natives_blob, const char* snapshot_blob) {
void InitializeExternalStartupData(const char* directory_path) {
#ifdef V8_USE_EXTERNAL_STARTUP_DATA
- char* natives;
- char* snapshot;
const char* snapshot_name = "snapshot_blob.bin";
#ifdef V8_MULTI_SNAPSHOTS
if (!FLAG_untrusted_code_mitigations) {
snapshot_name = "snapshot_blob_trusted.bin";
}
#endif
- LoadFromFiles(
- base::RelativePath(&natives, directory_path, "natives_blob.bin"),
- base::RelativePath(&snapshot, directory_path, snapshot_name));
- free(natives);
- free(snapshot);
+ std::unique_ptr<char[]> natives =
+ base::RelativePath(directory_path, "natives_blob.bin");
+ std::unique_ptr<char[]> snapshot =
+ base::RelativePath(directory_path, snapshot_name);
+ LoadFromFiles(natives.get(), snapshot.get());
#endif // V8_USE_EXTERNAL_STARTUP_DATA
}
@@ -101,5 +103,12 @@ void InitializeExternalStartupData(const char* natives_blob,
#endif // V8_USE_EXTERNAL_STARTUP_DATA
}
+void InitializeExternalStartupDataFromFile(const char* snapshot_blob) {
+#ifdef V8_USE_EXTERNAL_STARTUP_DATA
+ Load(snapshot_blob, &g_snapshot, v8::V8::SetSnapshotDataBlob);
+ atexit(&FreeStartupDataSnapshotOnly);
+#endif // V8_USE_EXTERNAL_STARTUP_DATA
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/init/startup-data-util.h b/deps/v8/src/init/startup-data-util.h
index dfa26510ab..e4d1e540f7 100644
--- a/deps/v8/src/init/startup-data-util.h
+++ b/deps/v8/src/init/startup-data-util.h
@@ -21,6 +21,7 @@ void InitializeExternalStartupData(const char* directory_path);
void InitializeExternalStartupData(const char* natives_blob,
const char* snapshot_blob);
+void InitializeExternalStartupDataFromFile(const char* snapshot_blob);
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/init/v8.cc b/deps/v8/src/init/v8.cc
index 15eb929332..fd26c60848 100644
--- a/deps/v8/src/init/v8.cc
+++ b/deps/v8/src/init/v8.cc
@@ -90,6 +90,12 @@ void V8::InitializeOncePerProcessImpl() {
FLAG_expose_wasm = false;
}
+ if (FLAG_regexp_interpret_all && FLAG_regexp_tier_up) {
+ // Turning off the tier-up strategy, because the --regexp-interpret-all and
+ // --regexp-tier-up flags are incompatible.
+ FLAG_regexp_tier_up = false;
+ }
+
// The --jitless and --interpreted-frames-native-stack flags are incompatible
// since the latter requires code generation while the former prohibits code
// generation.
diff --git a/deps/v8/src/inspector/custom-preview.h b/deps/v8/src/inspector/custom-preview.h
index 1e8c74a154..d7b24adcce 100644
--- a/deps/v8/src/inspector/custom-preview.h
+++ b/deps/v8/src/inspector/custom-preview.h
@@ -5,6 +5,8 @@
#ifndef V8_INSPECTOR_CUSTOM_PREVIEW_H_
#define V8_INSPECTOR_CUSTOM_PREVIEW_H_
+#include <memory>
+
#include "src/inspector/protocol/Protocol.h"
#include "src/inspector/protocol/Runtime.h"
diff --git a/deps/v8/src/inspector/injected-script.cc b/deps/v8/src/inspector/injected-script.cc
index 18a10285dd..6afc6486e4 100644
--- a/deps/v8/src/inspector/injected-script.cc
+++ b/deps/v8/src/inspector/injected-script.cc
@@ -289,7 +289,7 @@ Response InjectedScript::getProperties(
int sessionId = m_sessionId;
v8::TryCatch tryCatch(isolate);
- *properties = v8::base::make_unique<Array<PropertyDescriptor>>();
+ *properties = std::make_unique<Array<PropertyDescriptor>>();
std::vector<PropertyMirror> mirrors;
PropertyAccumulator accumulator(&mirrors);
if (!ValueMirror::getProperties(context, object, ownProperties,
@@ -366,10 +366,8 @@ Response InjectedScript::getInternalAndPrivateProperties(
internalProperties,
std::unique_ptr<protocol::Array<PrivatePropertyDescriptor>>*
privateProperties) {
- *internalProperties =
- v8::base::make_unique<Array<InternalPropertyDescriptor>>();
- *privateProperties =
- v8::base::make_unique<Array<PrivatePropertyDescriptor>>();
+ *internalProperties = std::make_unique<Array<InternalPropertyDescriptor>>();
+ *privateProperties = std::make_unique<Array<PrivatePropertyDescriptor>>();
if (!value->IsObject()) return Response::OK();
@@ -521,7 +519,7 @@ std::unique_ptr<protocol::Runtime::RemoteObject> InjectedScript::wrapTable(
if (columnSet.find(property->getName()) == columnSet.end()) continue;
columnMap[property->getName()] = property.get();
}
- auto filtered = v8::base::make_unique<Array<PropertyPreview>>();
+ auto filtered = std::make_unique<Array<PropertyPreview>>();
for (const String16& column : selectedColumns) {
if (columnMap.find(column) == columnMap.end()) continue;
filtered->push_back(columnMap[column]->clone());
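This file, like most of the inspector files below, mechanically swaps v8::base::make_unique for std::make_unique now that the codebase can rely on C++14; the two are interchangeable, so only the namespace changes at each call site. A minimal sketch of a converted call:

#include <memory>
#include <vector>

struct PropertyDescriptor {
  int id;
  explicit PropertyDescriptor(int id) : id(id) {}
};

int main() {
  // Pre-C++14 code shipped its own helper (v8::base::make_unique);
  // std::make_unique is a drop-in replacement with the same semantics.
  auto properties = std::make_unique<std::vector<PropertyDescriptor>>();
  properties->emplace_back(1);
  return properties->size() == 1 ? 0 : 1;
}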
diff --git a/deps/v8/src/inspector/injected-script.h b/deps/v8/src/inspector/injected-script.h
index d007e9121e..080769f712 100644
--- a/deps/v8/src/inspector/injected-script.h
+++ b/deps/v8/src/inspector/injected-script.h
@@ -31,6 +31,7 @@
#ifndef V8_INSPECTOR_INJECTED_SCRIPT_H_
#define V8_INSPECTOR_INJECTED_SCRIPT_H_
+#include <memory>
#include <unordered_map>
#include <unordered_set>
diff --git a/deps/v8/src/inspector/inspected-context.cc b/deps/v8/src/inspector/inspected-context.cc
index 8098aa5cac..03a799cb5b 100644
--- a/deps/v8/src/inspector/inspected-context.cc
+++ b/deps/v8/src/inspector/inspected-context.cc
@@ -112,7 +112,7 @@ InjectedScript* InspectedContext::getInjectedScript(int sessionId) {
InjectedScript* InspectedContext::createInjectedScript(int sessionId) {
std::unique_ptr<InjectedScript> injectedScript =
- v8::base::make_unique<InjectedScript>(this, sessionId);
+ std::make_unique<InjectedScript>(this, sessionId);
CHECK(m_injectedScripts.find(sessionId) == m_injectedScripts.end());
m_injectedScripts[sessionId] = std::move(injectedScript);
return getInjectedScript(sessionId);
diff --git a/deps/v8/src/inspector/inspected-context.h b/deps/v8/src/inspector/inspected-context.h
index 4ec52dc1e4..68b865de67 100644
--- a/deps/v8/src/inspector/inspected-context.h
+++ b/deps/v8/src/inspector/inspected-context.h
@@ -5,6 +5,7 @@
#ifndef V8_INSPECTOR_INSPECTED_CONTEXT_H_
#define V8_INSPECTOR_INSPECTED_CONTEXT_H_
+#include <memory>
#include <unordered_map>
#include <unordered_set>
diff --git a/deps/v8/src/inspector/remote-object-id.h b/deps/v8/src/inspector/remote-object-id.h
index b199032359..5a35c13e58 100644
--- a/deps/v8/src/inspector/remote-object-id.h
+++ b/deps/v8/src/inspector/remote-object-id.h
@@ -5,6 +5,8 @@
#ifndef V8_INSPECTOR_REMOTE_OBJECT_ID_H_
#define V8_INSPECTOR_REMOTE_OBJECT_ID_H_
+#include <memory>
+
#include "src/inspector/protocol/Forward.h"
namespace v8_inspector {
diff --git a/deps/v8/src/inspector/search-util.h b/deps/v8/src/inspector/search-util.h
index 3c8a9fe31c..5958a404f0 100644
--- a/deps/v8/src/inspector/search-util.h
+++ b/deps/v8/src/inspector/search-util.h
@@ -5,6 +5,8 @@
#ifndef V8_INSPECTOR_SEARCH_UTIL_H_
#define V8_INSPECTOR_SEARCH_UTIL_H_
+#include <memory>
+
#include "src/inspector/protocol/Debugger.h"
#include "src/inspector/string-util.h"
diff --git a/deps/v8/src/inspector/string-16.cc b/deps/v8/src/inspector/string-16.cc
index 3a91169ac8..3adffeddf1 100644
--- a/deps/v8/src/inspector/string-16.cc
+++ b/deps/v8/src/inspector/string-16.cc
@@ -84,6 +84,13 @@ String16 String16::fromInteger(size_t number) {
}
// static
+String16 String16::fromInteger64(int64_t number) {
+ char buffer[50];
+ v8::base::OS::SNPrintF(buffer, arraysize(buffer), "%" PRId64 "", number);
+ return String16(buffer);
+}
+
+// static
String16 String16::fromDouble(double number) {
char arr[50];
v8::internal::Vector<char> buffer(arr, arraysize(arr));
diff --git a/deps/v8/src/inspector/string-16.h b/deps/v8/src/inspector/string-16.h
index c1dd5cb929..910a2e49c6 100644
--- a/deps/v8/src/inspector/string-16.h
+++ b/deps/v8/src/inspector/string-16.h
@@ -37,6 +37,7 @@ class String16 {
static String16 fromInteger(int);
static String16 fromInteger(size_t);
+ static String16 fromInteger64(int64_t);
static String16 fromDouble(double);
static String16 fromDouble(double, int precision);
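String16::fromInteger64 formats an int64_t through the PRId64 macro, which expands to the correct length modifier on every platform (including 32-bit targets where long is too narrow). A standalone sketch of the same conversion, with std::string standing in for String16:

#include <cinttypes>  // PRId64
#include <cstdint>
#include <cstdio>
#include <string>

// Portable int64_t -> decimal string. A fixed stack buffer suffices:
// INT64_MIN needs 19 digits plus a sign and the terminator.
std::string FromInteger64(int64_t number) {
  char buffer[32];
  std::snprintf(buffer, sizeof(buffer), "%" PRId64, number);
  return std::string(buffer);
}

int main() {
  std::printf("%s\n", FromInteger64(INT64_MIN).c_str());
}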
diff --git a/deps/v8/src/inspector/string-util.cc b/deps/v8/src/inspector/string-util.cc
index 20c8951e2a..bae5dd91a9 100644
--- a/deps/v8/src/inspector/string-util.cc
+++ b/deps/v8/src/inspector/string-util.cc
@@ -169,15 +169,6 @@ StringBufferImpl::StringBufferImpl(String16& string) {
m_string = toStringView(m_owner);
}
-String16 debuggerIdToString(const std::pair<int64_t, int64_t>& debuggerId) {
- const size_t kBufferSize = 35;
-
- char buffer[kBufferSize];
- v8::base::OS::SNPrintF(buffer, kBufferSize, "(%08" PRIX64 "%08" PRIX64 ")",
- debuggerId.first, debuggerId.second);
- return String16(buffer);
-}
-
String16 stackTraceIdToString(uintptr_t id) {
String16Builder builder;
builder.appendNumber(static_cast<size_t>(id));
diff --git a/deps/v8/src/inspector/string-util.h b/deps/v8/src/inspector/string-util.h
index 513f436136..9b6a8bdd5d 100644
--- a/deps/v8/src/inspector/string-util.h
+++ b/deps/v8/src/inspector/string-util.h
@@ -101,13 +101,23 @@ class StringUtil {
// therefore it's unnecessary to provide an implementation here.
class Binary {
public:
- const uint8_t* data() const { UNIMPLEMENTED(); }
- size_t size() const { UNIMPLEMENTED(); }
+ Binary() = default;
+
+ const uint8_t* data() const { return bytes_->data(); }
+ size_t size() const { return bytes_->size(); }
String toBase64() const { UNIMPLEMENTED(); }
static Binary fromBase64(const String& base64, bool* success) {
UNIMPLEMENTED();
}
- static Binary fromSpan(const uint8_t* data, size_t size) { UNIMPLEMENTED(); }
+ static Binary fromSpan(const uint8_t* data, size_t size) {
+ return Binary(std::make_shared<std::vector<uint8_t>>(data, data + size));
+ }
+
+ private:
+ std::shared_ptr<std::vector<uint8_t>> bytes_;
+
+ explicit Binary(std::shared_ptr<std::vector<uint8_t>> bytes)
+ : bytes_(bytes) {}
};
} // namespace protocol
@@ -149,7 +159,6 @@ class BinaryStringBuffer : public StringBuffer {
DISALLOW_COPY_AND_ASSIGN(BinaryStringBuffer);
};
-String16 debuggerIdToString(const std::pair<int64_t, int64_t>& debuggerId);
String16 stackTraceIdToString(uintptr_t id);
} // namespace v8_inspector
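The Binary stub gains real storage: fromSpan copies the bytes into an immutable std::vector owned through a shared_ptr, so Binary values copy in O(1) without duplicating the payload. A self-contained sketch of that design:

#include <cstdint>
#include <cstdio>
#include <memory>
#include <vector>

// Cheaply copyable byte container: all copies share one immutable buffer.
class Binary {
 public:
  Binary() = default;
  static Binary FromSpan(const uint8_t* data, size_t size) {
    return Binary(std::make_shared<std::vector<uint8_t>>(data, data + size));
  }
  const uint8_t* data() const { return bytes_->data(); }
  size_t size() const { return bytes_->size(); }

 private:
  explicit Binary(std::shared_ptr<std::vector<uint8_t>> bytes)
      : bytes_(std::move(bytes)) {}
  std::shared_ptr<std::vector<uint8_t>> bytes_;
};

int main() {
  const uint8_t wasm_magic[] = {0x00, 0x61, 0x73, 0x6d};  // "\0asm"
  Binary a = Binary::FromSpan(wasm_magic, sizeof(wasm_magic));
  Binary b = a;  // shares the buffer; the bytes are not copied
  std::printf("%zu %zu\n", a.size(), b.size());
}

As in the patch, a default-constructed value holds a null buffer, so data() and size() are only meaningful on a Binary produced by fromSpan.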
diff --git a/deps/v8/src/inspector/v8-console-message.cc b/deps/v8/src/inspector/v8-console-message.cc
index 458e4d4027..e4c678a272 100644
--- a/deps/v8/src/inspector/v8-console-message.cc
+++ b/deps/v8/src/inspector/v8-console-message.cc
@@ -258,7 +258,7 @@ V8ConsoleMessage::wrapArguments(V8InspectorSessionImpl* session,
v8::Local<v8::Context> context = inspectedContext->context();
auto args =
- v8::base::make_unique<protocol::Array<protocol::Runtime::RemoteObject>>();
+ std::make_unique<protocol::Array<protocol::Runtime::RemoteObject>>();
v8::Local<v8::Value> value = m_arguments[0]->Get(isolate);
if (value->IsObject() && m_type == ConsoleAPIType::kTable &&
@@ -341,8 +341,8 @@ void V8ConsoleMessage::reportToFrontend(protocol::Runtime::Frontend* frontend,
arguments = wrapArguments(session, generatePreview);
if (!inspector->hasConsoleMessageStorage(contextGroupId)) return;
if (!arguments) {
- arguments = v8::base::make_unique<
- protocol::Array<protocol::Runtime::RemoteObject>>();
+ arguments =
+ std::make_unique<protocol::Array<protocol::Runtime::RemoteObject>>();
if (!m_message.isEmpty()) {
std::unique_ptr<protocol::Runtime::RemoteObject> messageArg =
protocol::Runtime::RemoteObject::create()
diff --git a/deps/v8/src/inspector/v8-console-message.h b/deps/v8/src/inspector/v8-console-message.h
index cca5b47265..04bd10ff73 100644
--- a/deps/v8/src/inspector/v8-console-message.h
+++ b/deps/v8/src/inspector/v8-console-message.h
@@ -7,7 +7,9 @@
#include <deque>
#include <map>
+#include <memory>
#include <set>
+
#include "include/v8.h"
#include "src/inspector/protocol/Console.h"
#include "src/inspector/protocol/Forward.h"
diff --git a/deps/v8/src/inspector/v8-console.cc b/deps/v8/src/inspector/v8-console.cc
index 0f476f2316..f4d0ffa055 100644
--- a/deps/v8/src/inspector/v8-console.cc
+++ b/deps/v8/src/inspector/v8-console.cc
@@ -691,7 +691,7 @@ v8::Local<v8::Object> V8Console::createCommandLineAPI(
v8::Local<v8::ArrayBuffer> data =
v8::ArrayBuffer::New(isolate, sizeof(CommandLineAPIData));
- *static_cast<CommandLineAPIData*>(data->GetContents().Data()) =
+ *static_cast<CommandLineAPIData*>(data->GetBackingStore()->Data()) =
CommandLineAPIData(this, sessionId);
createBoundFunctionProperty(context, commandLineAPI, data, "dir",
&V8Console::call<&V8Console::Dir>,
diff --git a/deps/v8/src/inspector/v8-console.h b/deps/v8/src/inspector/v8-console.h
index 03d89ced10..4d38c51a2a 100644
--- a/deps/v8/src/inspector/v8-console.h
+++ b/deps/v8/src/inspector/v8-console.h
@@ -106,14 +106,14 @@ class V8Console : public v8::debug::ConsoleDelegate {
int)>
static void call(const v8::FunctionCallbackInfo<v8::Value>& info) {
CommandLineAPIData* data = static_cast<CommandLineAPIData*>(
- info.Data().As<v8::ArrayBuffer>()->GetContents().Data());
+ info.Data().As<v8::ArrayBuffer>()->GetBackingStore()->Data());
(data->first->*func)(info, data->second);
}
template <void (V8Console::*func)(const v8::debug::ConsoleCallArguments&,
const v8::debug::ConsoleContext&)>
static void call(const v8::FunctionCallbackInfo<v8::Value>& info) {
CommandLineAPIData* data = static_cast<CommandLineAPIData*>(
- info.Data().As<v8::ArrayBuffer>()->GetContents().Data());
+ info.Data().As<v8::ArrayBuffer>()->GetBackingStore()->Data());
v8::debug::ConsoleCallArguments args(info);
(data->first->*func)(args, v8::debug::ConsoleContext());
}
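Both console call sites move from the deprecated v8::ArrayBuffer::GetContents() to GetBackingStore(); the backing store is reference counted, so the raw pointer from Data() stays valid as long as the shared_ptr is held. A fragment against the public embedder API (it assumes V8 headers plus an initialized isolate with an active HandleScope, so it is a sketch rather than a complete program):

#include <cstring>
#include <memory>
#include <v8.h>

struct SessionData {
  void* console;
  int session_id;
};

// Stash a small POD inside an ArrayBuffer, the way V8Console keeps its
// per-session data. GetBackingStore() returns a shared_ptr that keeps the
// allocation alive while we write through Data().
v8::Local<v8::ArrayBuffer> PackSessionData(v8::Isolate* isolate,
                                           const SessionData& value) {
  v8::Local<v8::ArrayBuffer> buffer =
      v8::ArrayBuffer::New(isolate, sizeof(SessionData));
  std::shared_ptr<v8::BackingStore> store = buffer->GetBackingStore();
  std::memcpy(store->Data(), &value, sizeof(SessionData));
  return buffer;
}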
diff --git a/deps/v8/src/inspector/v8-debugger-agent-impl.cc b/deps/v8/src/inspector/v8-debugger-agent-impl.cc
index e5458823ea..18bf43fbbc 100644
--- a/deps/v8/src/inspector/v8-debugger-agent-impl.cc
+++ b/deps/v8/src/inspector/v8-debugger-agent-impl.cc
@@ -262,7 +262,7 @@ String16 scopeType(v8::debug::ScopeIterator::ScopeType type) {
Response buildScopes(v8::Isolate* isolate, v8::debug::ScopeIterator* iterator,
InjectedScript* injectedScript,
std::unique_ptr<Array<Scope>>* scopes) {
- *scopes = v8::base::make_unique<Array<Scope>>();
+ *scopes = std::make_unique<Array<Scope>>();
if (!injectedScript) return Response::OK();
if (iterator->Done()) return Response::OK();
@@ -353,8 +353,8 @@ Response V8DebuggerAgentImpl::enable(Maybe<double> maxScriptsCacheSize,
String16* outDebuggerId) {
m_maxScriptCacheSize = v8::base::saturated_cast<size_t>(
maxScriptsCacheSize.fromMaybe(std::numeric_limits<double>::max()));
- *outDebuggerId = debuggerIdToString(
- m_debugger->debuggerIdFor(m_session->contextGroupId()));
+ *outDebuggerId =
+ m_debugger->debuggerIdFor(m_session->contextGroupId()).toString();
if (enabled()) return Response::OK();
if (!m_inspector->client()->canExecuteScripts(m_session->contextGroupId()))
@@ -472,7 +472,7 @@ Response V8DebuggerAgentImpl::setBreakpointByUrl(
Maybe<int> optionalColumnNumber, Maybe<String16> optionalCondition,
String16* outBreakpointId,
std::unique_ptr<protocol::Array<protocol::Debugger::Location>>* locations) {
- *locations = v8::base::make_unique<Array<protocol::Debugger::Location>>();
+ *locations = std::make_unique<Array<protocol::Debugger::Location>>();
int specified = (optionalURL.isJust() ? 1 : 0) +
(optionalURLRegex.isJust() ? 1 : 0) +
@@ -708,8 +708,8 @@ Response V8DebuggerAgentImpl::getPossibleBreakpoints(
v8Start, v8End, restrictToFunction.fromMaybe(false), &v8Locations);
}
- *locations = v8::base::make_unique<
- protocol::Array<protocol::Debugger::BreakLocation>>();
+ *locations =
+ std::make_unique<protocol::Array<protocol::Debugger::BreakLocation>>();
for (size_t i = 0; i < v8Locations.size(); ++i) {
std::unique_ptr<protocol::Debugger::BreakLocation> breakLocation =
protocol::Debugger::BreakLocation::create()
@@ -752,17 +752,19 @@ Response V8DebuggerAgentImpl::getStackTrace(
std::unique_ptr<protocol::Runtime::StackTrace>* outStackTrace) {
bool isOk = false;
int64_t id = inStackTraceId->getId().toInteger64(&isOk);
- std::pair<int64_t, int64_t> debuggerId;
+ if (!isOk) return Response::Error("Invalid stack trace id");
+
+ V8DebuggerId debuggerId;
if (inStackTraceId->hasDebuggerId()) {
- debuggerId =
- m_debugger->debuggerIdFor(inStackTraceId->getDebuggerId(String16()));
+ debuggerId = V8DebuggerId(inStackTraceId->getDebuggerId(String16()));
} else {
debuggerId = m_debugger->debuggerIdFor(m_session->contextGroupId());
}
- V8StackTraceId v8StackTraceId(id, debuggerId);
- if (!isOk || v8StackTraceId.IsInvalid()) {
+ if (!debuggerId.isValid()) return Response::Error("Invalid stack trace id");
+
+ V8StackTraceId v8StackTraceId(id, debuggerId.pair());
+ if (v8StackTraceId.IsInvalid())
return Response::Error("Invalid stack trace id");
- }
auto stack =
m_debugger->stackTraceFor(m_session->contextGroupId(), v8StackTraceId);
if (!stack) {
@@ -872,11 +874,10 @@ Response V8DebuggerAgentImpl::searchInContent(
if (it == m_scripts.end())
return Response::Error("No script for id: " + scriptId);
- *results =
- v8::base::make_unique<protocol::Array<protocol::Debugger::SearchMatch>>(
- searchInTextByLinesImpl(m_session, it->second->source(0), query,
- optionalCaseSensitive.fromMaybe(false),
- optionalIsRegex.fromMaybe(false)));
+ *results = std::make_unique<protocol::Array<protocol::Debugger::SearchMatch>>(
+ searchInTextByLinesImpl(m_session, it->second->source(0), query,
+ optionalCaseSensitive.fromMaybe(false),
+ optionalIsRegex.fromMaybe(false)));
return Response::OK();
}
@@ -961,6 +962,20 @@ Response V8DebuggerAgentImpl::getScriptSource(const String16& scriptId,
return Response::OK();
}
+Response V8DebuggerAgentImpl::getWasmBytecode(const String16& scriptId,
+ protocol::Binary* bytecode) {
+ if (!enabled()) return Response::Error(kDebuggerNotEnabled);
+ ScriptsMap::iterator it = m_scripts.find(scriptId);
+ if (it == m_scripts.end())
+ return Response::Error("No script for id: " + scriptId);
+ v8::MemorySpan<const uint8_t> span;
+ if (!it->second->wasmBytecode().To(&span))
+ return Response::Error("Script with id " + scriptId +
+ " is not WebAssembly");
+ *bytecode = protocol::Binary::fromSpan(span.data(), span.size());
+ return Response::OK();
+}
+
void V8DebuggerAgentImpl::pushBreakDetails(
const String16& breakReason,
std::unique_ptr<protocol::DictionaryValue> breakAuxData) {
@@ -1040,13 +1055,7 @@ Response V8DebuggerAgentImpl::stepOut() {
Response V8DebuggerAgentImpl::pauseOnAsyncCall(
std::unique_ptr<protocol::Runtime::StackTraceId> inParentStackTraceId) {
- bool isOk = false;
- int64_t stackTraceId = inParentStackTraceId->getId().toInteger64(&isOk);
- if (!isOk) {
- return Response::Error("Invalid stack trace id");
- }
- m_debugger->pauseOnAsyncCall(m_session->contextGroupId(), stackTraceId,
- inParentStackTraceId->getDebuggerId(String16()));
+ // Deprecated, just return OK.
return Response::OK();
}
@@ -1270,11 +1279,11 @@ Response V8DebuggerAgentImpl::setBlackboxedRanges(
Response V8DebuggerAgentImpl::currentCallFrames(
std::unique_ptr<Array<CallFrame>>* result) {
if (!isPaused()) {
- *result = v8::base::make_unique<Array<CallFrame>>();
+ *result = std::make_unique<Array<CallFrame>>();
return Response::OK();
}
v8::HandleScope handles(m_isolate);
- *result = v8::base::make_unique<Array<CallFrame>>();
+ *result = std::make_unique<Array<CallFrame>>();
auto iterator = v8::debug::StackTraceIterator::Create(m_isolate);
int frameOrdinal = 0;
for (; !iterator->Done(); iterator->Advance(), frameOrdinal++) {
@@ -1373,28 +1382,10 @@ V8DebuggerAgentImpl::currentExternalStackTrace() {
if (externalParent.IsInvalid()) return nullptr;
return protocol::Runtime::StackTraceId::create()
.setId(stackTraceIdToString(externalParent.id))
- .setDebuggerId(debuggerIdToString(externalParent.debugger_id))
+ .setDebuggerId(V8DebuggerId(externalParent.debugger_id).toString())
.build();
}
-std::unique_ptr<protocol::Runtime::StackTraceId>
-V8DebuggerAgentImpl::currentScheduledAsyncCall() {
- v8_inspector::V8StackTraceId scheduledAsyncCall =
- m_debugger->scheduledAsyncCall();
- if (scheduledAsyncCall.IsInvalid()) return nullptr;
- std::unique_ptr<protocol::Runtime::StackTraceId> asyncCallStackTrace =
- protocol::Runtime::StackTraceId::create()
- .setId(stackTraceIdToString(scheduledAsyncCall.id))
- .build();
- // TODO(kozyatinskiy): extract this check to IsLocal function.
- if (scheduledAsyncCall.debugger_id.first ||
- scheduledAsyncCall.debugger_id.second) {
- asyncCallStackTrace->setDebuggerId(
- debuggerIdToString(scheduledAsyncCall.debugger_id));
- }
- return asyncCallStackTrace;
-}
-
bool V8DebuggerAgentImpl::isPaused() const {
return m_debugger->isPausedInContextGroup(m_session->contextGroupId());
}
@@ -1602,7 +1593,7 @@ void V8DebuggerAgentImpl::didPause(
}
}
- auto hitBreakpointIds = v8::base::make_unique<Array<String16>>();
+ auto hitBreakpointIds = std::make_unique<Array<String16>>();
for (const auto& id : hitBreakpoints) {
auto it = m_breakpointsOnScriptRun.find(id);
@@ -1655,12 +1646,11 @@ void V8DebuggerAgentImpl::didPause(
std::unique_ptr<Array<CallFrame>> protocolCallFrames;
Response response = currentCallFrames(&protocolCallFrames);
if (!response.isSuccess())
- protocolCallFrames = v8::base::make_unique<Array<CallFrame>>();
+ protocolCallFrames = std::make_unique<Array<CallFrame>>();
m_frontend.paused(std::move(protocolCallFrames), breakReason,
std::move(breakAuxData), std::move(hitBreakpointIds),
- currentAsyncStackTrace(), currentExternalStackTrace(),
- currentScheduledAsyncCall());
+ currentAsyncStackTrace(), currentExternalStackTrace());
}
void V8DebuggerAgentImpl::didContinue() {
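The new Debugger.getWasmBytecode handler above threads a v8::Maybe<v8::MemorySpan<const uint8_t>> from the script wrapper into a protocol::Binary, turning "no value" into a protocol error. A reduced sketch of that plumbing, with simplified stand-ins for the V8 and protocol types:

#include <cstdint>
#include <cstdio>
#include <string>
#include <vector>

// Stand-ins: a non-owning view plus an owning byte buffer, playing the
// roles of v8::MemorySpan and protocol::Binary.
struct Span {
  const uint8_t* data;
  size_t size;
};
using Binary = std::vector<uint8_t>;

struct Script {
  bool is_wasm;
  Binary bytecode;
  // Mirrors wasmBytecode(): a false return stands in for v8::Nothing.
  bool WasmBytecode(Span* out) const {
    if (!is_wasm) return false;
    *out = Span{bytecode.data(), bytecode.size()};
    return true;
  }
};

// Mirrors getWasmBytecode(): map a missing value to a protocol-style
// error, otherwise copy the span into an owning buffer for the response.
std::string GetWasmBytecode(const Script& script, Binary* out) {
  Span span{nullptr, 0};
  if (!script.WasmBytecode(&span)) return "Script is not WebAssembly";
  out->assign(span.data, span.data + span.size);
  return std::string();  // empty == success, like Response::OK()
}

int main() {
  Script wasm{true, {0x00, 0x61, 0x73, 0x6d}};
  Binary out;
  std::printf("%s\n", GetWasmBytecode(wasm, &out).empty() ? "ok" : "error");
}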
diff --git a/deps/v8/src/inspector/v8-debugger-agent-impl.h b/deps/v8/src/inspector/v8-debugger-agent-impl.h
index 0a5a169907..e6b35b845a 100644
--- a/deps/v8/src/inspector/v8-debugger-agent-impl.h
+++ b/deps/v8/src/inspector/v8-debugger-agent-impl.h
@@ -6,6 +6,7 @@
#define V8_INSPECTOR_V8_DEBUGGER_AGENT_IMPL_H_
#include <deque>
+#include <memory>
#include <unordered_map>
#include <vector>
@@ -94,6 +95,8 @@ class V8DebuggerAgentImpl : public protocol::Debugger::Backend {
Maybe<protocol::Runtime::StackTraceId>* asyncStackTraceId) override;
Response getScriptSource(const String16& scriptId,
String16* scriptSource) override;
+ Response getWasmBytecode(const String16& scriptId,
+ protocol::Binary* bytecode) override;
Response pause() override;
Response resume() override;
Response stepOver() override;
@@ -165,7 +168,6 @@ class V8DebuggerAgentImpl : public protocol::Debugger::Backend {
std::unique_ptr<protocol::Array<protocol::Debugger::CallFrame>>*);
std::unique_ptr<protocol::Runtime::StackTrace> currentAsyncStackTrace();
std::unique_ptr<protocol::Runtime::StackTraceId> currentExternalStackTrace();
- std::unique_ptr<protocol::Runtime::StackTraceId> currentScheduledAsyncCall();
void setPauseOnExceptionsImpl(int);
diff --git a/deps/v8/src/inspector/v8-debugger-script.cc b/deps/v8/src/inspector/v8-debugger-script.cc
index b83eafc96a..99511fc144 100644
--- a/deps/v8/src/inspector/v8-debugger-script.cc
+++ b/deps/v8/src/inspector/v8-debugger-script.cc
@@ -141,6 +141,12 @@ class ActualScript : public V8DebuggerScript {
static_cast<int>(pos), static_cast<int>(substringLength));
return String16(buffer.get(), substringLength);
}
+ v8::Maybe<v8::MemorySpan<const uint8_t>> wasmBytecode() const override {
+ v8::HandleScope scope(m_isolate);
+ auto script = this->script();
+ if (!script->IsWasm()) return v8::Nothing<v8::MemorySpan<const uint8_t>>();
+ return v8::Just(v8::debug::WasmScript::Cast(*script)->Bytecode());
+ }
int startLine() const override { return m_startLine; }
int startColumn() const override { return m_startColumn; }
int endLine() const override { return m_endLine; }
@@ -281,9 +287,8 @@ class ActualScript : public V8DebuggerScript {
m_startLine = script->LineOffset();
m_startColumn = script->ColumnOffset();
std::vector<int> lineEnds = script->LineEnds();
- CHECK(lineEnds.size());
- int source_length = lineEnds[lineEnds.size() - 1];
if (lineEnds.size()) {
+ int source_length = lineEnds[lineEnds.size() - 1];
m_endLine = static_cast<int>(lineEnds.size()) + m_startLine - 1;
if (lineEnds.size() > 1) {
m_endColumn = source_length - lineEnds[lineEnds.size() - 2] - 1;
@@ -356,6 +361,9 @@ class WasmVirtualScript : public V8DebuggerScript {
return m_wasmTranslation->GetSource(m_id, m_functionIndex)
.substring(pos, len);
}
+ v8::Maybe<v8::MemorySpan<const uint8_t>> wasmBytecode() const override {
+ return v8::Nothing<v8::MemorySpan<const uint8_t>>();
+ }
int startLine() const override {
return m_wasmTranslation->GetStartLine(m_id, m_functionIndex);
}
@@ -462,17 +470,17 @@ class WasmVirtualScript : public V8DebuggerScript {
std::unique_ptr<V8DebuggerScript> V8DebuggerScript::Create(
v8::Isolate* isolate, v8::Local<v8::debug::Script> scriptObj,
bool isLiveEdit, V8DebuggerAgentImpl* agent, V8InspectorClient* client) {
- return v8::base::make_unique<ActualScript>(isolate, scriptObj, isLiveEdit,
- agent, client);
+ return std::make_unique<ActualScript>(isolate, scriptObj, isLiveEdit, agent,
+ client);
}
std::unique_ptr<V8DebuggerScript> V8DebuggerScript::CreateWasm(
v8::Isolate* isolate, WasmTranslation* wasmTranslation,
v8::Local<v8::debug::WasmScript> underlyingScript, String16 id,
String16 url, int functionIndex) {
- return v8::base::make_unique<WasmVirtualScript>(
- isolate, wasmTranslation, underlyingScript, std::move(id), std::move(url),
- functionIndex);
+ return std::make_unique<WasmVirtualScript>(isolate, wasmTranslation,
+ underlyingScript, std::move(id),
+ std::move(url), functionIndex);
}
V8DebuggerScript::V8DebuggerScript(v8::Isolate* isolate, String16 id,
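Alongside the wasmBytecode() accessors, this hunk downgrades a hard CHECK: the old code asserted lineEnds was non-empty and indexed it before the guard, so an empty table aborted the process; now source_length is computed only inside the guard and an empty table simply keeps the default end position. A small sketch of the corrected computation:

#include <vector>

// End-position computation guarded against an empty line-ends table,
// matching the patched ActualScript logic.
void ComputeEnd(const std::vector<int>& line_ends, int start_line,
                int start_column, int* end_line, int* end_column) {
  if (line_ends.empty()) return;  // keep defaults instead of aborting
  int source_length = line_ends.back();
  *end_line = static_cast<int>(line_ends.size()) + start_line - 1;
  if (line_ends.size() > 1) {
    *end_column = source_length - line_ends[line_ends.size() - 2] - 1;
  } else {
    *end_column = source_length + start_column;
  }
}

int main() {
  int end_line = 0, end_column = 0;
  ComputeEnd({10, 25}, 0, 0, &end_line, &end_column);
  return end_line == 1 && end_column == 14 ? 0 : 1;
}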
diff --git a/deps/v8/src/inspector/v8-debugger-script.h b/deps/v8/src/inspector/v8-debugger-script.h
index 547bb0a2cc..b53d2c15aa 100644
--- a/deps/v8/src/inspector/v8-debugger-script.h
+++ b/deps/v8/src/inspector/v8-debugger-script.h
@@ -30,6 +30,8 @@
#ifndef V8_INSPECTOR_V8_DEBUGGER_SCRIPT_H_
#define V8_INSPECTOR_V8_DEBUGGER_SCRIPT_H_
+#include <memory>
+
#include "src/base/macros.h"
#include "src/inspector/string-16.h"
#include "src/inspector/string-util.h"
@@ -61,6 +63,7 @@ class V8DebuggerScript {
virtual const String16& sourceMappingURL() const = 0;
virtual String16 source(size_t pos, size_t len = UINT_MAX) const = 0;
+ virtual v8::Maybe<v8::MemorySpan<const uint8_t>> wasmBytecode() const = 0;
virtual const String16& hash() const = 0;
virtual int startLine() const = 0;
virtual int startColumn() const = 0;
diff --git a/deps/v8/src/inspector/v8-debugger.cc b/deps/v8/src/inspector/v8-debugger.cc
index 5ddc375a80..bd127b2c1c 100644
--- a/deps/v8/src/inspector/v8-debugger.cc
+++ b/deps/v8/src/inspector/v8-debugger.cc
@@ -64,6 +64,42 @@ class MatchPrototypePredicate : public v8::debug::QueryObjectPredicate {
} // namespace
+V8DebuggerId::V8DebuggerId(std::pair<int64_t, int64_t> pair)
+ : m_first(pair.first), m_second(pair.second) {}
+
+// static
+V8DebuggerId V8DebuggerId::generate(v8::Isolate* isolate) {
+ V8DebuggerId debuggerId;
+ debuggerId.m_first = v8::debug::GetNextRandomInt64(isolate);
+ debuggerId.m_second = v8::debug::GetNextRandomInt64(isolate);
+ if (!debuggerId.m_first && !debuggerId.m_second) ++debuggerId.m_first;
+ return debuggerId;
+}
+
+V8DebuggerId::V8DebuggerId(const String16& debuggerId) {
+ const UChar dot = '.';
+ size_t pos = debuggerId.find(dot);
+ if (pos == String16::kNotFound) return;
+ bool ok = false;
+ int64_t first = debuggerId.substring(0, pos).toInteger64(&ok);
+ if (!ok) return;
+ int64_t second = debuggerId.substring(pos + 1).toInteger64(&ok);
+ if (!ok) return;
+ m_first = first;
+ m_second = second;
+}
+
+String16 V8DebuggerId::toString() const {
+ return String16::fromInteger64(m_first) + "." +
+ String16::fromInteger64(m_second);
+}
+
+bool V8DebuggerId::isValid() const { return m_first || m_second; }
+
+std::pair<int64_t, int64_t> V8DebuggerId::pair() const {
+ return std::make_pair(m_first, m_second);
+}
+
V8Debugger::V8Debugger(v8::Isolate* isolate, V8InspectorImpl* inspector)
: m_isolate(isolate),
m_inspector(inspector),
@@ -107,7 +143,9 @@ void V8Debugger::disable() {
if (--m_enableCount) return;
clearContinueToLocation();
m_taskWithScheduledBreak = nullptr;
- m_taskWithScheduledBreakDebuggerId = String16();
+ m_externalAsyncTaskPauseRequested = false;
+ m_taskWithScheduledBreakPauseRequested = false;
+ m_pauseOnNextCallRequested = false;
m_pauseOnAsyncCall = false;
m_wasmTranslation.Clear();
v8::debug::SetDebugDelegate(m_isolate, nullptr);
@@ -171,12 +209,19 @@ void V8Debugger::setPauseOnNextCall(bool pause, int targetContextGroupId) {
m_targetContextGroupId != targetContextGroupId) {
return;
}
- m_targetContextGroupId = targetContextGroupId;
- m_breakRequested = pause;
- if (pause)
- v8::debug::SetBreakOnNextFunctionCall(m_isolate);
- else
- v8::debug::ClearBreakOnNextFunctionCall(m_isolate);
+ if (pause) {
+ bool didHaveBreak = hasScheduledBreakOnNextFunctionCall();
+ m_pauseOnNextCallRequested = true;
+ if (!didHaveBreak) {
+ m_targetContextGroupId = targetContextGroupId;
+ v8::debug::SetBreakOnNextFunctionCall(m_isolate);
+ }
+ } else {
+ m_pauseOnNextCallRequested = false;
+ if (!hasScheduledBreakOnNextFunctionCall()) {
+ v8::debug::ClearBreakOnNextFunctionCall(m_isolate);
+ }
+ }
}
bool V8Debugger::canBreakProgram() {
@@ -275,21 +320,12 @@ bool V8Debugger::asyncStepOutOfFunction(int targetContextGroupId,
void* parentTask =
std::shared_ptr<AsyncStackTrace>(parent)->suspendedTaskId();
if (!parentTask) return false;
- pauseOnAsyncCall(targetContextGroupId,
- reinterpret_cast<uintptr_t>(parentTask), String16());
+ m_targetContextGroupId = targetContextGroupId;
+ m_taskWithScheduledBreak = parentTask;
continueProgram(targetContextGroupId);
return true;
}
-void V8Debugger::pauseOnAsyncCall(int targetContextGroupId, uintptr_t task,
- const String16& debuggerId) {
- DCHECK(targetContextGroupId);
- m_targetContextGroupId = targetContextGroupId;
-
- m_taskWithScheduledBreak = reinterpret_cast<void*>(task);
- m_taskWithScheduledBreakDebuggerId = debuggerId;
-}
-
void V8Debugger::terminateExecution(
std::unique_ptr<TerminateExecutionCallback> callback) {
if (m_terminateExecutionCallback) {
@@ -390,10 +426,11 @@ void V8Debugger::handleProgramBreak(
return;
}
m_targetContextGroupId = 0;
- m_breakRequested = false;
+ m_pauseOnNextCallRequested = false;
m_pauseOnAsyncCall = false;
m_taskWithScheduledBreak = nullptr;
- m_taskWithScheduledBreakDebuggerId = String16();
+ m_externalAsyncTaskPauseRequested = false;
+ m_taskWithScheduledBreakPauseRequested = false;
bool scheduledOOMBreak = m_scheduledOOMBreak;
bool scheduledAssertBreak = m_scheduledAssertBreak;
@@ -470,31 +507,30 @@ size_t V8Debugger::nearHeapLimitCallback(void* data, size_t current_heap_limit,
void V8Debugger::ScriptCompiled(v8::Local<v8::debug::Script> script,
bool is_live_edited, bool has_compile_error) {
+ if (m_ignoreScriptParsedEventsCounter != 0) return;
+
int contextId;
if (!script->ContextId().To(&contextId)) return;
- if (script->IsWasm() && script->SourceMappingURL().IsEmpty()) {
- WasmTranslation* wasmTranslation = &m_wasmTranslation;
- m_inspector->forEachSession(
- m_inspector->contextGroupId(contextId),
- [&script, &wasmTranslation](V8InspectorSessionImpl* session) {
- if (!session->debuggerAgent()->enabled()) return;
- wasmTranslation->AddScript(script.As<v8::debug::WasmScript>(),
- session->debuggerAgent());
- });
- } else if (m_ignoreScriptParsedEventsCounter == 0) {
- v8::Isolate* isolate = m_isolate;
- V8InspectorClient* client = m_inspector->client();
- m_inspector->forEachSession(
- m_inspector->contextGroupId(contextId),
- [&isolate, &script, &has_compile_error, &is_live_edited,
- &client](V8InspectorSessionImpl* session) {
- if (!session->debuggerAgent()->enabled()) return;
- session->debuggerAgent()->didParseSource(
- V8DebuggerScript::Create(isolate, script, is_live_edited,
- session->debuggerAgent(), client),
+
+ v8::Isolate* isolate = m_isolate;
+ V8InspectorClient* client = m_inspector->client();
+ WasmTranslation& wasmTranslation = m_wasmTranslation;
+
+ m_inspector->forEachSession(
+ m_inspector->contextGroupId(contextId),
+ [isolate, &script, has_compile_error, is_live_edited, client,
+ &wasmTranslation](V8InspectorSessionImpl* session) {
+ auto agent = session->debuggerAgent();
+ if (!agent->enabled()) return;
+ if (script->IsWasm() && script->SourceMappingURL().IsEmpty()) {
+ wasmTranslation.AddScript(script.As<v8::debug::WasmScript>(), agent);
+ } else {
+ agent->didParseSource(
+ V8DebuggerScript::Create(isolate, script, is_live_edited, agent,
+ client),
!has_compile_error);
- });
- }
+ }
+ });
}
void V8Debugger::BreakProgramRequested(
@@ -540,15 +576,15 @@ void V8Debugger::AsyncEventOccurred(v8::debug::DebugAsyncActionType type,
switch (type) {
case v8::debug::kDebugPromiseThen:
asyncTaskScheduledForStack("Promise.then", task, false);
- if (!isBlackboxed) asyncTaskCandidateForStepping(task, true);
+ if (!isBlackboxed) asyncTaskCandidateForStepping(task);
break;
case v8::debug::kDebugPromiseCatch:
asyncTaskScheduledForStack("Promise.catch", task, false);
- if (!isBlackboxed) asyncTaskCandidateForStepping(task, true);
+ if (!isBlackboxed) asyncTaskCandidateForStepping(task);
break;
case v8::debug::kDebugPromiseFinally:
asyncTaskScheduledForStack("Promise.finally", task, false);
- if (!isBlackboxed) asyncTaskCandidateForStepping(task, true);
+ if (!isBlackboxed) asyncTaskCandidateForStepping(task);
break;
case v8::debug::kDebugWillHandle:
asyncTaskStartedForStack(task);
@@ -786,7 +822,7 @@ void V8Debugger::setAsyncCallStackDepth(V8DebuggerAgentImpl* agent, int depth) {
std::shared_ptr<AsyncStackTrace> V8Debugger::stackTraceFor(
int contextGroupId, const V8StackTraceId& id) {
- if (debuggerIdFor(contextGroupId) != id.debugger_id) return nullptr;
+ if (debuggerIdFor(contextGroupId).pair() != id.debugger_id) return nullptr;
auto it = m_storedStackTraces.find(id.id);
if (it == m_storedStackTraces.end()) return nullptr;
return it->second.lock();
@@ -811,9 +847,13 @@ V8StackTraceId V8Debugger::storeCurrentStackTrace(
++m_asyncStacksCount;
collectOldAsyncStacksIfNeeded();
- asyncTaskCandidateForStepping(reinterpret_cast<void*>(id), false);
-
- return V8StackTraceId(id, debuggerIdFor(contextGroupId));
+ bool shouldPause =
+ m_pauseOnAsyncCall && contextGroupId == m_targetContextGroupId;
+ if (shouldPause) {
+ m_pauseOnAsyncCall = false;
+ v8::debug::ClearStepping(m_isolate); // Cancel step into.
+ }
+ return V8StackTraceId(id, debuggerIdFor(contextGroupId).pair(), shouldPause);
}
uintptr_t V8Debugger::storeStackTrace(
@@ -829,13 +869,12 @@ void V8Debugger::externalAsyncTaskStarted(const V8StackTraceId& parent) {
m_currentAsyncParent.emplace_back();
m_currentTasks.push_back(reinterpret_cast<void*>(parent.id));
- if (m_breakRequested) return;
- if (!m_taskWithScheduledBreakDebuggerId.isEmpty() &&
- reinterpret_cast<uintptr_t>(m_taskWithScheduledBreak) == parent.id &&
- m_taskWithScheduledBreakDebuggerId ==
- debuggerIdToString(parent.debugger_id)) {
- v8::debug::SetBreakOnNextFunctionCall(m_isolate);
- }
+ if (!parent.should_pause) return;
+ bool didHaveBreak = hasScheduledBreakOnNextFunctionCall();
+ m_externalAsyncTaskPauseRequested = true;
+ if (didHaveBreak) return;
+ m_targetContextGroupId = currentContextGroupId();
+ v8::debug::SetBreakOnNextFunctionCall(m_isolate);
}
void V8Debugger::externalAsyncTaskFinished(const V8StackTraceId& parent) {
@@ -845,22 +884,16 @@ void V8Debugger::externalAsyncTaskFinished(const V8StackTraceId& parent) {
DCHECK(m_currentTasks.back() == reinterpret_cast<void*>(parent.id));
m_currentTasks.pop_back();
- if (m_taskWithScheduledBreakDebuggerId.isEmpty() ||
- reinterpret_cast<uintptr_t>(m_taskWithScheduledBreak) != parent.id ||
- m_taskWithScheduledBreakDebuggerId !=
- debuggerIdToString(parent.debugger_id)) {
- return;
- }
- m_taskWithScheduledBreak = nullptr;
- m_taskWithScheduledBreakDebuggerId = String16();
- if (m_breakRequested) return;
+ if (!parent.should_pause) return;
+ m_externalAsyncTaskPauseRequested = false;
+ if (hasScheduledBreakOnNextFunctionCall()) return;
v8::debug::ClearBreakOnNextFunctionCall(m_isolate);
}
void V8Debugger::asyncTaskScheduled(const StringView& taskName, void* task,
bool recurring) {
asyncTaskScheduledForStack(toString16(taskName), task, recurring);
- asyncTaskCandidateForStepping(task, true);
+ asyncTaskCandidateForStepping(task);
}
void V8Debugger::asyncTaskCanceled(void* task) {
@@ -936,46 +969,36 @@ void V8Debugger::asyncTaskFinishedForStack(void* task) {
}
}
-void V8Debugger::asyncTaskCandidateForStepping(void* task, bool isLocal) {
+void V8Debugger::asyncTaskCandidateForStepping(void* task) {
if (!m_pauseOnAsyncCall) return;
int contextGroupId = currentContextGroupId();
if (contextGroupId != m_targetContextGroupId) return;
- if (isLocal) {
- m_scheduledAsyncCall = v8_inspector::V8StackTraceId(
- reinterpret_cast<uintptr_t>(task), std::make_pair(0, 0));
- } else {
- m_scheduledAsyncCall = v8_inspector::V8StackTraceId(
- reinterpret_cast<uintptr_t>(task), debuggerIdFor(contextGroupId));
- }
- breakProgram(m_targetContextGroupId);
- m_scheduledAsyncCall = v8_inspector::V8StackTraceId();
+ m_taskWithScheduledBreak = task;
+ m_pauseOnAsyncCall = false;
+ v8::debug::ClearStepping(m_isolate); // Cancel step into.
}
void V8Debugger::asyncTaskStartedForStepping(void* task) {
- if (m_breakRequested) return;
// TODO(kozyatinskiy): we should search task in async chain to support
// blackboxing.
- if (m_taskWithScheduledBreakDebuggerId.isEmpty() &&
- task == m_taskWithScheduledBreak) {
- v8::debug::SetBreakOnNextFunctionCall(m_isolate);
- }
+ if (task != m_taskWithScheduledBreak) return;
+ bool didHaveBreak = hasScheduledBreakOnNextFunctionCall();
+ m_taskWithScheduledBreakPauseRequested = true;
+ if (didHaveBreak) return;
+ m_targetContextGroupId = currentContextGroupId();
+ v8::debug::SetBreakOnNextFunctionCall(m_isolate);
}
void V8Debugger::asyncTaskFinishedForStepping(void* task) {
- if (!m_taskWithScheduledBreakDebuggerId.isEmpty() ||
- task != m_taskWithScheduledBreak) {
- return;
- }
+ if (task != m_taskWithScheduledBreak) return;
m_taskWithScheduledBreak = nullptr;
- if (m_breakRequested) return;
+ m_taskWithScheduledBreakPauseRequested = false;
+ if (hasScheduledBreakOnNextFunctionCall()) return;
v8::debug::ClearBreakOnNextFunctionCall(m_isolate);
}
void V8Debugger::asyncTaskCanceledForStepping(void* task) {
- if (!m_taskWithScheduledBreakDebuggerId.isEmpty() ||
- task != m_taskWithScheduledBreak)
- return;
- m_taskWithScheduledBreak = nullptr;
+ asyncTaskFinishedForStepping(task);
}
void V8Debugger::allAsyncTasksCanceled() {
@@ -1058,7 +1081,7 @@ std::shared_ptr<StackFrame> V8Debugger::symbolize(
return std::shared_ptr<StackFrame>(it->second);
}
std::shared_ptr<StackFrame> frame(new StackFrame(isolate(), v8Frame));
- // TODO(clemensh): Figure out a way to do this translation only right before
+ // TODO(clemensb): Figure out a way to do this translation only right before
// sending the stack trace over wire.
if (v8Frame->IsWasm()) frame->translate(&m_wasmTranslation);
if (m_maxAsyncCallStackDepth) {
@@ -1073,27 +1096,15 @@ void V8Debugger::setMaxAsyncTaskStacksForTest(int limit) {
m_maxAsyncCallStacks = limit;
}
-std::pair<int64_t, int64_t> V8Debugger::debuggerIdFor(int contextGroupId) {
+V8DebuggerId V8Debugger::debuggerIdFor(int contextGroupId) {
auto it = m_contextGroupIdToDebuggerId.find(contextGroupId);
if (it != m_contextGroupIdToDebuggerId.end()) return it->second;
- std::pair<int64_t, int64_t> debuggerId(
- v8::debug::GetNextRandomInt64(m_isolate),
- v8::debug::GetNextRandomInt64(m_isolate));
- if (!debuggerId.first && !debuggerId.second) ++debuggerId.first;
+ V8DebuggerId debuggerId = V8DebuggerId::generate(m_isolate);
m_contextGroupIdToDebuggerId.insert(
it, std::make_pair(contextGroupId, debuggerId));
- m_serializedDebuggerIdToDebuggerId.insert(
- std::make_pair(debuggerIdToString(debuggerId), debuggerId));
return debuggerId;
}
-std::pair<int64_t, int64_t> V8Debugger::debuggerIdFor(
- const String16& serializedDebuggerId) {
- auto it = m_serializedDebuggerIdToDebuggerId.find(serializedDebuggerId);
- if (it != m_serializedDebuggerIdToDebuggerId.end()) return it->second;
- return std::make_pair(0, 0);
-}
-
bool V8Debugger::addInternalObject(v8::Local<v8::Context> context,
v8::Local<v8::Object> object,
V8InternalValueType type) {
@@ -1110,4 +1121,9 @@ void V8Debugger::dumpAsyncTaskStacksStateForTest() {
fprintf(stdout, "\n");
}
+bool V8Debugger::hasScheduledBreakOnNextFunctionCall() const {
+ return m_pauseOnNextCallRequested || m_taskWithScheduledBreakPauseRequested ||
+ m_externalAsyncTaskPauseRequested;
+}
+
} // namespace v8_inspector
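V8DebuggerId replaces the loose std::pair<int64_t, int64_t> handling scattered through this file: generation, validity, and a canonical "first.second" string form that parses back losslessly now live in one type, which is also why the m_serializedDebuggerIdToDebuggerId reverse map could be deleted. A standalone sketch of the round-trip, with std::string in place of String16 (error handling here is looser than the real toInteger64 checks):

#include <cinttypes>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <string>

class DebuggerId {
 public:
  DebuggerId() = default;
  // Parse the canonical "<first>.<second>" form; stays invalid on error.
  explicit DebuggerId(const std::string& s) {
    size_t dot = s.find('.');
    if (dot == std::string::npos) return;
    first_ = std::strtoll(s.c_str(), nullptr, 10);
    second_ = std::strtoll(s.c_str() + dot + 1, nullptr, 10);
  }
  std::string ToString() const {
    char buf[64];
    std::snprintf(buf, sizeof(buf), "%" PRId64 ".%" PRId64, first_, second_);
    return buf;
  }
  // All-zero means "never generated"; generate() nudges that case away.
  bool IsValid() const { return first_ || second_; }

 private:
  int64_t first_ = 0;
  int64_t second_ = 0;
};

int main() {
  DebuggerId id(std::string("42.-7"));
  std::printf("%s valid=%d\n", id.ToString().c_str(), id.IsValid());
}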
diff --git a/deps/v8/src/inspector/v8-debugger.h b/deps/v8/src/inspector/v8-debugger.h
index ba64c4c032..a078d14f3d 100644
--- a/deps/v8/src/inspector/v8-debugger.h
+++ b/deps/v8/src/inspector/v8-debugger.h
@@ -6,6 +6,7 @@
#define V8_INSPECTOR_V8_DEBUGGER_H_
#include <list>
+#include <memory>
#include <unordered_map>
#include <unordered_set>
#include <vector>
@@ -36,6 +37,31 @@ using protocol::Response;
using TerminateExecutionCallback =
protocol::Runtime::Backend::TerminateExecutionCallback;
+// This debugger id tries to be unique by generating two random
+// numbers, which makes collisions very unlikely.
+// A debugger id has a 1:1 mapping to a context group. It is used to
+// attribute stack traces to a particular debugger when doing
+// cross-debugger operations (e.g. async step in).
+// See also Runtime.UniqueDebuggerId in the protocol.
+class V8DebuggerId {
+ public:
+ V8DebuggerId() = default;
+ explicit V8DebuggerId(std::pair<int64_t, int64_t>);
+ explicit V8DebuggerId(const String16&);
+ V8DebuggerId(const V8DebuggerId&) V8_NOEXCEPT = default;
+ ~V8DebuggerId() = default;
+
+ static V8DebuggerId generate(v8::Isolate*);
+
+ String16 toString() const;
+ bool isValid() const;
+ std::pair<int64_t, int64_t> pair() const;
+
+ private:
+ int64_t m_first = 0;
+ int64_t m_second = 0;
+};
+
class V8Debugger : public v8::debug::DebugDelegate,
public v8::debug::AsyncEventDelegate {
public:
@@ -59,8 +85,6 @@ class V8Debugger : public v8::debug::DebugDelegate,
void stepIntoStatement(int targetContextGroupId, bool breakOnAsyncCall);
void stepOverStatement(int targetContextGroupId);
void stepOutOfFunction(int targetContextGroupId);
- void pauseOnAsyncCall(int targetContextGroupId, uintptr_t task,
- const String16& debuggerId);
void terminateExecution(std::unique_ptr<TerminateExecutionCallback> callback);
@@ -121,13 +145,7 @@ class V8Debugger : public v8::debug::DebugDelegate,
void setMaxAsyncTaskStacksForTest(int limit);
void dumpAsyncTaskStacksStateForTest();
- v8_inspector::V8StackTraceId scheduledAsyncCall() {
- return m_scheduledAsyncCall;
- }
-
- std::pair<int64_t, int64_t> debuggerIdFor(int contextGroupId);
- std::pair<int64_t, int64_t> debuggerIdFor(
- const String16& serializedDebuggerId);
+ V8DebuggerId debuggerIdFor(int contextGroupId);
std::shared_ptr<AsyncStackTrace> stackTraceFor(int contextGroupId,
const V8StackTraceId& id);
@@ -173,7 +191,7 @@ class V8Debugger : public v8::debug::DebugDelegate,
void asyncTaskStartedForStack(void* task);
void asyncTaskFinishedForStack(void* task);
- void asyncTaskCandidateForStepping(void* task, bool isLocal);
+ void asyncTaskCandidateForStepping(void* task);
void asyncTaskStartedForStepping(void* task);
void asyncTaskFinishedForStepping(void* task);
void asyncTaskCanceledForStepping(void* task);
@@ -197,6 +215,8 @@ class V8Debugger : public v8::debug::DebugDelegate,
int currentContextGroupId();
bool asyncStepOutOfFunction(int targetContextGroupId, bool onlyAtReturn);
+ bool hasScheduledBreakOnNextFunctionCall() const;
+
v8::Isolate* m_isolate;
V8InspectorImpl* m_inspector;
int m_enableCount;
@@ -233,23 +253,24 @@ class V8Debugger : public v8::debug::DebugDelegate,
std::unordered_map<V8DebuggerAgentImpl*, int> m_maxAsyncCallStackDepthMap;
void* m_taskWithScheduledBreak = nullptr;
- String16 m_taskWithScheduledBreakDebuggerId;
- bool m_breakRequested = false;
+ // If any of the following three is true, we schedule pause on next JS
+ // execution using SetBreakOnNextFunctionCall.
+ bool m_externalAsyncTaskPauseRequested = false; // External async task.
+ bool m_taskWithScheduledBreakPauseRequested = false; // Local async task.
+ bool m_pauseOnNextCallRequested = false; // setPauseOnNextCall API call.
v8::debug::ExceptionBreakState m_pauseOnExceptionsState;
+ // Whether we should pause on async call execution (if any) while stepping in.
+ // See Debugger.stepInto for details.
bool m_pauseOnAsyncCall = false;
- v8_inspector::V8StackTraceId m_scheduledAsyncCall;
using StackTraceIdToStackTrace =
std::unordered_map<uintptr_t, std::weak_ptr<AsyncStackTrace>>;
StackTraceIdToStackTrace m_storedStackTraces;
uintptr_t m_lastStackTraceId = 0;
- std::unordered_map<int, std::pair<int64_t, int64_t>>
- m_contextGroupIdToDebuggerId;
- std::unordered_map<String16, std::pair<int64_t, int64_t>>
- m_serializedDebuggerIdToDebuggerId;
+ std::unordered_map<int, V8DebuggerId> m_contextGroupIdToDebuggerId;
std::unique_ptr<TerminateExecutionCallback> m_terminateExecutionCallback;
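The header change swaps the single m_breakRequested bool plus a debugger-id string for three independent request bits. The invariant, visible in the .cc hunks above, is that SetBreakOnNextFunctionCall is armed when the first requester appears and cleared only once hasScheduledBreakOnNextFunctionCall() goes false, so concurrent requesters cannot cancel each other. A minimal sketch of that any-of-three latch:

#include <cstdio>

// Three independent reasons to pause on the next call, as in V8Debugger.
struct PauseState {
  bool external_async_requested = false;
  bool local_task_requested = false;
  bool pause_on_next_call_requested = false;
  bool AnyScheduled() const {
    return external_async_requested || local_task_requested ||
           pause_on_next_call_requested;
  }
};

// Arm the underlying break only for the first requester...
void RequestPauseOnNextCall(PauseState* s, bool* armed) {
  bool had_break = s->AnyScheduled();
  s->pause_on_next_call_requested = true;
  if (!had_break) *armed = true;  // stands in for SetBreakOnNextFunctionCall
}

// ...and disarm only once no requester remains.
void CancelPauseOnNextCall(PauseState* s, bool* armed) {
  s->pause_on_next_call_requested = false;
  if (!s->AnyScheduled()) *armed = false;  // ClearBreakOnNextFunctionCall
}

int main() {
  PauseState state;
  bool armed = false;
  RequestPauseOnNextCall(&state, &armed);  // first requester arms the break
  state.external_async_requested = true;   // a second requester appears
  CancelPauseOnNextCall(&state, &armed);   // still scheduled elsewhere
  std::printf("armed=%d\n", armed);        // prints armed=1
}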
diff --git a/deps/v8/src/inspector/v8-heap-profiler-agent-impl.cc b/deps/v8/src/inspector/v8-heap-profiler-agent-impl.cc
index fcee8a6ef3..02aa1ad9fe 100644
--- a/deps/v8/src/inspector/v8-heap-profiler-agent-impl.cc
+++ b/deps/v8/src/inspector/v8-heap-profiler-agent-impl.cc
@@ -4,7 +4,6 @@
#include "src/inspector/v8-heap-profiler-agent-impl.h"
-#include "src/base/template-utils.h"
#include "src/inspector/injected-script.h"
#include "src/inspector/inspected-context.h"
#include "src/inspector/protocol/Protocol.h"
@@ -128,7 +127,7 @@ class HeapStatsStream final : public v8::OutputStream {
WriteResult WriteHeapStatsChunk(v8::HeapStatsUpdate* updateData,
int count) override {
DCHECK_GT(count, 0);
- auto statsDiff = v8::base::make_unique<protocol::Array<int>>();
+ auto statsDiff = std::make_unique<protocol::Array<int>>();
for (int i = 0; i < count; ++i) {
statsDiff->emplace_back(updateData[i].index);
statsDiff->emplace_back(updateData[i].count);
@@ -337,7 +336,7 @@ namespace {
std::unique_ptr<protocol::HeapProfiler::SamplingHeapProfileNode>
buildSampingHeapProfileNode(v8::Isolate* isolate,
const v8::AllocationProfile::Node* node) {
- auto children = v8::base::make_unique<
+ auto children = std::make_unique<
protocol::Array<protocol::HeapProfiler::SamplingHeapProfileNode>>();
for (const auto* child : node->children)
children->emplace_back(buildSampingHeapProfileNode(isolate, child));
@@ -384,7 +383,7 @@ Response V8HeapProfilerAgentImpl::getSamplingProfile(
if (!v8Profile)
return Response::Error("V8 sampling heap profiler was not started.");
v8::AllocationProfile::Node* root = v8Profile->GetRootNode();
- auto samples = v8::base::make_unique<
+ auto samples = std::make_unique<
protocol::Array<protocol::HeapProfiler::SamplingHeapProfileSample>>();
for (const auto& sample : v8Profile->GetSamples()) {
samples->emplace_back(
diff --git a/deps/v8/src/inspector/v8-heap-profiler-agent-impl.h b/deps/v8/src/inspector/v8-heap-profiler-agent-impl.h
index 5c2107d573..665e30be94 100644
--- a/deps/v8/src/inspector/v8-heap-profiler-agent-impl.h
+++ b/deps/v8/src/inspector/v8-heap-profiler-agent-impl.h
@@ -5,6 +5,8 @@
#ifndef V8_INSPECTOR_V8_HEAP_PROFILER_AGENT_IMPL_H_
#define V8_INSPECTOR_V8_HEAP_PROFILER_AGENT_IMPL_H_
+#include <memory>
+
#include "src/base/macros.h"
#include "src/inspector/protocol/Forward.h"
#include "src/inspector/protocol/HeapProfiler.h"
diff --git a/deps/v8/src/inspector/v8-inspector-impl.cc b/deps/v8/src/inspector/v8-inspector-impl.cc
index b764118079..e91dd7f7f4 100644
--- a/deps/v8/src/inspector/v8-inspector-impl.cc
+++ b/deps/v8/src/inspector/v8-inspector-impl.cc
@@ -439,7 +439,7 @@ protocol::Response V8InspectorImpl::EvaluateScope::setTimeout(double timeout) {
}
m_cancelToken.reset(new CancelToken());
v8::debug::GetCurrentPlatform()->CallDelayedOnWorkerThread(
- v8::base::make_unique<TerminateTask>(m_isolate, m_cancelToken), timeout);
+ std::make_unique<TerminateTask>(m_isolate, m_cancelToken), timeout);
return protocol::Response::OK();
}
diff --git a/deps/v8/src/inspector/v8-inspector-impl.h b/deps/v8/src/inspector/v8-inspector-impl.h
index 5b89cb0920..6276d6d7f6 100644
--- a/deps/v8/src/inspector/v8-inspector-impl.h
+++ b/deps/v8/src/inspector/v8-inspector-impl.h
@@ -33,6 +33,7 @@
#include <functional>
#include <map>
+#include <memory>
#include <unordered_map>
#include "src/base/macros.h"
diff --git a/deps/v8/src/inspector/v8-inspector-session-impl.h b/deps/v8/src/inspector/v8-inspector-session-impl.h
index 7a976bcd40..786dc2a048 100644
--- a/deps/v8/src/inspector/v8-inspector-session-impl.h
+++ b/deps/v8/src/inspector/v8-inspector-session-impl.h
@@ -5,6 +5,7 @@
#ifndef V8_INSPECTOR_V8_INSPECTOR_SESSION_IMPL_H_
#define V8_INSPECTOR_V8_INSPECTOR_SESSION_IMPL_H_
+#include <memory>
#include <vector>
#include "src/base/macros.h"
diff --git a/deps/v8/src/inspector/v8-profiler-agent-impl.cc b/deps/v8/src/inspector/v8-profiler-agent-impl.cc
index 3b02f7faa1..286a18a673 100644
--- a/deps/v8/src/inspector/v8-profiler-agent-impl.cc
+++ b/deps/v8/src/inspector/v8-profiler-agent-impl.cc
@@ -44,8 +44,8 @@ std::unique_ptr<protocol::Array<protocol::Profiler::PositionTickInfo>>
buildInspectorObjectForPositionTicks(const v8::CpuProfileNode* node) {
unsigned lineCount = node->GetHitLineCount();
if (!lineCount) return nullptr;
- auto array = v8::base::make_unique<
- protocol::Array<protocol::Profiler::PositionTickInfo>>();
+ auto array =
+ std::make_unique<protocol::Array<protocol::Profiler::PositionTickInfo>>();
std::vector<v8::CpuProfileNode::LineTick> entries(lineCount);
if (node->GetLineTicks(&entries[0], lineCount)) {
for (unsigned i = 0; i < lineCount; i++) {
@@ -80,7 +80,7 @@ std::unique_ptr<protocol::Profiler::ProfileNode> buildInspectorObjectFor(
const int childrenCount = node->GetChildrenCount();
if (childrenCount) {
- auto children = v8::base::make_unique<protocol::Array<int>>();
+ auto children = std::make_unique<protocol::Array<int>>();
for (int i = 0; i < childrenCount; i++)
children->emplace_back(node->GetChild(i)->GetNodeId());
result->setChildren(std::move(children));
@@ -98,7 +98,7 @@ std::unique_ptr<protocol::Profiler::ProfileNode> buildInspectorObjectFor(
std::unique_ptr<protocol::Array<int>> buildInspectorObjectForSamples(
v8::CpuProfile* v8profile) {
- auto array = v8::base::make_unique<protocol::Array<int>>();
+ auto array = std::make_unique<protocol::Array<int>>();
int count = v8profile->GetSamplesCount();
for (int i = 0; i < count; i++)
array->emplace_back(v8profile->GetSample(i)->GetNodeId());
@@ -107,7 +107,7 @@ std::unique_ptr<protocol::Array<int>> buildInspectorObjectForSamples(
std::unique_ptr<protocol::Array<int>> buildInspectorObjectForTimestamps(
v8::CpuProfile* v8profile) {
- auto array = v8::base::make_unique<protocol::Array<int>>();
+ auto array = std::make_unique<protocol::Array<int>>();
int count = v8profile->GetSamplesCount();
uint64_t lastTime = v8profile->GetStartTime();
for (int i = 0; i < count; i++) {
@@ -130,7 +130,7 @@ void flattenNodesTree(V8InspectorImpl* inspector,
std::unique_ptr<protocol::Profiler::Profile> createCPUProfile(
V8InspectorImpl* inspector, v8::CpuProfile* v8profile) {
auto nodes =
- v8::base::make_unique<protocol::Array<protocol::Profiler::ProfileNode>>();
+ std::make_unique<protocol::Array<protocol::Profiler::ProfileNode>>();
flattenNodesTree(inspector, v8profile->GetTopDownRoot(), nodes.get());
return protocol::Profiler::Profile::create()
.setNodes(std::move(nodes))
@@ -338,18 +338,18 @@ Response coverageToProtocol(
V8InspectorImpl* inspector, const v8::debug::Coverage& coverage,
std::unique_ptr<protocol::Array<protocol::Profiler::ScriptCoverage>>*
out_result) {
- auto result = v8::base::make_unique<
- protocol::Array<protocol::Profiler::ScriptCoverage>>();
+ auto result =
+ std::make_unique<protocol::Array<protocol::Profiler::ScriptCoverage>>();
v8::Isolate* isolate = inspector->isolate();
for (size_t i = 0; i < coverage.ScriptCount(); i++) {
v8::debug::Coverage::ScriptData script_data = coverage.GetScriptData(i);
v8::Local<v8::debug::Script> script = script_data.GetScript();
- auto functions = v8::base::make_unique<
+ auto functions = std::make_unique<
protocol::Array<protocol::Profiler::FunctionCoverage>>();
for (size_t j = 0; j < script_data.FunctionCount(); j++) {
v8::debug::Coverage::FunctionData function_data =
script_data.GetFunctionData(j);
- auto ranges = v8::base::make_unique<
+ auto ranges = std::make_unique<
protocol::Array<protocol::Profiler::CoverageRange>>();
// Add function range.
@@ -418,19 +418,19 @@ namespace {
std::unique_ptr<protocol::Array<protocol::Profiler::ScriptTypeProfile>>
typeProfileToProtocol(V8InspectorImpl* inspector,
const v8::debug::TypeProfile& type_profile) {
- auto result = v8::base::make_unique<
+ auto result = std::make_unique<
protocol::Array<protocol::Profiler::ScriptTypeProfile>>();
v8::Isolate* isolate = inspector->isolate();
for (size_t i = 0; i < type_profile.ScriptCount(); i++) {
v8::debug::TypeProfile::ScriptData script_data =
type_profile.GetScriptData(i);
v8::Local<v8::debug::Script> script = script_data.GetScript();
- auto entries = v8::base::make_unique<
+ auto entries = std::make_unique<
protocol::Array<protocol::Profiler::TypeProfileEntry>>();
for (const auto& entry : script_data.Entries()) {
- auto types = v8::base::make_unique<
- protocol::Array<protocol::Profiler::TypeObject>>();
+ auto types =
+ std::make_unique<protocol::Array<protocol::Profiler::TypeObject>>();
for (const auto& type : entry.Types()) {
types->emplace_back(
protocol::Profiler::TypeObject::create()
diff --git a/deps/v8/src/inspector/v8-profiler-agent-impl.h b/deps/v8/src/inspector/v8-profiler-agent-impl.h
index 5370d39eb4..832d2ce139 100644
--- a/deps/v8/src/inspector/v8-profiler-agent-impl.h
+++ b/deps/v8/src/inspector/v8-profiler-agent-impl.h
@@ -5,6 +5,7 @@
#ifndef V8_INSPECTOR_V8_PROFILER_AGENT_IMPL_H_
#define V8_INSPECTOR_V8_PROFILER_AGENT_IMPL_H_
+#include <memory>
#include <vector>
#include "src/base/macros.h"
diff --git a/deps/v8/src/inspector/v8-runtime-agent-impl.cc b/deps/v8/src/inspector/v8-runtime-agent-impl.cc
index a8aee0b7f3..4dfc210edc 100644
--- a/deps/v8/src/inspector/v8-runtime-agent-impl.cc
+++ b/deps/v8/src/inspector/v8-runtime-agent-impl.cc
@@ -235,7 +235,8 @@ void V8RuntimeAgentImpl::evaluate(
Maybe<int> executionContextId, Maybe<bool> returnByValue,
Maybe<bool> generatePreview, Maybe<bool> userGesture,
Maybe<bool> awaitPromise, Maybe<bool> throwOnSideEffect,
- Maybe<double> timeout, std::unique_ptr<EvaluateCallback> callback) {
+ Maybe<double> timeout, Maybe<bool> disableBreaks,
+ std::unique_ptr<EvaluateCallback> callback) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("devtools.timeline"),
"EvaluateScript");
int contextId = 0;
@@ -272,9 +273,16 @@ void V8RuntimeAgentImpl::evaluate(
}
v8::MicrotasksScope microtasksScope(m_inspector->isolate(),
v8::MicrotasksScope::kRunMicrotasks);
+ v8::debug::EvaluateGlobalMode mode =
+ v8::debug::EvaluateGlobalMode::kDefault;
+ if (throwOnSideEffect.fromMaybe(false)) {
+ mode = v8::debug::EvaluateGlobalMode::kDisableBreaksAndThrowOnSideEffect;
+ } else if (disableBreaks.fromMaybe(false)) {
+ mode = v8::debug::EvaluateGlobalMode::kDisableBreaks;
+ }
maybeResultValue = v8::debug::EvaluateGlobal(
m_inspector->isolate(), toV8String(m_inspector->isolate(), expression),
- throwOnSideEffect.fromMaybe(false));
+ mode);
} // Run microtasks before returning result.
// Re-initialize after running client's code, as it could have destroyed
@@ -613,7 +621,7 @@ Response V8RuntimeAgentImpl::globalLexicalScopeNames(
v8::PersistentValueVector<v8::String> names(m_inspector->isolate());
v8::debug::GlobalLexicalScopeNames(scope.context(), &names);
- *outNames = v8::base::make_unique<protocol::Array<String16>>();
+ *outNames = std::make_unique<protocol::Array<String16>>();
for (size_t i = 0; i < names.Size(); ++i) {
(*outNames)->emplace_back(
toProtocolString(m_inspector->isolate(), names.Get(i)));
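Runtime.evaluate grows a disableBreaks option, folded together with throwOnSideEffect into the three-valued EvaluateGlobalMode before calling into the debugger; side-effect checking implies disabling breaks, so it takes precedence. A sketch of that selection, with the Maybe<bool> parameters modeled as plain bools:

#include <cstdio>

enum class EvaluateGlobalMode {
  kDefault,
  kDisableBreaks,
  kDisableBreaksAndThrowOnSideEffect,
};

// throwOnSideEffect subsumes disableBreaks, so it is checked first.
EvaluateGlobalMode SelectMode(bool throw_on_side_effect, bool disable_breaks) {
  if (throw_on_side_effect)
    return EvaluateGlobalMode::kDisableBreaksAndThrowOnSideEffect;
  if (disable_breaks) return EvaluateGlobalMode::kDisableBreaks;
  return EvaluateGlobalMode::kDefault;
}

int main() {
  // Both requested: side-effect mode wins.
  std::printf("%d\n", static_cast<int>(SelectMode(true, true)));  // prints 2
}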
diff --git a/deps/v8/src/inspector/v8-runtime-agent-impl.h b/deps/v8/src/inspector/v8-runtime-agent-impl.h
index a2002e3660..7ecbafd611 100644
--- a/deps/v8/src/inspector/v8-runtime-agent-impl.h
+++ b/deps/v8/src/inspector/v8-runtime-agent-impl.h
@@ -31,6 +31,7 @@
#ifndef V8_INSPECTOR_V8_RUNTIME_AGENT_IMPL_H_
#define V8_INSPECTOR_V8_RUNTIME_AGENT_IMPL_H_
+#include <memory>
#include <unordered_map>
#include "src/base/macros.h"
@@ -66,7 +67,7 @@ class V8RuntimeAgentImpl : public protocol::Runtime::Backend {
Maybe<int> executionContextId, Maybe<bool> returnByValue,
Maybe<bool> generatePreview, Maybe<bool> userGesture,
Maybe<bool> awaitPromise, Maybe<bool> throwOnSideEffect,
- Maybe<double> timeout,
+ Maybe<double> timeout, Maybe<bool> disableBreaks,
std::unique_ptr<EvaluateCallback>) override;
void awaitPromise(const String16& promiseObjectId, Maybe<bool> returnByValue,
Maybe<bool> generatePreview,
diff --git a/deps/v8/src/inspector/v8-schema-agent-impl.cc b/deps/v8/src/inspector/v8-schema-agent-impl.cc
index 808f59b0bf..ae19416d1f 100644
--- a/deps/v8/src/inspector/v8-schema-agent-impl.cc
+++ b/deps/v8/src/inspector/v8-schema-agent-impl.cc
@@ -4,7 +4,6 @@
#include "src/inspector/v8-schema-agent-impl.h"
-#include "src/base/template-utils.h"
#include "src/inspector/protocol/Protocol.h"
#include "src/inspector/v8-inspector-session-impl.h"
@@ -19,9 +18,9 @@ V8SchemaAgentImpl::~V8SchemaAgentImpl() = default;
Response V8SchemaAgentImpl::getDomains(
std::unique_ptr<protocol::Array<protocol::Schema::Domain>>* result) {
- *result = v8::base::make_unique<
- std::vector<std::unique_ptr<protocol::Schema::Domain>>>(
- m_session->supportedDomainsImpl());
+ *result =
+ std::make_unique<std::vector<std::unique_ptr<protocol::Schema::Domain>>>(
+ m_session->supportedDomainsImpl());
return Response::OK();
}
diff --git a/deps/v8/src/inspector/v8-schema-agent-impl.h b/deps/v8/src/inspector/v8-schema-agent-impl.h
index b96cce1401..1251e98bc5 100644
--- a/deps/v8/src/inspector/v8-schema-agent-impl.h
+++ b/deps/v8/src/inspector/v8-schema-agent-impl.h
@@ -5,6 +5,8 @@
#ifndef V8_INSPECTOR_V8_SCHEMA_AGENT_IMPL_H_
#define V8_INSPECTOR_V8_SCHEMA_AGENT_IMPL_H_
+#include <memory>
+
#include "src/base/macros.h"
#include "src/inspector/protocol/Forward.h"
#include "src/inspector/protocol/Schema.h"
diff --git a/deps/v8/src/inspector/v8-stack-trace-impl.cc b/deps/v8/src/inspector/v8-stack-trace-impl.cc
index e2be811069..04feca284c 100644
--- a/deps/v8/src/inspector/v8-stack-trace-impl.cc
+++ b/deps/v8/src/inspector/v8-stack-trace-impl.cc
@@ -6,7 +6,6 @@
#include <algorithm>
-#include "src/base/template-utils.h"
#include "src/inspector/v8-debugger.h"
#include "src/inspector/v8-inspector-impl.h"
#include "src/inspector/wasm-translation.h"
@@ -17,6 +16,10 @@ int V8StackTraceImpl::maxCallStackSizeToCapture = 200;
namespace {
+static const char kId[] = "id";
+static const char kDebuggerId[] = "debuggerId";
+static const char kShouldPause[] = "shouldPause";
+
static const v8::StackTrace::StackTraceOptions stackTraceOptions =
static_cast<v8::StackTrace::StackTraceOptions>(
v8::StackTrace::kDetailed |
@@ -74,7 +77,7 @@ std::unique_ptr<protocol::Runtime::StackTrace> buildInspectorObjectCommon(
}
auto inspectorFrames =
- v8::base::make_unique<protocol::Array<protocol::Runtime::CallFrame>>();
+ std::make_unique<protocol::Array<protocol::Runtime::CallFrame>>();
for (const std::shared_ptr<StackFrame>& frame : frames) {
V8InspectorClient* client = nullptr;
if (debugger && debugger->inspector())
@@ -102,7 +105,7 @@ std::unique_ptr<protocol::Runtime::StackTrace> buildInspectorObjectCommon(
stackTrace->setParentId(
protocol::Runtime::StackTraceId::create()
.setId(stackTraceIdToString(externalParent.id))
- .setDebuggerId(debuggerIdToString(externalParent.debugger_id))
+ .setDebuggerId(V8DebuggerId(externalParent.debugger_id).toString())
.build());
}
return stackTrace;
@@ -110,14 +113,47 @@ std::unique_ptr<protocol::Runtime::StackTrace> buildInspectorObjectCommon(
} // namespace
-V8StackTraceId::V8StackTraceId() : id(0), debugger_id(std::make_pair(0, 0)) {}
+V8StackTraceId::V8StackTraceId() : id(0), debugger_id(V8DebuggerId().pair()) {}
V8StackTraceId::V8StackTraceId(uintptr_t id,
const std::pair<int64_t, int64_t> debugger_id)
: id(id), debugger_id(debugger_id) {}
+V8StackTraceId::V8StackTraceId(uintptr_t id,
+ const std::pair<int64_t, int64_t> debugger_id,
+ bool should_pause)
+ : id(id), debugger_id(debugger_id), should_pause(should_pause) {}
+
+V8StackTraceId::V8StackTraceId(const StringView& json)
+ : id(0), debugger_id(V8DebuggerId().pair()) {
+ auto dict =
+ protocol::DictionaryValue::cast(protocol::StringUtil::parseJSON(json));
+ if (!dict) return;
+ String16 s;
+ if (!dict->getString(kId, &s)) return;
+ bool isOk = false;
+ int64_t parsedId = s.toInteger64(&isOk);
+ if (!isOk || !parsedId) return;
+ if (!dict->getString(kDebuggerId, &s)) return;
+ V8DebuggerId debuggerId(s);
+ if (!debuggerId.isValid()) return;
+ if (!dict->getBoolean(kShouldPause, &should_pause)) return;
+ id = parsedId;
+ debugger_id = debuggerId.pair();
+}
+
bool V8StackTraceId::IsInvalid() const { return !id; }
+std::unique_ptr<StringBuffer> V8StackTraceId::ToString() {
+ if (IsInvalid()) return nullptr;
+ auto dict = protocol::DictionaryValue::create();
+ dict->setString(kId, String16::fromInteger64(id));
+ dict->setString(kDebuggerId, V8DebuggerId(debugger_id).toString());
+ dict->setBoolean(kShouldPause, should_pause);
+ String16 json = dict->toJSONString();
+ return StringBufferImpl::adopt(json);
+}
+
StackFrame::StackFrame(v8::Isolate* isolate, v8::Local<v8::StackFrame> v8Frame)
: m_functionName(toProtocolString(isolate, v8Frame->GetFunctionName())),
m_scriptId(String16::fromInteger(v8Frame->GetScriptId())),
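For orientation, the JSON emitted by V8StackTraceId::ToString() above is a flat object keyed by kId, kDebuggerId and kShouldPause. A minimal round-trip sketch through the new StringView constructor (hypothetical values, not part of the patch):

    // Illustrative only: serialize a stack trace id and parse it back.
    v8_inspector::V8StackTraceId original(
        /*id=*/42, std::make_pair(int64_t{1}, int64_t{2}), /*should_pause=*/true);
    std::unique_ptr<v8_inspector::StringBuffer> json = original.ToString();
    // json->string() holds something like
    //   {"id":"42","debuggerId":"1.2","shouldPause":true}
    v8_inspector::V8StackTraceId parsed(json->string());
    // If any field fails to parse, 'parsed' stays in the default state
    // (id == 0, i.e. IsInvalid()).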
diff --git a/deps/v8/src/inspector/value-mirror.cc b/deps/v8/src/inspector/value-mirror.cc
index 9edfbc1a21..903a5c6b02 100644
--- a/deps/v8/src/inspector/value-mirror.cc
+++ b/deps/v8/src/inspector/value-mirror.cc
@@ -372,8 +372,7 @@ class PrimitiveValueMirror final : public ValueMirror {
.setType(m_type)
.setDescription(descriptionForPrimitiveType(context, m_value))
.setOverflow(false)
- .setProperties(
- v8::base::make_unique<protocol::Array<PropertyPreview>>())
+ .setProperties(std::make_unique<protocol::Array<PropertyPreview>>())
.build();
if (m_value->IsNull())
(*preview)->setSubtype(RemoteObject::SubtypeEnum::Null);
@@ -438,8 +437,7 @@ class NumberMirror final : public ValueMirror {
.setType(RemoteObject::TypeEnum::Number)
.setDescription(description(&unserializable))
.setOverflow(false)
- .setProperties(
- v8::base::make_unique<protocol::Array<PropertyPreview>>())
+ .setProperties(std::make_unique<protocol::Array<PropertyPreview>>())
.build();
}
@@ -496,8 +494,7 @@ class BigIntMirror final : public ValueMirror {
.setType(RemoteObject::TypeEnum::Bigint)
.setDescription(descriptionForBigInt(context, m_value))
.setOverflow(false)
- .setProperties(
- v8::base::make_unique<protocol::Array<PropertyPreview>>())
+ .setProperties(std::make_unique<protocol::Array<PropertyPreview>>())
.build();
}
@@ -656,8 +653,7 @@ class FunctionMirror final : public ValueMirror {
.setType(RemoteObject::TypeEnum::Function)
.setDescription(descriptionForFunction(context, m_value))
.setOverflow(false)
- .setProperties(
- v8::base::make_unique<protocol::Array<PropertyPreview>>())
+ .setProperties(std::make_unique<protocol::Array<PropertyPreview>>())
.build();
}
@@ -939,7 +935,7 @@ class ObjectMirror final : public ValueMirror {
v8::Local<v8::Context> context, bool forEntry,
bool generatePreviewForTable, int* nameLimit, int* indexLimit,
std::unique_ptr<ObjectPreview>* result) const {
- auto properties = v8::base::make_unique<protocol::Array<PropertyPreview>>();
+ auto properties = std::make_unique<protocol::Array<PropertyPreview>>();
std::unique_ptr<protocol::Array<EntryPreview>> entriesPreview;
bool overflow = false;
@@ -996,8 +992,7 @@ class ObjectMirror final : public ValueMirror {
if (forEntry) {
overflow = true;
} else {
- entriesPreview =
- v8::base::make_unique<protocol::Array<EntryPreview>>();
+ entriesPreview = std::make_unique<protocol::Array<EntryPreview>>();
for (const auto& entry : entries) {
std::unique_ptr<ObjectPreview> valuePreview;
entry.value->buildEntryPreview(context, nameLimit, indexLimit,
@@ -1545,11 +1540,11 @@ std::unique_ptr<ValueMirror> clientMirror(v8::Local<v8::Context> context,
const String16& subtype) {
// TODO(alph): description and length retrieval should move to embedder.
if (subtype == "node") {
- return v8::base::make_unique<ObjectMirror>(
- value, subtype, descriptionForNode(context, value));
+ return std::make_unique<ObjectMirror>(value, subtype,
+ descriptionForNode(context, value));
}
if (subtype == "error") {
- return v8::base::make_unique<ObjectMirror>(
+ return std::make_unique<ObjectMirror>(
value, RemoteObject::SubtypeEnum::Error,
descriptionForError(context, value.As<v8::Object>(),
ErrorType::kClient));
@@ -1562,14 +1557,14 @@ std::unique_ptr<ValueMirror> clientMirror(v8::Local<v8::Context> context,
if (object->Get(context, toV8String(isolate, "length"))
.ToLocal(&lengthValue)) {
if (lengthValue->IsInt32()) {
- return v8::base::make_unique<ObjectMirror>(
+ return std::make_unique<ObjectMirror>(
value, RemoteObject::SubtypeEnum::Array,
descriptionForCollection(isolate, object,
lengthValue.As<v8::Int32>()->Value()));
}
}
}
- return v8::base::make_unique<ObjectMirror>(
+ return std::make_unique<ObjectMirror>(
value,
descriptionForObject(context->GetIsolate(), value.As<v8::Object>()));
}
@@ -1577,26 +1572,26 @@ std::unique_ptr<ValueMirror> clientMirror(v8::Local<v8::Context> context,
std::unique_ptr<ValueMirror> ValueMirror::create(v8::Local<v8::Context> context,
v8::Local<v8::Value> value) {
if (value->IsNull()) {
- return v8::base::make_unique<PrimitiveValueMirror>(
+ return std::make_unique<PrimitiveValueMirror>(
value, RemoteObject::TypeEnum::Object);
}
if (value->IsBoolean()) {
- return v8::base::make_unique<PrimitiveValueMirror>(
+ return std::make_unique<PrimitiveValueMirror>(
value, RemoteObject::TypeEnum::Boolean);
}
if (value->IsNumber()) {
- return v8::base::make_unique<NumberMirror>(value.As<v8::Number>());
+ return std::make_unique<NumberMirror>(value.As<v8::Number>());
}
v8::Isolate* isolate = context->GetIsolate();
if (value->IsString()) {
- return v8::base::make_unique<PrimitiveValueMirror>(
+ return std::make_unique<PrimitiveValueMirror>(
value, RemoteObject::TypeEnum::String);
}
if (value->IsBigInt()) {
- return v8::base::make_unique<BigIntMirror>(value.As<v8::BigInt>());
+ return std::make_unique<BigIntMirror>(value.As<v8::BigInt>());
}
if (value->IsSymbol()) {
- return v8::base::make_unique<SymbolMirror>(value.As<v8::Symbol>());
+ return std::make_unique<SymbolMirror>(value.As<v8::Symbol>());
}
auto clientSubtype = (value->IsUndefined() || value->IsObject())
? clientFor(context)->valueSubtype(value)
@@ -1606,121 +1601,121 @@ std::unique_ptr<ValueMirror> ValueMirror::create(v8::Local<v8::Context> context,
return clientMirror(context, value, subtype);
}
if (value->IsUndefined()) {
- return v8::base::make_unique<PrimitiveValueMirror>(
+ return std::make_unique<PrimitiveValueMirror>(
value, RemoteObject::TypeEnum::Undefined);
}
if (value->IsRegExp()) {
- return v8::base::make_unique<ObjectMirror>(
+ return std::make_unique<ObjectMirror>(
value, RemoteObject::SubtypeEnum::Regexp,
descriptionForRegExp(isolate, value.As<v8::RegExp>()));
}
if (value->IsProxy()) {
- return v8::base::make_unique<ObjectMirror>(
+ return std::make_unique<ObjectMirror>(
value, RemoteObject::SubtypeEnum::Proxy, "Proxy");
}
if (value->IsFunction()) {
- return v8::base::make_unique<FunctionMirror>(value);
+ return std::make_unique<FunctionMirror>(value);
}
if (value->IsDate()) {
- return v8::base::make_unique<ObjectMirror>(
+ return std::make_unique<ObjectMirror>(
value, RemoteObject::SubtypeEnum::Date,
descriptionForDate(context, value.As<v8::Date>()));
}
if (value->IsPromise()) {
v8::Local<v8::Promise> promise = value.As<v8::Promise>();
- return v8::base::make_unique<ObjectMirror>(
+ return std::make_unique<ObjectMirror>(
promise, RemoteObject::SubtypeEnum::Promise,
descriptionForObject(isolate, promise));
}
if (value->IsNativeError()) {
- return v8::base::make_unique<ObjectMirror>(
+ return std::make_unique<ObjectMirror>(
value, RemoteObject::SubtypeEnum::Error,
descriptionForError(context, value.As<v8::Object>(),
ErrorType::kNative));
}
if (value->IsMap()) {
v8::Local<v8::Map> map = value.As<v8::Map>();
- return v8::base::make_unique<ObjectMirror>(
+ return std::make_unique<ObjectMirror>(
value, RemoteObject::SubtypeEnum::Map,
descriptionForCollection(isolate, map, map->Size()));
}
if (value->IsSet()) {
v8::Local<v8::Set> set = value.As<v8::Set>();
- return v8::base::make_unique<ObjectMirror>(
+ return std::make_unique<ObjectMirror>(
value, RemoteObject::SubtypeEnum::Set,
descriptionForCollection(isolate, set, set->Size()));
}
if (value->IsWeakMap()) {
- return v8::base::make_unique<ObjectMirror>(
+ return std::make_unique<ObjectMirror>(
value, RemoteObject::SubtypeEnum::Weakmap,
descriptionForObject(isolate, value.As<v8::Object>()));
}
if (value->IsWeakSet()) {
- return v8::base::make_unique<ObjectMirror>(
+ return std::make_unique<ObjectMirror>(
value, RemoteObject::SubtypeEnum::Weakset,
descriptionForObject(isolate, value.As<v8::Object>()));
}
if (value->IsMapIterator() || value->IsSetIterator()) {
- return v8::base::make_unique<ObjectMirror>(
+ return std::make_unique<ObjectMirror>(
value, RemoteObject::SubtypeEnum::Iterator,
descriptionForObject(isolate, value.As<v8::Object>()));
}
if (value->IsGeneratorObject()) {
v8::Local<v8::Object> object = value.As<v8::Object>();
- return v8::base::make_unique<ObjectMirror>(
+ return std::make_unique<ObjectMirror>(
object, RemoteObject::SubtypeEnum::Generator,
descriptionForObject(isolate, object));
}
if (value->IsTypedArray()) {
v8::Local<v8::TypedArray> array = value.As<v8::TypedArray>();
- return v8::base::make_unique<ObjectMirror>(
+ return std::make_unique<ObjectMirror>(
value, RemoteObject::SubtypeEnum::Typedarray,
descriptionForCollection(isolate, array, array->Length()));
}
if (value->IsArrayBuffer()) {
v8::Local<v8::ArrayBuffer> buffer = value.As<v8::ArrayBuffer>();
- return v8::base::make_unique<ObjectMirror>(
+ return std::make_unique<ObjectMirror>(
value, RemoteObject::SubtypeEnum::Arraybuffer,
descriptionForCollection(isolate, buffer, buffer->ByteLength()));
}
if (value->IsSharedArrayBuffer()) {
v8::Local<v8::SharedArrayBuffer> buffer = value.As<v8::SharedArrayBuffer>();
- return v8::base::make_unique<ObjectMirror>(
+ return std::make_unique<ObjectMirror>(
value, RemoteObject::SubtypeEnum::Arraybuffer,
descriptionForCollection(isolate, buffer, buffer->ByteLength()));
}
if (value->IsDataView()) {
v8::Local<v8::DataView> view = value.As<v8::DataView>();
- return v8::base::make_unique<ObjectMirror>(
+ return std::make_unique<ObjectMirror>(
value, RemoteObject::SubtypeEnum::Dataview,
descriptionForCollection(isolate, view, view->ByteLength()));
}
V8InternalValueType internalType =
v8InternalValueTypeFrom(context, v8::Local<v8::Object>::Cast(value));
if (value->IsArray() && internalType == V8InternalValueType::kScopeList) {
- return v8::base::make_unique<ObjectMirror>(
+ return std::make_unique<ObjectMirror>(
value, "internal#scopeList",
descriptionForScopeList(value.As<v8::Array>()));
}
if (value->IsObject() && internalType == V8InternalValueType::kEntry) {
- return v8::base::make_unique<ObjectMirror>(
+ return std::make_unique<ObjectMirror>(
value, "internal#entry",
descriptionForEntry(context, value.As<v8::Object>()));
}
if (value->IsObject() && internalType == V8InternalValueType::kScope) {
- return v8::base::make_unique<ObjectMirror>(
+ return std::make_unique<ObjectMirror>(
value, "internal#scope",
descriptionForScope(context, value.As<v8::Object>()));
}
size_t length = 0;
if (value->IsArray() || isArrayLike(context, value, &length)) {
length = value->IsArray() ? value.As<v8::Array>()->Length() : length;
- return v8::base::make_unique<ObjectMirror>(
+ return std::make_unique<ObjectMirror>(
value, RemoteObject::SubtypeEnum::Array,
descriptionForCollection(isolate, value.As<v8::Object>(), length));
}
if (value->IsObject()) {
- return v8::base::make_unique<ObjectMirror>(
+ return std::make_unique<ObjectMirror>(
value, descriptionForObject(isolate, value.As<v8::Object>()));
}
return nullptr;
diff --git a/deps/v8/src/inspector/wasm-translation.cc b/deps/v8/src/inspector/wasm-translation.cc
index 4836a6bc4a..5a1526d142 100644
--- a/deps/v8/src/inspector/wasm-translation.cc
+++ b/deps/v8/src/inspector/wasm-translation.cc
@@ -67,15 +67,20 @@ class WasmTranslation::TranslatorImpl {
column(column) {}
};
- TranslatorImpl(v8::Isolate* isolate, v8::Local<v8::debug::WasmScript> script)
+ TranslatorImpl(v8::Isolate* isolate, WasmTranslation* translation,
+ v8::Local<v8::debug::WasmScript> script)
: script_(isolate, script) {
script_.AnnotateStrongRetainer(kGlobalScriptHandleLabel);
+
+ ForEachFunction(script, [this, translation](String16& script_id,
+ int func_idx) {
+ translation->AddFakeScript(GetFakeScriptId(script_id, func_idx), this);
+ });
}
- void Init(v8::Isolate* isolate, WasmTranslation* translation,
- V8DebuggerAgentImpl* agent) {
- // Register fake scripts for each function in this wasm module/script.
- v8::Local<v8::debug::WasmScript> script = script_.Get(isolate);
+ template <typename Callback>
+ void ForEachFunction(v8::Local<v8::debug::WasmScript> script,
+ Callback callback) {
int num_functions = script->NumFunctions();
int num_imported_functions = script->NumImportedFunctions();
DCHECK_LE(0, num_imported_functions);
@@ -84,10 +89,18 @@ class WasmTranslation::TranslatorImpl {
String16 script_id = String16::fromInteger(script->Id());
for (int func_idx = num_imported_functions; func_idx < num_functions;
++func_idx) {
- AddFakeScript(isolate, script_id, func_idx, translation, agent);
+ callback(script_id, func_idx);
}
}
+ void ReportFakeScripts(v8::Isolate* isolate, WasmTranslation* translation,
+ V8DebuggerAgentImpl* agent) {
+ ForEachFunction(
+ script_.Get(isolate), [=](String16& script_id, int func_idx) {
+ ReportFakeScript(isolate, script_id, func_idx, translation, agent);
+ });
+ }
+
void Translate(TransLocation* loc) {
const OffsetTable& offset_table = GetOffsetTable(loc);
DCHECK(!offset_table.empty());
@@ -212,9 +225,10 @@ class WasmTranslation::TranslatorImpl {
return GetFakeScriptId(loc->script_id, loc->line);
}
- void AddFakeScript(v8::Isolate* isolate, const String16& underlyingScriptId,
- int func_idx, WasmTranslation* translation,
- V8DebuggerAgentImpl* agent) {
+ void ReportFakeScript(v8::Isolate* isolate,
+ const String16& underlyingScriptId, int func_idx,
+ WasmTranslation* translation,
+ V8DebuggerAgentImpl* agent) {
String16 fake_script_id = GetFakeScriptId(underlyingScriptId, func_idx);
String16 fake_script_url = GetFakeScriptUrl(isolate, func_idx);
@@ -223,7 +237,6 @@ class WasmTranslation::TranslatorImpl {
fake_script_id, std::move(fake_script_url),
func_idx);
- translation->AddFakeScript(fake_script->scriptId(), this);
agent->didParseSource(std::move(fake_script), true);
}
@@ -254,6 +267,9 @@ class WasmTranslation::TranslatorImpl {
// We assume to only disassemble a subset of the functions, so store them in a
// map instead of an array.
std::unordered_map<int, WasmSourceInformation> source_informations_;
+
+ // Disallow copies, because our pointer is registered in translation.
+ DISALLOW_COPY_AND_ASSIGN(TranslatorImpl);
};
constexpr char WasmTranslation::TranslatorImpl::kGlobalScriptHandleLabel[];
@@ -264,15 +280,11 @@ WasmTranslation::~WasmTranslation() { Clear(); }
void WasmTranslation::AddScript(v8::Local<v8::debug::WasmScript> script,
V8DebuggerAgentImpl* agent) {
- std::unique_ptr<TranslatorImpl> impl;
- impl.reset(new TranslatorImpl(isolate_, script));
- DCHECK(impl);
- auto inserted =
- wasm_translators_.insert(std::make_pair(script->Id(), std::move(impl)));
- // Check that no mapping for this script id existed before.
- DCHECK(inserted.second);
- // impl has been moved, use the returned iterator to call Init.
- inserted.first->second->Init(isolate_, this, agent);
+ auto& impl = wasm_translators_[script->Id()];
+ if (impl == nullptr) {
+ impl = std::make_unique<TranslatorImpl>(isolate_, this, script);
+ }
+ impl->ReportFakeScripts(isolate_, this, agent);
}
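With fake-script registration moved into the TranslatorImpl constructor, AddScript above is idempotent per script id; a hypothetical call sequence (the script and agents are placeholders):

    // First call creates the TranslatorImpl and registers its fake scripts;
    // a later call with the same script id reuses the translator and only
    // re-reports the fake scripts to the given agent.
    translation->AddScript(wasm_script, agent_a);
    translation->AddScript(wasm_script, agent_b);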
void WasmTranslation::Clear() {
diff --git a/deps/v8/src/inspector/wasm-translation.h b/deps/v8/src/inspector/wasm-translation.h
index 2d41822e59..a19aa85205 100644
--- a/deps/v8/src/inspector/wasm-translation.h
+++ b/deps/v8/src/inspector/wasm-translation.h
@@ -5,6 +5,7 @@
#ifndef V8_INSPECTOR_WASM_TRANSLATION_H_
#define V8_INSPECTOR_WASM_TRANSLATION_H_
+#include <memory>
#include <unordered_map>
#include "include/v8.h"
diff --git a/deps/v8/src/interpreter/bytecode-array-accessor.cc b/deps/v8/src/interpreter/bytecode-array-accessor.cc
index d460c1a45f..0690e16aa9 100644
--- a/deps/v8/src/interpreter/bytecode-array-accessor.cc
+++ b/deps/v8/src/interpreter/bytecode-array-accessor.cc
@@ -66,7 +66,7 @@ BytecodeArrayAccessor::BytecodeArrayAccessor(
BytecodeArrayAccessor::BytecodeArrayAccessor(
Handle<BytecodeArray> bytecode_array, int initial_offset)
: BytecodeArrayAccessor(
- base::make_unique<OnHeapBytecodeArray>(bytecode_array),
+ std::make_unique<OnHeapBytecodeArray>(bytecode_array),
initial_offset) {}
void BytecodeArrayAccessor::SetOffset(int offset) {
diff --git a/deps/v8/src/interpreter/bytecode-array-accessor.h b/deps/v8/src/interpreter/bytecode-array-accessor.h
index 97278af7bd..92d0da6607 100644
--- a/deps/v8/src/interpreter/bytecode-array-accessor.h
+++ b/deps/v8/src/interpreter/bytecode-array-accessor.h
@@ -5,6 +5,8 @@
#ifndef V8_INTERPRETER_BYTECODE_ARRAY_ACCESSOR_H_
#define V8_INTERPRETER_BYTECODE_ARRAY_ACCESSOR_H_
+#include <memory>
+
#include "src/base/optional.h"
#include "src/common/globals.h"
#include "src/handles/handles.h"
diff --git a/deps/v8/src/interpreter/bytecode-array-builder.cc b/deps/v8/src/interpreter/bytecode-array-builder.cc
index cfc3eb36c1..1c61776cdf 100644
--- a/deps/v8/src/interpreter/bytecode-array-builder.cc
+++ b/deps/v8/src/interpreter/bytecode-array-builder.cc
@@ -824,9 +824,16 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::LoadKeyedProperty(
return *this;
}
-BytecodeArrayBuilder& BytecodeArrayBuilder::GetIterator(Register object,
- int feedback_slot) {
- OutputGetIterator(object, feedback_slot);
+BytecodeArrayBuilder& BytecodeArrayBuilder::LoadIteratorProperty(
+ Register object, int feedback_slot) {
+ size_t name_index = IteratorSymbolConstantPoolEntry();
+ OutputLdaNamedProperty(object, name_index, feedback_slot);
+ return *this;
+}
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::GetIterator(
+ Register object, int load_feedback_slot, int call_feedback_slot) {
+ OutputGetIterator(object, load_feedback_slot, call_feedback_slot);
return *this;
}
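The two-slot GetIterator above is consumed by BytecodeGenerator::BuildGetIterator later in this patch; a condensed sketch of the new call shape, where obj is a Register and the feedback helpers are the generator's existing utilities:

    // Reserve one feedback slot for the @@iterator load and one for the
    // call, then emit the fused bytecode (mirrors the generator change).
    int load_slot = feedback_index(feedback_spec()->AddLoadICSlot());
    int call_slot = feedback_index(feedback_spec()->AddCallICSlot());
    builder()
        ->StoreAccumulatorInRegister(obj)
        .GetIterator(obj, load_slot, call_slot);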
diff --git a/deps/v8/src/interpreter/bytecode-array-builder.h b/deps/v8/src/interpreter/bytecode-array-builder.h
index 06230f9270..39cd4fa6f6 100644
--- a/deps/v8/src/interpreter/bytecode-array-builder.h
+++ b/deps/v8/src/interpreter/bytecode-array-builder.h
@@ -135,7 +135,12 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final {
BytecodeArrayBuilder& LoadKeyedProperty(Register object, int feedback_slot);
// Named load property of the @@iterator symbol.
- BytecodeArrayBuilder& GetIterator(Register object, int feedback_slot);
+ BytecodeArrayBuilder& LoadIteratorProperty(Register object,
+ int feedback_slot);
+
+  // Load and call the property of the @@iterator symbol.
+ BytecodeArrayBuilder& GetIterator(Register object, int load_feedback_slot,
+ int call_feedback_slot);
// Named load property of the @@asyncIterator symbol.
BytecodeArrayBuilder& LoadAsyncIteratorProperty(Register object,
diff --git a/deps/v8/src/interpreter/bytecode-array-iterator.h b/deps/v8/src/interpreter/bytecode-array-iterator.h
index e6b58deadc..b992ffc037 100644
--- a/deps/v8/src/interpreter/bytecode-array-iterator.h
+++ b/deps/v8/src/interpreter/bytecode-array-iterator.h
@@ -5,6 +5,8 @@
#ifndef V8_INTERPRETER_BYTECODE_ARRAY_ITERATOR_H_
#define V8_INTERPRETER_BYTECODE_ARRAY_ITERATOR_H_
+#include <memory>
+
#include "src/interpreter/bytecode-array-accessor.h"
namespace v8 {
diff --git a/deps/v8/src/interpreter/bytecode-array-random-iterator.h b/deps/v8/src/interpreter/bytecode-array-random-iterator.h
index a3b69b7015..68905a146c 100644
--- a/deps/v8/src/interpreter/bytecode-array-random-iterator.h
+++ b/deps/v8/src/interpreter/bytecode-array-random-iterator.h
@@ -5,6 +5,8 @@
#ifndef V8_INTERPRETER_BYTECODE_ARRAY_RANDOM_ITERATOR_H_
#define V8_INTERPRETER_BYTECODE_ARRAY_RANDOM_ITERATOR_H_
+#include <memory>
+
#include "src/interpreter/bytecode-array-accessor.h"
#include "src/zone/zone-containers.h"
#include "src/zone/zone.h"
diff --git a/deps/v8/src/interpreter/bytecode-generator.cc b/deps/v8/src/interpreter/bytecode-generator.cc
index 29065d6a55..92ae15127e 100644
--- a/deps/v8/src/interpreter/bytecode-generator.cc
+++ b/deps/v8/src/interpreter/bytecode-generator.cc
@@ -2042,7 +2042,71 @@ void BytecodeGenerator::BuildClassLiteral(ClassLiteral* expr, Register name) {
VisitDeclarations(expr->scope()->declarations());
Register class_constructor = register_allocator()->NewRegister();
+ // Create the class brand symbol and store it on the context during class
+ // evaluation. This will be stored in the instance later in the constructor.
+ // We do this early so that invalid access to private methods or accessors
+  // in computed property keys throws.
+ if (expr->scope()->brand() != nullptr) {
+ Register brand = register_allocator()->NewRegister();
+ const AstRawString* class_name =
+ expr->scope()->class_variable() != nullptr
+ ? expr->scope()->class_variable()->raw_name()
+ : ast_string_constants()->empty_string();
+ builder()
+ ->LoadLiteral(class_name)
+ .StoreAccumulatorInRegister(brand)
+ .CallRuntime(Runtime::kCreatePrivateNameSymbol, brand);
+ BuildVariableAssignment(expr->scope()->brand(), Token::INIT,
+ HoleCheckMode::kElided);
+ }
+
AccessorTable<ClassLiteral::Property> private_accessors(zone());
+ for (int i = 0; i < expr->private_members()->length(); i++) {
+ ClassLiteral::Property* property = expr->private_members()->at(i);
+ DCHECK(property->is_private());
+ switch (property->kind()) {
+ case ClassLiteral::Property::FIELD: {
+ // Initialize the private field variables early.
+ // Create the private name symbols for fields during class
+ // evaluation and store them on the context. These will be
+ // used as keys later during instance or static initialization.
+ RegisterAllocationScope private_name_register_scope(this);
+ Register private_name = register_allocator()->NewRegister();
+ VisitForRegisterValue(property->key(), private_name);
+ builder()
+ ->LoadLiteral(property->key()->AsLiteral()->AsRawPropertyName())
+ .StoreAccumulatorInRegister(private_name)
+ .CallRuntime(Runtime::kCreatePrivateNameSymbol, private_name);
+ DCHECK_NOT_NULL(property->private_name_var());
+ BuildVariableAssignment(property->private_name_var(), Token::INIT,
+ HoleCheckMode::kElided);
+ break;
+ }
+ case ClassLiteral::Property::METHOD: {
+        // Private methods and accessors can be initialized later, so that
+        // home objects can be assigned right after their closures are
+        // created; access to them is guarded by the brand checks.
+ break;
+ }
+ // Collect private accessors into a table to merge the creation of
+ // those closures later.
+ case ClassLiteral::Property::GETTER: {
+ Literal* key = property->key()->AsLiteral();
+ DCHECK_NULL(private_accessors.LookupOrInsert(key)->getter);
+ private_accessors.LookupOrInsert(key)->getter = property;
+ break;
+ }
+ case ClassLiteral::Property::SETTER: {
+ Literal* key = property->key()->AsLiteral();
+ DCHECK_NULL(private_accessors.LookupOrInsert(key)->setter);
+ private_accessors.LookupOrInsert(key)->setter = property;
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ }
+
{
RegisterAllocationScope register_scope(this);
RegisterList args = register_allocator()->NewGrowableRegisterList();
@@ -2065,8 +2129,8 @@ void BytecodeGenerator::BuildClassLiteral(ClassLiteral* expr, Register name) {
.StoreAccumulatorInRegister(class_boilerplate);
// Create computed names and method values nodes to store into the literal.
- for (int i = 0; i < expr->properties()->length(); i++) {
- ClassLiteral::Property* property = expr->properties()->at(i);
+ for (int i = 0; i < expr->public_members()->length(); i++) {
+ ClassLiteral::Property* property = expr->public_members()->at(i);
if (property->is_computed_name()) {
Register key = register_allocator()->GrowRegisterList(&args);
@@ -2099,50 +2163,7 @@ void BytecodeGenerator::BuildClassLiteral(ClassLiteral* expr, Register name) {
}
}
- if (property->is_private()) {
- // Assign private class member's name variables.
- switch (property->kind()) {
- case ClassLiteral::Property::FIELD: {
- // Create the private name symbols for fields during class
- // evaluation and store them on the context. These will be
- // used as keys later during instance or static initialization.
- RegisterAllocationScope private_name_register_scope(this);
- Register private_name = register_allocator()->NewRegister();
- VisitForRegisterValue(property->key(), private_name);
- builder()
- ->LoadLiteral(property->key()->AsLiteral()->AsRawPropertyName())
- .StoreAccumulatorInRegister(private_name)
- .CallRuntime(Runtime::kCreatePrivateNameSymbol, private_name);
- DCHECK_NOT_NULL(property->private_name_var());
- BuildVariableAssignment(property->private_name_var(), Token::INIT,
- HoleCheckMode::kElided);
- break;
- }
- case ClassLiteral::Property::METHOD: {
- // Create the closures for private methods.
- VisitForAccumulatorValue(property->value());
- BuildVariableAssignment(property->private_name_var(), Token::INIT,
- HoleCheckMode::kElided);
- break;
- }
- case ClassLiteral::Property::GETTER: {
- Literal* key = property->key()->AsLiteral();
- DCHECK_NULL(private_accessors.LookupOrInsert(key)->getter);
- private_accessors.LookupOrInsert(key)->getter = property;
- break;
- }
- case ClassLiteral::Property::SETTER: {
- Literal* key = property->key()->AsLiteral();
- DCHECK_NULL(private_accessors.LookupOrInsert(key)->setter);
- private_accessors.LookupOrInsert(key)->setter = property;
- break;
- }
- }
- // The private fields are initialized in the initializer function and
- // the private brand for the private methods are initialized in the
- // constructor instead.
- continue;
- }
+ DCHECK(!property->is_private());
if (property->kind() == ClassLiteral::Property::FIELD) {
// We don't compute field's value here, but instead do it in the
@@ -2160,60 +2181,55 @@ void BytecodeGenerator::BuildClassLiteral(ClassLiteral* expr, Register name) {
builder()->StoreAccumulatorInRegister(prototype);
// Assign to class variable.
- if (expr->class_variable() != nullptr) {
- DCHECK(expr->class_variable()->IsStackLocal() ||
- expr->class_variable()->IsContextSlot());
+ Variable* class_variable = expr->scope()->class_variable();
+ if (class_variable != nullptr && class_variable->is_used()) {
+ DCHECK(class_variable->IsStackLocal() || class_variable->IsContextSlot());
builder()->LoadAccumulatorWithRegister(class_constructor);
- BuildVariableAssignment(expr->class_variable(), Token::INIT,
+ BuildVariableAssignment(class_variable, Token::INIT,
HoleCheckMode::kElided);
}
- // Create the class brand symbol and store it on the context
- // during class evaluation. This will be stored in the
- // receiver later in the constructor.
- if (expr->scope()->brand() != nullptr) {
- Register brand = register_allocator()->NewRegister();
- const AstRawString* class_name =
- expr->class_variable() != nullptr
- ? expr->class_variable()->raw_name()
- : ast_string_constants()->empty_string();
- builder()
- ->LoadLiteral(class_name)
- .StoreAccumulatorInRegister(brand)
- .CallRuntime(Runtime::kCreatePrivateNameSymbol, brand);
- BuildVariableAssignment(expr->scope()->brand(), Token::INIT,
- HoleCheckMode::kElided);
-
- // Store the home object for any private methods that need
- // them. We do this here once the prototype and brand symbol has
- // been created. Private accessors have their home object set later
- // when they are defined.
- for (int i = 0; i < expr->properties()->length(); i++) {
+ // Create the closures of private methods, and store the home object for
+ // any private methods that need them.
+ if (expr->has_private_methods()) {
+ for (int i = 0; i < expr->private_members()->length(); i++) {
+ ClassLiteral::Property* property = expr->private_members()->at(i);
+ if (property->kind() != ClassLiteral::Property::METHOD) {
+ continue;
+ }
RegisterAllocationScope register_scope(this);
- ClassLiteral::Property* property = expr->properties()->at(i);
+ VisitForAccumulatorValue(property->value());
+ BuildVariableAssignment(property->private_name_var(), Token::INIT,
+ HoleCheckMode::kElided);
+ Register home_object = property->private_name_var()->is_static()
+ ? class_constructor
+ : prototype;
if (property->NeedsHomeObjectOnClassPrototype()) {
Register func = register_allocator()->NewRegister();
- BuildVariableLoad(property->private_name_var(), HoleCheckMode::kElided);
builder()->StoreAccumulatorInRegister(func);
- VisitSetHomeObject(func, prototype, property);
+ VisitSetHomeObject(func, home_object, property);
}
}
+ }
- // Define accessors, using only a single call to the runtime for each pair
- // of corresponding getters and setters.
- for (auto accessors : private_accessors.ordered_accessors()) {
- RegisterAllocationScope inner_register_scope(this);
- RegisterList accessors_reg = register_allocator()->NewRegisterList(2);
- ClassLiteral::Property* getter = accessors.second->getter;
- ClassLiteral::Property* setter = accessors.second->setter;
- VisitLiteralAccessor(prototype, getter, accessors_reg[0]);
- VisitLiteralAccessor(prototype, setter, accessors_reg[1]);
- builder()->CallRuntime(Runtime::kCreatePrivateAccessors, accessors_reg);
- Variable* var = getter != nullptr ? getter->private_name_var()
- : setter->private_name_var();
- DCHECK_NOT_NULL(var);
- BuildVariableAssignment(var, Token::INIT, HoleCheckMode::kElided);
- }
+ // Define private accessors, using only a single call to the runtime for
+ // each pair of corresponding getters and setters, in the order the first
+ // component is declared. Store the home objects if necessary.
+ for (auto accessors : private_accessors.ordered_accessors()) {
+ RegisterAllocationScope inner_register_scope(this);
+ RegisterList accessors_reg = register_allocator()->NewRegisterList(2);
+ ClassLiteral::Property* getter = accessors.second->getter;
+ ClassLiteral::Property* setter = accessors.second->setter;
+ bool is_static =
+ getter != nullptr ? getter->is_static() : setter->is_static();
+ Register home_object = is_static ? class_constructor : prototype;
+ VisitLiteralAccessor(home_object, getter, accessors_reg[0]);
+ VisitLiteralAccessor(home_object, setter, accessors_reg[1]);
+ builder()->CallRuntime(Runtime::kCreatePrivateAccessors, accessors_reg);
+ Variable* var = getter != nullptr ? getter->private_name_var()
+ : setter->private_name_var();
+ DCHECK_NOT_NULL(var);
+ BuildVariableAssignment(var, Token::INIT, HoleCheckMode::kElided);
}
if (expr->instance_members_initializer_function() != nullptr) {
@@ -3086,7 +3102,8 @@ void BytecodeGenerator::BuildAsyncReturn(int source_position) {
.StoreAccumulatorInRegister(args[2]) // done
.CallRuntime(Runtime::kInlineAsyncGeneratorResolve, args);
} else {
- DCHECK(IsAsyncFunction(info()->literal()->kind()));
+ DCHECK(IsAsyncFunction(info()->literal()->kind()) ||
+ IsAsyncModule(info()->literal()->kind()));
RegisterList args = register_allocator()->NewRegisterList(3);
builder()
->MoveRegister(generator_object(), args[0]) // generator
@@ -3921,7 +3938,8 @@ void BytecodeGenerator::BuildAssignment(
Property* property = lhs_data.expr()->AsProperty();
Register object = VisitForRegisterValue(property->obj());
Register key = VisitForRegisterValue(property->key());
- BuildPrivateBrandCheck(property, object);
+ BuildPrivateBrandCheck(property, object,
+ MessageTemplate::kInvalidPrivateMemberWrite);
BuildPrivateSetterAccess(object, key, value);
if (!execution_result()->IsEffect()) {
builder()->LoadAccumulatorWithRegister(value);
@@ -4004,6 +4022,12 @@ void BytecodeGenerator::VisitCompoundAssignment(CompoundAssignment* expr) {
// in the accumulator. When the generator is resumed, the sent value is loaded
// in the accumulator.
void BytecodeGenerator::BuildSuspendPoint(int position) {
+  // Because we eliminate jump targets in dead code, we also eliminate the
+  // resume when the suspend is not emitted; otherwise the call to Bind below
+  // would start a new basic block and the code would be considered alive.
+ if (builder()->RemainderOfBlockIsDead()) {
+ return;
+ }
const int suspend_id = suspend_count_++;
RegisterList registers = register_allocator()->AllLiveRegisters();
@@ -4454,12 +4478,14 @@ void BytecodeGenerator::VisitPropertyLoad(Register obj, Property* property) {
case PRIVATE_GETTER_ONLY:
case PRIVATE_GETTER_AND_SETTER: {
Register key = VisitForRegisterValue(property->key());
- BuildPrivateBrandCheck(property, obj);
+ BuildPrivateBrandCheck(property, obj,
+ MessageTemplate::kInvalidPrivateMemberRead);
BuildPrivateGetterAccess(obj, key);
break;
}
case PRIVATE_METHOD: {
- BuildPrivateBrandCheck(property, obj);
+ BuildPrivateBrandCheck(property, obj,
+ MessageTemplate::kInvalidPrivateMemberRead);
// In the case of private methods, property->key() is the function to be
// loaded (stored in a context slot), so load this directly.
VisitForAccumulatorValue(property->key());
@@ -4499,15 +4525,29 @@ void BytecodeGenerator::BuildPrivateSetterAccess(Register object,
}
void BytecodeGenerator::BuildPrivateBrandCheck(Property* property,
- Register object) {
+ Register object,
+ MessageTemplate tmpl) {
Variable* private_name = property->key()->AsVariableProxy()->var();
- DCHECK(private_name->requires_brand_check());
+ DCHECK(IsPrivateMethodOrAccessorVariableMode(private_name->mode()));
ClassScope* scope = private_name->scope()->AsClassScope();
- Variable* brand = scope->brand();
- BuildVariableLoadForAccumulatorValue(brand, HoleCheckMode::kElided);
- builder()->SetExpressionPosition(property);
- builder()->LoadKeyedProperty(
- object, feedback_index(feedback_spec()->AddKeyedLoadICSlot()));
+ if (private_name->is_static()) {
+ DCHECK_NOT_NULL(scope->class_variable());
+ // For static private methods, the only valid receiver is the class.
+ // Load the class constructor.
+ BuildVariableLoadForAccumulatorValue(scope->class_variable(),
+ HoleCheckMode::kElided);
+ BytecodeLabel return_check;
+ builder()->CompareReference(object).JumpIfTrue(
+ ToBooleanMode::kAlreadyBoolean, &return_check);
+ BuildInvalidPropertyAccess(tmpl, property);
+ builder()->Bind(&return_check);
+ } else {
+ BuildVariableLoadForAccumulatorValue(scope->brand(),
+ HoleCheckMode::kElided);
+ builder()->SetExpressionPosition(property);
+ builder()->LoadKeyedProperty(
+ object, feedback_index(feedback_spec()->AddKeyedLoadICSlot()));
+ }
}
void BytecodeGenerator::VisitPropertyLoadForRegister(Register obj,
@@ -5113,7 +5153,8 @@ void BytecodeGenerator::VisitCountOperation(CountOperation* expr) {
case PRIVATE_GETTER_AND_SETTER: {
object = VisitForRegisterValue(property->obj());
key = VisitForRegisterValue(property->key());
- BuildPrivateBrandCheck(property, object);
+ BuildPrivateBrandCheck(property, object,
+ MessageTemplate::kInvalidPrivateMemberRead);
BuildPrivateGetterAccess(object, key);
break;
}
@@ -5407,7 +5448,8 @@ void BytecodeGenerator::BuildGetIterator(IteratorType hint) {
// If method is undefined,
// Let syncMethod be GetMethod(obj, @@iterator)
builder()
- ->GetIterator(obj, feedback_index(feedback_spec()->AddLoadICSlot()))
+ ->LoadIteratorProperty(obj,
+ feedback_index(feedback_spec()->AddLoadICSlot()))
.StoreAccumulatorInRegister(method);
// Let syncIterator be Call(syncMethod, obj)
@@ -5426,24 +5468,17 @@ void BytecodeGenerator::BuildGetIterator(IteratorType hint) {
RegisterAllocationScope scope(this);
Register obj = register_allocator()->NewRegister();
- Register method = register_allocator()->NewRegister();
-
- // Let method be GetMethod(obj, @@iterator).
- builder()
- ->StoreAccumulatorInRegister(obj)
- .GetIterator(obj, feedback_index(feedback_spec()->AddLoadICSlot()))
- .StoreAccumulatorInRegister(method);
+ int load_feedback_index =
+ feedback_index(feedback_spec()->AddLoadICSlot());
+ int call_feedback_index =
+ feedback_index(feedback_spec()->AddCallICSlot());
- // Let iterator be Call(method, obj).
- builder()->CallProperty(method, RegisterList(obj),
- feedback_index(feedback_spec()->AddCallICSlot()));
+ // Let method be GetMethod(obj, @@iterator) and
+ // iterator be Call(method, obj). If Type(iterator) is not Object,
+ // throw a SymbolIteratorInvalid exception.
+ builder()->StoreAccumulatorInRegister(obj).GetIterator(
+ obj, load_feedback_index, call_feedback_index);
}
-
- // If Type(iterator) is not Object, throw a TypeError exception.
- BytecodeLabel no_type_error;
- builder()->JumpIfJSReceiver(&no_type_error);
- builder()->CallRuntime(Runtime::kThrowSymbolIteratorInvalid);
- builder()->Bind(&no_type_error);
}
}
@@ -6102,8 +6137,9 @@ void BytecodeGenerator::BuildGeneratorObjectVariableInitialization() {
RegisterAllocationScope register_scope(this);
RegisterList args = register_allocator()->NewRegisterList(2);
Runtime::FunctionId function_id =
- (IsAsyncFunction(info()->literal()->kind()) &&
- !IsAsyncGeneratorFunction(info()->literal()->kind()))
+ ((IsAsyncFunction(info()->literal()->kind()) &&
+ !IsAsyncGeneratorFunction(info()->literal()->kind())) ||
+ IsAsyncModule(info()->literal()->kind()))
? Runtime::kInlineAsyncFunctionEnter
: Runtime::kInlineCreateJSGeneratorObject;
builder()
diff --git a/deps/v8/src/interpreter/bytecode-generator.h b/deps/v8/src/interpreter/bytecode-generator.h
index 134b1b463a..ecfe50ba5a 100644
--- a/deps/v8/src/interpreter/bytecode-generator.h
+++ b/deps/v8/src/interpreter/bytecode-generator.h
@@ -250,12 +250,6 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
void BuildHoleCheckForVariableAssignment(Variable* variable, Token::Value op);
void BuildThrowIfHole(Variable* variable);
- // Build jump to targets[value], where
- // start_index <= value < start_index + size.
- void BuildIndexedJump(
- Register value, size_t start_index, size_t size,
- ZoneVector<BytecodeLabel>& targets); // NOLINT(runtime/references)
-
void BuildNewLocalActivationContext();
void BuildLocalActivationContextInitialization();
void BuildNewLocalBlockContext(Scope* scope);
@@ -307,10 +301,13 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
void VisitRestArgumentsArray(Variable* rest);
void VisitCallSuper(Call* call);
void BuildInvalidPropertyAccess(MessageTemplate tmpl, Property* property);
- void BuildPrivateBrandCheck(Property* property, Register object);
+ void BuildPrivateBrandCheck(Property* property, Register object,
+ MessageTemplate tmpl);
void BuildPrivateGetterAccess(Register obj, Register access_pair);
void BuildPrivateSetterAccess(Register obj, Register access_pair,
Register value);
+ void BuildPrivateMethods(ClassLiteral* expr, bool is_static,
+ Register home_object);
void BuildClassLiteral(ClassLiteral* expr, Register name);
void VisitClassLiteral(ClassLiteral* expr, Register name);
void VisitNewTargetVariable(Variable* variable);
diff --git a/deps/v8/src/interpreter/bytecodes.cc b/deps/v8/src/interpreter/bytecodes.cc
index 60f30ee1d9..88e80b9613 100644
--- a/deps/v8/src/interpreter/bytecodes.cc
+++ b/deps/v8/src/interpreter/bytecodes.cc
@@ -217,6 +217,7 @@ bool Bytecodes::MakesCallAlongCriticalPath(Bytecode bytecode) {
case Bytecode::kCreateBlockContext:
case Bytecode::kCreateCatchContext:
case Bytecode::kCreateRegExpLiteral:
+ case Bytecode::kGetIterator:
return true;
default:
return false;
diff --git a/deps/v8/src/interpreter/bytecodes.h b/deps/v8/src/interpreter/bytecodes.h
index 6802d53c95..80f9e4d311 100644
--- a/deps/v8/src/interpreter/bytecodes.h
+++ b/deps/v8/src/interpreter/bytecodes.h
@@ -356,7 +356,8 @@ namespace interpreter {
OperandType::kRegOutList, OperandType::kRegCount) \
\
/* Iterator protocol operations */ \
- V(GetIterator, AccumulatorUse::kWrite, OperandType::kReg, OperandType::kIdx) \
+ V(GetIterator, AccumulatorUse::kWrite, OperandType::kReg, OperandType::kIdx, \
+ OperandType::kIdx) \
\
/* Debugger */ \
V(Debugger, AccumulatorUse::kNone) \
diff --git a/deps/v8/src/interpreter/constant-array-builder.cc b/deps/v8/src/interpreter/constant-array-builder.cc
index 167b0ee7e2..0a4bdd62f7 100644
--- a/deps/v8/src/interpreter/constant-array-builder.cc
+++ b/deps/v8/src/interpreter/constant-array-builder.cc
@@ -378,7 +378,7 @@ Handle<Object> ConstantArrayBuilder::Entry::ToHandle(Isolate* isolate) const {
case Tag::kRawString:
return raw_string_->string();
case Tag::kHeapNumber:
- return isolate->factory()->NewNumber(heap_number_, AllocationType::kOld);
+ return isolate->factory()->NewNumber<AllocationType::kOld>(heap_number_);
case Tag::kBigInt:
// This should never fail: the parser will never create a BigInt
// literal that cannot be allocated.
diff --git a/deps/v8/src/interpreter/interpreter-assembler.cc b/deps/v8/src/interpreter/interpreter-assembler.cc
index f01821b565..a55e074b3a 100644
--- a/deps/v8/src/interpreter/interpreter-assembler.cc
+++ b/deps/v8/src/interpreter/interpreter-assembler.cc
@@ -22,8 +22,6 @@ namespace interpreter {
using compiler::CodeAssemblerState;
using compiler::Node;
-template <class T>
-using TNode = compiler::TNode<T>;
InterpreterAssembler::InterpreterAssembler(CodeAssemblerState* state,
Bytecode bytecode,
@@ -32,19 +30,19 @@ InterpreterAssembler::InterpreterAssembler(CodeAssemblerState* state,
bytecode_(bytecode),
operand_scale_(operand_scale),
TVARIABLE_CONSTRUCTOR(interpreted_frame_pointer_),
- VARIABLE_CONSTRUCTOR(
- bytecode_array_, MachineRepresentation::kTagged,
- Parameter(InterpreterDispatchDescriptor::kBytecodeArray)),
+ TVARIABLE_CONSTRUCTOR(
+ bytecode_array_,
+ CAST(Parameter(InterpreterDispatchDescriptor::kBytecodeArray))),
TVARIABLE_CONSTRUCTOR(
bytecode_offset_,
UncheckedCast<IntPtrT>(
Parameter(InterpreterDispatchDescriptor::kBytecodeOffset))),
- VARIABLE_CONSTRUCTOR(
- dispatch_table_, MachineType::PointerRepresentation(),
- Parameter(InterpreterDispatchDescriptor::kDispatchTable)),
- VARIABLE_CONSTRUCTOR(
- accumulator_, MachineRepresentation::kTagged,
- Parameter(InterpreterDispatchDescriptor::kAccumulator)),
+ TVARIABLE_CONSTRUCTOR(
+ dispatch_table_, UncheckedCast<ExternalReference>(Parameter(
+ InterpreterDispatchDescriptor::kDispatchTable))),
+ TVARIABLE_CONSTRUCTOR(
+ accumulator_,
+ CAST(Parameter(InterpreterDispatchDescriptor::kAccumulator))),
accumulator_use_(AccumulatorUse::kNone),
made_call_(false),
reloaded_frame_ptr_(false),
@@ -129,27 +127,27 @@ void InterpreterAssembler::SaveBytecodeOffset() {
}
}
-Node* InterpreterAssembler::BytecodeArrayTaggedPointer() {
+TNode<BytecodeArray> InterpreterAssembler::BytecodeArrayTaggedPointer() {
// Force a re-load of the bytecode array after every call in case the debugger
// has been activated.
if (!bytecode_array_valid_) {
- bytecode_array_.Bind(LoadRegister(Register::bytecode_array()));
+ bytecode_array_ = CAST(LoadRegister(Register::bytecode_array()));
bytecode_array_valid_ = true;
}
return bytecode_array_.value();
}
-Node* InterpreterAssembler::DispatchTableRawPointer() {
+TNode<ExternalReference> InterpreterAssembler::DispatchTablePointer() {
if (Bytecodes::MakesCallAlongCriticalPath(bytecode_) && made_call_ &&
(dispatch_table_.value() ==
Parameter(InterpreterDispatchDescriptor::kDispatchTable))) {
- dispatch_table_.Bind(ExternalConstant(
- ExternalReference::interpreter_dispatch_table_address(isolate())));
+ dispatch_table_ = ExternalConstant(
+ ExternalReference::interpreter_dispatch_table_address(isolate()));
}
return dispatch_table_.value();
}
-Node* InterpreterAssembler::GetAccumulatorUnchecked() {
+TNode<Object> InterpreterAssembler::GetAccumulatorUnchecked() {
return accumulator_.value();
}
@@ -159,10 +157,11 @@ TNode<Object> InterpreterAssembler::GetAccumulator() {
return TaggedPoisonOnSpeculation(GetAccumulatorUnchecked());
}
-void InterpreterAssembler::SetAccumulator(Node* value) {
+// TODO(v8:6949): Remove sloppy-ness from SetAccumulator's value argument.
+void InterpreterAssembler::SetAccumulator(SloppyTNode<Object> value) {
DCHECK(Bytecodes::WritesAccumulator(bytecode_));
accumulator_use_ = accumulator_use_ | AccumulatorUse::kWrite;
- accumulator_.Bind(value);
+ accumulator_ = value;
}
TNode<Context> InterpreterAssembler::GetContext() {
@@ -173,15 +172,14 @@ void InterpreterAssembler::SetContext(TNode<Context> value) {
StoreRegister(value, Register::current_context());
}
-Node* InterpreterAssembler::GetContextAtDepth(TNode<Context> context,
- TNode<Uint32T> depth) {
+TNode<Context> InterpreterAssembler::GetContextAtDepth(TNode<Context> context,
+ TNode<Uint32T> depth) {
TVARIABLE(Context, cur_context, context);
TVARIABLE(Uint32T, cur_depth, depth);
Label context_found(this);
- Variable* context_search_loop_variables[2] = {&cur_depth, &cur_context};
- Label context_search(this, 2, context_search_loop_variables);
+ Label context_search(this, {&cur_depth, &cur_context});
// Fast path if the depth is 0.
Branch(Word32Equal(depth, Int32Constant(0)), &context_found, &context_search);
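The raw array-plus-count Label constructor gives way to an initializer list of loop variables; a minimal CSA loop skeleton in the new style (a sketch, not code from this file):

    TVARIABLE(Context, cur_context, context);
    TVARIABLE(Uint32T, cur_depth, depth);
    Label loop(this, {&cur_depth, &cur_context});  // loop-carried variables
    Goto(&loop);
    BIND(&loop);
    // ... update cur_depth / cur_context, then Goto(&loop) or fall through.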
@@ -206,33 +204,38 @@ void InterpreterAssembler::GotoIfHasContextExtensionUpToDepth(
TVARIABLE(Context, cur_context, context);
TVARIABLE(Uint32T, cur_depth, depth);
- Variable* context_search_loop_variables[2] = {&cur_depth, &cur_context};
- Label context_search(this, 2, context_search_loop_variables);
+ Label context_search(this, {&cur_depth, &cur_context});
+ Label no_extension(this);
// Loop until the depth is 0.
Goto(&context_search);
BIND(&context_search);
{
- // TODO(leszeks): We only need to do this check if the context had a sloppy
- // eval, we could pass in a context chain bitmask to figure out which
- // contexts actually need to be checked.
+    // Check whether the context has an extension slot.
+ TNode<BoolT> has_extension =
+ LoadContextHasExtensionField(cur_context.value());
+ GotoIfNot(has_extension, &no_extension);
+ // Jump to the target if the extension slot is not a hole.
TNode<Object> extension_slot =
LoadContextElement(cur_context.value(), Context::EXTENSION_INDEX);
+ Branch(TaggedNotEqual(extension_slot, TheHoleConstant()), target,
+ &no_extension);
- // Jump to the target if the extension slot is not a hole.
- GotoIf(TaggedNotEqual(extension_slot, TheHoleConstant()), target);
-
- cur_depth = Unsigned(Int32Sub(cur_depth.value(), Int32Constant(1)));
- cur_context =
- CAST(LoadContextElement(cur_context.value(), Context::PREVIOUS_INDEX));
+ BIND(&no_extension);
+ {
+ cur_depth = Unsigned(Int32Sub(cur_depth.value(), Int32Constant(1)));
+ cur_context = CAST(
+ LoadContextElement(cur_context.value(), Context::PREVIOUS_INDEX));
- GotoIf(Word32NotEqual(cur_depth.value(), Int32Constant(0)),
- &context_search);
+ GotoIf(Word32NotEqual(cur_depth.value(), Int32Constant(0)),
+ &context_search);
+ }
}
}
-TNode<IntPtrT> InterpreterAssembler::RegisterLocation(Node* reg_index) {
+TNode<IntPtrT> InterpreterAssembler::RegisterLocation(
+ TNode<IntPtrT> reg_index) {
return Signed(WordPoisonOnSpeculation(
IntPtrAdd(GetInterpretedFramePointer(), RegisterFrameOffset(reg_index))));
}
@@ -241,11 +244,11 @@ TNode<IntPtrT> InterpreterAssembler::RegisterLocation(Register reg) {
return RegisterLocation(IntPtrConstant(reg.ToOperand()));
}
-TNode<IntPtrT> InterpreterAssembler::RegisterFrameOffset(Node* index) {
- return Signed(TimesSystemPointerSize(index));
+TNode<IntPtrT> InterpreterAssembler::RegisterFrameOffset(TNode<IntPtrT> index) {
+ return TimesSystemPointerSize(index);
}
-TNode<Object> InterpreterAssembler::LoadRegister(Node* reg_index) {
+TNode<Object> InterpreterAssembler::LoadRegister(TNode<IntPtrT> reg_index) {
return LoadFullTagged(GetInterpretedFramePointer(),
RegisterFrameOffset(reg_index),
LoadSensitivity::kCritical);
@@ -281,7 +284,7 @@ std::pair<TNode<Object>, TNode<Object>>
InterpreterAssembler::LoadRegisterPairAtOperandIndex(int operand_index) {
DCHECK_EQ(OperandType::kRegPair,
Bytecodes::GetOperandType(bytecode_, operand_index));
- Node* first_reg_index =
+ TNode<IntPtrT> first_reg_index =
BytecodeOperandReg(operand_index, LoadSensitivity::kSafe);
TNode<IntPtrT> second_reg_index = NextRegister(first_reg_index);
return std::make_pair(LoadRegister(first_reg_index),
@@ -300,7 +303,7 @@ InterpreterAssembler::GetRegisterListAtOperandIndex(int operand_index) {
return RegListNodePair(base_reg, reg_count);
}
-Node* InterpreterAssembler::LoadRegisterFromRegisterList(
+TNode<Object> InterpreterAssembler::LoadRegisterFromRegisterList(
const RegListNodePair& reg_list, int index) {
TNode<IntPtrT> location = RegisterLocationInRegisterList(reg_list, index);
// Location is already poisoned on speculation, so no need to poison here.
@@ -317,29 +320,30 @@ TNode<IntPtrT> InterpreterAssembler::RegisterLocationInRegisterList(
return Signed(IntPtrSub(reg_list.base_reg_location(), offset));
}
-void InterpreterAssembler::StoreRegister(Node* value, Register reg) {
+void InterpreterAssembler::StoreRegister(TNode<Object> value, Register reg) {
StoreFullTaggedNoWriteBarrier(
GetInterpretedFramePointer(),
IntPtrConstant(reg.ToOperand() * kSystemPointerSize), value);
}
-void InterpreterAssembler::StoreRegister(Node* value, Node* reg_index) {
+void InterpreterAssembler::StoreRegister(TNode<Object> value,
+ TNode<IntPtrT> reg_index) {
StoreFullTaggedNoWriteBarrier(GetInterpretedFramePointer(),
RegisterFrameOffset(reg_index), value);
}
-void InterpreterAssembler::StoreRegisterAtOperandIndex(Node* value,
+void InterpreterAssembler::StoreRegisterAtOperandIndex(TNode<Object> value,
int operand_index) {
StoreRegister(value,
BytecodeOperandReg(operand_index, LoadSensitivity::kSafe));
}
-void InterpreterAssembler::StoreRegisterPairAtOperandIndex(Node* value1,
- Node* value2,
+void InterpreterAssembler::StoreRegisterPairAtOperandIndex(TNode<Object> value1,
+ TNode<Object> value2,
int operand_index) {
DCHECK_EQ(OperandType::kRegOutPair,
Bytecodes::GetOperandType(bytecode_, operand_index));
- Node* first_reg_index =
+ TNode<IntPtrT> first_reg_index =
BytecodeOperandReg(operand_index, LoadSensitivity::kSafe);
StoreRegister(value1, first_reg_index);
TNode<IntPtrT> second_reg_index = NextRegister(first_reg_index);
@@ -347,10 +351,11 @@ void InterpreterAssembler::StoreRegisterPairAtOperandIndex(Node* value1,
}
void InterpreterAssembler::StoreRegisterTripleAtOperandIndex(
- Node* value1, Node* value2, Node* value3, int operand_index) {
+ TNode<Object> value1, TNode<Object> value2, TNode<Object> value3,
+ int operand_index) {
DCHECK_EQ(OperandType::kRegOutTriple,
Bytecodes::GetOperandType(bytecode_, operand_index));
- Node* first_reg_index =
+ TNode<IntPtrT> first_reg_index =
BytecodeOperandReg(operand_index, LoadSensitivity::kSafe);
StoreRegister(value1, first_reg_index);
TNode<IntPtrT> second_reg_index = NextRegister(first_reg_index);
@@ -359,12 +364,12 @@ void InterpreterAssembler::StoreRegisterTripleAtOperandIndex(
StoreRegister(value3, third_reg_index);
}
-TNode<IntPtrT> InterpreterAssembler::NextRegister(Node* reg_index) {
+TNode<IntPtrT> InterpreterAssembler::NextRegister(TNode<IntPtrT> reg_index) {
// Register indexes are negative, so the next index is minus one.
return Signed(IntPtrAdd(reg_index, IntPtrConstant(-1)));
}
-Node* InterpreterAssembler::OperandOffset(int operand_index) {
+TNode<IntPtrT> InterpreterAssembler::OperandOffset(int operand_index) {
return IntPtrConstant(
Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale()));
}
@@ -374,7 +379,7 @@ TNode<Uint8T> InterpreterAssembler::BytecodeOperandUnsignedByte(
DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
DCHECK_EQ(OperandSize::kByte, Bytecodes::GetOperandSize(
bytecode_, operand_index, operand_scale()));
- Node* operand_offset = OperandOffset(operand_index);
+ TNode<IntPtrT> operand_offset = OperandOffset(operand_index);
return Load<Uint8T>(BytecodeArrayTaggedPointer(),
IntPtrAdd(BytecodeOffset(), operand_offset),
needs_poisoning);
@@ -385,7 +390,7 @@ TNode<Int8T> InterpreterAssembler::BytecodeOperandSignedByte(
DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
DCHECK_EQ(OperandSize::kByte, Bytecodes::GetOperandSize(
bytecode_, operand_index, operand_scale()));
- Node* operand_offset = OperandOffset(operand_index);
+ TNode<IntPtrT> operand_offset = OperandOffset(operand_index);
return Load<Int8T>(BytecodeArrayTaggedPointer(),
IntPtrAdd(BytecodeOffset(), operand_offset),
needs_poisoning);
@@ -429,7 +434,7 @@ TNode<Word32T> InterpreterAssembler::BytecodeOperandReadUnaligned(
MachineType machine_type = (i == 0) ? msb_type : MachineType::Uint8();
TNode<IntPtrT> offset =
IntPtrConstant(relative_offset + msb_offset + i * kStep);
- TNode<WordT> array_offset = IntPtrAdd(BytecodeOffset(), offset);
+ TNode<IntPtrT> array_offset = IntPtrAdd(BytecodeOffset(), offset);
bytes[i] =
UncheckedCast<Word32T>(Load(machine_type, BytecodeArrayTaggedPointer(),
array_offset, needs_poisoning));
@@ -561,7 +566,7 @@ TNode<Uint32T> InterpreterAssembler::BytecodeOperandCount(int operand_index) {
return BytecodeUnsignedOperand(operand_index, operand_size);
}
-Node* InterpreterAssembler::BytecodeOperandFlag(int operand_index) {
+TNode<Uint32T> InterpreterAssembler::BytecodeOperandFlag(int operand_index) {
DCHECK_EQ(OperandType::kFlag8,
Bytecodes::GetOperandType(bytecode_, operand_index));
OperandSize operand_size =
@@ -578,15 +583,16 @@ TNode<Uint32T> InterpreterAssembler::BytecodeOperandUImm(int operand_index) {
return BytecodeUnsignedOperand(operand_index, operand_size);
}
-Node* InterpreterAssembler::BytecodeOperandUImmWord(int operand_index) {
+TNode<UintPtrT> InterpreterAssembler::BytecodeOperandUImmWord(
+ int operand_index) {
return ChangeUint32ToWord(BytecodeOperandUImm(operand_index));
}
-Node* InterpreterAssembler::BytecodeOperandUImmSmi(int operand_index) {
- return SmiFromInt32(Signed(BytecodeOperandUImm(operand_index)));
+TNode<Smi> InterpreterAssembler::BytecodeOperandUImmSmi(int operand_index) {
+ return SmiFromUint32(BytecodeOperandUImm(operand_index));
}
-Node* InterpreterAssembler::BytecodeOperandImm(int operand_index) {
+TNode<Int32T> InterpreterAssembler::BytecodeOperandImm(int operand_index) {
DCHECK_EQ(OperandType::kImm,
Bytecodes::GetOperandType(bytecode_, operand_index));
OperandSize operand_size =
@@ -594,15 +600,17 @@ Node* InterpreterAssembler::BytecodeOperandImm(int operand_index) {
return BytecodeSignedOperand(operand_index, operand_size);
}
-Node* InterpreterAssembler::BytecodeOperandImmIntPtr(int operand_index) {
+TNode<IntPtrT> InterpreterAssembler::BytecodeOperandImmIntPtr(
+ int operand_index) {
return ChangeInt32ToIntPtr(BytecodeOperandImm(operand_index));
}
-Node* InterpreterAssembler::BytecodeOperandImmSmi(int operand_index) {
+TNode<Smi> InterpreterAssembler::BytecodeOperandImmSmi(int operand_index) {
return SmiFromInt32(BytecodeOperandImm(operand_index));
}
-Node* InterpreterAssembler::BytecodeOperandIdxInt32(int operand_index) {
+TNode<Uint32T> InterpreterAssembler::BytecodeOperandIdxInt32(
+ int operand_index) {
DCHECK_EQ(OperandType::kIdx,
Bytecodes::GetOperandType(bytecode_, operand_index));
OperandSize operand_size =
@@ -610,15 +618,15 @@ Node* InterpreterAssembler::BytecodeOperandIdxInt32(int operand_index) {
return BytecodeUnsignedOperand(operand_index, operand_size);
}
-Node* InterpreterAssembler::BytecodeOperandIdx(int operand_index) {
+TNode<UintPtrT> InterpreterAssembler::BytecodeOperandIdx(int operand_index) {
return ChangeUint32ToWord(BytecodeOperandIdxInt32(operand_index));
}
-Node* InterpreterAssembler::BytecodeOperandIdxSmi(int operand_index) {
- return SmiTag(BytecodeOperandIdx(operand_index));
+TNode<Smi> InterpreterAssembler::BytecodeOperandIdxSmi(int operand_index) {
+ return SmiTag(Signed(BytecodeOperandIdx(operand_index)));
}
-Node* InterpreterAssembler::BytecodeOperandConstantPoolIdx(
+TNode<UintPtrT> InterpreterAssembler::BytecodeOperandConstantPoolIdx(
int operand_index, LoadSensitivity needs_poisoning) {
DCHECK_EQ(OperandType::kIdx,
Bytecodes::GetOperandType(bytecode_, operand_index));
@@ -628,7 +636,7 @@ Node* InterpreterAssembler::BytecodeOperandConstantPoolIdx(
BytecodeUnsignedOperand(operand_index, operand_size, needs_poisoning));
}
-Node* InterpreterAssembler::BytecodeOperandReg(
+TNode<IntPtrT> InterpreterAssembler::BytecodeOperandReg(
int operand_index, LoadSensitivity needs_poisoning) {
DCHECK(Bytecodes::IsRegisterOperandType(
Bytecodes::GetOperandType(bytecode_, operand_index)));
@@ -638,7 +646,8 @@ Node* InterpreterAssembler::BytecodeOperandReg(
BytecodeSignedOperand(operand_index, operand_size, needs_poisoning));
}
-Node* InterpreterAssembler::BytecodeOperandRuntimeId(int operand_index) {
+TNode<Uint32T> InterpreterAssembler::BytecodeOperandRuntimeId(
+ int operand_index) {
DCHECK_EQ(OperandType::kRuntimeId,
Bytecodes::GetOperandType(bytecode_, operand_index));
OperandSize operand_size =
@@ -647,7 +656,7 @@ Node* InterpreterAssembler::BytecodeOperandRuntimeId(int operand_index) {
return BytecodeUnsignedOperand(operand_index, operand_size);
}
-Node* InterpreterAssembler::BytecodeOperandNativeContextIndex(
+TNode<UintPtrT> InterpreterAssembler::BytecodeOperandNativeContextIndex(
int operand_index) {
DCHECK_EQ(OperandType::kNativeContextIndex,
Bytecodes::GetOperandType(bytecode_, operand_index));
@@ -657,7 +666,8 @@ Node* InterpreterAssembler::BytecodeOperandNativeContextIndex(
BytecodeUnsignedOperand(operand_index, operand_size));
}
-Node* InterpreterAssembler::BytecodeOperandIntrinsicId(int operand_index) {
+TNode<Uint32T> InterpreterAssembler::BytecodeOperandIntrinsicId(
+ int operand_index) {
DCHECK_EQ(OperandType::kIntrinsicId,
Bytecodes::GetOperandType(bytecode_, operand_index));
OperandSize operand_size =
@@ -666,7 +676,7 @@ Node* InterpreterAssembler::BytecodeOperandIntrinsicId(int operand_index) {
return BytecodeUnsignedOperand(operand_index, operand_size);
}
-Node* InterpreterAssembler::LoadConstantPoolEntry(Node* index) {
+TNode<Object> InterpreterAssembler::LoadConstantPoolEntry(TNode<WordT> index) {
TNode<FixedArray> constant_pool = CAST(LoadObjectField(
BytecodeArrayTaggedPointer(), BytecodeArray::kConstantPoolOffset));
return UnsafeLoadFixedArrayElement(
@@ -674,13 +684,13 @@ Node* InterpreterAssembler::LoadConstantPoolEntry(Node* index) {
}
TNode<IntPtrT> InterpreterAssembler::LoadAndUntagConstantPoolEntry(
- Node* index) {
- return SmiUntag(LoadConstantPoolEntry(index));
+ TNode<WordT> index) {
+ return SmiUntag(CAST(LoadConstantPoolEntry(index)));
}
-Node* InterpreterAssembler::LoadConstantPoolEntryAtOperandIndex(
+TNode<Object> InterpreterAssembler::LoadConstantPoolEntryAtOperandIndex(
int operand_index) {
- Node* index =
+ TNode<UintPtrT> index =
BytecodeOperandConstantPoolIdx(operand_index, LoadSensitivity::kSafe);
return LoadConstantPoolEntry(index);
}
@@ -688,7 +698,7 @@ Node* InterpreterAssembler::LoadConstantPoolEntryAtOperandIndex(
TNode<IntPtrT>
InterpreterAssembler::LoadAndUntagConstantPoolEntryAtOperandIndex(
int operand_index) {
- return SmiUntag(LoadConstantPoolEntryAtOperandIndex(operand_index));
+ return SmiUntag(CAST(LoadConstantPoolEntryAtOperandIndex(operand_index)));
}
TNode<HeapObject> InterpreterAssembler::LoadFeedbackVector() {
@@ -713,151 +723,15 @@ void InterpreterAssembler::CallPrologue() {
void InterpreterAssembler::CallEpilogue() {
}
-void InterpreterAssembler::IncrementCallCount(Node* feedback_vector,
- Node* slot_id) {
- Comment("increment call count");
- TNode<Smi> call_count =
- CAST(LoadFeedbackVectorSlot(feedback_vector, slot_id, kTaggedSize));
- // The lowest {FeedbackNexus::CallCountField::kShift} bits of the call
- // count are used as flags. To increment the call count by 1 we hence
- // have to increment by 1 << {FeedbackNexus::CallCountField::kShift}.
- TNode<Smi> new_count = SmiAdd(
- call_count, SmiConstant(1 << FeedbackNexus::CallCountField::kShift));
- // Count is Smi, so we don't need a write barrier.
- StoreFeedbackVectorSlot(feedback_vector, slot_id, new_count,
- SKIP_WRITE_BARRIER, kTaggedSize);
-}
-
-void InterpreterAssembler::CollectCallableFeedback(Node* target, Node* context,
- Node* feedback_vector,
- Node* slot_id) {
- Label extra_checks(this, Label::kDeferred), done(this);
-
- // Check if we have monomorphic {target} feedback already.
- TNode<MaybeObject> feedback =
- LoadFeedbackVectorSlot(feedback_vector, slot_id);
- Comment("check if monomorphic");
- TNode<BoolT> is_monomorphic = IsWeakReferenceTo(feedback, CAST(target));
- GotoIf(is_monomorphic, &done);
-
- // Check if it is a megamorphic {target}.
- Comment("check if megamorphic");
- TNode<BoolT> is_megamorphic = TaggedEqual(
- feedback, HeapConstant(FeedbackVector::MegamorphicSentinel(isolate())));
- Branch(is_megamorphic, &done, &extra_checks);
-
- BIND(&extra_checks);
- {
- Label initialize(this), mark_megamorphic(this);
-
- Comment("check if weak reference");
- TNode<BoolT> is_uninitialized = TaggedEqual(
- feedback,
- HeapConstant(FeedbackVector::UninitializedSentinel(isolate())));
- GotoIf(is_uninitialized, &initialize);
- CSA_ASSERT(this, IsWeakOrCleared(feedback));
-
- // If the weak reference is cleared, we have a new chance to become
- // monomorphic.
- Comment("check if weak reference is cleared");
- Branch(IsCleared(feedback), &initialize, &mark_megamorphic);
-
- BIND(&initialize);
- {
- // Check if {target} is a JSFunction in the current native context.
- Comment("check if function in same native context");
- GotoIf(TaggedIsSmi(target), &mark_megamorphic);
- // Check if the {target} is a JSFunction or JSBoundFunction
- // in the current native context.
- VARIABLE(var_current, MachineRepresentation::kTagged, target);
- Label loop(this, &var_current), done_loop(this);
- Goto(&loop);
- BIND(&loop);
- {
- Label if_boundfunction(this), if_function(this);
- Node* current = var_current.value();
- CSA_ASSERT(this, TaggedIsNotSmi(current));
- TNode<Uint16T> current_instance_type = LoadInstanceType(current);
- GotoIf(InstanceTypeEqual(current_instance_type, JS_BOUND_FUNCTION_TYPE),
- &if_boundfunction);
- Branch(InstanceTypeEqual(current_instance_type, JS_FUNCTION_TYPE),
- &if_function, &mark_megamorphic);
-
- BIND(&if_function);
- {
- // Check that the JSFunction {current} is in the current native
- // context.
- TNode<Context> current_context =
- CAST(LoadObjectField(current, JSFunction::kContextOffset));
- TNode<Context> current_native_context =
- LoadNativeContext(current_context);
- Branch(
- TaggedEqual(LoadNativeContext(context), current_native_context),
- &done_loop, &mark_megamorphic);
- }
-
- BIND(&if_boundfunction);
- {
- // Continue with the [[BoundTargetFunction]] of {target}.
- var_current.Bind(LoadObjectField(
- current, JSBoundFunction::kBoundTargetFunctionOffset));
- Goto(&loop);
- }
- }
- BIND(&done_loop);
- StoreWeakReferenceInFeedbackVector(feedback_vector, slot_id,
- CAST(target));
- ReportFeedbackUpdate(feedback_vector, slot_id, "Call:Initialize");
- Goto(&done);
- }
-
- BIND(&mark_megamorphic);
- {
- // MegamorphicSentinel is an immortal immovable object so
- // write-barrier is not needed.
- Comment("transition to megamorphic");
- DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kmegamorphic_symbol));
- StoreFeedbackVectorSlot(
- feedback_vector, slot_id,
- HeapConstant(FeedbackVector::MegamorphicSentinel(isolate())),
- SKIP_WRITE_BARRIER);
- ReportFeedbackUpdate(feedback_vector, slot_id,
- "Call:TransitionMegamorphic");
- Goto(&done);
- }
- }
-
- BIND(&done);
-}
-
-void InterpreterAssembler::CollectCallFeedback(Node* target, Node* context,
- Node* maybe_feedback_vector,
- Node* slot_id) {
- Label feedback_done(this);
- // If feedback_vector is not valid, then nothing to do.
- GotoIf(IsUndefined(maybe_feedback_vector), &feedback_done);
-
- CSA_SLOW_ASSERT(this, IsFeedbackVector(maybe_feedback_vector));
-
- // Increment the call count.
- IncrementCallCount(maybe_feedback_vector, slot_id);
-
- // Collect the callable {target} feedback.
- CollectCallableFeedback(target, context, maybe_feedback_vector, slot_id);
- Goto(&feedback_done);
-
- BIND(&feedback_done);
-}
-
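
The helpers removed above stored flag bits in the low {FeedbackNexus::CallCountField::kShift} bits of the call count, so a logical +1 becomes an add of 1 << kShift, as the deleted comment explains. A standalone sketch of that bit layout (the shift width and names here are illustrative, not V8's real constants):

    #include <cassert>
    #include <cstdint>

    static constexpr uint32_t kShift = 2;  // low bits reserved for flags
    static constexpr uint32_t kFlagMask = (1u << kShift) - 1;

    uint32_t IncrementCount(uint32_t word) {
      return word + (1u << kShift);  // +1 on the count, flag bits untouched
    }

    int main() {
      uint32_t word = (5u << kShift) | 0x1;  // count 5, one flag bit set
      word = IncrementCount(word);
      assert((word >> kShift) == 6);         // count incremented
      assert((word & kFlagMask) == 0x1);     // flags preserved
    }
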
void InterpreterAssembler::CallJSAndDispatch(
- Node* function, Node* context, const RegListNodePair& args,
+ TNode<Object> function, TNode<Context> context, const RegListNodePair& args,
ConvertReceiverMode receiver_mode) {
DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
DCHECK(Bytecodes::IsCallOrConstruct(bytecode_) ||
bytecode_ == Bytecode::kInvokeIntrinsic);
DCHECK_EQ(Bytecodes::GetReceiverMode(bytecode_), receiver_mode);
- Node* args_count;
+ TNode<Word32T> args_count;
if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
// The receiver is implied, so it is not in the argument list.
args_count = args.reg_count();
@@ -879,8 +753,9 @@ void InterpreterAssembler::CallJSAndDispatch(
}
template <class... TArgs>
-void InterpreterAssembler::CallJSAndDispatch(Node* function, Node* context,
- Node* arg_count,
+void InterpreterAssembler::CallJSAndDispatch(TNode<Object> function,
+ TNode<Context> context,
+ TNode<Word32T> arg_count,
ConvertReceiverMode receiver_mode,
TArgs... args) {
DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
@@ -892,9 +767,9 @@ void InterpreterAssembler::CallJSAndDispatch(Node* function, Node* context,
if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
    // The first argument (the receiver) is implied to be undefined.
- TailCallStubThenBytecodeDispatch(
- callable.descriptor(), code_target, context, function, arg_count,
- static_cast<Node*>(UndefinedConstant()), args...);
+ TailCallStubThenBytecodeDispatch(callable.descriptor(), code_target,
+ context, function, arg_count,
+ UndefinedConstant(), args...);
} else {
TailCallStubThenBytecodeDispatch(callable.descriptor(), code_target,
context, function, arg_count, args...);
@@ -906,21 +781,22 @@ void InterpreterAssembler::CallJSAndDispatch(Node* function, Node* context,
// Instantiate CallJSAndDispatch() for argument counts used by interpreter
// generator.
template V8_EXPORT_PRIVATE void InterpreterAssembler::CallJSAndDispatch(
- Node* function, Node* context, Node* arg_count,
+ TNode<Object> function, TNode<Context> context, TNode<Word32T> arg_count,
ConvertReceiverMode receiver_mode);
template V8_EXPORT_PRIVATE void InterpreterAssembler::CallJSAndDispatch(
- Node* function, Node* context, Node* arg_count,
- ConvertReceiverMode receiver_mode, Node*);
+ TNode<Object> function, TNode<Context> context, TNode<Word32T> arg_count,
+ ConvertReceiverMode receiver_mode, TNode<Object>);
template V8_EXPORT_PRIVATE void InterpreterAssembler::CallJSAndDispatch(
- Node* function, Node* context, Node* arg_count,
- ConvertReceiverMode receiver_mode, Node*, Node*);
+ TNode<Object> function, TNode<Context> context, TNode<Word32T> arg_count,
+ ConvertReceiverMode receiver_mode, TNode<Object>, TNode<Object>);
template V8_EXPORT_PRIVATE void InterpreterAssembler::CallJSAndDispatch(
- Node* function, Node* context, Node* arg_count,
- ConvertReceiverMode receiver_mode, Node*, Node*, Node*);
+ TNode<Object> function, TNode<Context> context, TNode<Word32T> arg_count,
+ ConvertReceiverMode receiver_mode, TNode<Object>, TNode<Object>,
+ TNode<Object>);
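
These explicit instantiations exist because the variadic CallJSAndDispatch is defined in this .cc file rather than the header, so each arity used by the interpreter generator must be materialized here. A generic sketch of the pattern (plain C++, not V8's classes):

    #include <iostream>

    struct Dispatcher {
      template <class... TArgs>
      void Call(TArgs... args);
    };

    // Out-of-line definition lives in the .cc file...
    template <class... TArgs>
    void Dispatcher::Call(TArgs... args) {
      std::cout << "dispatch with " << sizeof...(args) << " extra args\n";
    }

    // ...so the arities used elsewhere must be instantiated explicitly.
    template void Dispatcher::Call(int);
    template void Dispatcher::Call(int, int);
    template void Dispatcher::Call(int, int, int);

    int main() { Dispatcher().Call(1, 2); }
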
void InterpreterAssembler::CallJSWithSpreadAndDispatch(
- Node* function, Node* context, const RegListNodePair& args, Node* slot_id,
- Node* maybe_feedback_vector) {
+ TNode<Object> function, TNode<Context> context, const RegListNodePair& args,
+ TNode<UintPtrT> slot_id, TNode<HeapObject> maybe_feedback_vector) {
DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
DCHECK_EQ(Bytecodes::GetReceiverMode(bytecode_), ConvertReceiverMode::kAny);
CollectCallFeedback(function, context, maybe_feedback_vector, slot_id);
@@ -939,16 +815,18 @@ void InterpreterAssembler::CallJSWithSpreadAndDispatch(
accumulator_use_ = accumulator_use_ | AccumulatorUse::kWrite;
}
-Node* InterpreterAssembler::Construct(SloppyTNode<Object> target, Node* context,
- SloppyTNode<Object> new_target,
- const RegListNodePair& args,
- Node* slot_id, Node* feedback_vector) {
+TNode<Object> InterpreterAssembler::Construct(
+ TNode<Object> target, TNode<Context> context, TNode<Object> new_target,
+ const RegListNodePair& args, TNode<UintPtrT> slot_id,
+ TNode<HeapObject> maybe_feedback_vector) {
DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
- VARIABLE(var_result, MachineRepresentation::kTagged);
- VARIABLE(var_site, MachineRepresentation::kTagged);
+ TVARIABLE(Object, var_result);
+ TVARIABLE(AllocationSite, var_site);
Label extra_checks(this, Label::kDeferred), return_result(this, &var_result),
construct(this), construct_array(this, &var_site);
- GotoIf(IsUndefined(feedback_vector), &construct);
+ GotoIf(IsUndefined(maybe_feedback_vector), &construct);
+
+ TNode<FeedbackVector> feedback_vector = CAST(maybe_feedback_vector);
// Increment the call count.
IncrementCallCount(feedback_vector, slot_id);
@@ -956,7 +834,8 @@ Node* InterpreterAssembler::Construct(SloppyTNode<Object> target, Node* context,
// Check if we have monomorphic {new_target} feedback already.
TNode<MaybeObject> feedback =
LoadFeedbackVectorSlot(feedback_vector, slot_id);
- Branch(IsWeakReferenceTo(feedback, new_target), &construct, &extra_checks);
+ Branch(IsWeakReferenceToObject(feedback, new_target), &construct,
+ &extra_checks);
BIND(&extra_checks);
{
@@ -989,7 +868,7 @@ Node* InterpreterAssembler::Construct(SloppyTNode<Object> target, Node* context,
LoadNativeContext(context), Context::ARRAY_FUNCTION_INDEX);
GotoIfNot(TaggedEqual(target, array_function), &mark_megamorphic);
GotoIfNot(TaggedEqual(new_target, array_function), &mark_megamorphic);
- var_site.Bind(strong_feedback);
+ var_site = CAST(strong_feedback);
Goto(&construct_array);
}
@@ -1008,14 +887,13 @@ Node* InterpreterAssembler::Construct(SloppyTNode<Object> target, Node* context,
GotoIf(TaggedIsSmi(new_target), &mark_megamorphic);
// Check if the {new_target} is a JSFunction or JSBoundFunction
// in the current native context.
- VARIABLE(var_current, MachineRepresentation::kTagged, new_target);
+ TVARIABLE(HeapObject, var_current, CAST(new_target));
Label loop(this, &var_current), done_loop(this);
Goto(&loop);
BIND(&loop);
{
Label if_boundfunction(this), if_function(this);
- Node* current = var_current.value();
- CSA_ASSERT(this, TaggedIsNotSmi(current));
+ TNode<HeapObject> current = var_current.value();
TNode<Uint16T> current_instance_type = LoadInstanceType(current);
GotoIf(InstanceTypeEqual(current_instance_type, JS_BOUND_FUNCTION_TYPE),
&if_boundfunction);
@@ -1028,7 +906,7 @@ Node* InterpreterAssembler::Construct(SloppyTNode<Object> target, Node* context,
// context.
TNode<Context> current_context =
CAST(LoadObjectField(current, JSFunction::kContextOffset));
- TNode<Context> current_native_context =
+ TNode<NativeContext> current_native_context =
LoadNativeContext(current_context);
Branch(
TaggedEqual(LoadNativeContext(context), current_native_context),
@@ -1038,8 +916,8 @@ Node* InterpreterAssembler::Construct(SloppyTNode<Object> target, Node* context,
BIND(&if_boundfunction);
{
// Continue with the [[BoundTargetFunction]] of {current}.
- var_current.Bind(LoadObjectField(
- current, JSBoundFunction::kBoundTargetFunctionOffset));
+ var_current = LoadObjectField<HeapObject>(
+ current, JSBoundFunction::kBoundTargetFunctionOffset);
Goto(&loop);
}
}
@@ -1056,8 +934,8 @@ Node* InterpreterAssembler::Construct(SloppyTNode<Object> target, Node* context,
BIND(&create_allocation_site);
{
- var_site.Bind(CreateAllocationSiteInFeedbackVector(feedback_vector,
- SmiTag(slot_id)));
+ var_site =
+ CreateAllocationSiteInFeedbackVector(feedback_vector, slot_id);
ReportFeedbackUpdate(feedback_vector, slot_id,
"Construct:CreateAllocationSite");
Goto(&construct_array);
@@ -1097,9 +975,9 @@ Node* InterpreterAssembler::Construct(SloppyTNode<Object> target, Node* context,
Callable callable = CodeFactory::InterpreterPushArgsThenConstruct(
isolate(), InterpreterPushArgsMode::kArrayFunction);
TNode<Code> code_target = HeapConstant(callable.code());
- var_result.Bind(CallStub(callable.descriptor(), code_target, context,
- args.reg_count(), args.base_reg_location(), target,
- new_target, var_site.value()));
+ var_result = CallStub(callable.descriptor(), code_target, context,
+ args.reg_count(), args.base_reg_location(), target,
+ new_target, var_site.value());
Goto(&return_result);
}
@@ -1110,9 +988,9 @@ Node* InterpreterAssembler::Construct(SloppyTNode<Object> target, Node* context,
Callable callable = CodeFactory::InterpreterPushArgsThenConstruct(
isolate(), InterpreterPushArgsMode::kOther);
TNode<Code> code_target = HeapConstant(callable.code());
- var_result.Bind(CallStub(callable.descriptor(), code_target, context,
- args.reg_count(), args.base_reg_location(), target,
- new_target, UndefinedConstant()));
+ var_result = CallStub(callable.descriptor(), code_target, context,
+ args.reg_count(), args.base_reg_location(), target,
+ new_target, UndefinedConstant());
Goto(&return_result);
}
@@ -1120,17 +998,18 @@ Node* InterpreterAssembler::Construct(SloppyTNode<Object> target, Node* context,
return var_result.value();
}
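
The feedback path above walks [[BoundTargetFunction]] links so that constructing via a bound function still checks the native context of the underlying function. A plain C++ stand-in for that unwrapping loop (types are illustrative):

    #include <cassert>

    struct Callable {
      bool is_bound;
      Callable* bound_target;  // meaningful only when is_bound
    };

    const Callable* UnwrapBound(const Callable* c) {
      while (c->is_bound) c = c->bound_target;  // f.bind(x).bind(y) -> f
      return c;
    }

    int main() {
      Callable f{false, nullptr};
      Callable b1{true, &f};
      Callable b2{true, &b1};
      assert(UnwrapBound(&b2) == &f);
    }
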
-Node* InterpreterAssembler::ConstructWithSpread(Node* target, Node* context,
- Node* new_target,
- const RegListNodePair& args,
- Node* slot_id,
- Node* feedback_vector) {
+TNode<Object> InterpreterAssembler::ConstructWithSpread(
+ TNode<Object> target, TNode<Context> context, TNode<Object> new_target,
+ const RegListNodePair& args, TNode<UintPtrT> slot_id,
+ TNode<HeapObject> maybe_feedback_vector) {
// TODO(bmeurer): Unify this with the Construct bytecode feedback
// above once we have a way to pass the AllocationSite to the Array
// constructor _and_ spread the last argument at the same time.
DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
Label extra_checks(this, Label::kDeferred), construct(this);
- GotoIf(IsUndefined(feedback_vector), &construct);
+ GotoIf(IsUndefined(maybe_feedback_vector), &construct);
+
+ TNode<FeedbackVector> feedback_vector = CAST(maybe_feedback_vector);
// Increment the call count.
IncrementCallCount(feedback_vector, slot_id);
@@ -1138,7 +1017,7 @@ Node* InterpreterAssembler::ConstructWithSpread(Node* target, Node* context,
// Check if we have monomorphic {new_target} feedback already.
TNode<MaybeObject> feedback =
LoadFeedbackVectorSlot(feedback_vector, slot_id);
- Branch(IsWeakReferenceTo(feedback, CAST(new_target)), &construct,
+ Branch(IsWeakReferenceToObject(feedback, new_target), &construct,
&extra_checks);
BIND(&extra_checks);
@@ -1174,14 +1053,13 @@ Node* InterpreterAssembler::ConstructWithSpread(Node* target, Node* context,
GotoIf(TaggedIsSmi(new_target), &mark_megamorphic);
// Check if the {new_target} is a JSFunction or JSBoundFunction
// in the current native context.
- VARIABLE(var_current, MachineRepresentation::kTagged, new_target);
+ TVARIABLE(HeapObject, var_current, CAST(new_target));
Label loop(this, &var_current), done_loop(this);
Goto(&loop);
BIND(&loop);
{
Label if_boundfunction(this), if_function(this);
- Node* current = var_current.value();
- CSA_ASSERT(this, TaggedIsNotSmi(current));
+ TNode<HeapObject> current = var_current.value();
TNode<Uint16T> current_instance_type = LoadInstanceType(current);
GotoIf(InstanceTypeEqual(current_instance_type, JS_BOUND_FUNCTION_TYPE),
&if_boundfunction);
@@ -1194,7 +1072,7 @@ Node* InterpreterAssembler::ConstructWithSpread(Node* target, Node* context,
// context.
TNode<Context> current_context =
CAST(LoadObjectField(current, JSFunction::kContextOffset));
- TNode<Context> current_native_context =
+ TNode<NativeContext> current_native_context =
LoadNativeContext(current_context);
Branch(
TaggedEqual(LoadNativeContext(context), current_native_context),
@@ -1204,8 +1082,8 @@ Node* InterpreterAssembler::ConstructWithSpread(Node* target, Node* context,
BIND(&if_boundfunction);
{
// Continue with the [[BoundTargetFunction]] of {current}.
- var_current.Bind(LoadObjectField(
- current, JSBoundFunction::kBoundTargetFunctionOffset));
+ var_current = LoadObjectField<HeapObject>(
+ current, JSBoundFunction::kBoundTargetFunctionOffset);
Goto(&loop);
}
}
@@ -1243,7 +1121,8 @@ Node* InterpreterAssembler::ConstructWithSpread(Node* target, Node* context,
UndefinedConstant());
}
-Node* InterpreterAssembler::CallRuntimeN(Node* function_id, Node* context,
+Node* InterpreterAssembler::CallRuntimeN(TNode<Uint32T> function_id,
+ TNode<Context> context,
const RegListNodePair& args,
int result_size) {
DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
@@ -1252,22 +1131,22 @@ Node* InterpreterAssembler::CallRuntimeN(Node* function_id, Node* context,
TNode<Code> code_target = HeapConstant(callable.code());
// Get the function entry from the function id.
- Node* function_table = ExternalConstant(
- ExternalReference::runtime_function_table_address(isolate()));
+ TNode<RawPtrT> function_table = ReinterpretCast<RawPtrT>(ExternalConstant(
+ ExternalReference::runtime_function_table_address(isolate())));
TNode<Word32T> function_offset =
Int32Mul(function_id, Int32Constant(sizeof(Runtime::Function)));
TNode<WordT> function =
IntPtrAdd(function_table, ChangeUint32ToWord(function_offset));
- Node* function_entry =
- Load(MachineType::Pointer(), function,
- IntPtrConstant(offsetof(Runtime::Function, entry)));
+ TNode<RawPtrT> function_entry = Load<RawPtrT>(
+ function, IntPtrConstant(offsetof(Runtime::Function, entry)));
return CallStubR(StubCallMode::kCallCodeObject, callable.descriptor(),
result_size, code_target, context, args.reg_count(),
args.base_reg_location(), function_entry);
}
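
CallRuntimeN locates the runtime function's entry point with plain address arithmetic: id * sizeof(Runtime::Function) into the table, plus offsetof(Runtime::Function, entry). A self-contained sketch of the same arithmetic (struct and names are illustrative, not V8's Runtime::Function):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    struct Function {
      const char* name;
      void* entry;
    };

    void* LookupEntry(const Function* table, uint32_t id) {
      const char* base = reinterpret_cast<const char*>(table);
      const char* slot =
          base + id * sizeof(Function) + offsetof(Function, entry);
      return *reinterpret_cast<void* const*>(slot);
    }

    int main() {
      int marker = 0;
      Function table[] = {{"a", nullptr}, {"b", &marker}};
      assert(LookupEntry(table, 1) == &marker);
    }
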
-void InterpreterAssembler::UpdateInterruptBudget(Node* weight, bool backward) {
+void InterpreterAssembler::UpdateInterruptBudget(TNode<Int32T> weight,
+ bool backward) {
Comment("[ UpdateInterruptBudget");
// Assert that the weight is positive (negative weights should be implemented
@@ -1289,7 +1168,7 @@ void InterpreterAssembler::UpdateInterruptBudget(Node* weight, bool backward) {
TVARIABLE(Int32T, new_budget);
if (backward) {
// Update budget by |weight| and check if it reaches zero.
- new_budget = Signed(Int32Sub(budget_after_bytecode, weight));
+ new_budget = Int32Sub(budget_after_bytecode, weight);
TNode<BoolT> condition =
Int32GreaterThanOrEqual(new_budget.value(), Int32Constant(0));
Label ok(this), interrupt_check(this, Label::kDeferred);
@@ -1303,7 +1182,7 @@ void InterpreterAssembler::UpdateInterruptBudget(Node* weight, bool backward) {
} else {
// For a forward jump, we know we only increase the interrupt budget, so
// no need to check if it's below zero.
- new_budget = Signed(Int32Add(budget_after_bytecode, weight));
+ new_budget = Int32Add(budget_after_bytecode, weight);
}
// Update budget.
@@ -1323,7 +1202,7 @@ TNode<IntPtrT> InterpreterAssembler::Advance(int delta) {
return Advance(IntPtrConstant(delta));
}
-TNode<IntPtrT> InterpreterAssembler::Advance(SloppyTNode<IntPtrT> delta,
+TNode<IntPtrT> InterpreterAssembler::Advance(TNode<IntPtrT> delta,
bool backward) {
#ifdef V8_TRACE_IGNITION
TraceBytecode(Runtime::kInterpreterTraceBytecodeExit);
@@ -1334,45 +1213,51 @@ TNode<IntPtrT> InterpreterAssembler::Advance(SloppyTNode<IntPtrT> delta,
return next_offset;
}
-Node* InterpreterAssembler::Jump(Node* delta, bool backward) {
+void InterpreterAssembler::Jump(TNode<IntPtrT> jump_offset, bool backward) {
DCHECK(!Bytecodes::IsStarLookahead(bytecode_, operand_scale_));
- UpdateInterruptBudget(TruncateIntPtrToInt32(delta), backward);
- Node* new_bytecode_offset = Advance(delta, backward);
- TNode<WordT> target_bytecode = LoadBytecode(new_bytecode_offset);
- return DispatchToBytecode(target_bytecode, new_bytecode_offset);
+ UpdateInterruptBudget(TruncateIntPtrToInt32(jump_offset), backward);
+ TNode<IntPtrT> new_bytecode_offset = Advance(jump_offset, backward);
+ TNode<RawPtrT> target_bytecode =
+ UncheckedCast<RawPtrT>(LoadBytecode(new_bytecode_offset));
+ DispatchToBytecode(target_bytecode, new_bytecode_offset);
}
-Node* InterpreterAssembler::Jump(Node* delta) { return Jump(delta, false); }
+void InterpreterAssembler::Jump(TNode<IntPtrT> jump_offset) {
+ Jump(jump_offset, false);
+}
-Node* InterpreterAssembler::JumpBackward(Node* delta) {
- return Jump(delta, true);
+void InterpreterAssembler::JumpBackward(TNode<IntPtrT> jump_offset) {
+ Jump(jump_offset, true);
}
-void InterpreterAssembler::JumpConditional(Node* condition, Node* delta) {
+void InterpreterAssembler::JumpConditional(TNode<BoolT> condition,
+ TNode<IntPtrT> jump_offset) {
Label match(this), no_match(this);
Branch(condition, &match, &no_match);
BIND(&match);
- Jump(delta);
+ Jump(jump_offset);
BIND(&no_match);
Dispatch();
}
void InterpreterAssembler::JumpIfTaggedEqual(TNode<Object> lhs,
- TNode<Object> rhs, Node* delta) {
- JumpConditional(TaggedEqual(lhs, rhs), delta);
+ TNode<Object> rhs,
+ TNode<IntPtrT> jump_offset) {
+ JumpConditional(TaggedEqual(lhs, rhs), jump_offset);
}
void InterpreterAssembler::JumpIfTaggedNotEqual(TNode<Object> lhs,
TNode<Object> rhs,
- Node* delta) {
- JumpConditional(TaggedNotEqual(lhs, rhs), delta);
+ TNode<IntPtrT> jump_offset) {
+ JumpConditional(TaggedNotEqual(lhs, rhs), jump_offset);
}
-TNode<WordT> InterpreterAssembler::LoadBytecode(Node* bytecode_offset) {
- Node* bytecode =
- Load(MachineType::Uint8(), BytecodeArrayTaggedPointer(), bytecode_offset);
+TNode<WordT> InterpreterAssembler::LoadBytecode(
+ TNode<IntPtrT> bytecode_offset) {
+ TNode<Uint8T> bytecode =
+ Load<Uint8T>(BytecodeArrayTaggedPointer(), bytecode_offset);
return ChangeUint32ToWord(bytecode);
}
@@ -1418,51 +1303,39 @@ void InterpreterAssembler::InlineStar() {
accumulator_use_ = previous_acc_use;
}
-Node* InterpreterAssembler::Dispatch() {
+void InterpreterAssembler::Dispatch() {
Comment("========= Dispatch");
DCHECK_IMPLIES(Bytecodes::MakesCallAlongCriticalPath(bytecode_), made_call_);
- Node* target_offset = Advance();
+ TNode<IntPtrT> target_offset = Advance();
TNode<WordT> target_bytecode = LoadBytecode(target_offset);
if (Bytecodes::IsStarLookahead(bytecode_, operand_scale_)) {
target_bytecode = StarDispatchLookahead(target_bytecode);
}
- return DispatchToBytecode(target_bytecode, BytecodeOffset());
+ DispatchToBytecode(target_bytecode, BytecodeOffset());
}
-Node* InterpreterAssembler::DispatchToBytecode(Node* target_bytecode,
- Node* new_bytecode_offset) {
+void InterpreterAssembler::DispatchToBytecode(
+ TNode<WordT> target_bytecode, TNode<IntPtrT> new_bytecode_offset) {
if (FLAG_trace_ignition_dispatches) {
TraceBytecodeDispatch(target_bytecode);
}
- Node* target_code_entry =
- Load(MachineType::Pointer(), DispatchTableRawPointer(),
- TimesSystemPointerSize(target_bytecode));
-
- return DispatchToBytecodeHandlerEntry(target_code_entry, new_bytecode_offset,
- target_bytecode);
-}
+ TNode<RawPtrT> target_code_entry = Load<RawPtrT>(
+ DispatchTablePointer(), TimesSystemPointerSize(target_bytecode));
-Node* InterpreterAssembler::DispatchToBytecodeHandler(Node* handler,
- Node* bytecode_offset,
- Node* target_bytecode) {
- // TODO(ishell): Add CSA::CodeEntryPoint(code).
- TNode<IntPtrT> handler_entry =
- IntPtrAdd(BitcastTaggedToWord(handler),
- IntPtrConstant(Code::kHeaderSize - kHeapObjectTag));
- return DispatchToBytecodeHandlerEntry(handler_entry, bytecode_offset,
- target_bytecode);
+ DispatchToBytecodeHandlerEntry(target_code_entry, new_bytecode_offset);
}
-Node* InterpreterAssembler::DispatchToBytecodeHandlerEntry(
- Node* handler_entry, Node* bytecode_offset, Node* target_bytecode) {
+void InterpreterAssembler::DispatchToBytecodeHandlerEntry(
+ TNode<RawPtrT> handler_entry, TNode<IntPtrT> bytecode_offset) {
// Propagate speculation poisoning.
- TNode<WordT> poisoned_handler_entry = WordPoisonOnSpeculation(handler_entry);
- return TailCallBytecodeDispatch(
- InterpreterDispatchDescriptor{}, poisoned_handler_entry,
- GetAccumulatorUnchecked(), bytecode_offset, BytecodeArrayTaggedPointer(),
- DispatchTableRawPointer());
+ TNode<RawPtrT> poisoned_handler_entry =
+ UncheckedCast<RawPtrT>(WordPoisonOnSpeculation(handler_entry));
+ TailCallBytecodeDispatch(InterpreterDispatchDescriptor{},
+ poisoned_handler_entry, GetAccumulatorUnchecked(),
+ bytecode_offset, BytecodeArrayTaggedPointer(),
+ DispatchTablePointer());
}
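
DispatchToBytecode implements token-threaded dispatch: the next handler's entry point is loaded from the dispatch table indexed by the next bytecode, and control transfers there via a tail call after speculation poisoning. A minimal function-pointer sketch of the scheme (ordinary calls stand in for the tail call, and poisoning is not modeled):

    #include <cstdint>
    #include <cstdio>

    using Handler = void (*)(const uint8_t* pc);
    extern Handler dispatch_table[];

    void Dispatch(const uint8_t* pc) {
      dispatch_table[*pc](pc);  // load entry by bytecode, transfer control
    }

    void HandleNop(const uint8_t* pc) { Dispatch(pc + 1); }
    void HandleHalt(const uint8_t*) { std::puts("halt"); }

    Handler dispatch_table[] = {HandleNop, HandleHalt};

    int main() {
      const uint8_t program[] = {0, 0, 1};  // nop, nop, halt
      Dispatch(program);
    }
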
void InterpreterAssembler::DispatchWide(OperandScale operand_scale) {
@@ -1474,14 +1347,14 @@ void InterpreterAssembler::DispatchWide(OperandScale operand_scale) {
// Indices 256-511 correspond to bytecodes with operand_scale == 1
// Indices 512-767 correspond to bytecodes with operand_scale == 2
DCHECK_IMPLIES(Bytecodes::MakesCallAlongCriticalPath(bytecode_), made_call_);
- Node* next_bytecode_offset = Advance(1);
+ TNode<IntPtrT> next_bytecode_offset = Advance(1);
TNode<WordT> next_bytecode = LoadBytecode(next_bytecode_offset);
if (FLAG_trace_ignition_dispatches) {
TraceBytecodeDispatch(next_bytecode);
}
- Node* base_index;
+ TNode<IntPtrT> base_index;
switch (operand_scale) {
case OperandScale::kDouble:
base_index = IntPtrConstant(1 << kBitsPerByte);
@@ -1493,12 +1366,10 @@ void InterpreterAssembler::DispatchWide(OperandScale operand_scale) {
UNREACHABLE();
}
TNode<WordT> target_index = IntPtrAdd(base_index, next_bytecode);
- Node* target_code_entry =
- Load(MachineType::Pointer(), DispatchTableRawPointer(),
- TimesSystemPointerSize(target_index));
+ TNode<RawPtrT> target_code_entry = Load<RawPtrT>(
+ DispatchTablePointer(), TimesSystemPointerSize(target_index));
- DispatchToBytecodeHandlerEntry(target_code_entry, next_bytecode_offset,
- next_bytecode);
+ DispatchToBytecodeHandlerEntry(target_code_entry, next_bytecode_offset);
}
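
DispatchWide selects a handler from a per-scale band of the dispatch table, as the comment above describes: base_index is 256 for double-width operands and 512 for quadruple-width ones. A standalone sketch of the index computation (the enum is illustrative):

    #include <cassert>
    #include <cstddef>

    enum class OperandScale { kSingle = 1, kDouble = 2, kQuadruple = 4 };
    static constexpr size_t kBitsPerByte = 8;

    size_t WideDispatchIndex(OperandScale scale, size_t bytecode) {
      size_t base_index = 0;
      switch (scale) {
        case OperandScale::kDouble:
          base_index = 1 << kBitsPerByte;  // indices 256-511
          break;
        case OperandScale::kQuadruple:
          base_index = 2 << kBitsPerByte;  // indices 512-767
          break;
        default:
          break;  // single-width bytecodes use indices 0-255
      }
      return base_index + bytecode;
    }

    int main() {
      assert(WideDispatchIndex(OperandScale::kDouble, 10) == 266);
      assert(WideDispatchIndex(OperandScale::kQuadruple, 10) == 522);
    }
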
void InterpreterAssembler::UpdateInterruptBudgetOnReturn() {
@@ -1527,10 +1398,9 @@ void InterpreterAssembler::UpdateInterruptBudgetOnReturn() {
UpdateInterruptBudget(profiling_weight, true);
}
-Node* InterpreterAssembler::LoadOsrNestingLevel() {
- return LoadObjectField(BytecodeArrayTaggedPointer(),
- BytecodeArray::kOsrNestingLevelOffset,
- MachineType::Int8());
+TNode<Int8T> InterpreterAssembler::LoadOsrNestingLevel() {
+ return LoadObjectField<Int8T>(BytecodeArrayTaggedPointer(),
+ BytecodeArray::kOsrNestingLevelOffset);
}
void InterpreterAssembler::Abort(AbortReason abort_reason) {
@@ -1551,7 +1421,7 @@ void InterpreterAssembler::AbortIfWordNotEqual(TNode<WordT> lhs,
BIND(&ok);
}
-void InterpreterAssembler::MaybeDropFrames(Node* context) {
+void InterpreterAssembler::MaybeDropFrames(TNode<Context> context) {
TNode<ExternalReference> restart_fp_address =
ExternalConstant(ExternalReference::debug_restart_fp_address(isolate()));
@@ -1576,7 +1446,7 @@ void InterpreterAssembler::TraceBytecode(Runtime::FunctionId function_id) {
SmiTag(BytecodeOffset()), GetAccumulatorUnchecked());
}
-void InterpreterAssembler::TraceBytecodeDispatch(Node* target_bytecode) {
+void InterpreterAssembler::TraceBytecodeDispatch(TNode<WordT> target_bytecode) {
TNode<ExternalReference> counters_table = ExternalConstant(
ExternalReference::interpreter_dispatch_counters(isolate()));
TNode<IntPtrT> source_bytecode_table_index = IntPtrConstant(
@@ -1616,8 +1486,8 @@ bool InterpreterAssembler::TargetSupportsUnalignedAccess() {
}
void InterpreterAssembler::AbortIfRegisterCountInvalid(
- Node* parameters_and_registers, Node* formal_parameter_count,
- Node* register_count) {
+ TNode<FixedArrayBase> parameters_and_registers,
+ TNode<IntPtrT> formal_parameter_count, TNode<UintPtrT> register_count) {
TNode<IntPtrT> array_size =
LoadAndUntagFixedArrayBaseLength(parameters_and_registers);
@@ -1633,13 +1503,13 @@ void InterpreterAssembler::AbortIfRegisterCountInvalid(
BIND(&ok);
}
-Node* InterpreterAssembler::ExportParametersAndRegisterFile(
+TNode<FixedArray> InterpreterAssembler::ExportParametersAndRegisterFile(
TNode<FixedArray> array, const RegListNodePair& registers,
TNode<Int32T> formal_parameter_count) {
// Store the formal parameters (without receiver) followed by the
// registers into the generator's internal parameters_and_registers field.
TNode<IntPtrT> formal_parameter_count_intptr =
- ChangeInt32ToIntPtr(formal_parameter_count);
+ Signed(ChangeUint32ToWord(formal_parameter_count));
TNode<UintPtrT> register_count = ChangeUint32ToWord(registers.reg_count());
if (FLAG_debug_code) {
CSA_ASSERT(this, IntPtrEqual(registers.base_reg_location(),
@@ -1649,8 +1519,8 @@ Node* InterpreterAssembler::ExportParametersAndRegisterFile(
}
{
- Variable var_index(this, MachineType::PointerRepresentation());
- var_index.Bind(IntPtrConstant(0));
+ TVARIABLE(IntPtrT, var_index);
+ var_index = IntPtrConstant(0);
// Iterate over parameters and write them into the array.
Label loop(this, &var_index), done_loop(this);
@@ -1662,16 +1532,16 @@ Node* InterpreterAssembler::ExportParametersAndRegisterFile(
Goto(&loop);
BIND(&loop);
{
- Node* index = var_index.value();
+ TNode<IntPtrT> index = var_index.value();
GotoIfNot(UintPtrLessThan(index, formal_parameter_count_intptr),
&done_loop);
- TNode<WordT> reg_index = IntPtrSub(reg_base, index);
+ TNode<IntPtrT> reg_index = IntPtrSub(reg_base, index);
TNode<Object> value = LoadRegister(reg_index);
StoreFixedArrayElement(array, index, value);
- var_index.Bind(IntPtrAdd(index, IntPtrConstant(1)));
+ var_index = IntPtrAdd(index, IntPtrConstant(1));
Goto(&loop);
}
BIND(&done_loop);
@@ -1681,25 +1551,25 @@ Node* InterpreterAssembler::ExportParametersAndRegisterFile(
// Iterate over register file and write values into array.
// The mapping of register to array index must match that used in
// BytecodeGraphBuilder::VisitResumeGenerator.
- Variable var_index(this, MachineType::PointerRepresentation());
- var_index.Bind(IntPtrConstant(0));
+ TVARIABLE(IntPtrT, var_index);
+ var_index = IntPtrConstant(0);
Label loop(this, &var_index), done_loop(this);
Goto(&loop);
BIND(&loop);
{
- Node* index = var_index.value();
+ TNode<IntPtrT> index = var_index.value();
GotoIfNot(UintPtrLessThan(index, register_count), &done_loop);
- TNode<WordT> reg_index =
+ TNode<IntPtrT> reg_index =
IntPtrSub(IntPtrConstant(Register(0).ToOperand()), index);
TNode<Object> value = LoadRegister(reg_index);
- TNode<WordT> array_index =
+ TNode<IntPtrT> array_index =
IntPtrAdd(formal_parameter_count_intptr, index);
StoreFixedArrayElement(array, array_index, value);
- var_index.Bind(IntPtrAdd(index, IntPtrConstant(1)));
+ var_index = IntPtrAdd(index, IntPtrConstant(1));
Goto(&loop);
}
BIND(&done_loop);
@@ -1708,11 +1578,11 @@ Node* InterpreterAssembler::ExportParametersAndRegisterFile(
return array;
}
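
ExportParametersAndRegisterFile writes parameters to array indices [0, formal_parameter_count) and registers to [formal_parameter_count, formal_parameter_count + register_count), which is the mapping the two loops above implement. A trivial stand-in that makes the index mapping explicit (real code writes into a generator's FixedArray):

    #include <cassert>
    #include <cstddef>
    #include <vector>

    std::vector<int> ExportFrame(const std::vector<int>& params,
                                 const std::vector<int>& regs) {
      std::vector<int> out(params.size() + regs.size());
      for (size_t i = 0; i < params.size(); ++i) out[i] = params[i];
      for (size_t i = 0; i < regs.size(); ++i) out[params.size() + i] = regs[i];
      return out;
    }

    int main() {
      std::vector<int> a = ExportFrame({1, 2}, {7, 8, 9});
      assert(a.size() == 5 && a[0] == 1 && a[2] == 7);
    }
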
-Node* InterpreterAssembler::ImportRegisterFile(
+TNode<FixedArray> InterpreterAssembler::ImportRegisterFile(
TNode<FixedArray> array, const RegListNodePair& registers,
TNode<Int32T> formal_parameter_count) {
TNode<IntPtrT> formal_parameter_count_intptr =
- ChangeInt32ToIntPtr(formal_parameter_count);
+ Signed(ChangeUint32ToWord(formal_parameter_count));
TNode<UintPtrT> register_count = ChangeUint32ToWord(registers.reg_count());
if (FLAG_debug_code) {
CSA_ASSERT(this, IntPtrEqual(registers.base_reg_location(),
@@ -1758,8 +1628,8 @@ void InterpreterAssembler::ToNumberOrNumeric(Object::Conversion mode) {
TNode<Object> object = GetAccumulator();
TNode<Context> context = GetContext();
- Variable var_type_feedback(this, MachineRepresentation::kTaggedSigned);
- Variable var_result(this, MachineRepresentation::kTagged);
+ TVARIABLE(Smi, var_type_feedback);
+ TVARIABLE(Numeric, var_result);
Label if_done(this), if_objectissmi(this), if_objectisheapnumber(this),
if_objectisother(this, Label::kDeferred);
@@ -1768,15 +1638,15 @@ void InterpreterAssembler::ToNumberOrNumeric(Object::Conversion mode) {
BIND(&if_objectissmi);
{
- var_result.Bind(object);
- var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kSignedSmall));
+ var_result = CAST(object);
+ var_type_feedback = SmiConstant(BinaryOperationFeedback::kSignedSmall);
Goto(&if_done);
}
BIND(&if_objectisheapnumber);
{
- var_result.Bind(object);
- var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kNumber));
+ var_result = CAST(object);
+ var_type_feedback = SmiConstant(BinaryOperationFeedback::kNumber);
Goto(&if_done);
}
@@ -1789,23 +1659,23 @@ void InterpreterAssembler::ToNumberOrNumeric(Object::Conversion mode) {
Label not_bigint(this);
GotoIfNot(IsBigInt(CAST(object)), &not_bigint);
{
- var_result.Bind(object);
- var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kBigInt));
+ var_result = CAST(object);
+ var_type_feedback = SmiConstant(BinaryOperationFeedback::kBigInt);
Goto(&if_done);
}
BIND(&not_bigint);
}
// Convert {object} by calling out to the appropriate builtin.
- var_result.Bind(CallBuiltin(builtin, context, object));
- var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kAny));
+ var_result = CAST(CallBuiltin(builtin, context, object));
+ var_type_feedback = SmiConstant(BinaryOperationFeedback::kAny);
Goto(&if_done);
}
BIND(&if_done);
// Record the type feedback collected for {object}.
- Node* slot_index = BytecodeOperandIdx(0);
+ TNode<UintPtrT> slot_index = BytecodeOperandIdx(0);
TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();
UpdateFeedback(var_type_feedback.value(), maybe_feedback_vector, slot_index);
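
ToNumberOrNumeric both converts the accumulator and records type feedback: smis record kSignedSmall, heap numbers kNumber, bigints (in kToNumeric mode) kBigInt, and anything that needs the conversion builtin records kAny. A sketch of that classification (enum and struct are illustrative stand-ins for V8's runtime types):

    #include <cassert>

    enum class Feedback { kSignedSmall, kNumber, kBigInt, kAny };

    struct Value {
      bool is_smi;
      bool is_heap_number;
      bool is_bigint;
    };

    Feedback ClassifyToNumeric(const Value& v) {
      if (v.is_smi) return Feedback::kSignedSmall;
      if (v.is_heap_number) return Feedback::kNumber;
      if (v.is_bigint) return Feedback::kBigInt;  // kToNumeric mode only
      return Feedback::kAny;  // conversion builtin must be called
    }

    int main() {
      assert(ClassifyToNumeric({true, false, false}) == Feedback::kSignedSmall);
      assert(ClassifyToNumeric({false, false, true}) == Feedback::kBigInt);
    }
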
diff --git a/deps/v8/src/interpreter/interpreter-assembler.h b/deps/v8/src/interpreter/interpreter-assembler.h
index 33fa987595..4a1882b82c 100644
--- a/deps/v8/src/interpreter/interpreter-assembler.h
+++ b/deps/v8/src/interpreter/interpreter-assembler.h
@@ -25,64 +25,62 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
// Returns the 32-bit unsigned count immediate for bytecode operand
// |operand_index| in the current bytecode.
- compiler::TNode<Uint32T> BytecodeOperandCount(int operand_index);
+ TNode<Uint32T> BytecodeOperandCount(int operand_index);
// Returns the 32-bit unsigned flag for bytecode operand |operand_index|
// in the current bytecode.
- compiler::Node* BytecodeOperandFlag(int operand_index);
+ TNode<Uint32T> BytecodeOperandFlag(int operand_index);
// Returns the 32-bit zero-extended index immediate for bytecode operand
// |operand_index| in the current bytecode.
- compiler::Node* BytecodeOperandIdxInt32(int operand_index);
+ TNode<Uint32T> BytecodeOperandIdxInt32(int operand_index);
// Returns the word zero-extended index immediate for bytecode operand
// |operand_index| in the current bytecode.
- compiler::Node* BytecodeOperandIdx(int operand_index);
+ TNode<UintPtrT> BytecodeOperandIdx(int operand_index);
// Returns the smi index immediate for bytecode operand |operand_index|
// in the current bytecode.
- compiler::Node* BytecodeOperandIdxSmi(int operand_index);
+ TNode<Smi> BytecodeOperandIdxSmi(int operand_index);
// Returns the 32-bit unsigned immediate for bytecode operand |operand_index|
// in the current bytecode.
- compiler::TNode<Uint32T> BytecodeOperandUImm(int operand_index);
+ TNode<Uint32T> BytecodeOperandUImm(int operand_index);
// Returns the word-size unsigned immediate for bytecode operand
// |operand_index| in the current bytecode.
- compiler::Node* BytecodeOperandUImmWord(int operand_index);
+ TNode<UintPtrT> BytecodeOperandUImmWord(int operand_index);
// Returns the unsigned smi immediate for bytecode operand |operand_index| in
// the current bytecode.
- compiler::Node* BytecodeOperandUImmSmi(int operand_index);
+ TNode<Smi> BytecodeOperandUImmSmi(int operand_index);
// Returns the 32-bit signed immediate for bytecode operand |operand_index|
// in the current bytecode.
- compiler::Node* BytecodeOperandImm(int operand_index);
+ TNode<Int32T> BytecodeOperandImm(int operand_index);
// Returns the word-size signed immediate for bytecode operand |operand_index|
// in the current bytecode.
- compiler::Node* BytecodeOperandImmIntPtr(int operand_index);
+ TNode<IntPtrT> BytecodeOperandImmIntPtr(int operand_index);
// Returns the smi immediate for bytecode operand |operand_index| in the
// current bytecode.
- compiler::Node* BytecodeOperandImmSmi(int operand_index);
+ TNode<Smi> BytecodeOperandImmSmi(int operand_index);
// Returns the 32-bit unsigned runtime id immediate for bytecode operand
// |operand_index| in the current bytecode.
- compiler::Node* BytecodeOperandRuntimeId(int operand_index);
- // Returns the 32-bit unsigned native context index immediate for bytecode
+ TNode<Uint32T> BytecodeOperandRuntimeId(int operand_index);
+ // Returns the word zero-extended native context index immediate for bytecode
// operand |operand_index| in the current bytecode.
- compiler::Node* BytecodeOperandNativeContextIndex(int operand_index);
+ TNode<UintPtrT> BytecodeOperandNativeContextIndex(int operand_index);
// Returns the 32-bit unsigned intrinsic id immediate for bytecode operand
// |operand_index| in the current bytecode.
- compiler::Node* BytecodeOperandIntrinsicId(int operand_index);
-
+ TNode<Uint32T> BytecodeOperandIntrinsicId(int operand_index);
// Accumulator.
- compiler::TNode<Object> GetAccumulator();
- void SetAccumulator(compiler::Node* value);
+ TNode<Object> GetAccumulator();
+ void SetAccumulator(SloppyTNode<Object> value);
// Context.
- compiler::TNode<Context> GetContext();
- void SetContext(compiler::TNode<Context> value);
+ TNode<Context> GetContext();
+ void SetContext(TNode<Context> value);
// Context at |depth| in the context chain starting at |context|.
- compiler::Node* GetContextAtDepth(compiler::TNode<Context> context,
- compiler::TNode<Uint32T> depth);
+ TNode<Context> GetContextAtDepth(TNode<Context> context,
+ TNode<Uint32T> depth);
// Goto the given |target| if the context chain starting at |context| has any
// extensions up to the given |depth|.
- void GotoIfHasContextExtensionUpToDepth(compiler::TNode<Context> context,
- compiler::TNode<Uint32T> depth,
- Label* target);
+ void GotoIfHasContextExtensionUpToDepth(TNode<Context> context,
+ TNode<Uint32T> depth, Label* target);
// A RegListNodePair provides an abstraction over lists of registers.
class RegListNodePair {
@@ -90,14 +88,12 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
RegListNodePair(TNode<IntPtrT> base_reg_location, TNode<Word32T> reg_count)
: base_reg_location_(base_reg_location), reg_count_(reg_count) {}
- compiler::TNode<Word32T> reg_count() const { return reg_count_; }
- compiler::TNode<IntPtrT> base_reg_location() const {
- return base_reg_location_;
- }
+ TNode<Word32T> reg_count() const { return reg_count_; }
+ TNode<IntPtrT> base_reg_location() const { return base_reg_location_; }
private:
- compiler::TNode<IntPtrT> base_reg_location_;
- compiler::TNode<Word32T> reg_count_;
+ TNode<IntPtrT> base_reg_location_;
+ TNode<Word32T> reg_count_;
};
// Backup/restore register file to/from a fixed array of the correct length.
@@ -105,72 +101,53 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
// - Suspend copies arguments and registers to the generator.
// - Resume copies only the registers from the generator, the arguments
// are copied by the ResumeGenerator trampoline.
- compiler::Node* ExportParametersAndRegisterFile(
+ TNode<FixedArray> ExportParametersAndRegisterFile(
TNode<FixedArray> array, const RegListNodePair& registers,
TNode<Int32T> formal_parameter_count);
- compiler::Node* ImportRegisterFile(TNode<FixedArray> array,
- const RegListNodePair& registers,
- TNode<Int32T> formal_parameter_count);
+ TNode<FixedArray> ImportRegisterFile(TNode<FixedArray> array,
+ const RegListNodePair& registers,
+ TNode<Int32T> formal_parameter_count);
// Loads from and stores to the interpreter register file.
- compiler::TNode<Object> LoadRegister(Register reg);
- compiler::TNode<IntPtrT> LoadAndUntagRegister(Register reg);
- compiler::TNode<Object> LoadRegisterAtOperandIndex(int operand_index);
- std::pair<compiler::TNode<Object>, compiler::TNode<Object>>
- LoadRegisterPairAtOperandIndex(int operand_index);
- void StoreRegister(compiler::Node* value, Register reg);
- void StoreRegisterAtOperandIndex(compiler::Node* value, int operand_index);
- void StoreRegisterPairAtOperandIndex(compiler::Node* value1,
- compiler::Node* value2,
- int operand_index);
- void StoreRegisterTripleAtOperandIndex(compiler::Node* value1,
- compiler::Node* value2,
- compiler::Node* value3,
+ TNode<Object> LoadRegister(Register reg);
+ TNode<IntPtrT> LoadAndUntagRegister(Register reg);
+ TNode<Object> LoadRegisterAtOperandIndex(int operand_index);
+ std::pair<TNode<Object>, TNode<Object>> LoadRegisterPairAtOperandIndex(
+ int operand_index);
+ void StoreRegister(TNode<Object> value, Register reg);
+ void StoreRegisterAtOperandIndex(TNode<Object> value, int operand_index);
+ void StoreRegisterPairAtOperandIndex(TNode<Object> value1,
+ TNode<Object> value2, int operand_index);
+ void StoreRegisterTripleAtOperandIndex(TNode<Object> value1,
+ TNode<Object> value2,
+ TNode<Object> value3,
int operand_index);
RegListNodePair GetRegisterListAtOperandIndex(int operand_index);
- Node* LoadRegisterFromRegisterList(const RegListNodePair& reg_list,
- int index);
+ TNode<Object> LoadRegisterFromRegisterList(const RegListNodePair& reg_list,
+ int index);
TNode<IntPtrT> RegisterLocationInRegisterList(const RegListNodePair& reg_list,
int index);
// Load constant at the index specified in operand |operand_index| from the
// constant pool.
- compiler::Node* LoadConstantPoolEntryAtOperandIndex(int operand_index);
+ TNode<Object> LoadConstantPoolEntryAtOperandIndex(int operand_index);
// Load and untag constant at the index specified in operand |operand_index|
// from the constant pool.
TNode<IntPtrT> LoadAndUntagConstantPoolEntryAtOperandIndex(int operand_index);
// Load constant at |index| in the constant pool.
- compiler::Node* LoadConstantPoolEntry(compiler::Node* index);
+ TNode<Object> LoadConstantPoolEntry(TNode<WordT> index);
// Load and untag constant at |index| in the constant pool.
- TNode<IntPtrT> LoadAndUntagConstantPoolEntry(compiler::Node* index);
+ TNode<IntPtrT> LoadAndUntagConstantPoolEntry(TNode<WordT> index);
// Load the FeedbackVector for the current function. The returned node could be
// undefined.
- compiler::TNode<HeapObject> LoadFeedbackVector();
-
- // Increment the call count for a CALL_IC or construct call.
- // The call count is located at feedback_vector[slot_id + 1].
- void IncrementCallCount(compiler::Node* feedback_vector,
- compiler::Node* slot_id);
-
- // Collect the callable |target| feedback for either a CALL_IC or
- // an INSTANCEOF_IC in the |feedback_vector| at |slot_id|.
- void CollectCallableFeedback(compiler::Node* target, compiler::Node* context,
- compiler::Node* feedback_vector,
- compiler::Node* slot_id);
-
- // Collect CALL_IC feedback for |target| function in the
- // |feedback_vector| at |slot_id|, and the call counts in
- // the |feedback_vector| at |slot_id+1|.
- void CollectCallFeedback(compiler::Node* target, compiler::Node* context,
- compiler::Node* maybe_feedback_vector,
- compiler::Node* slot_id);
+ TNode<HeapObject> LoadFeedbackVector();
// Call JSFunction or Callable |function| with |args| arguments, possibly
// including the receiver depending on |receiver_mode|. After the call returns
// directly dispatches to the next bytecode.
- void CallJSAndDispatch(compiler::Node* function, compiler::Node* context,
+ void CallJSAndDispatch(TNode<Object> function, TNode<Context> context,
const RegListNodePair& args,
ConvertReceiverMode receiver_mode);
@@ -179,93 +156,89 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
// depending on |receiver_mode|. After the call returns directly dispatches to
// the next bytecode.
template <class... TArgs>
- void CallJSAndDispatch(Node* function, Node* context, Node* arg_count,
+ void CallJSAndDispatch(TNode<Object> function, TNode<Context> context,
+ TNode<Word32T> arg_count,
ConvertReceiverMode receiver_mode, TArgs... args);
// Call JSFunction or Callable |function| with |args|
// arguments (not including receiver), and the final argument being spread.
// After the call returns directly dispatches to the next bytecode.
- void CallJSWithSpreadAndDispatch(compiler::Node* function,
- compiler::Node* context,
+ void CallJSWithSpreadAndDispatch(TNode<Object> function,
+ TNode<Context> context,
const RegListNodePair& args,
- compiler::Node* slot_id,
- compiler::Node* feedback_vector);
+ TNode<UintPtrT> slot_id,
+ TNode<HeapObject> maybe_feedback_vector);
// Call constructor |target| with |args| arguments (not including receiver).
// The |new_target| is the same as the |target| for the new keyword, but
// differs for the super keyword.
- compiler::Node* Construct(compiler::SloppyTNode<Object> target,
- compiler::Node* context,
- compiler::SloppyTNode<Object> new_target,
- const RegListNodePair& args,
- compiler::Node* slot_id,
- compiler::Node* feedback_vector);
+ TNode<Object> Construct(TNode<Object> target, TNode<Context> context,
+ TNode<Object> new_target, const RegListNodePair& args,
+ TNode<UintPtrT> slot_id,
+ TNode<HeapObject> maybe_feedback_vector);
// Call constructor |target| with |args| arguments (not including
// receiver). The last argument is always a spread. The |new_target| is the
// same as the |target| for the new keyword, but differs for the super
// keyword.
- compiler::Node* ConstructWithSpread(compiler::Node* target,
- compiler::Node* context,
- compiler::Node* new_target,
- const RegListNodePair& args,
- compiler::Node* slot_id,
- compiler::Node* feedback_vector);
+ TNode<Object> ConstructWithSpread(TNode<Object> target,
+ TNode<Context> context,
+ TNode<Object> new_target,
+ const RegListNodePair& args,
+ TNode<UintPtrT> slot_id,
+ TNode<HeapObject> maybe_feedback_vector);
// Call runtime function with |args| arguments, which will return
// |return_size| values.
- compiler::Node* CallRuntimeN(compiler::Node* function_id,
- compiler::Node* context,
+ compiler::Node* CallRuntimeN(TNode<Uint32T> function_id,
+ TNode<Context> context,
const RegListNodePair& args,
int return_size = 1);
// Jump forward relative to the current bytecode by the |jump_offset|.
- compiler::Node* Jump(compiler::Node* jump_offset);
+ void Jump(TNode<IntPtrT> jump_offset);
// Jump backward relative to the current bytecode by the |jump_offset|.
- compiler::Node* JumpBackward(compiler::Node* jump_offset);
+ void JumpBackward(TNode<IntPtrT> jump_offset);
// Jump forward relative to the current bytecode by |jump_offset| if the
// word values |lhs| and |rhs| are equal.
- void JumpIfTaggedEqual(compiler::TNode<Object> lhs,
- compiler::TNode<Object> rhs,
- compiler::Node* jump_offset);
+ void JumpIfTaggedEqual(TNode<Object> lhs, TNode<Object> rhs,
+ TNode<IntPtrT> jump_offset);
// Jump forward relative to the current bytecode by |jump_offset| if the
// word values |lhs| and |rhs| are not equal.
- void JumpIfTaggedNotEqual(compiler::TNode<Object> lhs,
- compiler::TNode<Object> rhs,
- compiler::Node* jump_offset);
+ void JumpIfTaggedNotEqual(TNode<Object> lhs, TNode<Object> rhs,
+ TNode<IntPtrT> jump_offset);
// Updates the profiler interrupt budget for a return.
void UpdateInterruptBudgetOnReturn();
// Returns the OSR nesting level from the bytecode header.
- compiler::Node* LoadOsrNestingLevel();
+ TNode<Int8T> LoadOsrNestingLevel();
// Dispatch to the bytecode.
- compiler::Node* Dispatch();
+ void Dispatch();
// Dispatch bytecode as wide operand variant.
void DispatchWide(OperandScale operand_scale);
// Dispatch to |target_bytecode| at |new_bytecode_offset|.
// |target_bytecode| should be equivalent to loading from the offset.
- compiler::Node* DispatchToBytecode(compiler::Node* target_bytecode,
- compiler::Node* new_bytecode_offset);
+ void DispatchToBytecode(TNode<WordT> target_bytecode,
+ TNode<IntPtrT> new_bytecode_offset);
// Abort with the given abort reason.
void Abort(AbortReason abort_reason);
- void AbortIfWordNotEqual(compiler::TNode<WordT> lhs,
- compiler::TNode<WordT> rhs,
+ void AbortIfWordNotEqual(TNode<WordT> lhs, TNode<WordT> rhs,
AbortReason abort_reason);
// Abort if |register_count| is invalid for given register file array.
- void AbortIfRegisterCountInvalid(compiler::Node* parameters_and_registers,
- compiler::Node* formal_parameter_count,
- compiler::Node* register_count);
+ void AbortIfRegisterCountInvalid(
+ TNode<FixedArrayBase> parameters_and_registers,
+ TNode<IntPtrT> formal_parameter_count, TNode<UintPtrT> register_count);
// Dispatch to frame dropper trampoline if necessary.
- void MaybeDropFrames(compiler::Node* context);
+ void MaybeDropFrames(TNode<Context> context);
// Returns the offset from the BytecodeArrayPointer of the current bytecode.
TNode<IntPtrT> BytecodeOffset();
@@ -277,27 +250,27 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
void ToNumberOrNumeric(Object::Conversion mode);
private:
- // Returns a tagged pointer to the current function's BytecodeArray object.
- compiler::Node* BytecodeArrayTaggedPointer();
+ // Returns a pointer to the current function's BytecodeArray object.
+ TNode<BytecodeArray> BytecodeArrayTaggedPointer();
- // Returns a raw pointer to first entry in the interpreter dispatch table.
- compiler::Node* DispatchTableRawPointer();
+ // Returns a pointer to first entry in the interpreter dispatch table.
+ TNode<ExternalReference> DispatchTablePointer();
// Returns the accumulator value without checking whether bytecode
// uses it. This is intended to be used only in dispatch and in
// tracing as these need to bypass accumulator use validity checks.
- compiler::Node* GetAccumulatorUnchecked();
+ TNode<Object> GetAccumulatorUnchecked();
// Returns the frame pointer for the interpreted frame of the function being
// interpreted.
TNode<RawPtrT> GetInterpretedFramePointer();
// Operations on registers.
- compiler::TNode<IntPtrT> RegisterLocation(Register reg);
- compiler::TNode<IntPtrT> RegisterLocation(compiler::Node* reg_index);
- compiler::TNode<IntPtrT> NextRegister(compiler::Node* reg_index);
- compiler::TNode<Object> LoadRegister(Node* reg_index);
- void StoreRegister(compiler::Node* value, compiler::Node* reg_index);
+ TNode<IntPtrT> RegisterLocation(Register reg);
+ TNode<IntPtrT> RegisterLocation(TNode<IntPtrT> reg_index);
+ TNode<IntPtrT> NextRegister(TNode<IntPtrT> reg_index);
+ TNode<Object> LoadRegister(TNode<IntPtrT> reg_index);
+ void StoreRegister(TNode<Object> value, TNode<IntPtrT> reg_index);
// Saves and restores interpreter bytecode offset to the interpreter stack
// frame when performing a call.
@@ -305,7 +278,7 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
void CallEpilogue();
// Increment the dispatch counter for the (current, next) bytecode pair.
- void TraceBytecodeDispatch(compiler::Node* target_index);
+ void TraceBytecodeDispatch(TNode<WordT> target_bytecode);
// Traces the current bytecode by calling |function_id|.
void TraceBytecode(Runtime::FunctionId function_id);
@@ -313,74 +286,74 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
// Updates the bytecode array's interrupt budget by a 32-bit unsigned |weight|
// and calls Runtime::kInterrupt if counter reaches zero. If |backward|, then
// the interrupt budget is decremented, otherwise it is incremented.
- void UpdateInterruptBudget(compiler::Node* weight, bool backward);
+ void UpdateInterruptBudget(TNode<Int32T> weight, bool backward);
// Returns the offset of register |index| relative to RegisterFilePointer().
- compiler::TNode<IntPtrT> RegisterFrameOffset(compiler::Node* index);
+ TNode<IntPtrT> RegisterFrameOffset(TNode<IntPtrT> index);
// Returns the offset of an operand relative to the current bytecode offset.
- compiler::Node* OperandOffset(int operand_index);
+ TNode<IntPtrT> OperandOffset(int operand_index);
// Returns a value built from a sequence of bytes in the bytecode
// array starting at |relative_offset| from the current bytecode.
// The |result_type| determines the size and signedness of the
// value read. This method should only be used on architectures that
// do not support unaligned memory accesses.
- compiler::TNode<Word32T> BytecodeOperandReadUnaligned(
+ TNode<Word32T> BytecodeOperandReadUnaligned(
int relative_offset, MachineType result_type,
LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);
// Returns zero- or sign-extended to word32 value of the operand.
- compiler::TNode<Uint8T> BytecodeOperandUnsignedByte(
+ TNode<Uint8T> BytecodeOperandUnsignedByte(
int operand_index,
LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);
- compiler::TNode<Int8T> BytecodeOperandSignedByte(
+ TNode<Int8T> BytecodeOperandSignedByte(
int operand_index,
LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);
- compiler::TNode<Uint16T> BytecodeOperandUnsignedShort(
+ TNode<Uint16T> BytecodeOperandUnsignedShort(
int operand_index,
LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);
- compiler::TNode<Int16T> BytecodeOperandSignedShort(
+ TNode<Int16T> BytecodeOperandSignedShort(
int operand_index,
LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);
- compiler::TNode<Uint32T> BytecodeOperandUnsignedQuad(
+ TNode<Uint32T> BytecodeOperandUnsignedQuad(
int operand_index,
LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);
- compiler::TNode<Int32T> BytecodeOperandSignedQuad(
+ TNode<Int32T> BytecodeOperandSignedQuad(
int operand_index,
LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);
// Returns zero- or sign-extended to word32 value of the operand of
// given size.
- compiler::TNode<Int32T> BytecodeSignedOperand(
+ TNode<Int32T> BytecodeSignedOperand(
int operand_index, OperandSize operand_size,
LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);
- compiler::TNode<Uint32T> BytecodeUnsignedOperand(
+ TNode<Uint32T> BytecodeUnsignedOperand(
int operand_index, OperandSize operand_size,
LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);
// Returns the word-size sign-extended register index for bytecode operand
// |operand_index| in the current bytecode. Value is not poisoned on
// speculation since the value loaded from the register is poisoned instead.
- compiler::Node* BytecodeOperandReg(
+ TNode<IntPtrT> BytecodeOperandReg(
int operand_index,
LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);
// Returns the word zero-extended index immediate for bytecode operand
// |operand_index| in the current bytecode for use when loading a constant
// pool entry.
- compiler::Node* BytecodeOperandConstantPoolIdx(
+ TNode<UintPtrT> BytecodeOperandConstantPoolIdx(
int operand_index,
LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);
// Jump relative to the current bytecode by the |jump_offset|. If |backward|,
// then jump backward (subtract the offset), otherwise jump forward (add the
// offset). Helper function for Jump and JumpBackward.
- compiler::Node* Jump(compiler::Node* jump_offset, bool backward);
+ void Jump(TNode<IntPtrT> jump_offset, bool backward);
// Jump forward relative to the current bytecode by |jump_offset| if the
// |condition| is true. Helper function for JumpIfTaggedEqual and
// JumpIfTaggedNotEqual.
- void JumpConditional(compiler::Node* condition, compiler::Node* jump_offset);
+ void JumpConditional(TNode<BoolT> condition, TNode<IntPtrT> jump_offset);
// Save the bytecode offset to the interpreter frame.
void SaveBytecodeOffset();
@@ -394,29 +367,22 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
// Updates and returns BytecodeOffset() advanced by delta bytecodes.
// Traces the exit of the current bytecode.
TNode<IntPtrT> Advance(int delta);
- TNode<IntPtrT> Advance(SloppyTNode<IntPtrT> delta, bool backward = false);
+ TNode<IntPtrT> Advance(TNode<IntPtrT> delta, bool backward = false);
// Load the bytecode at |bytecode_offset|.
- compiler::TNode<WordT> LoadBytecode(compiler::Node* bytecode_offset);
+ TNode<WordT> LoadBytecode(TNode<IntPtrT> bytecode_offset);
// Look ahead for Star and inline it in a branch. Returns a new target
// bytecode node for dispatch.
- compiler::TNode<WordT> StarDispatchLookahead(
- compiler::TNode<WordT> target_bytecode);
+ TNode<WordT> StarDispatchLookahead(TNode<WordT> target_bytecode);
// Build code for Star at the current BytecodeOffset() and Advance() to the
// next dispatch offset.
void InlineStar();
- // Dispatch to the bytecode handler with code offset |handler|.
- compiler::Node* DispatchToBytecodeHandler(compiler::Node* handler,
- compiler::Node* bytecode_offset,
- compiler::Node* target_bytecode);
-
// Dispatch to the bytecode handler with code entry point |handler_entry|.
- compiler::Node* DispatchToBytecodeHandlerEntry(
- compiler::Node* handler_entry, compiler::Node* bytecode_offset,
- compiler::Node* target_bytecode);
+ void DispatchToBytecodeHandlerEntry(TNode<RawPtrT> handler_entry,
+ TNode<IntPtrT> bytecode_offset);
int CurrentBytecodeSize() const;
@@ -424,11 +390,11 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
Bytecode bytecode_;
OperandScale operand_scale_;
- TVariable<RawPtrT> interpreted_frame_pointer_;
- CodeStubAssembler::Variable bytecode_array_;
- TVariable<IntPtrT> bytecode_offset_;
- CodeStubAssembler::Variable dispatch_table_;
- CodeStubAssembler::Variable accumulator_;
+ CodeStubAssembler::TVariable<RawPtrT> interpreted_frame_pointer_;
+ CodeStubAssembler::TVariable<BytecodeArray> bytecode_array_;
+ CodeStubAssembler::TVariable<IntPtrT> bytecode_offset_;
+ CodeStubAssembler::TVariable<ExternalReference> dispatch_table_;
+ CodeStubAssembler::TVariable<Object> accumulator_;
AccumulatorUse accumulator_use_;
bool made_call_;
bool reloaded_frame_ptr_;
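
The header changes above follow a single pattern: untyped compiler::Node* parameters and CodeStubAssembler::Variable members become TNode<T> and TVariable<T>, so the kind of each IR value (IntPtrT, Int32T, BoolT, RawPtrT, BytecodeArray, ...) is enforced by the C++ compiler instead of by convention. As a rough illustration of what the phantom type parameter buys — a standalone sketch, not V8's actual TNode implementation — compare an untyped and a typed Jump signature:

    // Untyped IR handle: everything is just a Node*.
    struct Node {};

    // Phantom-typed handle in the spirit of CodeStubAssembler's TNode<T>;
    // T is never stored, it only constrains what the handle may be passed to.
    template <typename T>
    class TNode {
     public:
      explicit TNode(Node* node) : node_(node) {}
      Node* raw() const { return node_; }

     private:
      Node* node_;
    };

    // Tag types standing in for V8-internal IntPtrT / BoolT.
    struct IntPtrT {};
    struct BoolT {};

    // Untyped signature: a BoolT condition passed as a jump offset compiles.
    void JumpUntyped(Node* /*jump_offset*/, bool /*backward*/) {}

    // Typed signature: the same mix-up is rejected at compile time.
    void JumpTyped(TNode<IntPtrT> /*jump_offset*/, bool /*backward*/) {}

    int main() {
      Node n;
      TNode<IntPtrT> offset(&n);
      TNode<BoolT> condition(&n);
      JumpUntyped(condition.raw(), false);  // silently wrong, yet legal
      JumpTyped(offset, false);             // OK
      // JumpTyped(condition, false);       // error: TNode<BoolT> != TNode<IntPtrT>
    }
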
diff --git a/deps/v8/src/interpreter/interpreter-generator.cc b/deps/v8/src/interpreter/interpreter-generator.cc
index e8569ecd55..5f686f86b8 100644
--- a/deps/v8/src/interpreter/interpreter-generator.cc
+++ b/deps/v8/src/interpreter/interpreter-generator.cc
@@ -35,7 +35,6 @@ namespace {
using compiler::Node;
using Label = CodeStubAssembler::Label;
-using Variable = CodeStubAssembler::Variable;
#define IGNITION_HANDLER(Name, BaseAssembler) \
class Name##Assembler : public BaseAssembler { \
@@ -71,7 +70,7 @@ IGNITION_HANDLER(LdaZero, InterpreterAssembler) {
//
// Load an integer literal into the accumulator as a Smi.
IGNITION_HANDLER(LdaSmi, InterpreterAssembler) {
- Node* smi_int = BytecodeOperandImmSmi(0);
+ TNode<Smi> smi_int = BytecodeOperandImmSmi(0);
SetAccumulator(smi_int);
Dispatch();
}
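
Each IGNITION_HANDLER block expands into a small assembler subclass whose body emits the code for one bytecode, which is why the handlers in this file read like free-standing functions. A stripped-down analogue of the macro — illustrative only; the V8-specific plumbing (assembler state, operand scale, handler export) is omitted:

    #include <iostream>

    class BaseAssembler {
     public:
      virtual ~BaseAssembler() = default;
    };

    // Token-pasting macro in the spirit of IGNITION_HANDLER: declares a
    // per-bytecode assembler class and opens its code-generation method.
    #define HANDLER(Name, Base)                      \
      class Name##Assembler : public Base {          \
       public:                                       \
        void Generate();                             \
      };                                             \
      void Name##Assembler::Generate()

    // The handler body then reads exactly like the ones in the diff.
    HANDLER(LdaZero, BaseAssembler) {
      std::cout << "emit code that loads zero into the accumulator\n";
    }

    int main() {
      LdaZeroAssembler().Generate();
    }
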
@@ -80,7 +79,7 @@ IGNITION_HANDLER(LdaSmi, InterpreterAssembler) {
//
// Load constant literal at |idx| in the constant pool into the accumulator.
IGNITION_HANDLER(LdaConstant, InterpreterAssembler) {
- Node* constant = LoadConstantPoolEntryAtOperandIndex(0);
+ TNode<Object> constant = LoadConstantPoolEntryAtOperandIndex(0);
SetAccumulator(constant);
Dispatch();
}
@@ -161,7 +160,6 @@ class InterpreterLoadGlobalAssembler : public InterpreterAssembler {
void LdaGlobal(int slot_operand_index, int name_operand_index,
TypeofMode typeof_mode) {
TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();
- Node* feedback_slot = BytecodeOperandIdx(slot_operand_index);
AccessorAssembler accessor_asm(state());
ExitPoint exit_point(this, [=](Node* result) {
@@ -169,17 +167,25 @@ class InterpreterLoadGlobalAssembler : public InterpreterAssembler {
Dispatch();
});
+ LazyNode<Smi> lazy_smi_slot = [=] {
+ return SmiTag(Signed(BytecodeOperandIdx(slot_operand_index)));
+ };
+
+ LazyNode<UintPtrT> lazy_slot = [=] {
+ return BytecodeOperandIdx(slot_operand_index);
+ };
+
LazyNode<Context> lazy_context = [=] { return GetContext(); };
LazyNode<Name> lazy_name = [=] {
- Node* name = LoadConstantPoolEntryAtOperandIndex(name_operand_index);
- return CAST(name);
+ TNode<Name> name =
+ CAST(LoadConstantPoolEntryAtOperandIndex(name_operand_index));
+ return name;
};
- ParameterMode slot_mode = CodeStubAssembler::INTPTR_PARAMETERS;
- accessor_asm.LoadGlobalIC(maybe_feedback_vector, feedback_slot,
- lazy_context, lazy_name, typeof_mode, &exit_point,
- slot_mode);
+ accessor_asm.LoadGlobalIC(maybe_feedback_vector, lazy_smi_slot, lazy_slot,
+ lazy_context, lazy_name, typeof_mode,
+ &exit_point);
}
};
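
LoadGlobalIC now receives the feedback slot through LazyNode callbacks instead of an eagerly computed feedback_slot node, so SmiTag and BytecodeOperandIdx are only emitted on the paths that actually consume them. A LazyNode<T> in V8 is essentially std::function<TNode<T>()>; the following self-contained sketch (the names EmitExpensiveOperandLoad and LoadGlobal are hypothetical) shows the deferral idea:

    #include <functional>
    #include <iostream>

    // Stand-in for a typed IR node; in V8 this would be TNode<T>.
    template <typename T>
    struct TNode { T value; };

    // LazyNode<T>: a thunk that emits its node only when invoked, mirroring
    // V8's alias over std::function<TNode<T>()>.
    template <typename T>
    using LazyNode = std::function<TNode<T>()>;

    TNode<int> EmitExpensiveOperandLoad() {
      std::cout << "emitting operand load\n";  // happens only on demand
      return {42};
    }

    void LoadGlobal(const LazyNode<int>& lazy_slot, bool fast_path) {
      if (fast_path) return;          // fast path: the node is never emitted
      TNode<int> slot = lazy_slot();  // slow path: emit it now
      std::cout << "slow path uses slot " << slot.value << "\n";
    }

    int main() {
      LazyNode<int> lazy_slot = [] { return EmitExpensiveOperandLoad(); };
      LoadGlobal(lazy_slot, /*fast_path=*/true);   // prints nothing
      LoadGlobal(lazy_slot, /*fast_path=*/false);  // emits and uses the slot
    }
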
@@ -213,9 +219,9 @@ IGNITION_HANDLER(StaGlobal, InterpreterAssembler) {
TNode<Context> context = GetContext();
// Store the global via the StoreGlobalIC.
- Node* name = LoadConstantPoolEntryAtOperandIndex(0);
+ TNode<Name> name = CAST(LoadConstantPoolEntryAtOperandIndex(0));
TNode<Object> value = GetAccumulator();
- Node* raw_slot = BytecodeOperandIdx(1);
+ TNode<IntPtrT> raw_slot = Signed(BytecodeOperandIdx(1));
TNode<Smi> smi_slot = SmiTag(raw_slot);
TNode<HeapObject> maybe_vector = LoadFeedbackVector();
@@ -240,9 +246,9 @@ IGNITION_HANDLER(StaGlobal, InterpreterAssembler) {
// chain starting at |context| into the accumulator.
IGNITION_HANDLER(LdaContextSlot, InterpreterAssembler) {
TNode<Context> context = CAST(LoadRegisterAtOperandIndex(0));
- Node* slot_index = BytecodeOperandIdx(1);
+ TNode<IntPtrT> slot_index = Signed(BytecodeOperandIdx(1));
TNode<Uint32T> depth = BytecodeOperandUImm(2);
- Node* slot_context = GetContextAtDepth(context, depth);
+ TNode<Context> slot_context = GetContextAtDepth(context, depth);
TNode<Object> result = LoadContextElement(slot_context, slot_index);
SetAccumulator(result);
Dispatch();
@@ -254,9 +260,9 @@ IGNITION_HANDLER(LdaContextSlot, InterpreterAssembler) {
// chain starting at |context| into the accumulator.
IGNITION_HANDLER(LdaImmutableContextSlot, InterpreterAssembler) {
TNode<Context> context = CAST(LoadRegisterAtOperandIndex(0));
- Node* slot_index = BytecodeOperandIdx(1);
+ TNode<IntPtrT> slot_index = Signed(BytecodeOperandIdx(1));
TNode<Uint32T> depth = BytecodeOperandUImm(2);
- Node* slot_context = GetContextAtDepth(context, depth);
+ TNode<Context> slot_context = GetContextAtDepth(context, depth);
TNode<Object> result = LoadContextElement(slot_context, slot_index);
SetAccumulator(result);
Dispatch();
@@ -266,7 +272,7 @@ IGNITION_HANDLER(LdaImmutableContextSlot, InterpreterAssembler) {
//
// Load the object in |slot_index| of the current context into the accumulator.
IGNITION_HANDLER(LdaCurrentContextSlot, InterpreterAssembler) {
- Node* slot_index = BytecodeOperandIdx(0);
+ TNode<IntPtrT> slot_index = Signed(BytecodeOperandIdx(0));
TNode<Context> slot_context = GetContext();
TNode<Object> result = LoadContextElement(slot_context, slot_index);
SetAccumulator(result);
@@ -277,7 +283,7 @@ IGNITION_HANDLER(LdaCurrentContextSlot, InterpreterAssembler) {
//
// Load the object in |slot_index| of the current context into the accumulator.
IGNITION_HANDLER(LdaImmutableCurrentContextSlot, InterpreterAssembler) {
- Node* slot_index = BytecodeOperandIdx(0);
+ TNode<IntPtrT> slot_index = Signed(BytecodeOperandIdx(0));
TNode<Context> slot_context = GetContext();
TNode<Object> result = LoadContextElement(slot_context, slot_index);
SetAccumulator(result);
@@ -291,9 +297,9 @@ IGNITION_HANDLER(LdaImmutableCurrentContextSlot, InterpreterAssembler) {
IGNITION_HANDLER(StaContextSlot, InterpreterAssembler) {
TNode<Object> value = GetAccumulator();
TNode<Context> context = CAST(LoadRegisterAtOperandIndex(0));
- Node* slot_index = BytecodeOperandIdx(1);
+ TNode<IntPtrT> slot_index = Signed(BytecodeOperandIdx(1));
TNode<Uint32T> depth = BytecodeOperandUImm(2);
- Node* slot_context = GetContextAtDepth(context, depth);
+ TNode<Context> slot_context = GetContextAtDepth(context, depth);
StoreContextElement(slot_context, slot_index, value);
Dispatch();
}
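
The recurring Signed(BytecodeOperandIdx(...)) wrapper exists because BytecodeOperandIdx now returns TNode<UintPtrT>, while SmiTag and the context-slot accessors statically require TNode<IntPtrT>. Signed is a static reinterpretation of the same machine word, not a runtime conversion or range check; a minimal standalone analogue (all types here are hypothetical stand-ins):

    #include <cstdint>

    template <typename T>
    struct TNode { std::uintptr_t bits; };

    struct UintPtrT {};
    struct IntPtrT {};

    // Reinterprets the same word under a signed static type, in the spirit
    // of CodeStubAssembler::Signed(); no runtime instruction is implied.
    inline TNode<IntPtrT> Signed(TNode<UintPtrT> x) { return {x.bits}; }

    // SmiTag-style consumer that statically requires a signed word.
    inline TNode<IntPtrT> SmiTagLike(TNode<IntPtrT> x) { return {x.bits << 1}; }

    int main() {
      TNode<UintPtrT> raw_slot{3};
      // SmiTagLike(raw_slot);        // would not compile: wrong phantom type
      TNode<IntPtrT> tagged = SmiTagLike(Signed(raw_slot));
      (void)tagged;
    }
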
@@ -304,7 +310,7 @@ IGNITION_HANDLER(StaContextSlot, InterpreterAssembler) {
// context.
IGNITION_HANDLER(StaCurrentContextSlot, InterpreterAssembler) {
TNode<Object> value = GetAccumulator();
- Node* slot_index = BytecodeOperandIdx(0);
+ TNode<IntPtrT> slot_index = Signed(BytecodeOperandIdx(0));
TNode<Context> slot_context = GetContext();
StoreContextElement(slot_context, slot_index, value);
Dispatch();
@@ -315,7 +321,7 @@ IGNITION_HANDLER(StaCurrentContextSlot, InterpreterAssembler) {
// Lookup the object with the name in constant pool entry |name_index|
// dynamically.
IGNITION_HANDLER(LdaLookupSlot, InterpreterAssembler) {
- Node* name = LoadConstantPoolEntryAtOperandIndex(0);
+ TNode<Name> name = CAST(LoadConstantPoolEntryAtOperandIndex(0));
TNode<Context> context = GetContext();
TNode<Object> result = CallRuntime(Runtime::kLoadLookupSlot, context, name);
SetAccumulator(result);
@@ -327,7 +333,7 @@ IGNITION_HANDLER(LdaLookupSlot, InterpreterAssembler) {
// Lookup the object with the name in constant pool entry |name_index|
// dynamically without causing a NoReferenceError.
IGNITION_HANDLER(LdaLookupSlotInsideTypeof, InterpreterAssembler) {
- Node* name = LoadConstantPoolEntryAtOperandIndex(0);
+ TNode<Name> name = CAST(LoadConstantPoolEntryAtOperandIndex(0));
TNode<Context> context = GetContext();
TNode<Object> result =
CallRuntime(Runtime::kLoadLookupSlotInsideTypeof, context, name);
@@ -344,7 +350,7 @@ class InterpreterLookupContextSlotAssembler : public InterpreterAssembler {
void LookupContextSlot(Runtime::FunctionId function_id) {
TNode<Context> context = GetContext();
- Node* slot_index = BytecodeOperandIdx(1);
+ TNode<IntPtrT> slot_index = Signed(BytecodeOperandIdx(1));
TNode<Uint32T> depth = BytecodeOperandUImm(2);
Label slowpath(this, Label::kDeferred);
@@ -354,7 +360,7 @@ class InterpreterLookupContextSlotAssembler : public InterpreterAssembler {
// Fast path does a normal load context.
{
- Node* slot_context = GetContextAtDepth(context, depth);
+ TNode<Context> slot_context = GetContextAtDepth(context, depth);
TNode<Object> result = LoadContextElement(slot_context, slot_index);
SetAccumulator(result);
Dispatch();
@@ -363,7 +369,7 @@ class InterpreterLookupContextSlotAssembler : public InterpreterAssembler {
// Slow path when we have to call out to the runtime.
BIND(&slowpath);
{
- Node* name = LoadConstantPoolEntryAtOperandIndex(0);
+ TNode<Name> name = CAST(LoadConstantPoolEntryAtOperandIndex(0));
TNode<Object> result = CallRuntime(function_id, context, name);
SetAccumulator(result);
Dispatch();
@@ -419,7 +425,7 @@ class InterpreterLookupGlobalAssembler : public InterpreterLoadGlobalAssembler {
// Slow path when we have to call out to the runtime
BIND(&slowpath);
{
- Node* name = LoadConstantPoolEntryAtOperandIndex(0);
+ TNode<Name> name = CAST(LoadConstantPoolEntryAtOperandIndex(0));
TNode<Object> result = CallRuntime(function_id, context, name);
SetAccumulator(result);
Dispatch();
@@ -450,10 +456,10 @@ IGNITION_HANDLER(LdaLookupGlobalSlotInsideTypeof,
// pool entry |name_index|.
IGNITION_HANDLER(StaLookupSlot, InterpreterAssembler) {
TNode<Object> value = GetAccumulator();
- Node* name = LoadConstantPoolEntryAtOperandIndex(0);
- Node* bytecode_flags = BytecodeOperandFlag(1);
+ TNode<Name> name = CAST(LoadConstantPoolEntryAtOperandIndex(0));
+ TNode<Uint32T> bytecode_flags = BytecodeOperandFlag(1);
TNode<Context> context = GetContext();
- Variable var_result(this, MachineRepresentation::kTagged);
+ TVARIABLE(Object, var_result);
Label sloppy(this), strict(this), end(this);
DCHECK_EQ(0, LanguageMode::kSloppy);
@@ -467,8 +473,8 @@ IGNITION_HANDLER(StaLookupSlot, InterpreterAssembler) {
{
CSA_ASSERT(this, IsClearWord32<StoreLookupSlotFlags::LookupHoistingModeBit>(
bytecode_flags));
- var_result.Bind(
- CallRuntime(Runtime::kStoreLookupSlot_Strict, context, name, value));
+ var_result =
+ CallRuntime(Runtime::kStoreLookupSlot_Strict, context, name, value);
Goto(&end);
}
@@ -481,15 +487,15 @@ IGNITION_HANDLER(StaLookupSlot, InterpreterAssembler) {
BIND(&hoisting);
{
- var_result.Bind(CallRuntime(Runtime::kStoreLookupSlot_SloppyHoisting,
- context, name, value));
+ var_result = CallRuntime(Runtime::kStoreLookupSlot_SloppyHoisting,
+ context, name, value);
Goto(&end);
}
BIND(&ordinary);
{
- var_result.Bind(
- CallRuntime(Runtime::kStoreLookupSlot_Sloppy, context, name, value));
+ var_result =
+ CallRuntime(Runtime::kStoreLookupSlot_Sloppy, context, name, value);
Goto(&end);
}
}
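
StaLookupSlot shows the standard Variable-to-TVariable conversion used throughout this file: Variable var(this, MachineRepresentation::kTagged) plus var.Bind(x) becomes TVARIABLE(Object, var) plus plain assignment, with the machine representation now implied by the static type. A tiny self-contained analogue of the two styles (not V8's real classes):

    #include <iostream>
    #include <string>

    // Untyped variable: the representation is a runtime convention only.
    class Variable {
     public:
      void Bind(const std::string& v) { value_ = v; }
      const std::string& value() const { return value_; }

     private:
      std::string value_;
    };

    // Typed variable in the spirit of TVariable<T>: assignment replaces
    // Bind(), and the held type is enforced by the compiler.
    template <typename T>
    class TVariable {
     public:
      TVariable& operator=(const T& v) { value_ = v; return *this; }
      const T& value() const { return value_; }

     private:
      T value_{};
    };

    int main() {
      Variable untyped;
      untyped.Bind("result");  // old style: var_result.Bind(...)

      TVariable<std::string> typed;
      typed = "result";        // new style: var_result = ...
      std::cout << untyped.value() << " " << typed.value() << "\n";
    }
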
@@ -507,24 +513,24 @@ IGNITION_HANDLER(StaLookupSlot, InterpreterAssembler) {
// constant pool entry <name_index>.
IGNITION_HANDLER(LdaNamedProperty, InterpreterAssembler) {
TNode<HeapObject> feedback_vector = LoadFeedbackVector();
- Node* feedback_slot = BytecodeOperandIdx(2);
- TNode<Smi> smi_slot = SmiTag(feedback_slot);
+ TNode<UintPtrT> feedback_slot = BytecodeOperandIdx(2);
// Load receiver.
TNode<Object> recv = LoadRegisterAtOperandIndex(0);
// Load the name and context lazily.
- LazyNode<Name> name = [=] {
+ LazyNode<Smi> lazy_smi_slot = [=] { return SmiTag(Signed(feedback_slot)); };
+ LazyNode<Name> lazy_name = [=] {
return CAST(LoadConstantPoolEntryAtOperandIndex(1));
};
- LazyNode<Context> context = [=] { return GetContext(); };
+ LazyNode<Context> lazy_context = [=] { return GetContext(); };
Label done(this);
- Variable var_result(this, MachineRepresentation::kTagged);
+ TVARIABLE(Object, var_result);
ExitPoint exit_point(this, &done, &var_result);
- AccessorAssembler::LazyLoadICParameters params(context, recv, name, smi_slot,
- feedback_vector);
+ AccessorAssembler::LazyLoadICParameters params(
+ lazy_context, recv, lazy_name, lazy_smi_slot, feedback_vector);
AccessorAssembler accessor_asm(state());
accessor_asm.LoadIC_BytecodeHandler(&params, &exit_point);
@@ -540,7 +546,7 @@ IGNITION_HANDLER(LdaNamedProperty, InterpreterAssembler) {
// Calls the GetProperty builtin for <object> and the key in the accumulator.
IGNITION_HANDLER(LdaNamedPropertyNoFeedback, InterpreterAssembler) {
TNode<Object> object = LoadRegisterAtOperandIndex(0);
- Node* name = LoadConstantPoolEntryAtOperandIndex(1);
+ TNode<Name> name = CAST(LoadConstantPoolEntryAtOperandIndex(1));
TNode<Context> context = GetContext();
TNode<Object> result =
CallBuiltin(Builtins::kGetProperty, context, object, name);
@@ -555,14 +561,14 @@ IGNITION_HANDLER(LdaNamedPropertyNoFeedback, InterpreterAssembler) {
IGNITION_HANDLER(LdaKeyedProperty, InterpreterAssembler) {
TNode<Object> object = LoadRegisterAtOperandIndex(0);
TNode<Object> name = GetAccumulator();
- Node* raw_slot = BytecodeOperandIdx(1);
+ TNode<IntPtrT> raw_slot = Signed(BytecodeOperandIdx(1));
TNode<Smi> smi_slot = SmiTag(raw_slot);
TNode<HeapObject> feedback_vector = LoadFeedbackVector();
TNode<Context> context = GetContext();
- VARIABLE(var_result, MachineRepresentation::kTagged);
- var_result.Bind(CallBuiltin(Builtins::kKeyedLoadIC, context, object, name,
- smi_slot, feedback_vector));
+ TVARIABLE(Object, var_result);
+ var_result = CallBuiltin(Builtins::kKeyedLoadIC, context, object, name,
+ smi_slot, feedback_vector);
SetAccumulator(var_result.value());
Dispatch();
}
@@ -577,16 +583,16 @@ class InterpreterStoreNamedPropertyAssembler : public InterpreterAssembler {
void StaNamedProperty(Callable ic, NamedPropertyType property_type) {
TNode<Code> code_target = HeapConstant(ic.code());
TNode<Object> object = LoadRegisterAtOperandIndex(0);
- Node* name = LoadConstantPoolEntryAtOperandIndex(1);
+ TNode<Name> name = CAST(LoadConstantPoolEntryAtOperandIndex(1));
TNode<Object> value = GetAccumulator();
- Node* raw_slot = BytecodeOperandIdx(2);
+ TNode<IntPtrT> raw_slot = Signed(BytecodeOperandIdx(2));
TNode<Smi> smi_slot = SmiTag(raw_slot);
TNode<HeapObject> maybe_vector = LoadFeedbackVector();
TNode<Context> context = GetContext();
- VARIABLE(var_result, MachineRepresentation::kTagged);
- var_result.Bind(CallStub(ic.descriptor(), code_target, context, object,
- name, value, smi_slot, maybe_vector));
+ TVARIABLE(Object, var_result);
+ var_result = CallStub(ic.descriptor(), code_target, context, object, name,
+ value, smi_slot, maybe_vector);
// To avoid special logic in the deoptimizer to re-materialize the value in
// the accumulator, we overwrite the accumulator after the IC call. It
// doesn't really matter what we write to the accumulator here, since we
@@ -624,7 +630,7 @@ IGNITION_HANDLER(StaNamedOwnProperty, InterpreterStoreNamedPropertyAssembler) {
IGNITION_HANDLER(StaNamedPropertyNoFeedback,
InterpreterStoreNamedPropertyAssembler) {
TNode<Object> object = LoadRegisterAtOperandIndex(0);
- Node* name = LoadConstantPoolEntryAtOperandIndex(1);
+ TNode<Name> name = CAST(LoadConstantPoolEntryAtOperandIndex(1));
TNode<Object> value = GetAccumulator();
TNode<Context> context = GetContext();
@@ -642,14 +648,14 @@ IGNITION_HANDLER(StaKeyedProperty, InterpreterAssembler) {
TNode<Object> object = LoadRegisterAtOperandIndex(0);
TNode<Object> name = LoadRegisterAtOperandIndex(1);
TNode<Object> value = GetAccumulator();
- Node* raw_slot = BytecodeOperandIdx(2);
+ TNode<IntPtrT> raw_slot = Signed(BytecodeOperandIdx(2));
TNode<Smi> smi_slot = SmiTag(raw_slot);
TNode<HeapObject> maybe_vector = LoadFeedbackVector();
TNode<Context> context = GetContext();
- VARIABLE(var_result, MachineRepresentation::kTagged);
- var_result.Bind(CallBuiltin(Builtins::kKeyedStoreIC, context, object, name,
- value, smi_slot, maybe_vector));
+ TVARIABLE(Object, var_result);
+ var_result = CallBuiltin(Builtins::kKeyedStoreIC, context, object, name,
+ value, smi_slot, maybe_vector);
// To avoid special logic in the deoptimizer to re-materialize the value in
// the accumulator, we overwrite the accumulator after the IC call. It
// doesn't really matter what we write to the accumulator here, since we
@@ -667,14 +673,14 @@ IGNITION_HANDLER(StaInArrayLiteral, InterpreterAssembler) {
TNode<Object> array = LoadRegisterAtOperandIndex(0);
TNode<Object> index = LoadRegisterAtOperandIndex(1);
TNode<Object> value = GetAccumulator();
- Node* raw_slot = BytecodeOperandIdx(2);
+ TNode<IntPtrT> raw_slot = Signed(BytecodeOperandIdx(2));
TNode<Smi> smi_slot = SmiTag(raw_slot);
TNode<HeapObject> feedback_vector = LoadFeedbackVector();
TNode<Context> context = GetContext();
- VARIABLE(var_result, MachineRepresentation::kTagged);
- var_result.Bind(CallBuiltin(Builtins::kStoreInArrayLiteralIC, context, array,
- index, value, smi_slot, feedback_vector));
+ TVARIABLE(Object, var_result);
+ var_result = CallBuiltin(Builtins::kStoreInArrayLiteralIC, context, array,
+ index, value, smi_slot, feedback_vector);
// To avoid special logic in the deoptimizer to re-materialize the value in
// the accumulator, we overwrite the accumulator after the IC call. It
// doesn't really matter what we write to the accumulator here, since we
@@ -696,8 +702,9 @@ IGNITION_HANDLER(StaDataPropertyInLiteral, InterpreterAssembler) {
TNode<Object> object = LoadRegisterAtOperandIndex(0);
TNode<Object> name = LoadRegisterAtOperandIndex(1);
TNode<Object> value = GetAccumulator();
- TNode<Smi> flags = SmiFromInt32(BytecodeOperandFlag(2));
- TNode<Smi> vector_index = SmiTag(BytecodeOperandIdx(3));
+ TNode<Smi> flags =
+ SmiFromInt32(UncheckedCast<Int32T>(BytecodeOperandFlag(2)));
+ TNode<Smi> vector_index = BytecodeOperandIdxSmi(3);
TNode<HeapObject> feedback_vector = LoadFeedbackVector();
TNode<Context> context = GetContext();
@@ -708,7 +715,7 @@ IGNITION_HANDLER(StaDataPropertyInLiteral, InterpreterAssembler) {
}
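
Where static types genuinely disagree, the rewrite uses explicit casts: CAST(...) for tagged values, which is verified in debug builds, and UncheckedCast<Int32T> for reinterpreting the Uint32T flag word with no check at all. A standalone sketch of the two flavours (simplified; V8's versions operate on IR nodes, and the helper names here are hypothetical):

    #include <cassert>

    struct HeapObject { int type; };
    struct Name : HeapObject {};

    // Checked cast: asserts the dynamic claim in debug builds, in the
    // spirit of CSA's CAST macro.
    template <typename To>
    To* Cast(HeapObject* o, int expected_type) {
      assert(o->type == expected_type && "CAST would fail here");
      return static_cast<To*>(o);
    }

    // Unchecked cast: pure static reinterpretation with no runtime check,
    // like UncheckedCast<Int32T> over a Uint32T word.
    inline int ToSigned32(unsigned x) { return static_cast<int>(x); }

    int main() {
      Name name;
      name.type = 7;
      Name* n = Cast<Name>(&name, 7);  // passes the debug-mode check
      (void)n;
      unsigned flags = 0x3u;
      int signed_flags = ToSigned32(flags);  // reinterpretation only
      (void)signed_flags;
    }
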
IGNITION_HANDLER(CollectTypeProfile, InterpreterAssembler) {
- Node* position = BytecodeOperandImmSmi(0);
+ TNode<Smi> position = BytecodeOperandImmSmi(0);
TNode<Object> value = GetAccumulator();
TNode<HeapObject> feedback_vector = LoadFeedbackVector();
@@ -725,10 +732,10 @@ IGNITION_HANDLER(CollectTypeProfile, InterpreterAssembler) {
// identified by <cell_index>. <depth> is the depth of the current context
// relative to the module context.
IGNITION_HANDLER(LdaModuleVariable, InterpreterAssembler) {
- Node* cell_index = BytecodeOperandImmIntPtr(0);
+ TNode<IntPtrT> cell_index = BytecodeOperandImmIntPtr(0);
TNode<Uint32T> depth = BytecodeOperandUImm(1);
- Node* module_context = GetContextAtDepth(GetContext(), depth);
+ TNode<Context> module_context = GetContextAtDepth(GetContext(), depth);
TNode<SourceTextModule> module =
CAST(LoadContextElement(module_context, Context::EXTENSION_INDEX));
@@ -741,7 +748,7 @@ IGNITION_HANDLER(LdaModuleVariable, InterpreterAssembler) {
TNode<FixedArray> regular_exports = LoadObjectField<FixedArray>(
module, SourceTextModule::kRegularExportsOffset);
// The actual array index is (cell_index - 1).
- TNode<WordT> export_index = IntPtrSub(cell_index, IntPtrConstant(1));
+ TNode<IntPtrT> export_index = IntPtrSub(cell_index, IntPtrConstant(1));
TNode<Cell> cell =
CAST(LoadFixedArrayElement(regular_exports, export_index));
SetAccumulator(LoadObjectField(cell, Cell::kValueOffset));
@@ -753,7 +760,7 @@ IGNITION_HANDLER(LdaModuleVariable, InterpreterAssembler) {
TNode<FixedArray> regular_imports = LoadObjectField<FixedArray>(
module, SourceTextModule::kRegularImportsOffset);
// The actual array index is (-cell_index - 1).
- TNode<WordT> import_index = IntPtrSub(IntPtrConstant(-1), cell_index);
+ TNode<IntPtrT> import_index = IntPtrSub(IntPtrConstant(-1), cell_index);
TNode<Cell> cell =
CAST(LoadFixedArrayElement(regular_imports, import_index));
SetAccumulator(LoadObjectField(cell, Cell::kValueOffset));
@@ -770,10 +777,10 @@ IGNITION_HANDLER(LdaModuleVariable, InterpreterAssembler) {
// <depth> is the depth of the current context relative to the module context.
IGNITION_HANDLER(StaModuleVariable, InterpreterAssembler) {
TNode<Object> value = GetAccumulator();
- Node* cell_index = BytecodeOperandImmIntPtr(0);
+ TNode<IntPtrT> cell_index = BytecodeOperandImmIntPtr(0);
TNode<Uint32T> depth = BytecodeOperandUImm(1);
- Node* module_context = GetContextAtDepth(GetContext(), depth);
+ TNode<Context> module_context = GetContextAtDepth(GetContext(), depth);
TNode<SourceTextModule> module =
CAST(LoadContextElement(module_context, Context::EXTENSION_INDEX));
@@ -786,7 +793,7 @@ IGNITION_HANDLER(StaModuleVariable, InterpreterAssembler) {
TNode<FixedArray> regular_exports = LoadObjectField<FixedArray>(
module, SourceTextModule::kRegularExportsOffset);
// The actual array index is (cell_index - 1).
- TNode<WordT> export_index = IntPtrSub(cell_index, IntPtrConstant(1));
+ TNode<IntPtrT> export_index = IntPtrSub(cell_index, IntPtrConstant(1));
TNode<Object> cell = LoadFixedArrayElement(regular_exports, export_index);
StoreObjectField(cell, Cell::kValueOffset, value);
Goto(&end);
@@ -830,34 +837,35 @@ class InterpreterBinaryOpAssembler : public InterpreterAssembler {
OperandScale operand_scale)
: InterpreterAssembler(state, bytecode, operand_scale) {}
- using BinaryOpGenerator =
- Node* (BinaryOpAssembler::*)(Node* context, Node* left, Node* right,
- Node* slot, Node* vector, bool lhs_is_smi);
+ using BinaryOpGenerator = TNode<Object> (BinaryOpAssembler::*)(
+ TNode<Context> context, TNode<Object> left, TNode<Object> right,
+ TNode<UintPtrT> slot, TNode<HeapObject> maybe_feedback_vector,
+ bool rhs_known_smi);
void BinaryOpWithFeedback(BinaryOpGenerator generator) {
TNode<Object> lhs = LoadRegisterAtOperandIndex(0);
TNode<Object> rhs = GetAccumulator();
TNode<Context> context = GetContext();
- Node* slot_index = BytecodeOperandIdx(1);
+ TNode<UintPtrT> slot_index = BytecodeOperandIdx(1);
TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();
BinaryOpAssembler binop_asm(state());
- Node* result = (binop_asm.*generator)(context, lhs, rhs, slot_index,
- maybe_feedback_vector, false);
+ TNode<Object> result = (binop_asm.*generator)(context, lhs, rhs, slot_index,
+ maybe_feedback_vector, false);
SetAccumulator(result);
Dispatch();
}
void BinaryOpSmiWithFeedback(BinaryOpGenerator generator) {
TNode<Object> lhs = GetAccumulator();
- Node* rhs = BytecodeOperandImmSmi(0);
+ TNode<Smi> rhs = BytecodeOperandImmSmi(0);
TNode<Context> context = GetContext();
- Node* slot_index = BytecodeOperandIdx(1);
+ TNode<UintPtrT> slot_index = BytecodeOperandIdx(1);
TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();
BinaryOpAssembler binop_asm(state());
- Node* result = (binop_asm.*generator)(context, lhs, rhs, slot_index,
- maybe_feedback_vector, true);
+ TNode<Object> result = (binop_asm.*generator)(context, lhs, rhs, slot_index,
+ maybe_feedback_vector, true);
SetAccumulator(result);
Dispatch();
}
@@ -959,15 +967,15 @@ class InterpreterBitwiseBinaryOpAssembler : public InterpreterAssembler {
TNode<Object> left = LoadRegisterAtOperandIndex(0);
TNode<Object> right = GetAccumulator();
TNode<Context> context = GetContext();
- Node* slot_index = BytecodeOperandIdx(1);
+ TNode<UintPtrT> slot_index = BytecodeOperandIdx(1);
TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();
TVARIABLE(Smi, var_left_feedback);
TVARIABLE(Smi, var_right_feedback);
- VARIABLE(var_left_word32, MachineRepresentation::kWord32);
- VARIABLE(var_right_word32, MachineRepresentation::kWord32);
- VARIABLE(var_left_bigint, MachineRepresentation::kTagged, left);
- VARIABLE(var_right_bigint, MachineRepresentation::kTagged);
+ TVARIABLE(Word32T, var_left_word32);
+ TVARIABLE(Word32T, var_right_word32);
+ TVARIABLE(Object, var_left_bigint, left);
+ TVARIABLE(Object, var_right_bigint);
Label if_left_number(this), do_number_op(this);
Label if_left_bigint(this), do_bigint_op(this);
@@ -1007,14 +1015,16 @@ class InterpreterBitwiseBinaryOpAssembler : public InterpreterAssembler {
void BitwiseBinaryOpWithSmi(Operation bitwise_op) {
TNode<Object> left = GetAccumulator();
- Node* right = BytecodeOperandImmSmi(0);
- Node* slot_index = BytecodeOperandIdx(1);
+ TNode<Smi> right = BytecodeOperandImmSmi(0);
+ TNode<UintPtrT> slot_index = BytecodeOperandIdx(1);
TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();
TNode<Context> context = GetContext();
TVARIABLE(Smi, var_left_feedback);
- VARIABLE(var_left_word32, MachineRepresentation::kWord32);
- VARIABLE(var_left_bigint, MachineRepresentation::kTagged);
+ TVARIABLE(Word32T, var_left_word32);
+ // TODO(v8:6949): var_left_bigint should be BigInt, but before that we need
+ // to clean up TaggedToWord32OrBigIntWithFeedback and related methods.
+ TVARIABLE(Object, var_left_bigint);
Label do_smi_op(this), if_bigint_mix(this);
TaggedToWord32OrBigIntWithFeedback(context, left, &do_smi_op,
@@ -1115,13 +1125,15 @@ IGNITION_HANDLER(BitwiseAndSmi, InterpreterBitwiseBinaryOpAssembler) {
// Perform bitwise-not on the accumulator.
IGNITION_HANDLER(BitwiseNot, InterpreterAssembler) {
TNode<Object> operand = GetAccumulator();
- Node* slot_index = BytecodeOperandIdx(0);
+ TNode<UintPtrT> slot_index = BytecodeOperandIdx(0);
TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();
TNode<Context> context = GetContext();
- VARIABLE(var_word32, MachineRepresentation::kWord32);
+ TVARIABLE(Word32T, var_word32);
TVARIABLE(Smi, var_feedback);
- VARIABLE(var_bigint, MachineRepresentation::kTagged);
+ // TODO(v8:6949): var_bigint should be BigInt, but before that we need to
+ // clean up TaggedToWord32OrBigIntWithFeedback and related methods.
+ TVARIABLE(Object, var_bigint);
Label if_number(this), if_bigint(this);
TaggedToWord32OrBigIntWithFeedback(context, operand, &if_number, &var_word32,
&if_bigint, &var_bigint, &var_feedback);
@@ -1184,20 +1196,20 @@ class UnaryNumericOpAssembler : public InterpreterAssembler {
virtual ~UnaryNumericOpAssembler() = default;
// Must return a tagged value.
- virtual TNode<Number> SmiOp(TNode<Smi> smi_value, Variable* var_feedback,
- Label* do_float_op, Variable* var_float) = 0;
+ virtual TNode<Number> SmiOp(TNode<Smi> smi_value,
+ TVariable<Smi>* var_feedback, Label* do_float_op,
+ TVariable<Float64T>* var_float) = 0;
// Must return a Float64 value.
- virtual Node* FloatOp(Node* float_value) = 0;
+ virtual TNode<Float64T> FloatOp(TNode<Float64T> float_value) = 0;
// Must return a tagged value.
- virtual Node* BigIntOp(Node* bigint_value) = 0;
+ virtual TNode<HeapObject> BigIntOp(TNode<HeapObject> bigint_value) = 0;
void UnaryOpWithFeedback() {
- VARIABLE(var_value, MachineRepresentation::kTagged, GetAccumulator());
- VARIABLE(var_result, MachineRepresentation::kTagged);
- VARIABLE(var_float_value, MachineRepresentation::kFloat64);
+ TVARIABLE(Object, var_value, GetAccumulator());
+ TVARIABLE(Object, var_result);
+ TVARIABLE(Float64T, var_float_value);
TVARIABLE(Smi, var_feedback, SmiConstant(BinaryOperationFeedback::kNone));
- Variable* loop_vars[] = {&var_value, &var_feedback};
- Label start(this, arraysize(loop_vars), loop_vars), end(this);
+ Label start(this, {&var_value, &var_feedback}), end(this);
Label do_float_op(this, &var_float_value);
Goto(&start);
// We might have to try again after ToNumeric conversion.
@@ -1206,9 +1218,11 @@ class UnaryNumericOpAssembler : public InterpreterAssembler {
Label if_smi(this), if_heapnumber(this), if_oddball(this);
Label if_bigint(this, Label::kDeferred);
Label if_other(this, Label::kDeferred);
- Node* value = var_value.value();
+ TNode<Object> value = var_value.value();
GotoIf(TaggedIsSmi(value), &if_smi);
- TNode<Map> map = LoadMap(value);
+
+ TNode<HeapObject> value_heap_object = CAST(value);
+ TNode<Map> map = LoadMap(value_heap_object);
GotoIf(IsHeapNumberMap(map), &if_heapnumber);
TNode<Uint16T> instance_type = LoadMapInstanceType(map);
GotoIf(IsBigIntInstanceType(instance_type), &if_bigint);
@@ -1217,20 +1231,20 @@ class UnaryNumericOpAssembler : public InterpreterAssembler {
BIND(&if_smi);
{
- var_result.Bind(
- SmiOp(CAST(value), &var_feedback, &do_float_op, &var_float_value));
+ var_result =
+ SmiOp(CAST(value), &var_feedback, &do_float_op, &var_float_value);
Goto(&end);
}
BIND(&if_heapnumber);
{
- var_float_value.Bind(LoadHeapNumberValue(value));
+ var_float_value = LoadHeapNumberValue(value_heap_object);
Goto(&do_float_op);
}
BIND(&if_bigint);
{
- var_result.Bind(BigIntOp(value));
+ var_result = BigIntOp(value_heap_object);
CombineFeedback(&var_feedback, BinaryOperationFeedback::kBigInt);
Goto(&end);
}
@@ -1244,7 +1258,8 @@ class UnaryNumericOpAssembler : public InterpreterAssembler {
SmiConstant(BinaryOperationFeedback::kNone)));
OverwriteFeedback(&var_feedback,
BinaryOperationFeedback::kNumberOrOddball);
- var_value.Bind(LoadObjectField(value, Oddball::kToNumberOffset));
+ var_value =
+ LoadObjectField(value_heap_object, Oddball::kToNumberOffset);
Goto(&start);
}
@@ -1256,8 +1271,8 @@ class UnaryNumericOpAssembler : public InterpreterAssembler {
CSA_ASSERT(this, SmiEqual(var_feedback.value(),
SmiConstant(BinaryOperationFeedback::kNone)));
OverwriteFeedback(&var_feedback, BinaryOperationFeedback::kAny);
- var_value.Bind(
- CallBuiltin(Builtins::kNonNumberToNumeric, GetContext(), value));
+ var_value = CallBuiltin(Builtins::kNonNumberToNumeric, GetContext(),
+ value_heap_object);
Goto(&start);
}
}
@@ -1265,13 +1280,13 @@ class UnaryNumericOpAssembler : public InterpreterAssembler {
BIND(&do_float_op);
{
CombineFeedback(&var_feedback, BinaryOperationFeedback::kNumber);
- var_result.Bind(
- AllocateHeapNumberWithValue(FloatOp(var_float_value.value())));
+ var_result =
+ AllocateHeapNumberWithValue(FloatOp(var_float_value.value()));
Goto(&end);
}
BIND(&end);
- Node* slot_index = BytecodeOperandIdx(0);
+ TNode<UintPtrT> slot_index = BytecodeOperandIdx(0);
TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();
UpdateFeedback(var_feedback.value(), maybe_feedback_vector, slot_index);
SetAccumulator(var_result.value());
@@ -1285,8 +1300,9 @@ class NegateAssemblerImpl : public UnaryNumericOpAssembler {
OperandScale operand_scale)
: UnaryNumericOpAssembler(state, bytecode, operand_scale) {}
- TNode<Number> SmiOp(TNode<Smi> smi_value, Variable* var_feedback,
- Label* do_float_op, Variable* var_float) override {
+ TNode<Number> SmiOp(TNode<Smi> smi_value, TVariable<Smi>* var_feedback,
+ Label* do_float_op,
+ TVariable<Float64T>* var_float) override {
TVARIABLE(Number, var_result);
Label if_zero(this), if_min_smi(this), end(this);
// Return -0 if operand is 0.
@@ -1306,18 +1322,20 @@ class NegateAssemblerImpl : public UnaryNumericOpAssembler {
Goto(&end);
BIND(&if_min_smi);
- var_float->Bind(SmiToFloat64(smi_value));
+ *var_float = SmiToFloat64(smi_value);
Goto(do_float_op);
BIND(&end);
return var_result.value();
}
- Node* FloatOp(Node* float_value) override { return Float64Neg(float_value); }
+ TNode<Float64T> FloatOp(TNode<Float64T> float_value) override {
+ return Float64Neg(float_value);
+ }
- Node* BigIntOp(Node* bigint_value) override {
- return CallRuntime(Runtime::kBigIntUnaryOp, GetContext(), bigint_value,
- SmiConstant(Operation::kNegate));
+ TNode<HeapObject> BigIntOp(TNode<HeapObject> bigint_value) override {
+ return CAST(CallRuntime(Runtime::kBigIntUnaryOp, GetContext(), bigint_value,
+ SmiConstant(Operation::kNegate)));
}
};
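
UnaryNumericOpAssembler's hooks move from Node* in and out to fully typed signatures, so every subclass (negate here, increment/decrement below) must agree at compile time that FloatOp maps Float64 to Float64 and BigIntOp stays in the heap-object domain. A V8-independent sketch of the pattern:

    #include <cstdio>

    struct Float64T { double v; };
    struct HeapObjectT { const char* tag; };

    // Base class fixing the typed contract, as the rewritten
    // UnaryNumericOpAssembler now does for its subclasses.
    class UnaryOp {
     public:
      virtual ~UnaryOp() = default;
      virtual Float64T FloatOp(Float64T x) = 0;
      virtual HeapObjectT BigIntOp(HeapObjectT x) = 0;
    };

    class Negate : public UnaryOp {
     public:
      // Returning the wrong type here is now a compile error, not a latent
      // mismatch discovered when the generated code misbehaves.
      Float64T FloatOp(Float64T x) override { return {-x.v}; }
      HeapObjectT BigIntOp(HeapObjectT) override { return {"negated-bigint"}; }
    };

    int main() {
      Negate neg;
      std::printf("%g %s\n", neg.FloatOp({1.5}).v, neg.BigIntOp({"bigint"}).tag);
    }
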
@@ -1381,8 +1399,9 @@ class IncDecAssembler : public UnaryNumericOpAssembler {
return op_;
}
- TNode<Number> SmiOp(TNode<Smi> value, Variable* var_feedback,
- Label* do_float_op, Variable* var_float) override {
+ TNode<Number> SmiOp(TNode<Smi> value, TVariable<Smi>* var_feedback,
+ Label* do_float_op,
+ TVariable<Float64T>* var_float) override {
TNode<Smi> one = SmiConstant(1);
Label if_overflow(this), if_notoverflow(this);
TNode<Smi> result = op() == Operation::kIncrement
@@ -1392,7 +1411,7 @@ class IncDecAssembler : public UnaryNumericOpAssembler {
BIND(&if_overflow);
{
- var_float->Bind(SmiToFloat64(value));
+ *var_float = SmiToFloat64(value);
Goto(do_float_op);
}
@@ -1401,15 +1420,15 @@ class IncDecAssembler : public UnaryNumericOpAssembler {
return result;
}
- Node* FloatOp(Node* float_value) override {
+ TNode<Float64T> FloatOp(TNode<Float64T> float_value) override {
return op() == Operation::kIncrement
? Float64Add(float_value, Float64Constant(1.0))
: Float64Sub(float_value, Float64Constant(1.0));
}
- Node* BigIntOp(Node* bigint_value) override {
- return CallRuntime(Runtime::kBigIntUnaryOp, GetContext(), bigint_value,
- SmiConstant(op()));
+ TNode<HeapObject> BigIntOp(TNode<HeapObject> bigint_value) override {
+ return CAST(CallRuntime(Runtime::kBigIntUnaryOp, GetContext(), bigint_value,
+ SmiConstant(op())));
}
void IncWithFeedback() {
@@ -1442,17 +1461,17 @@ IGNITION_HANDLER(Dec, IncDecAssembler) { DecWithFeedback(); }
// accumulator to a boolean value if required.
IGNITION_HANDLER(ToBooleanLogicalNot, InterpreterAssembler) {
TNode<Object> value = GetAccumulator();
- Variable result(this, MachineRepresentation::kTagged);
+ TVARIABLE(Oddball, result);
Label if_true(this), if_false(this), end(this);
BranchIfToBooleanIsTrue(value, &if_true, &if_false);
BIND(&if_true);
{
- result.Bind(FalseConstant());
+ result = FalseConstant();
Goto(&end);
}
BIND(&if_false);
{
- result.Bind(TrueConstant());
+ result = TrueConstant();
Goto(&end);
}
BIND(&end);
@@ -1466,20 +1485,20 @@ IGNITION_HANDLER(ToBooleanLogicalNot, InterpreterAssembler) {
// value.
IGNITION_HANDLER(LogicalNot, InterpreterAssembler) {
TNode<Object> value = GetAccumulator();
- Variable result(this, MachineRepresentation::kTagged);
+ TVARIABLE(Oddball, result);
Label if_true(this), if_false(this), end(this);
TNode<Oddball> true_value = TrueConstant();
TNode<Oddball> false_value = FalseConstant();
Branch(TaggedEqual(value, true_value), &if_true, &if_false);
BIND(&if_true);
{
- result.Bind(false_value);
+ result = false_value;
Goto(&end);
}
BIND(&if_false);
{
CSA_ASSERT(this, TaggedEqual(value, false_value));
- result.Bind(true_value);
+ result = true_value;
Goto(&end);
}
BIND(&end);
@@ -1493,7 +1512,7 @@ IGNITION_HANDLER(LogicalNot, InterpreterAssembler) {
// object in the accumulator.
IGNITION_HANDLER(TypeOf, InterpreterAssembler) {
TNode<Object> value = GetAccumulator();
- Node* result = Typeof(value);
+ TNode<String> result = Typeof(value);
SetAccumulator(result);
Dispatch();
}
@@ -1550,7 +1569,7 @@ class InterpreterJSCallAssembler : public InterpreterAssembler {
void JSCall(ConvertReceiverMode receiver_mode) {
TNode<Object> function = LoadRegisterAtOperandIndex(0);
RegListNodePair args = GetRegisterListAtOperandIndex(1);
- Node* slot_id = BytecodeOperandIdx(3);
+ TNode<UintPtrT> slot_id = BytecodeOperandIdx(3);
TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();
TNode<Context> context = GetContext();
@@ -1583,7 +1602,7 @@ class InterpreterJSCallAssembler : public InterpreterAssembler {
kFirstArgumentOperandIndex + kRecieverAndArgOperandCount;
TNode<Object> function = LoadRegisterAtOperandIndex(0);
- Node* slot_id = BytecodeOperandIdx(kSlotOperandIndex);
+ TNode<UintPtrT> slot_id = BytecodeOperandIdx(kSlotOperandIndex);
TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();
TNode<Context> context = GetContext();
@@ -1598,26 +1617,20 @@ class InterpreterJSCallAssembler : public InterpreterAssembler {
case 1:
CallJSAndDispatch(
function, context, Int32Constant(arg_count), receiver_mode,
- static_cast<Node*>(
- LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex)));
+ LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex));
break;
case 2:
CallJSAndDispatch(
function, context, Int32Constant(arg_count), receiver_mode,
- static_cast<Node*>(
- LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex)),
- static_cast<Node*>(
- LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex + 1)));
+ LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex),
+ LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex + 1));
break;
case 3:
CallJSAndDispatch(
function, context, Int32Constant(arg_count), receiver_mode,
- static_cast<Node*>(
- LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex)),
- static_cast<Node*>(
- LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex + 1)),
- static_cast<Node*>(
- LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex + 2)));
+ LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex),
+ LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex + 1),
+ LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex + 2));
break;
default:
UNREACHABLE();
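
The static_cast<Node*> noise disappears at these call sites because CallJSAndDispatch takes its trailing arguments as a variadic pack of typed nodes, so TNode<Object> register values can be passed directly; the casts previously forced the arguments down to an untyped overload. A simplified standalone sketch of the mechanism (inferred from the call sites; names hypothetical):

    #include <iostream>

    struct Node {};
    template <typename T>
    struct TNode { Node* node; };

    struct Object {};

    // Fixed untyped signature: callers had to cast every argument.
    void CallJSFixed(Node* /*function*/, Node* /*arg0*/) {}

    // Variadic typed form, in the spirit of the updated CallJSAndDispatch:
    // any number of TNode arguments, no casts at the call sites.
    template <typename... TArgs>
    void CallJSAndDispatch(TNode<Object> /*function*/, TArgs... args) {
      std::cout << "call with " << sizeof...(args) << " argument(s)\n";
    }

    int main() {
      Node n;
      TNode<Object> fn{&n}, a0{&n}, a1{&n};
      CallJSFixed(fn.node, a0.node);   // old style: unwrap/cast by hand
      CallJSAndDispatch(fn, a0);       // 1 argument
      CallJSAndDispatch(fn, a0, a1);   // 2 arguments
    }
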
@@ -1676,7 +1689,7 @@ IGNITION_HANDLER(CallNoFeedback, InterpreterJSCallAssembler) {
// register |first_arg| and |arg_count| arguments in subsequent
// registers.
IGNITION_HANDLER(CallRuntime, InterpreterAssembler) {
- Node* function_id = BytecodeOperandRuntimeId(0);
+ TNode<Uint32T> function_id = BytecodeOperandRuntimeId(0);
RegListNodePair args = GetRegisterListAtOperandIndex(1);
TNode<Context> context = GetContext();
Node* result = CallRuntimeN(function_id, context, args);
@@ -1690,10 +1703,11 @@ IGNITION_HANDLER(CallRuntime, InterpreterAssembler) {
// |function_id| with the first argument in |first_arg| and |arg_count|
// arguments in subsequent registers.
IGNITION_HANDLER(InvokeIntrinsic, InterpreterAssembler) {
- Node* function_id = BytecodeOperandIntrinsicId(0);
+ TNode<Uint32T> function_id = BytecodeOperandIntrinsicId(0);
RegListNodePair args = GetRegisterListAtOperandIndex(1);
TNode<Context> context = GetContext();
- Node* result = GenerateInvokeIntrinsic(this, function_id, context, args);
+ TNode<Object> result =
+ GenerateInvokeIntrinsic(this, function_id, context, args);
SetAccumulator(result);
Dispatch();
}
@@ -1706,13 +1720,13 @@ IGNITION_HANDLER(InvokeIntrinsic, InterpreterAssembler) {
// <first_return + 1>
IGNITION_HANDLER(CallRuntimeForPair, InterpreterAssembler) {
// Call the runtime function.
- Node* function_id = BytecodeOperandRuntimeId(0);
+ TNode<Uint32T> function_id = BytecodeOperandRuntimeId(0);
RegListNodePair args = GetRegisterListAtOperandIndex(1);
TNode<Context> context = GetContext();
Node* result_pair = CallRuntimeN(function_id, context, args, 2);
// Store the results in <first_return> and <first_return + 1>
- Node* result0 = Projection(0, result_pair);
- Node* result1 = Projection(1, result_pair);
+ TNode<Object> result0 = CAST(Projection(0, result_pair));
+ TNode<Object> result1 = CAST(Projection(1, result_pair));
StoreRegisterPairAtOperandIndex(result0, result1, 3);
Dispatch();
}
@@ -1722,12 +1736,12 @@ IGNITION_HANDLER(CallRuntimeForPair, InterpreterAssembler) {
// Call the JS runtime function that has the |context_index| with the receiver
// in register |receiver| and |arg_count| arguments in subsequent registers.
IGNITION_HANDLER(CallJSRuntime, InterpreterAssembler) {
- Node* context_index = BytecodeOperandNativeContextIndex(0);
+ TNode<IntPtrT> context_index = Signed(BytecodeOperandNativeContextIndex(0));
RegListNodePair args = GetRegisterListAtOperandIndex(1);
// Get the function to call from the native context.
TNode<Context> context = GetContext();
- TNode<Context> native_context = LoadNativeContext(context);
+ TNode<NativeContext> native_context = LoadNativeContext(context);
TNode<Object> function = LoadContextElement(native_context, context_index);
// Call the function.
@@ -1744,7 +1758,7 @@ IGNITION_HANDLER(CallJSRuntime, InterpreterAssembler) {
IGNITION_HANDLER(CallWithSpread, InterpreterAssembler) {
TNode<Object> callable = LoadRegisterAtOperandIndex(0);
RegListNodePair args = GetRegisterListAtOperandIndex(1);
- Node* slot_id = BytecodeOperandIdx(3);
+ TNode<UintPtrT> slot_id = BytecodeOperandIdx(3);
TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();
TNode<Context> context = GetContext();
@@ -1763,11 +1777,11 @@ IGNITION_HANDLER(ConstructWithSpread, InterpreterAssembler) {
TNode<Object> new_target = GetAccumulator();
TNode<Object> constructor = LoadRegisterAtOperandIndex(0);
RegListNodePair args = GetRegisterListAtOperandIndex(1);
- Node* slot_id = BytecodeOperandIdx(3);
- TNode<HeapObject> feedback_vector = LoadFeedbackVector();
+ TNode<UintPtrT> slot_id = BytecodeOperandIdx(3);
+ TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();
TNode<Context> context = GetContext();
- Node* result = ConstructWithSpread(constructor, context, new_target, args,
- slot_id, feedback_vector);
+ TNode<Object> result = ConstructWithSpread(
+ constructor, context, new_target, args, slot_id, maybe_feedback_vector);
SetAccumulator(result);
Dispatch();
}
@@ -1782,11 +1796,11 @@ IGNITION_HANDLER(Construct, InterpreterAssembler) {
TNode<Object> new_target = GetAccumulator();
TNode<Object> constructor = LoadRegisterAtOperandIndex(0);
RegListNodePair args = GetRegisterListAtOperandIndex(1);
- Node* slot_id = BytecodeOperandIdx(3);
- TNode<HeapObject> feedback_vector = LoadFeedbackVector();
+ TNode<UintPtrT> slot_id = BytecodeOperandIdx(3);
+ TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();
TNode<Context> context = GetContext();
- Node* result = Construct(constructor, context, new_target, args, slot_id,
- feedback_vector);
+ TNode<Object> result = Construct(constructor, context, new_target, args,
+ slot_id, maybe_feedback_vector);
SetAccumulator(result);
Dispatch();
}
@@ -1802,8 +1816,8 @@ class InterpreterCompareOpAssembler : public InterpreterAssembler {
TNode<Object> rhs = GetAccumulator();
TNode<Context> context = GetContext();
- Variable var_type_feedback(this, MachineRepresentation::kTagged);
- Node* result;
+ TVARIABLE(Smi, var_type_feedback);
+ TNode<Oddball> result;
switch (compare_op) {
case Operation::kEqual:
result = Equal(lhs, rhs, context, &var_type_feedback);
@@ -1822,7 +1836,7 @@ class InterpreterCompareOpAssembler : public InterpreterAssembler {
UNREACHABLE();
}
- Node* slot_index = BytecodeOperandIdx(1);
+ TNode<UintPtrT> slot_index = BytecodeOperandIdx(1);
TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();
UpdateFeedback(var_type_feedback.value(), maybe_feedback_vector,
slot_index);
@@ -1894,14 +1908,14 @@ IGNITION_HANDLER(TestReferenceEqual, InterpreterAssembler) {
IGNITION_HANDLER(TestIn, InterpreterAssembler) {
TNode<Object> name = LoadRegisterAtOperandIndex(0);
TNode<Object> object = GetAccumulator();
- Node* raw_slot = BytecodeOperandIdx(1);
+ TNode<IntPtrT> raw_slot = Signed(BytecodeOperandIdx(1));
TNode<Smi> smi_slot = SmiTag(raw_slot);
TNode<HeapObject> feedback_vector = LoadFeedbackVector();
TNode<Context> context = GetContext();
- VARIABLE(var_result, MachineRepresentation::kTagged);
- var_result.Bind(CallBuiltin(Builtins::kKeyedHasIC, context, object, name,
- smi_slot, feedback_vector));
+ TVARIABLE(Object, var_result);
+ var_result = CallBuiltin(Builtins::kKeyedHasIC, context, object, name,
+ smi_slot, feedback_vector);
SetAccumulator(var_result.value());
Dispatch();
}
@@ -1913,15 +1927,16 @@ IGNITION_HANDLER(TestIn, InterpreterAssembler) {
IGNITION_HANDLER(TestInstanceOf, InterpreterAssembler) {
TNode<Object> object = LoadRegisterAtOperandIndex(0);
TNode<Object> callable = GetAccumulator();
- Node* slot_id = BytecodeOperandIdx(1);
- TNode<HeapObject> feedback_vector = LoadFeedbackVector();
+ TNode<UintPtrT> slot_id = BytecodeOperandIdx(1);
+ TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();
TNode<Context> context = GetContext();
Label feedback_done(this);
- GotoIf(IsUndefined(feedback_vector), &feedback_done);
+ GotoIf(IsUndefined(maybe_feedback_vector), &feedback_done);
// Record feedback for the {callable} in the {feedback_vector}.
- CollectCallableFeedback(callable, context, feedback_vector, slot_id);
+ CollectCallableFeedback(callable, context, CAST(maybe_feedback_vector),
+ slot_id);
Goto(&feedback_done);
BIND(&feedback_done);
@@ -1980,7 +1995,7 @@ IGNITION_HANDLER(TestUndefined, InterpreterAssembler) {
// by |literal_flag|.
IGNITION_HANDLER(TestTypeOf, InterpreterAssembler) {
TNode<Object> object = GetAccumulator();
- Node* literal_flag = BytecodeOperandFlag(0);
+ TNode<Uint32T> literal_flag = BytecodeOperandFlag(0);
#define MAKE_LABEL(name, lower_case) Label if_##lower_case(this);
TYPEOF_LITERAL_LIST(MAKE_LABEL)
@@ -2097,7 +2112,7 @@ IGNITION_HANDLER(TestTypeOf, InterpreterAssembler) {
//
// Jump by the number of bytes represented by the immediate operand |imm|.
IGNITION_HANDLER(Jump, InterpreterAssembler) {
- Node* relative_jump = BytecodeOperandUImmWord(0);
+ TNode<IntPtrT> relative_jump = Signed(BytecodeOperandUImmWord(0));
Jump(relative_jump);
}
@@ -2117,7 +2132,7 @@ IGNITION_HANDLER(JumpConstant, InterpreterAssembler) {
// will misbehave if passed arbitrary input values.
IGNITION_HANDLER(JumpIfTrue, InterpreterAssembler) {
TNode<Object> accumulator = GetAccumulator();
- Node* relative_jump = BytecodeOperandUImmWord(0);
+ TNode<IntPtrT> relative_jump = Signed(BytecodeOperandUImmWord(0));
CSA_ASSERT(this, IsBoolean(CAST(accumulator)));
JumpIfTaggedEqual(accumulator, TrueConstant(), relative_jump);
}
@@ -2141,7 +2156,7 @@ IGNITION_HANDLER(JumpIfTrueConstant, InterpreterAssembler) {
// will misbehave if passed arbitrary input values.
IGNITION_HANDLER(JumpIfFalse, InterpreterAssembler) {
TNode<Object> accumulator = GetAccumulator();
- Node* relative_jump = BytecodeOperandUImmWord(0);
+ TNode<IntPtrT> relative_jump = Signed(BytecodeOperandUImmWord(0));
CSA_ASSERT(this, IsBoolean(CAST(accumulator)));
JumpIfTaggedEqual(accumulator, FalseConstant(), relative_jump);
}
@@ -2164,7 +2179,7 @@ IGNITION_HANDLER(JumpIfFalseConstant, InterpreterAssembler) {
// referenced by the accumulator is true when the object is cast to boolean.
IGNITION_HANDLER(JumpIfToBooleanTrue, InterpreterAssembler) {
TNode<Object> value = GetAccumulator();
- Node* relative_jump = BytecodeOperandUImmWord(0);
+ TNode<IntPtrT> relative_jump = Signed(BytecodeOperandUImmWord(0));
Label if_true(this), if_false(this);
BranchIfToBooleanIsTrue(value, &if_true, &if_false);
BIND(&if_true);
@@ -2195,7 +2210,7 @@ IGNITION_HANDLER(JumpIfToBooleanTrueConstant, InterpreterAssembler) {
// referenced by the accumulator is false when the object is cast to boolean.
IGNITION_HANDLER(JumpIfToBooleanFalse, InterpreterAssembler) {
TNode<Object> value = GetAccumulator();
- Node* relative_jump = BytecodeOperandUImmWord(0);
+ TNode<IntPtrT> relative_jump = Signed(BytecodeOperandUImmWord(0));
Label if_true(this), if_false(this);
BranchIfToBooleanIsTrue(value, &if_true, &if_false);
BIND(&if_true);
@@ -2226,7 +2241,7 @@ IGNITION_HANDLER(JumpIfToBooleanFalseConstant, InterpreterAssembler) {
// referenced by the accumulator is the null constant.
IGNITION_HANDLER(JumpIfNull, InterpreterAssembler) {
TNode<Object> accumulator = GetAccumulator();
- Node* relative_jump = BytecodeOperandUImmWord(0);
+ TNode<IntPtrT> relative_jump = Signed(BytecodeOperandUImmWord(0));
JumpIfTaggedEqual(accumulator, NullConstant(), relative_jump);
}
@@ -2246,7 +2261,7 @@ IGNITION_HANDLER(JumpIfNullConstant, InterpreterAssembler) {
// referenced by the accumulator is not the null constant.
IGNITION_HANDLER(JumpIfNotNull, InterpreterAssembler) {
TNode<Object> accumulator = GetAccumulator();
- Node* relative_jump = BytecodeOperandUImmWord(0);
+ TNode<IntPtrT> relative_jump = Signed(BytecodeOperandUImmWord(0));
JumpIfTaggedNotEqual(accumulator, NullConstant(), relative_jump);
}
@@ -2266,7 +2281,7 @@ IGNITION_HANDLER(JumpIfNotNullConstant, InterpreterAssembler) {
// referenced by the accumulator is the undefined constant.
IGNITION_HANDLER(JumpIfUndefined, InterpreterAssembler) {
TNode<Object> accumulator = GetAccumulator();
- Node* relative_jump = BytecodeOperandUImmWord(0);
+ TNode<IntPtrT> relative_jump = Signed(BytecodeOperandUImmWord(0));
JumpIfTaggedEqual(accumulator, UndefinedConstant(), relative_jump);
}
@@ -2286,7 +2301,7 @@ IGNITION_HANDLER(JumpIfUndefinedConstant, InterpreterAssembler) {
// referenced by the accumulator is not the undefined constant.
IGNITION_HANDLER(JumpIfNotUndefined, InterpreterAssembler) {
TNode<Object> accumulator = GetAccumulator();
- Node* relative_jump = BytecodeOperandUImmWord(0);
+ TNode<IntPtrT> relative_jump = Signed(BytecodeOperandUImmWord(0));
JumpIfTaggedNotEqual(accumulator, UndefinedConstant(), relative_jump);
}
@@ -2314,7 +2329,7 @@ IGNITION_HANDLER(JumpIfUndefinedOrNull, InterpreterAssembler) {
Dispatch();
BIND(&do_jump);
- Node* relative_jump = BytecodeOperandUImmWord(0);
+ TNode<IntPtrT> relative_jump = Signed(BytecodeOperandUImmWord(0));
Jump(relative_jump);
}
@@ -2342,7 +2357,7 @@ IGNITION_HANDLER(JumpIfUndefinedOrNullConstant, InterpreterAssembler) {
// referenced by the accumulator is a JSReceiver.
IGNITION_HANDLER(JumpIfJSReceiver, InterpreterAssembler) {
TNode<Object> accumulator = GetAccumulator();
- Node* relative_jump = BytecodeOperandUImmWord(0);
+ TNode<IntPtrT> relative_jump = Signed(BytecodeOperandUImmWord(0));
Label if_object(this), if_notobject(this, Label::kDeferred), if_notsmi(this);
Branch(TaggedIsSmi(accumulator), &if_notobject, &if_notsmi);
@@ -2383,9 +2398,9 @@ IGNITION_HANDLER(JumpIfJSReceiverConstant, InterpreterAssembler) {
// performs a loop nesting check and potentially triggers OSR in case the
// current OSR level matches (or exceeds) the specified |loop_depth|.
IGNITION_HANDLER(JumpLoop, InterpreterAssembler) {
- Node* relative_jump = BytecodeOperandUImmWord(0);
- Node* loop_depth = BytecodeOperandImm(1);
- Node* osr_level = LoadOsrNestingLevel();
+ TNode<IntPtrT> relative_jump = Signed(BytecodeOperandUImmWord(0));
+ TNode<Int32T> loop_depth = BytecodeOperandImm(1);
+ TNode<Int8T> osr_level = LoadOsrNestingLevel();
// Check if OSR points at the given {loop_depth} are armed by comparing it to
// the current {osr_level} loaded from the header of the BytecodeArray.
@@ -2415,9 +2430,9 @@ IGNITION_HANDLER(JumpLoop, InterpreterAssembler) {
// next bytecode.
IGNITION_HANDLER(SwitchOnSmiNoFeedback, InterpreterAssembler) {
TNode<Object> acc = GetAccumulator();
- Node* table_start = BytecodeOperandIdx(0);
- Node* table_length = BytecodeOperandUImmWord(1);
- Node* case_value_base = BytecodeOperandImmIntPtr(2);
+ TNode<UintPtrT> table_start = BytecodeOperandIdx(0);
+ TNode<UintPtrT> table_length = BytecodeOperandUImmWord(1);
+ TNode<IntPtrT> case_value_base = BytecodeOperandImmIntPtr(2);
Label fall_through(this);
@@ -2426,7 +2441,7 @@ IGNITION_HANDLER(SwitchOnSmiNoFeedback, InterpreterAssembler) {
// accumulator values.
CSA_ASSERT(this, TaggedIsSmi(acc));
- TNode<WordT> case_value = IntPtrSub(SmiUntag(CAST(acc)), case_value_base);
+ TNode<IntPtrT> case_value = IntPtrSub(SmiUntag(CAST(acc)), case_value_base);
GotoIf(IntPtrLessThan(case_value, IntPtrConstant(0)), &fall_through);
GotoIf(IntPtrGreaterThanOrEqual(case_value, table_length), &fall_through);
TNode<WordT> entry = IntPtrAdd(table_start, case_value);
@@ -2442,17 +2457,18 @@ IGNITION_HANDLER(SwitchOnSmiNoFeedback, InterpreterAssembler) {
// Creates a regular expression literal for literal index <literal_idx> with
// <flags> and the pattern in <pattern_idx>.
IGNITION_HANDLER(CreateRegExpLiteral, InterpreterAssembler) {
- Node* pattern = LoadConstantPoolEntryAtOperandIndex(0);
+ TNode<Object> pattern = LoadConstantPoolEntryAtOperandIndex(0);
TNode<HeapObject> feedback_vector = LoadFeedbackVector();
- Node* slot_id = BytecodeOperandIdx(1);
- TNode<Smi> flags = SmiFromInt32(BytecodeOperandFlag(2));
+ TNode<UintPtrT> slot_id = BytecodeOperandIdx(1);
+ TNode<Smi> flags =
+ SmiFromInt32(UncheckedCast<Int32T>(BytecodeOperandFlag(2)));
TNode<Context> context = GetContext();
- VARIABLE(result, MachineRepresentation::kTagged);
+ TVARIABLE(JSRegExp, result);
ConstructorBuiltinsAssembler constructor_assembler(state());
- result.Bind(constructor_assembler.EmitCreateRegExpLiteral(
- feedback_vector, slot_id, pattern, flags, context));
+ result = constructor_assembler.EmitCreateRegExpLiteral(
+ feedback_vector, slot_id, pattern, flags, context);
SetAccumulator(result.value());
Dispatch();
}
@@ -2463,9 +2479,9 @@ IGNITION_HANDLER(CreateRegExpLiteral, InterpreterAssembler) {
// CreateArrayLiteral flags <flags> and constant elements in <element_idx>.
IGNITION_HANDLER(CreateArrayLiteral, InterpreterAssembler) {
TNode<HeapObject> feedback_vector = LoadFeedbackVector();
- Node* slot_id = BytecodeOperandIdx(1);
+ TNode<UintPtrT> slot_id = BytecodeOperandIdx(1);
TNode<Context> context = GetContext();
- Node* bytecode_flags = BytecodeOperandFlag(2);
+ TNode<Uint32T> bytecode_flags = BytecodeOperandFlag(2);
Label fast_shallow_clone(this), call_runtime(this, Label::kDeferred);
// No feedback, so handle it as a slow case.
@@ -2478,8 +2494,8 @@ IGNITION_HANDLER(CreateArrayLiteral, InterpreterAssembler) {
BIND(&fast_shallow_clone);
{
ConstructorBuiltinsAssembler constructor_assembler(state());
- Node* result = constructor_assembler.EmitCreateShallowArrayLiteral(
- feedback_vector, slot_id, context, &call_runtime,
+ TNode<JSArray> result = constructor_assembler.EmitCreateShallowArrayLiteral(
+ CAST(feedback_vector), slot_id, context, &call_runtime,
TRACK_ALLOCATION_SITE);
SetAccumulator(result);
Dispatch();
@@ -2487,14 +2503,14 @@ IGNITION_HANDLER(CreateArrayLiteral, InterpreterAssembler) {
BIND(&call_runtime);
{
- TNode<WordT> flags_raw =
+ TNode<UintPtrT> flags_raw =
DecodeWordFromWord32<CreateArrayLiteralFlags::FlagsBits>(
bytecode_flags);
TNode<Smi> flags = SmiTag(Signed(flags_raw));
- Node* constant_elements = LoadConstantPoolEntryAtOperandIndex(0);
+ TNode<Object> constant_elements = LoadConstantPoolEntryAtOperandIndex(0);
TNode<Object> result =
CallRuntime(Runtime::kCreateArrayLiteral, context, feedback_vector,
- SmiTag(slot_id), constant_elements, flags);
+ SmiTag(Signed(slot_id)), constant_elements, flags);
SetAccumulator(result);
Dispatch();
}
@@ -2504,26 +2520,26 @@ IGNITION_HANDLER(CreateArrayLiteral, InterpreterAssembler) {
//
// Creates an empty JSArray literal for literal index <literal_idx>.
IGNITION_HANDLER(CreateEmptyArrayLiteral, InterpreterAssembler) {
- TNode<HeapObject> feedback_vector = LoadFeedbackVector();
- Node* slot_id = BytecodeOperandIdx(0);
+ TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();
+ TNode<UintPtrT> slot_id = BytecodeOperandIdx(0);
TNode<Context> context = GetContext();
Label no_feedback(this, Label::kDeferred), end(this);
- VARIABLE(result, MachineRepresentation::kTagged);
- GotoIf(IsUndefined(feedback_vector), &no_feedback);
+ TVARIABLE(JSArray, result);
+ GotoIf(IsUndefined(maybe_feedback_vector), &no_feedback);
ConstructorBuiltinsAssembler constructor_assembler(state());
- result.Bind(constructor_assembler.EmitCreateEmptyArrayLiteral(
- feedback_vector, slot_id, context));
+ result = constructor_assembler.EmitCreateEmptyArrayLiteral(
+ CAST(maybe_feedback_vector), slot_id, context);
Goto(&end);
BIND(&no_feedback);
{
TNode<Map> array_map = LoadJSArrayElementsMap(GetInitialFastElementsKind(),
LoadNativeContext(context));
- result.Bind(AllocateJSArray(GetInitialFastElementsKind(), array_map,
- SmiConstant(0), SmiConstant(0), nullptr,
- ParameterMode::SMI_PARAMETERS));
+ result =
+ AllocateJSArray(GetInitialFastElementsKind(), array_map, SmiConstant(0),
+ SmiConstant(0), {}, ParameterMode::SMI_PARAMETERS);
Goto(&end);
}
@@ -2551,8 +2567,8 @@ IGNITION_HANDLER(CreateArrayFromIterable, InterpreterAssembler) {
// CreateObjectLiteralFlags <flags> and constant elements in <element_idx>.
IGNITION_HANDLER(CreateObjectLiteral, InterpreterAssembler) {
TNode<HeapObject> feedback_vector = LoadFeedbackVector();
- Node* slot_id = BytecodeOperandIdx(1);
- Node* bytecode_flags = BytecodeOperandFlag(2);
+ TNode<UintPtrT> slot_id = BytecodeOperandIdx(1);
+ TNode<Uint32T> bytecode_flags = BytecodeOperandFlag(2);
Label if_fast_clone(this), if_not_fast_clone(this, Label::kDeferred);
// No feedback, so handle it as a slow case.
@@ -2567,8 +2583,9 @@ IGNITION_HANDLER(CreateObjectLiteral, InterpreterAssembler) {
{
// If we can do a fast clone, do the fast-path in CreateShallowObjectLiteral.
ConstructorBuiltinsAssembler constructor_assembler(state());
- Node* result = constructor_assembler.EmitCreateShallowObjectLiteral(
- feedback_vector, slot_id, &if_not_fast_clone);
+ TNode<HeapObject> result =
+ constructor_assembler.EmitCreateShallowObjectLiteral(
+ CAST(feedback_vector), slot_id, &if_not_fast_clone);
SetAccumulator(result);
Dispatch();
}
@@ -2576,18 +2593,18 @@ IGNITION_HANDLER(CreateObjectLiteral, InterpreterAssembler) {
BIND(&if_not_fast_clone);
{
// If we can't do a fast clone, call into the runtime.
- Node* object_boilerplate_description =
- LoadConstantPoolEntryAtOperandIndex(0);
+ TNode<ObjectBoilerplateDescription> object_boilerplate_description =
+ CAST(LoadConstantPoolEntryAtOperandIndex(0));
TNode<Context> context = GetContext();
- TNode<WordT> flags_raw =
+ TNode<UintPtrT> flags_raw =
DecodeWordFromWord32<CreateObjectLiteralFlags::FlagsBits>(
bytecode_flags);
TNode<Smi> flags = SmiTag(Signed(flags_raw));
- TNode<Object> result =
- CallRuntime(Runtime::kCreateObjectLiteral, context, feedback_vector,
- SmiTag(slot_id), object_boilerplate_description, flags);
+ TNode<Object> result = CallRuntime(Runtime::kCreateObjectLiteral, context,
+ feedback_vector, SmiTag(Signed(slot_id)),
+ object_boilerplate_description, flags);
SetAccumulator(result);
// TODO(klaasb) build a single dispatch once the call is inlined
Dispatch();
@@ -2600,7 +2617,8 @@ IGNITION_HANDLER(CreateObjectLiteral, InterpreterAssembler) {
IGNITION_HANDLER(CreateEmptyObjectLiteral, InterpreterAssembler) {
TNode<Context> context = GetContext();
ConstructorBuiltinsAssembler constructor_assembler(state());
- Node* result = constructor_assembler.EmitCreateEmptyObjectLiteral(context);
+ TNode<JSObject> result =
+ constructor_assembler.EmitCreateEmptyObjectLiteral(context);
SetAccumulator(result);
Dispatch();
}
@@ -2611,18 +2629,18 @@ IGNITION_HANDLER(CreateEmptyObjectLiteral, InterpreterAssembler) {
// {source}, converting getters into data properties.
IGNITION_HANDLER(CloneObject, InterpreterAssembler) {
TNode<Object> source = LoadRegisterAtOperandIndex(0);
- Node* bytecode_flags = BytecodeOperandFlag(1);
- TNode<WordT> raw_flags =
+ TNode<Uint32T> bytecode_flags = BytecodeOperandFlag(1);
+ TNode<UintPtrT> raw_flags =
DecodeWordFromWord32<CreateObjectLiteralFlags::FlagsBits>(bytecode_flags);
TNode<Smi> smi_flags = SmiTag(Signed(raw_flags));
- Node* raw_slot = BytecodeOperandIdx(2);
+ TNode<IntPtrT> raw_slot = Signed(BytecodeOperandIdx(2));
TNode<Smi> smi_slot = SmiTag(raw_slot);
TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();
TNode<Context> context = GetContext();
- Variable var_result(this, MachineRepresentation::kTagged);
- var_result.Bind(CallBuiltin(Builtins::kCloneObjectIC, context, source,
- smi_flags, smi_slot, maybe_feedback_vector));
+ TVARIABLE(Object, var_result);
+ var_result = CallBuiltin(Builtins::kCloneObjectIC, context, source, smi_flags,
+ smi_slot, maybe_feedback_vector);
SetAccumulator(var_result.value());
Dispatch();
}
@@ -2633,14 +2651,14 @@ IGNITION_HANDLER(CloneObject, InterpreterAssembler) {
// accumulator, creating and caching the site object on-demand as per the
// specification.
IGNITION_HANDLER(GetTemplateObject, InterpreterAssembler) {
- TNode<HeapObject> feedback_vector = LoadFeedbackVector();
- Node* slot = BytecodeOperandIdx(1);
+ TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();
+ TNode<UintPtrT> slot = BytecodeOperandIdx(1);
Label call_runtime(this, Label::kDeferred);
- GotoIf(IsUndefined(feedback_vector), &call_runtime);
+ GotoIf(IsUndefined(maybe_feedback_vector), &call_runtime);
TNode<Object> cached_value =
- CAST(LoadFeedbackVectorSlot(feedback_vector, slot, 0, INTPTR_PARAMETERS));
+ CAST(LoadFeedbackVectorSlot(CAST(maybe_feedback_vector), slot));
GotoIf(TaggedEqual(cached_value, SmiConstant(0)), &call_runtime);
@@ -2649,8 +2667,8 @@ IGNITION_HANDLER(GetTemplateObject, InterpreterAssembler) {
BIND(&call_runtime);
{
- Node* description = LoadConstantPoolEntryAtOperandIndex(0);
- TNode<Smi> slot_smi = SmiTag(slot);
+ TNode<Object> description = LoadConstantPoolEntryAtOperandIndex(0);
+ TNode<Smi> slot_smi = SmiTag(Signed(slot));
TNode<JSFunction> closure =
CAST(LoadRegister(Register::function_closure()));
TNode<SharedFunctionInfo> shared_info = LoadObjectField<SharedFunctionInfo>(
@@ -2660,8 +2678,8 @@ IGNITION_HANDLER(GetTemplateObject, InterpreterAssembler) {
description, shared_info, slot_smi);
Label end(this);
- GotoIf(IsUndefined(feedback_vector), &end);
- StoreFeedbackVectorSlot(feedback_vector, slot, result);
+ GotoIf(IsUndefined(maybe_feedback_vector), &end);
+ StoreFeedbackVectorSlot(CAST(maybe_feedback_vector), slot, result);
Goto(&end);
Bind(&end);
@@ -2675,10 +2693,10 @@ IGNITION_HANDLER(GetTemplateObject, InterpreterAssembler) {
// Creates a new closure for SharedFunctionInfo at position |index| in the
// constant pool and with pretenuring controlled by |flags|.
IGNITION_HANDLER(CreateClosure, InterpreterAssembler) {
- Node* shared = LoadConstantPoolEntryAtOperandIndex(0);
- Node* flags = BytecodeOperandFlag(2);
+ TNode<Object> shared = LoadConstantPoolEntryAtOperandIndex(0);
+ TNode<Uint32T> flags = BytecodeOperandFlag(2);
TNode<Context> context = GetContext();
- Node* slot = BytecodeOperandIdx(1);
+ TNode<UintPtrT> slot = BytecodeOperandIdx(1);
Label if_undefined(this);
TNode<ClosureFeedbackCellArray> feedback_cell_array =
@@ -2727,7 +2745,7 @@ IGNITION_HANDLER(CreateClosure, InterpreterAssembler) {
//
// Creates a new block context with the scope info constant at |index|.
IGNITION_HANDLER(CreateBlockContext, InterpreterAssembler) {
- Node* scope_info = LoadConstantPoolEntryAtOperandIndex(0);
+ TNode<ScopeInfo> scope_info = CAST(LoadConstantPoolEntryAtOperandIndex(0));
TNode<Context> context = GetContext();
SetAccumulator(CallRuntime(Runtime::kPushBlockContext, context, scope_info));
Dispatch();
@@ -2739,7 +2757,7 @@ IGNITION_HANDLER(CreateBlockContext, InterpreterAssembler) {
// and the ScopeInfo at |scope_info_idx|.
IGNITION_HANDLER(CreateCatchContext, InterpreterAssembler) {
TNode<Object> exception = LoadRegisterAtOperandIndex(0);
- Node* scope_info = LoadConstantPoolEntryAtOperandIndex(1);
+ TNode<ScopeInfo> scope_info = CAST(LoadConstantPoolEntryAtOperandIndex(1));
TNode<Context> context = GetContext();
SetAccumulator(
CallRuntime(Runtime::kPushCatchContext, context, exception, scope_info));
@@ -2750,8 +2768,8 @@ IGNITION_HANDLER(CreateCatchContext, InterpreterAssembler) {
//
// Creates a new context with number of |slots| for the function closure.
IGNITION_HANDLER(CreateFunctionContext, InterpreterAssembler) {
- Node* scope_info_idx = BytecodeOperandIdx(0);
- Node* scope_info = LoadConstantPoolEntry(scope_info_idx);
+ TNode<UintPtrT> scope_info_idx = BytecodeOperandIdx(0);
+ TNode<ScopeInfo> scope_info = CAST(LoadConstantPoolEntry(scope_info_idx));
TNode<Uint32T> slots = BytecodeOperandUImm(1);
TNode<Context> context = GetContext();
ConstructorBuiltinsAssembler constructor_assembler(state());
@@ -2764,8 +2782,8 @@ IGNITION_HANDLER(CreateFunctionContext, InterpreterAssembler) {
//
// Creates a new context with number of |slots| for an eval closure.
IGNITION_HANDLER(CreateEvalContext, InterpreterAssembler) {
- Node* scope_info_idx = BytecodeOperandIdx(0);
- Node* scope_info = LoadConstantPoolEntry(scope_info_idx);
+ TNode<UintPtrT> scope_info_idx = BytecodeOperandIdx(0);
+ TNode<ScopeInfo> scope_info = CAST(LoadConstantPoolEntry(scope_info_idx));
TNode<Uint32T> slots = BytecodeOperandUImm(1);
TNode<Context> context = GetContext();
ConstructorBuiltinsAssembler constructor_assembler(state());
@@ -2780,7 +2798,7 @@ IGNITION_HANDLER(CreateEvalContext, InterpreterAssembler) {
// with-statement with the object in |register|.
IGNITION_HANDLER(CreateWithContext, InterpreterAssembler) {
TNode<Object> object = LoadRegisterAtOperandIndex(0);
- Node* scope_info = LoadConstantPoolEntryAtOperandIndex(1);
+ TNode<ScopeInfo> scope_info = CAST(LoadConstantPoolEntryAtOperandIndex(1));
TNode<Context> context = GetContext();
SetAccumulator(
CallRuntime(Runtime::kPushWithContext, context, object, scope_info));
@@ -2802,8 +2820,8 @@ IGNITION_HANDLER(CreateMappedArguments, InterpreterAssembler) {
// duplicate parameters.
TNode<SharedFunctionInfo> shared_info = LoadObjectField<SharedFunctionInfo>(
closure, JSFunction::kSharedFunctionInfoOffset);
- Node* flags = LoadObjectField(shared_info, SharedFunctionInfo::kFlagsOffset,
- MachineType::Uint32());
+ TNode<Uint32T> flags =
+ LoadObjectField<Uint32T>(shared_info, SharedFunctionInfo::kFlagsOffset);
TNode<BoolT> has_duplicate_parameters =
IsSetWord32<SharedFunctionInfo::HasDuplicateParametersBit>(flags);
Branch(has_duplicate_parameters, &if_duplicate_parameters,
@@ -2812,7 +2830,7 @@ IGNITION_HANDLER(CreateMappedArguments, InterpreterAssembler) {
BIND(&if_not_duplicate_parameters);
{
ArgumentsBuiltinsAssembler constructor_assembler(state());
- Node* result =
+ TNode<JSObject> result =
constructor_assembler.EmitFastNewSloppyArguments(context, closure);
SetAccumulator(result);
Dispatch();
@@ -2832,9 +2850,9 @@ IGNITION_HANDLER(CreateMappedArguments, InterpreterAssembler) {
// Creates a new unmapped arguments object.
IGNITION_HANDLER(CreateUnmappedArguments, InterpreterAssembler) {
TNode<Context> context = GetContext();
- TNode<Object> closure = LoadRegister(Register::function_closure());
+ TNode<JSFunction> closure = CAST(LoadRegister(Register::function_closure()));
ArgumentsBuiltinsAssembler builtins_assembler(state());
- Node* result =
+ TNode<JSObject> result =
builtins_assembler.EmitFastNewStrictArguments(context, closure);
SetAccumulator(result);
Dispatch();
@@ -2844,10 +2862,11 @@ IGNITION_HANDLER(CreateUnmappedArguments, InterpreterAssembler) {
//
// Creates a new rest parameter array.
IGNITION_HANDLER(CreateRestParameter, InterpreterAssembler) {
- TNode<Object> closure = LoadRegister(Register::function_closure());
+ TNode<JSFunction> closure = CAST(LoadRegister(Register::function_closure()));
TNode<Context> context = GetContext();
ArgumentsBuiltinsAssembler builtins_assembler(state());
- Node* result = builtins_assembler.EmitFastNewRestParameter(context, closure);
+ TNode<JSObject> result =
+ builtins_assembler.EmitFastNewRestParameter(context, closure);
SetAccumulator(result);
Dispatch();
}
@@ -2868,7 +2887,7 @@ IGNITION_HANDLER(StackCheck, InterpreterAssembler) {
IGNITION_HANDLER(SetPendingMessage, InterpreterAssembler) {
TNode<ExternalReference> pending_message = ExternalConstant(
ExternalReference::address_of_pending_message_obj(isolate()));
- Node* previous_message = Load(MachineType::TaggedPointer(), pending_message);
+ TNode<HeapObject> previous_message = Load<HeapObject>(pending_message);
TNode<Object> new_message = GetAccumulator();
StoreFullTaggedNoWriteBarrier(pending_message, new_message);
SetAccumulator(previous_message);
@@ -2903,8 +2922,8 @@ IGNITION_HANDLER(ReThrow, InterpreterAssembler) {
//
// Aborts execution (via a call to the runtime function).
IGNITION_HANDLER(Abort, InterpreterAssembler) {
- Node* reason = BytecodeOperandIdx(0);
- CallRuntime(Runtime::kAbort, NoContextConstant(), SmiTag(reason));
+ TNode<UintPtrT> reason = BytecodeOperandIdx(0);
+ CallRuntime(Runtime::kAbort, NoContextConstant(), SmiTag(Signed(reason)));
Unreachable();
}
@@ -2929,7 +2948,7 @@ IGNITION_HANDLER(ThrowReferenceErrorIfHole, InterpreterAssembler) {
BIND(&throw_error);
{
- Node* name = LoadConstantPoolEntryAtOperandIndex(0);
+ TNode<Name> name = CAST(LoadConstantPoolEntryAtOperandIndex(0));
CallRuntime(Runtime::kThrowAccessedUninitializedVariable, GetContext(),
name);
// We shouldn't ever return from a throw.
@@ -2995,7 +3014,7 @@ IGNITION_HANDLER(Debugger, InterpreterAssembler) {
TNode<Object> accumulator = GetAccumulator(); \
TNode<Object> result_pair = \
CallRuntime(Runtime::kDebugBreakOnBytecode, context, accumulator); \
- Node* return_value = Projection(0, result_pair); \
+ TNode<Object> return_value = CAST(Projection(0, result_pair)); \
TNode<IntPtrT> original_bytecode = SmiUntag(Projection(1, result_pair)); \
MaybeDropFrames(context); \
SetAccumulator(return_value); \
@@ -3010,7 +3029,7 @@ DEBUG_BREAK_BYTECODE_LIST(DEBUG_BREAK)
// coverage.
IGNITION_HANDLER(IncBlockCounter, InterpreterAssembler) {
TNode<Object> closure = LoadRegister(Register::function_closure());
- Node* coverage_array_slot = BytecodeOperandIdxSmi(0);
+ TNode<Smi> coverage_array_slot = BytecodeOperandIdxSmi(0);
TNode<Context> context = GetContext();
CallBuiltin(Builtins::kIncBlockCounter, context, closure,
@@ -3025,11 +3044,11 @@ IGNITION_HANDLER(IncBlockCounter, InterpreterAssembler) {
// map of the |receiver| if it has a usable enum cache or a fixed array
// with the keys to enumerate in the accumulator.
IGNITION_HANDLER(ForInEnumerate, InterpreterAssembler) {
- TNode<Object> receiver = LoadRegisterAtOperandIndex(0);
+ TNode<HeapObject> receiver = CAST(LoadRegisterAtOperandIndex(0));
TNode<Context> context = GetContext();
Label if_empty(this), if_runtime(this, Label::kDeferred);
- Node* receiver_map = CheckEnumCache(receiver, &if_empty, &if_runtime);
+ TNode<Map> receiver_map = CheckEnumCache(receiver, &if_empty, &if_runtime);
SetAccumulator(receiver_map);
Dispatch();
@@ -3060,7 +3079,7 @@ IGNITION_HANDLER(ForInEnumerate, InterpreterAssembler) {
IGNITION_HANDLER(ForInPrepare, InterpreterAssembler) {
// The {enumerator} is either a Map or a FixedArray.
TNode<HeapObject> enumerator = CAST(GetAccumulator());
- Node* vector_index = BytecodeOperandIdx(1);
+ TNode<UintPtrT> vector_index = BytecodeOperandIdx(1);
TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();
// Check if we're using an enum cache.
@@ -3091,8 +3110,8 @@ IGNITION_HANDLER(ForInPrepare, InterpreterAssembler) {
UpdateFeedback(feedback, maybe_feedback_vector, vector_index);
// Construct the cache info triple.
- Node* cache_type = enumerator;
- Node* cache_array = enum_keys;
+ TNode<Map> cache_type = map_enumerator;
+ TNode<FixedArray> cache_array = enum_keys;
TNode<Smi> cache_length = SmiTag(Signed(enum_length));
StoreRegisterTripleAtOperandIndex(cache_type, cache_array, cache_length, 0);
Dispatch();
@@ -3108,8 +3127,8 @@ IGNITION_HANDLER(ForInPrepare, InterpreterAssembler) {
vector_index);
// Construct the cache info triple.
- Node* cache_type = array_enumerator;
- Node* cache_array = array_enumerator;
+ TNode<FixedArray> cache_type = array_enumerator;
+ TNode<FixedArray> cache_array = array_enumerator;
TNode<Smi> cache_length = LoadFixedArrayBaseLength(array_enumerator);
StoreRegisterTripleAtOperandIndex(cache_type, cache_array, cache_length, 0);
Dispatch();
@@ -3125,7 +3144,7 @@ IGNITION_HANDLER(ForInNext, InterpreterAssembler) {
TNode<Object> cache_type;
TNode<Object> cache_array;
std::tie(cache_type, cache_array) = LoadRegisterPairAtOperandIndex(2);
- Node* vector_index = BytecodeOperandIdx(3);
+ TNode<UintPtrT> vector_index = BytecodeOperandIdx(3);
TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();
// Load the next key from the enumeration array.
@@ -3195,21 +3214,22 @@ IGNITION_HANDLER(ForInStep, InterpreterAssembler) {
// GetIterator <object>
//
-// Retrieves the object[Symbol.iterator] method and stores the result
-// in the accumulator
-// TODO(swapnilgaikwad): Extend the functionality of the bytecode to call
-// iterator method for an object
+// Retrieves the object[Symbol.iterator] method, calls it, and stores
+// the result in the accumulator. If the result is not a JSReceiver,
+// throws a SymbolIteratorInvalid runtime exception.
IGNITION_HANDLER(GetIterator, InterpreterAssembler) {
TNode<Object> receiver = LoadRegisterAtOperandIndex(0);
TNode<Context> context = GetContext();
TNode<HeapObject> feedback_vector = LoadFeedbackVector();
- Node* feedback_slot = BytecodeOperandIdx(1);
- TNode<Smi> smi_slot = SmiTag(feedback_slot);
+ TNode<IntPtrT> load_feedback_slot = Signed(BytecodeOperandIdx(1));
+ TNode<IntPtrT> call_feedback_slot = Signed(BytecodeOperandIdx(2));
+ TNode<Smi> load_slot_smi = SmiTag(load_feedback_slot);
+ TNode<Smi> call_slot_smi = SmiTag(call_feedback_slot);
- TNode<Object> result =
+ TNode<Object> iterator =
CallBuiltin(Builtins::kGetIteratorWithFeedback, context, receiver,
- smi_slot, feedback_vector);
- SetAccumulator(result);
+ load_slot_smi, call_slot_smi, feedback_vector);
+ SetAccumulator(iterator);
Dispatch();
}
@@ -3249,7 +3269,7 @@ IGNITION_HANDLER(SuspendGenerator, InterpreterAssembler) {
TNode<JSFunction> closure = CAST(LoadRegister(Register::function_closure()));
TNode<Context> context = GetContext();
RegListNodePair registers = GetRegisterListAtOperandIndex(1);
- Node* suspend_id = BytecodeOperandUImmSmi(3);
+ TNode<Smi> suspend_id = BytecodeOperandUImmSmi(3);
TNode<SharedFunctionInfo> shared =
CAST(LoadObjectField(closure, JSFunction::kSharedFunctionInfoOffset));
@@ -3297,10 +3317,10 @@ IGNITION_HANDLER(SwitchOnGeneratorState, InterpreterAssembler) {
CAST(LoadObjectField(generator, JSGeneratorObject::kContextOffset));
SetContext(context);
- Node* table_start = BytecodeOperandIdx(1);
+ TNode<UintPtrT> table_start = BytecodeOperandIdx(1);
// TODO(leszeks): table_length is only used for a CSA_ASSERT; we don't
// actually need it otherwise.
- Node* table_length = BytecodeOperandUImmWord(2);
+ TNode<UintPtrT> table_length = BytecodeOperandUImmWord(2);
// The state must be a Smi.
CSA_ASSERT(this, TaggedIsSmi(state));
@@ -3350,14 +3370,15 @@ IGNITION_HANDLER(ResumeGenerator, InterpreterAssembler) {
} // namespace
-Handle<Code> GenerateBytecodeHandler(Isolate* isolate, Bytecode bytecode,
+Handle<Code> GenerateBytecodeHandler(Isolate* isolate, const char* debug_name,
+ Bytecode bytecode,
OperandScale operand_scale,
int builtin_index,
const AssemblerOptions& options) {
Zone zone(isolate->allocator(), ZONE_NAME);
compiler::CodeAssemblerState state(
isolate, &zone, InterpreterDispatchDescriptor{}, Code::BYTECODE_HANDLER,
- Bytecodes::ToString(bytecode),
+ debug_name,
FLAG_untrusted_code_mitigations
? PoisoningMitigationLevel::kPoisonCriticalOnly
: PoisoningMitigationLevel::kDontPoison,
@@ -3377,7 +3398,7 @@ Handle<Code> GenerateBytecodeHandler(Isolate* isolate, Bytecode bytecode,
#ifdef ENABLE_DISASSEMBLER
if (FLAG_trace_ignition_codegen) {
StdoutStream os;
- code->Disassemble(Bytecodes::ToString(bytecode), os);
+ code->Disassemble(Bytecodes::ToString(bytecode), os, isolate);
os << std::flush;
}
#endif // ENABLE_DISASSEMBLER
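
The interpreter-generator hunks above are dominated by one mechanical change: untyped compiler::Node* values become TNode<T> handles and VARIABLE becomes TVARIABLE, which moves type checking of handler code from runtime CSA verification to the C++ compiler. A minimal standalone sketch of the phantom-type idea (simplified stand-ins, not V8's actual TNode or object hierarchy):

    #include <type_traits>

    // Simplified stand-ins for V8's tagged-value hierarchy.
    struct Object {};
    struct Smi : Object {};
    struct HeapObject : Object {};

    // Phantom-typed handle: carries no extra data at runtime, but the
    // compiler tracks what it refers to, as TNode<T> does in CSA code.
    template <typename T>
    class TypedNode {
     public:
      TypedNode() : raw_(nullptr) {}
      // Implicit upcasts only (e.g. Smi -> Object); anything else fails
      // to compile, which is the point of the migration.
      template <typename U>
      TypedNode(TypedNode<U> other) : raw_(other.raw()) {
        static_assert(std::is_base_of<T, U>::value, "bad implicit conversion");
      }
      void* raw() const { return raw_; }
     private:
      void* raw_;
    };

    TypedNode<Smi> MakeSmi() { return TypedNode<Smi>(); }
    void UseObject(TypedNode<Object>) {}
    void UseHeapObject(TypedNode<HeapObject>) {}

    int main() {
      TypedNode<Smi> smi = MakeSmi();
      UseObject(smi);        // fine: Smi is an Object
      // UseHeapObject(smi); // rejected at compile time: Smi is not a HeapObject
      return 0;
    }
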
diff --git a/deps/v8/src/interpreter/interpreter-generator.h b/deps/v8/src/interpreter/interpreter-generator.h
index a41e89f250..263f02ba39 100644
--- a/deps/v8/src/interpreter/interpreter-generator.h
+++ b/deps/v8/src/interpreter/interpreter-generator.h
@@ -15,7 +15,9 @@ struct AssemblerOptions;
namespace interpreter {
-extern Handle<Code> GenerateBytecodeHandler(Isolate* isolate, Bytecode bytecode,
+extern Handle<Code> GenerateBytecodeHandler(Isolate* isolate,
+ const char* debug_name,
+ Bytecode bytecode,
OperandScale operand_scale,
int builtin_index,
const AssemblerOptions& options);
diff --git a/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc b/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc
index a329e7189f..f5307762f7 100644
--- a/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc
+++ b/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc
@@ -21,8 +21,6 @@ namespace internal {
namespace interpreter {
using compiler::Node;
-template <typename T>
-using TNode = compiler::TNode<T>;
class IntrinsicsGenerator {
public:
@@ -31,8 +29,9 @@ class IntrinsicsGenerator {
zone_(assembler->zone()),
assembler_(assembler) {}
- Node* InvokeIntrinsic(Node* function_id, Node* context,
- const InterpreterAssembler::RegListNodePair& args);
+ TNode<Object> InvokeIntrinsic(
+ TNode<Uint32T> function_id, TNode<Context> context,
+ const InterpreterAssembler::RegListNodePair& args);
private:
enum InstanceTypeCompareMode {
@@ -40,17 +39,20 @@ class IntrinsicsGenerator {
kInstanceTypeGreaterThanOrEqual
};
- Node* IsInstanceType(Node* input, int type);
- Node* CompareInstanceType(Node* map, int type, InstanceTypeCompareMode mode);
- Node* IntrinsicAsStubCall(const InterpreterAssembler::RegListNodePair& args,
- Node* context, Callable const& callable);
- Node* IntrinsicAsBuiltinCall(
- const InterpreterAssembler::RegListNodePair& args, Node* context,
+ TNode<Oddball> IsInstanceType(TNode<Object> input, int type);
+ TNode<BoolT> CompareInstanceType(TNode<HeapObject> map, int type,
+ InstanceTypeCompareMode mode);
+ TNode<Object> IntrinsicAsStubCall(
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context,
+ Callable const& callable);
+ TNode<Object> IntrinsicAsBuiltinCall(
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context,
Builtins::Name name);
- void AbortIfArgCountMismatch(int expected, compiler::TNode<Word32T> actual);
+ void AbortIfArgCountMismatch(int expected, TNode<Word32T> actual);
-#define DECLARE_INTRINSIC_HELPER(name, lower_case, count) \
- Node* name(const InterpreterAssembler::RegListNodePair& args, Node* context);
+#define DECLARE_INTRINSIC_HELPER(name, lower_case, count) \
+ TNode<Object> name(const InterpreterAssembler::RegListNodePair& args, \
+ TNode<Context> context);
INTRINSICS_LIST(DECLARE_INTRINSIC_HELPER)
#undef DECLARE_INTRINSIC_HELPER
@@ -65,21 +67,20 @@ class IntrinsicsGenerator {
DISALLOW_COPY_AND_ASSIGN(IntrinsicsGenerator);
};
-Node* GenerateInvokeIntrinsic(
- InterpreterAssembler* assembler, Node* function_id, Node* context,
- const InterpreterAssembler::RegListNodePair& args) {
+TNode<Object> GenerateInvokeIntrinsic(
+ InterpreterAssembler* assembler, TNode<Uint32T> function_id,
+ TNode<Context> context, const InterpreterAssembler::RegListNodePair& args) {
IntrinsicsGenerator generator(assembler);
return generator.InvokeIntrinsic(function_id, context, args);
}
#define __ assembler_->
-Node* IntrinsicsGenerator::InvokeIntrinsic(
- Node* function_id, Node* context,
+TNode<Object> IntrinsicsGenerator::InvokeIntrinsic(
+ TNode<Uint32T> function_id, TNode<Context> context,
const InterpreterAssembler::RegListNodePair& args) {
InterpreterAssembler::Label abort(assembler_), end(assembler_);
- InterpreterAssembler::Variable result(assembler_,
- MachineRepresentation::kTagged);
+ InterpreterAssembler::TVariable<Object> result(assembler_);
#define MAKE_LABEL(name, lower_case, count) \
InterpreterAssembler::Label lower_case(assembler_);
@@ -102,9 +103,9 @@ Node* IntrinsicsGenerator::InvokeIntrinsic(
if (FLAG_debug_code && expected_arg_count >= 0) { \
AbortIfArgCountMismatch(expected_arg_count, args.reg_count()); \
} \
- Node* value = name(args, context); \
+ TNode<Object> value = name(args, context); \
if (value) { \
- result.Bind(value); \
+ result = value; \
__ Goto(&end); \
} \
}
@@ -114,7 +115,7 @@ Node* IntrinsicsGenerator::InvokeIntrinsic(
__ BIND(&abort);
{
__ Abort(AbortReason::kUnexpectedFunctionIDForInvokeIntrinsic);
- result.Bind(__ UndefinedConstant());
+ result = __ UndefinedConstant();
__ Goto(&end);
}
@@ -122,8 +123,8 @@ Node* IntrinsicsGenerator::InvokeIntrinsic(
return result.value();
}
-Node* IntrinsicsGenerator::CompareInstanceType(Node* object, int type,
- InstanceTypeCompareMode mode) {
+TNode<BoolT> IntrinsicsGenerator::CompareInstanceType(
+ TNode<HeapObject> object, int type, InstanceTypeCompareMode mode) {
TNode<Uint16T> instance_type = __ LoadInstanceType(object);
if (mode == kInstanceTypeEqual) {
@@ -134,39 +135,42 @@ Node* IntrinsicsGenerator::CompareInstanceType(Node* object, int type,
}
}
-Node* IntrinsicsGenerator::IsInstanceType(Node* input, int type) {
+TNode<Oddball> IntrinsicsGenerator::IsInstanceType(TNode<Object> input,
+ int type) {
TNode<Oddball> result = __ Select<Oddball>(
__ TaggedIsSmi(input), [=] { return __ FalseConstant(); },
[=] {
return __ SelectBooleanConstant(
- CompareInstanceType(input, type, kInstanceTypeEqual));
+ CompareInstanceType(__ CAST(input), type, kInstanceTypeEqual));
});
return result;
}
-Node* IntrinsicsGenerator::IsJSReceiver(
- const InterpreterAssembler::RegListNodePair& args, Node* context) {
- Node* input = __ LoadRegisterFromRegisterList(args, 0);
+TNode<Object> IntrinsicsGenerator::IsJSReceiver(
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) {
+ TNode<Object> input = __ LoadRegisterFromRegisterList(args, 0);
TNode<Oddball> result = __ Select<Oddball>(
__ TaggedIsSmi(input), [=] { return __ FalseConstant(); },
- [=] { return __ SelectBooleanConstant(__ IsJSReceiver(input)); });
+ [=] {
+ return __ SelectBooleanConstant(__ IsJSReceiver(__ CAST(input)));
+ });
return result;
}
-Node* IntrinsicsGenerator::IsArray(
- const InterpreterAssembler::RegListNodePair& args, Node* context) {
- Node* input = __ LoadRegisterFromRegisterList(args, 0);
+TNode<Object> IntrinsicsGenerator::IsArray(
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) {
+ TNode<Object> input = __ LoadRegisterFromRegisterList(args, 0);
return IsInstanceType(input, JS_ARRAY_TYPE);
}
-Node* IntrinsicsGenerator::IsSmi(
- const InterpreterAssembler::RegListNodePair& args, Node* context) {
- Node* input = __ LoadRegisterFromRegisterList(args, 0);
+TNode<Object> IntrinsicsGenerator::IsSmi(
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) {
+ TNode<Object> input = __ LoadRegisterFromRegisterList(args, 0);
return __ SelectBooleanConstant(__ TaggedIsSmi(input));
}
-Node* IntrinsicsGenerator::IntrinsicAsStubCall(
- const InterpreterAssembler::RegListNodePair& args, Node* context,
+TNode<Object> IntrinsicsGenerator::IntrinsicAsStubCall(
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context,
Callable const& callable) {
int param_count = callable.descriptor().GetParameterCount();
int input_count = param_count + 2; // +2 for target and context
@@ -177,59 +181,60 @@ Node* IntrinsicsGenerator::IntrinsicAsStubCall(
stub_args[index++] = __ LoadRegisterFromRegisterList(args, i);
}
stub_args[index++] = context;
- return __ CallStubN(StubCallMode::kCallCodeObject, callable.descriptor(), 1,
- input_count, stub_args);
+ return __ CAST(__ CallStubN(StubCallMode::kCallCodeObject,
+ callable.descriptor(), 1, input_count,
+ stub_args));
}
-Node* IntrinsicsGenerator::IntrinsicAsBuiltinCall(
- const InterpreterAssembler::RegListNodePair& args, Node* context,
+TNode<Object> IntrinsicsGenerator::IntrinsicAsBuiltinCall(
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context,
Builtins::Name name) {
Callable callable = Builtins::CallableFor(isolate_, name);
return IntrinsicAsStubCall(args, context, callable);
}
-Node* IntrinsicsGenerator::CopyDataProperties(
- const InterpreterAssembler::RegListNodePair& args, Node* context) {
+TNode<Object> IntrinsicsGenerator::CopyDataProperties(
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) {
return IntrinsicAsStubCall(
args, context,
Builtins::CallableFor(isolate(), Builtins::kCopyDataProperties));
}
-Node* IntrinsicsGenerator::CreateIterResultObject(
- const InterpreterAssembler::RegListNodePair& args, Node* context) {
+TNode<Object> IntrinsicsGenerator::CreateIterResultObject(
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) {
return IntrinsicAsStubCall(
args, context,
Builtins::CallableFor(isolate(), Builtins::kCreateIterResultObject));
}
-Node* IntrinsicsGenerator::HasProperty(
- const InterpreterAssembler::RegListNodePair& args, Node* context) {
+TNode<Object> IntrinsicsGenerator::HasProperty(
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) {
return IntrinsicAsStubCall(
args, context, Builtins::CallableFor(isolate(), Builtins::kHasProperty));
}
-Node* IntrinsicsGenerator::ToStringRT(
- const InterpreterAssembler::RegListNodePair& args, Node* context) {
+TNode<Object> IntrinsicsGenerator::ToStringRT(
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) {
return IntrinsicAsStubCall(
args, context, Builtins::CallableFor(isolate(), Builtins::kToString));
}
-Node* IntrinsicsGenerator::ToLength(
- const InterpreterAssembler::RegListNodePair& args, Node* context) {
+TNode<Object> IntrinsicsGenerator::ToLength(
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) {
return IntrinsicAsStubCall(
args, context, Builtins::CallableFor(isolate(), Builtins::kToLength));
}
-Node* IntrinsicsGenerator::ToObject(
- const InterpreterAssembler::RegListNodePair& args, Node* context) {
+TNode<Object> IntrinsicsGenerator::ToObject(
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) {
return IntrinsicAsStubCall(
args, context, Builtins::CallableFor(isolate(), Builtins::kToObject));
}
-Node* IntrinsicsGenerator::Call(
- const InterpreterAssembler::RegListNodePair& args, Node* context) {
+TNode<Object> IntrinsicsGenerator::Call(
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) {
// First argument register contains the function target.
- Node* function = __ LoadRegisterFromRegisterList(args, 0);
+ TNode<Object> function = __ LoadRegisterFromRegisterList(args, 0);
// The arguments for the target function are from the second runtime call
// argument.
@@ -249,26 +254,25 @@ Node* IntrinsicsGenerator::Call(
__ CallJSAndDispatch(function, context, target_args,
ConvertReceiverMode::kAny);
- return nullptr; // We never return from the CallJSAndDispatch above.
+ return TNode<Object>(); // We never return from the CallJSAndDispatch above.
}
-Node* IntrinsicsGenerator::CreateAsyncFromSyncIterator(
- const InterpreterAssembler::RegListNodePair& args, Node* context) {
+TNode<Object> IntrinsicsGenerator::CreateAsyncFromSyncIterator(
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) {
InterpreterAssembler::Label not_receiver(
assembler_, InterpreterAssembler::Label::kDeferred);
InterpreterAssembler::Label done(assembler_);
- InterpreterAssembler::Variable return_value(assembler_,
- MachineRepresentation::kTagged);
+ InterpreterAssembler::TVariable<Object> return_value(assembler_);
- Node* sync_iterator = __ LoadRegisterFromRegisterList(args, 0);
+ TNode<Object> sync_iterator = __ LoadRegisterFromRegisterList(args, 0);
__ GotoIf(__ TaggedIsSmi(sync_iterator), &not_receiver);
- __ GotoIfNot(__ IsJSReceiver(sync_iterator), &not_receiver);
+ __ GotoIfNot(__ IsJSReceiver(__ CAST(sync_iterator)), &not_receiver);
TNode<Object> const next =
__ GetProperty(context, sync_iterator, factory()->next_string());
- TNode<Context> const native_context = __ LoadNativeContext(context);
+ TNode<NativeContext> const native_context = __ LoadNativeContext(context);
TNode<Map> const map = __ CAST(__ LoadContextElement(
native_context, Context::ASYNC_FROM_SYNC_ITERATOR_MAP_INDEX));
TNode<JSObject> const iterator = __ AllocateJSObjectFromMap(map);
@@ -278,13 +282,13 @@ Node* IntrinsicsGenerator::CreateAsyncFromSyncIterator(
__ StoreObjectFieldNoWriteBarrier(iterator,
JSAsyncFromSyncIterator::kNextOffset, next);
- return_value.Bind(iterator);
+ return_value = iterator;
__ Goto(&done);
__ BIND(&not_receiver);
{
- return_value.Bind(
- __ CallRuntime(Runtime::kThrowSymbolIteratorInvalid, context));
+ return_value =
+ __ CallRuntime(Runtime::kThrowSymbolIteratorInvalid, context);
// Unreachable due to the Throw in runtime call.
__ Goto(&done);
@@ -294,104 +298,105 @@ Node* IntrinsicsGenerator::CreateAsyncFromSyncIterator(
return return_value.value();
}
-Node* IntrinsicsGenerator::CreateJSGeneratorObject(
- const InterpreterAssembler::RegListNodePair& args, Node* context) {
+TNode<Object> IntrinsicsGenerator::CreateJSGeneratorObject(
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) {
return IntrinsicAsBuiltinCall(args, context,
Builtins::kCreateGeneratorObject);
}
-Node* IntrinsicsGenerator::GeneratorGetResumeMode(
- const InterpreterAssembler::RegListNodePair& args, Node* context) {
- Node* generator = __ LoadRegisterFromRegisterList(args, 0);
+TNode<Object> IntrinsicsGenerator::GeneratorGetResumeMode(
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) {
+ TNode<JSGeneratorObject> generator =
+ __ CAST(__ LoadRegisterFromRegisterList(args, 0));
TNode<Object> const value =
__ LoadObjectField(generator, JSGeneratorObject::kResumeModeOffset);
return value;
}
-Node* IntrinsicsGenerator::GeneratorClose(
- const InterpreterAssembler::RegListNodePair& args, Node* context) {
- Node* generator = __ LoadRegisterFromRegisterList(args, 0);
+TNode<Object> IntrinsicsGenerator::GeneratorClose(
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) {
+ TNode<JSGeneratorObject> generator =
+ __ CAST(__ LoadRegisterFromRegisterList(args, 0));
__ StoreObjectFieldNoWriteBarrier(
generator, JSGeneratorObject::kContinuationOffset,
__ SmiConstant(JSGeneratorObject::kGeneratorClosed));
return __ UndefinedConstant();
}
-Node* IntrinsicsGenerator::GetImportMetaObject(
- const InterpreterAssembler::RegListNodePair& args, Node* context) {
+TNode<Object> IntrinsicsGenerator::GetImportMetaObject(
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) {
TNode<Context> const module_context = __ LoadModuleContext(context);
TNode<HeapObject> const module =
__ CAST(__ LoadContextElement(module_context, Context::EXTENSION_INDEX));
TNode<Object> const import_meta =
__ LoadObjectField(module, SourceTextModule::kImportMetaOffset);
- InterpreterAssembler::Variable return_value(assembler_,
- MachineRepresentation::kTagged);
- return_value.Bind(import_meta);
+ InterpreterAssembler::TVariable<Object> return_value(assembler_);
+ return_value = import_meta;
InterpreterAssembler::Label end(assembler_);
__ GotoIfNot(__ IsTheHole(import_meta), &end);
- return_value.Bind(__ CallRuntime(Runtime::kGetImportMetaObject, context));
+ return_value = __ CallRuntime(Runtime::kGetImportMetaObject, context);
__ Goto(&end);
__ BIND(&end);
return return_value.value();
}
-Node* IntrinsicsGenerator::AsyncFunctionAwaitCaught(
- const InterpreterAssembler::RegListNodePair& args, Node* context) {
+TNode<Object> IntrinsicsGenerator::AsyncFunctionAwaitCaught(
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) {
return IntrinsicAsBuiltinCall(args, context,
Builtins::kAsyncFunctionAwaitCaught);
}
-Node* IntrinsicsGenerator::AsyncFunctionAwaitUncaught(
- const InterpreterAssembler::RegListNodePair& args, Node* context) {
+TNode<Object> IntrinsicsGenerator::AsyncFunctionAwaitUncaught(
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) {
return IntrinsicAsBuiltinCall(args, context,
Builtins::kAsyncFunctionAwaitUncaught);
}
-Node* IntrinsicsGenerator::AsyncFunctionEnter(
- const InterpreterAssembler::RegListNodePair& args, Node* context) {
+TNode<Object> IntrinsicsGenerator::AsyncFunctionEnter(
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) {
return IntrinsicAsBuiltinCall(args, context, Builtins::kAsyncFunctionEnter);
}
-Node* IntrinsicsGenerator::AsyncFunctionReject(
- const InterpreterAssembler::RegListNodePair& args, Node* context) {
+TNode<Object> IntrinsicsGenerator::AsyncFunctionReject(
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) {
return IntrinsicAsBuiltinCall(args, context, Builtins::kAsyncFunctionReject);
}
-Node* IntrinsicsGenerator::AsyncFunctionResolve(
- const InterpreterAssembler::RegListNodePair& args, Node* context) {
+TNode<Object> IntrinsicsGenerator::AsyncFunctionResolve(
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) {
return IntrinsicAsBuiltinCall(args, context, Builtins::kAsyncFunctionResolve);
}
-Node* IntrinsicsGenerator::AsyncGeneratorAwaitCaught(
- const InterpreterAssembler::RegListNodePair& args, Node* context) {
+TNode<Object> IntrinsicsGenerator::AsyncGeneratorAwaitCaught(
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) {
return IntrinsicAsBuiltinCall(args, context,
Builtins::kAsyncGeneratorAwaitCaught);
}
-Node* IntrinsicsGenerator::AsyncGeneratorAwaitUncaught(
- const InterpreterAssembler::RegListNodePair& args, Node* context) {
+TNode<Object> IntrinsicsGenerator::AsyncGeneratorAwaitUncaught(
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) {
return IntrinsicAsBuiltinCall(args, context,
Builtins::kAsyncGeneratorAwaitUncaught);
}
-Node* IntrinsicsGenerator::AsyncGeneratorReject(
- const InterpreterAssembler::RegListNodePair& args, Node* context) {
+TNode<Object> IntrinsicsGenerator::AsyncGeneratorReject(
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) {
return IntrinsicAsBuiltinCall(args, context, Builtins::kAsyncGeneratorReject);
}
-Node* IntrinsicsGenerator::AsyncGeneratorResolve(
- const InterpreterAssembler::RegListNodePair& args, Node* context) {
+TNode<Object> IntrinsicsGenerator::AsyncGeneratorResolve(
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) {
return IntrinsicAsBuiltinCall(args, context,
Builtins::kAsyncGeneratorResolve);
}
-Node* IntrinsicsGenerator::AsyncGeneratorYield(
- const InterpreterAssembler::RegListNodePair& args, Node* context) {
+TNode<Object> IntrinsicsGenerator::AsyncGeneratorYield(
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) {
return IntrinsicAsBuiltinCall(args, context, Builtins::kAsyncGeneratorYield);
}
diff --git a/deps/v8/src/interpreter/interpreter-intrinsics-generator.h b/deps/v8/src/interpreter/interpreter-intrinsics-generator.h
index fd4e167ed0..f0c22e7a59 100644
--- a/deps/v8/src/interpreter/interpreter-intrinsics-generator.h
+++ b/deps/v8/src/interpreter/interpreter-intrinsics-generator.h
@@ -16,9 +16,9 @@ class Node;
namespace interpreter {
-extern compiler::Node* GenerateInvokeIntrinsic(
- InterpreterAssembler* assembler, compiler::Node* function_id,
- compiler::Node* context, const InterpreterAssembler::RegListNodePair& args);
+extern TNode<Object> GenerateInvokeIntrinsic(
+ InterpreterAssembler* assembler, TNode<Uint32T> function_id,
+ TNode<Context> context, const InterpreterAssembler::RegListNodePair& args);
} // namespace interpreter
} // namespace internal
diff --git a/deps/v8/src/interpreter/interpreter.cc b/deps/v8/src/interpreter/interpreter.cc
index 482ffb7459..6c730d5a59 100644
--- a/deps/v8/src/interpreter/interpreter.cc
+++ b/deps/v8/src/interpreter/interpreter.cc
@@ -269,7 +269,7 @@ std::unique_ptr<UnoptimizedCompilationJob> Interpreter::NewCompilationJob(
ParseInfo* parse_info, FunctionLiteral* literal,
AccountingAllocator* allocator,
std::vector<FunctionLiteral*>* eager_inner_literals) {
- return base::make_unique<InterpreterCompilationJob>(
+ return std::make_unique<InterpreterCompilationJob>(
parse_info, literal, allocator, eager_inner_literals);
}
@@ -277,10 +277,10 @@ std::unique_ptr<UnoptimizedCompilationJob>
Interpreter::NewSourcePositionCollectionJob(
ParseInfo* parse_info, FunctionLiteral* literal,
Handle<BytecodeArray> existing_bytecode, AccountingAllocator* allocator) {
- auto job = base::make_unique<InterpreterCompilationJob>(parse_info, literal,
- allocator, nullptr);
+ auto job = std::make_unique<InterpreterCompilationJob>(parse_info, literal,
+ allocator, nullptr);
job->compilation_info()->SetBytecodeArray(existing_bytecode);
- return std::unique_ptr<UnoptimizedCompilationJob> { static_cast<UnoptimizedCompilationJob*>(job.release()) };
+ return job;
}
void Interpreter::ForEachBytecode(
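
The interpreter.cc hunk is part of the repo-wide switch from base::make_unique to C++14's std::make_unique, and it also drops the release()/static_cast dance: std::unique_ptr<Derived> converts implicitly to std::unique_ptr<Base>, so the job can be returned directly. A standalone illustration (placeholder class names, not the V8 types):

    #include <memory>

    struct UnoptimizedJob { virtual ~UnoptimizedJob() = default; };
    struct InterpreterJob : UnoptimizedJob {};

    std::unique_ptr<UnoptimizedJob> NewJob() {
      auto job = std::make_unique<InterpreterJob>();
      // ...configure the job...
      return job;  // unique_ptr<InterpreterJob> converts to unique_ptr<UnoptimizedJob>
    }

    int main() { auto job = NewJob(); }
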
diff --git a/deps/v8/src/json/json-parser.cc b/deps/v8/src/json/json-parser.cc
index e49775704d..3a790c210d 100644
--- a/deps/v8/src/json/json-parser.cc
+++ b/deps/v8/src/json/json-parser.cc
@@ -394,7 +394,8 @@ Handle<Map> ParentOfDescriptorOwner(Isolate* isolate, Handle<Map> maybe_root,
DCHECK_EQ(0, maybe_root->NumberOfOwnDescriptors());
return maybe_root;
}
- return handle(source->FindFieldOwner(isolate, descriptor - 1), isolate);
+ return handle(source->FindFieldOwner(isolate, InternalIndex(descriptor - 1)),
+ isolate);
}
} // namespace
@@ -461,10 +462,11 @@ Handle<Object> JsonParser<Char>::BuildJsonObject(
if (property.string.is_index()) continue;
Handle<String> expected;
Handle<Map> target;
+ InternalIndex descriptor_index(descriptor);
if (descriptor < feedback_descriptors) {
- expected = handle(
- String::cast(feedback->instance_descriptors().GetKey(descriptor)),
- isolate_);
+ expected = handle(String::cast(feedback->instance_descriptors().GetKey(
+ descriptor_index)),
+ isolate_);
} else {
DisallowHeapAllocation no_gc;
TransitionsAccessor transitions(isolate(), *map, &no_gc);
@@ -495,7 +497,7 @@ Handle<Object> JsonParser<Char>::BuildJsonObject(
Handle<Object> value = property.value;
PropertyDetails details =
- target->instance_descriptors().GetDetails(descriptor);
+ target->instance_descriptors().GetDetails(descriptor_index);
Representation expected_representation = details.representation();
if (!value->FitsRepresentation(expected_representation)) {
@@ -507,23 +509,24 @@ Handle<Object> JsonParser<Char>::BuildJsonObject(
}
Handle<FieldType> value_type =
value->OptimalType(isolate(), representation);
- Map::GeneralizeField(isolate(), target, descriptor, details.constness(),
- representation, value_type);
+ Map::GeneralizeField(isolate(), target, descriptor_index,
+ details.constness(), representation, value_type);
} else if (expected_representation.IsHeapObject() &&
!target->instance_descriptors()
- .GetFieldType(descriptor)
+ .GetFieldType(descriptor_index)
.NowContains(value)) {
Handle<FieldType> value_type =
value->OptimalType(isolate(), expected_representation);
- Map::GeneralizeField(isolate(), target, descriptor, details.constness(),
- expected_representation, value_type);
+ Map::GeneralizeField(isolate(), target, descriptor_index,
+ details.constness(), expected_representation,
+ value_type);
} else if (!FLAG_unbox_double_fields &&
expected_representation.IsDouble() && value->IsSmi()) {
new_mutable_double++;
}
DCHECK(target->instance_descriptors()
- .GetFieldType(descriptor)
+ .GetFieldType(descriptor_index)
.NowContains(value));
map = target;
descriptor++;
@@ -560,18 +563,21 @@ Handle<Object> JsonParser<Char>::BuildJsonObject(
: reinterpret_cast<Address>(
mutable_double_buffer->GetDataStartAddress());
Address filler_address = mutable_double_address;
- if (IsAligned(mutable_double_address, kDoubleAlignment)) {
- mutable_double_address += kTaggedSize;
- } else {
- filler_address += HeapNumber::kSize;
+ if (kTaggedSize != kDoubleSize) {
+ if (IsAligned(mutable_double_address, kDoubleAlignment)) {
+ mutable_double_address += kTaggedSize;
+ } else {
+ filler_address += HeapNumber::kSize;
+ }
}
for (int j = 0; j < i; j++) {
const JsonProperty& property = property_stack[start + j];
if (property.string.is_index()) continue;
+ InternalIndex descriptor_index(descriptor);
PropertyDetails details =
- map->instance_descriptors().GetDetails(descriptor);
+ map->instance_descriptors().GetDetails(descriptor_index);
Object value = *property.value;
- FieldIndex index = FieldIndex::ForDescriptor(*map, descriptor);
+ FieldIndex index = FieldIndex::ForDescriptor(*map, descriptor_index);
descriptor++;
if (details.representation().IsDouble()) {
@@ -619,9 +625,13 @@ Handle<Object> JsonParser<Char>::BuildJsonObject(
#ifdef DEBUG
Address end =
reinterpret_cast<Address>(mutable_double_buffer->GetDataEndAddress());
- DCHECK_EQ(Min(filler_address, mutable_double_address), end);
- DCHECK_GE(filler_address, end);
- DCHECK_GE(mutable_double_address, end);
+ if (kTaggedSize != kDoubleSize) {
+ DCHECK_EQ(Min(filler_address, mutable_double_address), end);
+ DCHECK_GE(filler_address, end);
+ DCHECK_GE(mutable_double_address, end);
+ } else {
+ DCHECK_EQ(mutable_double_address, end);
+ }
#endif
mutable_double_buffer->set_length(0);
}
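
The json-parser hunks wrap raw descriptor numbers in InternalIndex before handing them to the descriptor-array accessors, a strong-typedef pattern that keeps descriptor indices from being mixed with other integers. A simplified sketch of the pattern (hypothetical names; the real InternalIndex also supports ranges, as the json-stringifier hunk below uses via IterateOwnDescriptors):

    #include <cstddef>

    // Strong-typedef index: explicit construction only, so a plain int
    // cannot be passed where a descriptor index is expected.
    class DescriptorIndex {
     public:
      explicit DescriptorIndex(std::size_t value) : value_(value) {}
      std::size_t value() const { return value_; }
     private:
      std::size_t value_;
    };

    void GetDetails(DescriptorIndex) {}  // placeholder accessor

    int main() {
      int descriptor = 3;
      // GetDetails(descriptor);               // rejected: no implicit wrap
      GetDetails(DescriptorIndex(descriptor));  // explicit, as in the hunks above
    }
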
diff --git a/deps/v8/src/json/json-stringifier.cc b/deps/v8/src/json/json-stringifier.cc
index 684bcdcf54..47d6a0ddad 100644
--- a/deps/v8/src/json/json-stringifier.cc
+++ b/deps/v8/src/json/json-stringifier.cc
@@ -771,7 +771,7 @@ JsonStringifier::Result JsonStringifier::SerializeJSObject(
builder_.AppendCharacter('{');
Indent();
bool comma = false;
- for (int i = 0; i < map->NumberOfOwnDescriptors(); i++) {
+ for (InternalIndex i : map->IterateOwnDescriptors()) {
Handle<Name> name(map->instance_descriptors().GetKey(i), isolate_);
// TODO(rossberg): Should this throw?
if (!name->IsString()) continue;
diff --git a/deps/v8/src/libplatform/default-foreground-task-runner.cc b/deps/v8/src/libplatform/default-foreground-task-runner.cc
index 0a31024d9a..2394204348 100644
--- a/deps/v8/src/libplatform/default-foreground-task-runner.cc
+++ b/deps/v8/src/libplatform/default-foreground-task-runner.cc
@@ -60,6 +60,16 @@ bool DefaultForegroundTaskRunner::IdleTasksEnabled() {
return idle_task_support_ == IdleTaskSupport::kEnabled;
}
+void DefaultForegroundTaskRunner::PostNonNestableTask(
+ std::unique_ptr<Task> task) {
+ // Default platform does not nest tasks.
+ PostTask(std::move(task));
+}
+
+bool DefaultForegroundTaskRunner::NonNestableTasksEnabled() const {
+ return true;
+}
+
std::unique_ptr<Task> DefaultForegroundTaskRunner::PopTaskFromQueue(
MessageLoopBehavior wait_for_work) {
base::MutexGuard guard(&lock_);
diff --git a/deps/v8/src/libplatform/default-foreground-task-runner.h b/deps/v8/src/libplatform/default-foreground-task-runner.h
index 78c0f6b660..9ff30e3940 100644
--- a/deps/v8/src/libplatform/default-foreground-task-runner.h
+++ b/deps/v8/src/libplatform/default-foreground-task-runner.h
@@ -5,6 +5,7 @@
#ifndef V8_LIBPLATFORM_DEFAULT_FOREGROUND_TASK_RUNNER_H_
#define V8_LIBPLATFORM_DEFAULT_FOREGROUND_TASK_RUNNER_H_
+#include <memory>
#include <queue>
#include "include/libplatform/libplatform.h"
@@ -35,14 +36,15 @@ class V8_PLATFORM_EXPORT DefaultForegroundTaskRunner
// v8::TaskRunner implementation.
void PostTask(std::unique_ptr<Task> task) override;
-
void PostDelayedTask(std::unique_ptr<Task> task,
double delay_in_seconds) override;
void PostIdleTask(std::unique_ptr<IdleTask> task) override;
-
bool IdleTasksEnabled() override;
+ void PostNonNestableTask(std::unique_ptr<Task> task) override;
+ bool NonNestableTasksEnabled() const override;
+
private:
// The same as PostTask, but the lock is already held by the caller. The
// {guard} parameter should make sure that the caller is holding the lock.
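
The default foreground task runner now implements the non-nestable entry points from the v8::TaskRunner interface; since this runner never executes a task from inside another task, every task already behaves as non-nestable, so PostNonNestableTask can simply forward to PostTask. A standalone sketch of that shape (simplified; not the actual v8::TaskRunner API):

    #include <functional>
    #include <iostream>
    #include <queue>

    using Task = std::function<void()>;

    class SimpleTaskRunner {
     public:
      void PostTask(Task task) { queue_.push(std::move(task)); }
      // Tasks are never run reentrantly here, so non-nestable tasks need
      // no separate queue; this mirrors the forwarding in the diff above.
      void PostNonNestableTask(Task task) { PostTask(std::move(task)); }
      bool NonNestableTasksEnabled() const { return true; }
      void RunAll() {
        while (!queue_.empty()) {
          Task task = std::move(queue_.front());
          queue_.pop();
          task();
        }
      }
     private:
      std::queue<Task> queue_;
    };

    int main() {
      SimpleTaskRunner runner;
      runner.PostNonNestableTask([] { std::cout << "ran\n"; });
      runner.RunAll();
    }
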
diff --git a/deps/v8/src/libplatform/default-worker-threads-task-runner.cc b/deps/v8/src/libplatform/default-worker-threads-task-runner.cc
index 213e98801a..8cae955fd1 100644
--- a/deps/v8/src/libplatform/default-worker-threads-task-runner.cc
+++ b/deps/v8/src/libplatform/default-worker-threads-task-runner.cc
@@ -15,7 +15,7 @@ DefaultWorkerThreadsTaskRunner::DefaultWorkerThreadsTaskRunner(
time_function_(time_function),
thread_pool_size_(thread_pool_size) {
for (uint32_t i = 0; i < thread_pool_size; ++i) {
- thread_pool_.push_back(base::make_unique<WorkerThread>(this));
+ thread_pool_.push_back(std::make_unique<WorkerThread>(this));
}
}
diff --git a/deps/v8/src/libplatform/default-worker-threads-task-runner.h b/deps/v8/src/libplatform/default-worker-threads-task-runner.h
index 31b6c0e817..d761a36e1b 100644
--- a/deps/v8/src/libplatform/default-worker-threads-task-runner.h
+++ b/deps/v8/src/libplatform/default-worker-threads-task-runner.h
@@ -5,6 +5,7 @@
#ifndef V8_LIBPLATFORM_DEFAULT_WORKER_THREADS_TASK_RUNNER_H_
#define V8_LIBPLATFORM_DEFAULT_WORKER_THREADS_TASK_RUNNER_H_
+#include <memory>
#include <vector>
#include "include/libplatform/libplatform-export.h"
diff --git a/deps/v8/src/libplatform/delayed-task-queue.h b/deps/v8/src/libplatform/delayed-task-queue.h
index 675e9ecb8a..9fec948b86 100644
--- a/deps/v8/src/libplatform/delayed-task-queue.h
+++ b/deps/v8/src/libplatform/delayed-task-queue.h
@@ -6,6 +6,7 @@
#define V8_LIBPLATFORM_DELAYED_TASK_QUEUE_H_
#include <map>
+#include <memory>
#include <queue>
#include "include/libplatform/libplatform-export.h"
diff --git a/deps/v8/src/libplatform/task-queue.h b/deps/v8/src/libplatform/task-queue.h
index f8c76498f2..fbad3a8adf 100644
--- a/deps/v8/src/libplatform/task-queue.h
+++ b/deps/v8/src/libplatform/task-queue.h
@@ -5,6 +5,7 @@
#ifndef V8_LIBPLATFORM_TASK_QUEUE_H_
#define V8_LIBPLATFORM_TASK_QUEUE_H_
+#include <memory>
#include <queue>
#include "include/libplatform/libplatform-export.h"
diff --git a/deps/v8/src/libplatform/tracing/DEPS b/deps/v8/src/libplatform/tracing/DEPS
index 582200e094..7a45bba55a 100644
--- a/deps/v8/src/libplatform/tracing/DEPS
+++ b/deps/v8/src/libplatform/tracing/DEPS
@@ -1,4 +1,4 @@
include_rules = [
"+perfetto",
- "+third_party/perfetto/include/perfetto/base",
-]
\ No newline at end of file
+ "+protos/perfetto",
+]
diff --git a/deps/v8/src/libplatform/tracing/json-trace-event-listener.cc b/deps/v8/src/libplatform/tracing/json-trace-event-listener.cc
index 94b74ef255..60cc9a98a8 100644
--- a/deps/v8/src/libplatform/tracing/json-trace-event-listener.cc
+++ b/deps/v8/src/libplatform/tracing/json-trace-event-listener.cc
@@ -7,9 +7,9 @@
#include <cmath>
#include "base/trace_event/common/trace_event_common.h"
-#include "perfetto/trace/chrome/chrome_trace_packet.pb.h"
-#include "perfetto/trace/trace.pb.h"
#include "perfetto/tracing.h"
+#include "protos/perfetto/trace/chrome/chrome_trace_packet.pb.h"
+#include "protos/perfetto/trace/trace.pb.h"
#include "src/base/logging.h"
#include "src/base/macros.h"
diff --git a/deps/v8/src/libplatform/tracing/trace-event-listener.cc b/deps/v8/src/libplatform/tracing/trace-event-listener.cc
index 8224221228..2910d8fab2 100644
--- a/deps/v8/src/libplatform/tracing/trace-event-listener.cc
+++ b/deps/v8/src/libplatform/tracing/trace-event-listener.cc
@@ -4,7 +4,7 @@
#include "src/libplatform/tracing/trace-event-listener.h"
-#include "perfetto/trace/trace.pb.h"
+#include "protos/perfetto/trace/trace.pb.h"
#include "src/base/logging.h"
namespace v8 {
diff --git a/deps/v8/src/libplatform/tracing/trace-object.cc b/deps/v8/src/libplatform/tracing/trace-object.cc
index 6b6e0cf404..d16104df68 100644
--- a/deps/v8/src/libplatform/tracing/trace-object.cc
+++ b/deps/v8/src/libplatform/tracing/trace-object.cc
@@ -23,12 +23,11 @@ V8_INLINE static size_t GetAllocLength(const char* str) {
// location, and then advances |*buffer| by the amount written.
V8_INLINE static void CopyTraceObjectParameter(char** buffer,
const char** member) {
- if (*member) {
- size_t length = strlen(*member) + 1;
- strncpy(*buffer, *member, length);
- *member = *buffer;
- *buffer += length;
- }
+ if (*member == nullptr) return;
+ size_t length = strlen(*member) + 1;
+ memcpy(*buffer, *member, length);
+ *member = *buffer;
+ *buffer += length;
}
void TraceObject::Initialize(
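
CopyTraceObjectParameter now takes an early return and copies with memcpy; because the length is computed as strlen + 1, the copy includes the terminating NUL and is byte-for-byte what the old strncpy did, without strncpy's pad-to-length semantics suggesting otherwise. A standalone sketch of the same arena-copy pattern (illustrative names):

    #include <cstdio>
    #include <cstring>

    // Copies *member into *buffer, repoints *member at the copy, and
    // advances *buffer past it.
    static void CopyParameter(char** buffer, const char** member) {
      if (*member == nullptr) return;
      std::size_t length = std::strlen(*member) + 1;  // include the NUL
      std::memcpy(*buffer, *member, length);
      *member = *buffer;
      *buffer += length;
    }

    int main() {
      char arena[64];
      char* cursor = arena;
      const char* name = "trace-name";
      CopyParameter(&cursor, &name);
      std::printf("%s (now owned by the arena)\n", name);
    }
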
diff --git a/deps/v8/src/libplatform/tracing/tracing-controller.cc b/deps/v8/src/libplatform/tracing/tracing-controller.cc
index 3fb34366c2..d0972f9322 100644
--- a/deps/v8/src/libplatform/tracing/tracing-controller.cc
+++ b/deps/v8/src/libplatform/tracing/tracing-controller.cc
@@ -14,9 +14,9 @@
#ifdef V8_USE_PERFETTO
#include "base/trace_event/common/trace_event_common.h"
-#include "perfetto/trace/chrome/chrome_trace_event.pbzero.h"
-#include "perfetto/trace/trace_packet.pbzero.h"
#include "perfetto/tracing.h"
+#include "protos/perfetto/trace/chrome/chrome_trace_event.pbzero.h"
+#include "protos/perfetto/trace/trace_packet.pbzero.h"
#include "src/base/platform/platform.h"
#include "src/base/platform/semaphore.h"
#include "src/libplatform/tracing/json-trace-event-listener.h"
@@ -280,7 +280,7 @@ void TracingController::StartTracing(TraceConfig* trace_config) {
#ifdef V8_USE_PERFETTO
DCHECK_NOT_NULL(output_stream_);
DCHECK(output_stream_->good());
- json_listener_ = base::make_unique<JSONTraceEventListener>(output_stream_);
+ json_listener_ = std::make_unique<JSONTraceEventListener>(output_stream_);
// TODO(petermarshall): Set the other params for the config.
::perfetto::TraceConfig perfetto_trace_config;
diff --git a/deps/v8/src/libsampler/sampler.cc b/deps/v8/src/libsampler/sampler.cc
index e445dfc65a..d9f59dff74 100644
--- a/deps/v8/src/libsampler/sampler.cc
+++ b/deps/v8/src/libsampler/sampler.cc
@@ -526,7 +526,7 @@ void SignalHandler::FillRegisterState(void* context, RegisterState* state) {
#endif // USE_SIGNALS
Sampler::Sampler(Isolate* isolate)
- : isolate_(isolate), data_(base::make_unique<PlatformData>()) {}
+ : isolate_(isolate), data_(std::make_unique<PlatformData>()) {}
Sampler::~Sampler() {
DCHECK(!IsActive());
diff --git a/deps/v8/src/libsampler/sampler.h b/deps/v8/src/libsampler/sampler.h
index 997b127686..c606add82a 100644
--- a/deps/v8/src/libsampler/sampler.h
+++ b/deps/v8/src/libsampler/sampler.h
@@ -6,6 +6,7 @@
#define V8_LIBSAMPLER_SAMPLER_H_
#include <atomic>
+#include <memory>
#include <unordered_map>
#include "include/v8.h"
diff --git a/deps/v8/src/logging/counters-definitions.h b/deps/v8/src/logging/counters-definitions.h
index 8c808276fa..3931712152 100644
--- a/deps/v8/src/logging/counters-definitions.h
+++ b/deps/v8/src/logging/counters-definitions.h
@@ -221,6 +221,8 @@ namespace internal {
MICROSECOND) \
HT(compile_script_no_cache_because_cache_too_cold, \
V8.CompileScriptMicroSeconds.NoCache.CacheTooCold, 1000000, MICROSECOND) \
+ HT(compile_script_streaming_finalization, \
+ V8.CompileScriptMicroSeconds.StreamingFinalization, 1000000, MICROSECOND) \
HT(compile_script_on_background, \
V8.CompileScriptMicroSeconds.BackgroundThread, 1000000, MICROSECOND) \
HT(compile_function_on_background, \
diff --git a/deps/v8/src/logging/counters.cc b/deps/v8/src/logging/counters.cc
index ce2b1fe9c0..a6a56fac83 100644
--- a/deps/v8/src/logging/counters.cc
+++ b/deps/v8/src/logging/counters.cc
@@ -551,7 +551,7 @@ base::Thread::LocalStorageKey WorkerThreadRuntimeCallStats::GetKey() {
RuntimeCallStats* WorkerThreadRuntimeCallStats::NewTable() {
DCHECK(TracingFlags::is_runtime_stats_enabled());
std::unique_ptr<RuntimeCallStats> new_table =
- base::make_unique<RuntimeCallStats>();
+ std::make_unique<RuntimeCallStats>();
RuntimeCallStats* result = new_table.get();
base::MutexGuard lock(&mutex_);
diff --git a/deps/v8/src/logging/counters.h b/deps/v8/src/logging/counters.h
index 35df5ec038..99a3c3cf9b 100644
--- a/deps/v8/src/logging/counters.h
+++ b/deps/v8/src/logging/counters.h
@@ -5,6 +5,8 @@
#ifndef V8_LOGGING_COUNTERS_H_
#define V8_LOGGING_COUNTERS_H_
+#include <memory>
+
#include "include/v8.h"
#include "src/base/atomic-utils.h"
#include "src/base/optional.h"
@@ -780,7 +782,6 @@ class RuntimeCallTimer final {
V(Message_GetStartColumn) \
V(Module_Evaluate) \
V(Module_InstantiateModule) \
- V(Module_SetSyntheticModuleExport) \
V(NumberObject_New) \
V(NumberObject_NumberValue) \
V(Object_CallAsConstructor) \
@@ -1018,16 +1019,13 @@ class RuntimeCallTimer final {
V(LoadIC_LoadNormalDH) \
V(LoadIC_LoadNormalFromPrototypeDH) \
V(LoadIC_NonReceiver) \
- V(LoadIC_Premonomorphic) \
V(LoadIC_SlowStub) \
V(LoadIC_StringLength) \
V(LoadIC_StringWrapperLength) \
V(StoreGlobalIC_SlowStub) \
V(StoreGlobalIC_StoreScriptContextField) \
- V(StoreGlobalIC_Premonomorphic) \
V(StoreIC_HandlerCacheHit_Accessor) \
V(StoreIC_NonReceiver) \
- V(StoreIC_Premonomorphic) \
V(StoreIC_SlowStub) \
V(StoreIC_StoreAccessorDH) \
V(StoreIC_StoreAccessorOnPrototypeDH) \
diff --git a/deps/v8/src/logging/log-utils.cc b/deps/v8/src/logging/log-utils.cc
index 3980882402..e5c0b027fa 100644
--- a/deps/v8/src/logging/log-utils.cc
+++ b/deps/v8/src/logging/log-utils.cc
@@ -75,8 +75,7 @@ FILE* Log::Close() {
}
output_handle_ = nullptr;
- DeleteArray(format_buffer_);
- format_buffer_ = nullptr;
+ format_buffer_.reset();
is_stopped_ = false;
return result;
@@ -84,7 +83,7 @@ FILE* Log::Close() {
Log::MessageBuilder::MessageBuilder(Log* log)
: log_(log), lock_guard_(&log_->mutex_) {
- DCHECK_NOT_NULL(log_->format_buffer_);
+ DCHECK_NOT_NULL(log_->format_buffer_.get());
}
void Log::MessageBuilder::AppendString(String str,
@@ -185,7 +184,7 @@ void Log::MessageBuilder::AppendSymbolNameDetails(String str,
int Log::MessageBuilder::FormatStringIntoBuffer(const char* format,
va_list args) {
- Vector<char> buf(log_->format_buffer_, Log::kMessageBufferSize);
+ Vector<char> buf(log_->format_buffer_.get(), Log::kMessageBufferSize);
int length = v8::internal::VSNPrintF(buf, format, args);
// |length| is -1 if output was truncated.
if (length == -1) length = Log::kMessageBufferSize;
diff --git a/deps/v8/src/logging/log-utils.h b/deps/v8/src/logging/log-utils.h
index bc5b09d438..e89a449f3b 100644
--- a/deps/v8/src/logging/log-utils.h
+++ b/deps/v8/src/logging/log-utils.h
@@ -125,7 +125,7 @@ class Log {
// Buffer used for formatting log messages. This is a singleton buffer and
// mutex_ should be acquired before using it.
- char* format_buffer_;
+ std::unique_ptr<char[]> format_buffer_;
Logger* logger_;
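The Log changes above replace a manually managed char* message buffer (paired allocation and DeleteArray calls) with std::unique_ptr<char[]>, so Close() shrinks to a reset() and the raw pointer is only handed out via get() to printf-style helpers. A hedged sketch of the idiom; the buffer size value is assumed for illustration, since the header does not show it:

    #include <memory>

    class Log {
     public:
      static const int kMessageBufferSize = 2048;  // assumed value, for illustration
      Log() : format_buffer_(new char[kMessageBufferSize]) {}
      void Close() { format_buffer_.reset(); }  // frees the array, nulls the pointer
      char* buffer() { return format_buffer_.get(); }  // non-owning view for VSNPrintF
     private:
      // The array form guarantees delete[] rather than delete.
      std::unique_ptr<char[]> format_buffer_;
    };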
diff --git a/deps/v8/src/logging/log.cc b/deps/v8/src/logging/log.cc
index 9b86a16031..2befcd330a 100644
--- a/deps/v8/src/logging/log.cc
+++ b/deps/v8/src/logging/log.cc
@@ -180,9 +180,9 @@ class CodeEventLogger::NameBuffer {
};
CodeEventLogger::CodeEventLogger(Isolate* isolate)
- : isolate_(isolate), name_buffer_(new NameBuffer) {}
+ : isolate_(isolate), name_buffer_(std::make_unique<NameBuffer>()) {}
-CodeEventLogger::~CodeEventLogger() { delete name_buffer_; }
+CodeEventLogger::~CodeEventLogger() = default;
void CodeEventLogger::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
AbstractCode code, const char* comment) {
@@ -477,6 +477,23 @@ void ExternalCodeEventListener::RegExpCodeCreateEvent(AbstractCode code,
code_event_handler_->Handle(reinterpret_cast<v8::CodeEvent*>(&code_event));
}
+void ExternalCodeEventListener::CodeMoveEvent(AbstractCode from,
+ AbstractCode to) {
+ CodeEvent code_event;
+ code_event.previous_code_start_address =
+ static_cast<uintptr_t>(from.InstructionStart());
+ code_event.code_start_address = static_cast<uintptr_t>(to.InstructionStart());
+ code_event.code_size = static_cast<size_t>(to.InstructionSize());
+ code_event.function_name = isolate_->factory()->empty_string();
+ code_event.script_name = isolate_->factory()->empty_string();
+ code_event.script_line = 0;
+ code_event.script_column = 0;
+ code_event.code_type = v8::CodeEventType::kRelocationType;
+ code_event.comment = "";
+
+ code_event_handler_->Handle(reinterpret_cast<v8::CodeEvent*>(&code_event));
+}
+
// Low-level logging support.
class LowLevelLogger : public CodeEventLogger {
public:
@@ -816,7 +833,7 @@ class Ticker : public sampler::Sampler {
Ticker(Isolate* isolate, int interval_microseconds)
: sampler::Sampler(reinterpret_cast<v8::Isolate*>(isolate)),
sampling_thread_(
- base::make_unique<SamplingThread>(this, interval_microseconds)) {}
+ std::make_unique<SamplingThread>(this, interval_microseconds)) {}
~Ticker() override {
if (IsActive()) Stop();
@@ -910,13 +927,11 @@ void Profiler::Run() {
Logger::Logger(Isolate* isolate)
: isolate_(isolate),
- log_events_(nullptr),
is_logging_(false),
- log_(nullptr),
is_initialized_(false),
existing_code_logger_(isolate) {}
-Logger::~Logger() { delete log_; }
+Logger::~Logger() = default;
const LogSeparator Logger::kNext = LogSeparator::kSeparator;
@@ -931,7 +946,7 @@ void Logger::RemoveCodeEventListener(CodeEventListener* listener) {
void Logger::ProfilerBeginEvent() {
if (!log_->IsEnabled()) return;
- Log::MessageBuilder msg(log_);
+ Log::MessageBuilder msg(log_.get());
msg << "profiler" << kNext << "begin" << kNext << FLAG_prof_sampling_interval;
msg.WriteToLogFile();
}
@@ -942,7 +957,7 @@ void Logger::StringEvent(const char* name, const char* value) {
void Logger::UncheckedStringEvent(const char* name, const char* value) {
if (!log_->IsEnabled()) return;
- Log::MessageBuilder msg(log_);
+ Log::MessageBuilder msg(log_.get());
msg << name << kNext << value;
msg.WriteToLogFile();
}
@@ -953,7 +968,7 @@ void Logger::IntPtrTEvent(const char* name, intptr_t value) {
void Logger::UncheckedIntPtrTEvent(const char* name, intptr_t value) {
if (!log_->IsEnabled()) return;
- Log::MessageBuilder msg(log_);
+ Log::MessageBuilder msg(log_.get());
msg << name << kNext;
msg.AppendFormatString("%" V8PRIdPTR, value);
msg.WriteToLogFile();
@@ -961,14 +976,14 @@ void Logger::UncheckedIntPtrTEvent(const char* name, intptr_t value) {
void Logger::HandleEvent(const char* name, Address* location) {
if (!log_->IsEnabled() || !FLAG_log_handles) return;
- Log::MessageBuilder msg(log_);
+ Log::MessageBuilder msg(log_.get());
msg << name << kNext << reinterpret_cast<void*>(location);
msg.WriteToLogFile();
}
void Logger::ApiSecurityCheck() {
if (!log_->IsEnabled() || !FLAG_log_api) return;
- Log::MessageBuilder msg(log_);
+ Log::MessageBuilder msg(log_.get());
msg << "api" << kNext << "check-security";
msg.WriteToLogFile();
}
@@ -977,7 +992,7 @@ void Logger::SharedLibraryEvent(const std::string& library_path,
uintptr_t start, uintptr_t end,
intptr_t aslr_slide) {
if (!log_->IsEnabled() || !FLAG_prof_cpp) return;
- Log::MessageBuilder msg(log_);
+ Log::MessageBuilder msg(log_.get());
msg << "shared-library" << kNext << library_path.c_str() << kNext
<< reinterpret_cast<void*>(start) << kNext << reinterpret_cast<void*>(end)
<< kNext << aslr_slide;
@@ -988,7 +1003,7 @@ void Logger::CodeDeoptEvent(Code code, DeoptimizeKind kind, Address pc,
int fp_to_sp_delta) {
if (!log_->IsEnabled()) return;
Deoptimizer::DeoptInfo info = Deoptimizer::GetDeoptInfo(code, pc);
- Log::MessageBuilder msg(log_);
+ Log::MessageBuilder msg(log_.get());
msg << "code-deopt" << kNext << timer_.Elapsed().InMicroseconds() << kNext
<< code.CodeSize() << kNext
<< reinterpret_cast<void*>(code.InstructionStart());
@@ -1014,14 +1029,14 @@ void Logger::CodeDeoptEvent(Code code, DeoptimizeKind kind, Address pc,
void Logger::CurrentTimeEvent() {
if (!log_->IsEnabled()) return;
DCHECK(FLAG_log_internal_timer_events);
- Log::MessageBuilder msg(log_);
+ Log::MessageBuilder msg(log_.get());
msg << "current-time" << kNext << timer_.Elapsed().InMicroseconds();
msg.WriteToLogFile();
}
void Logger::TimerEvent(Logger::StartEnd se, const char* name) {
if (!log_->IsEnabled()) return;
- Log::MessageBuilder msg(log_);
+ Log::MessageBuilder msg(log_.get());
switch (se) {
case START:
msg << "timer-event-start";
@@ -1053,7 +1068,7 @@ void Logger::ApiNamedPropertyAccess(const char* tag, JSObject holder,
Object property_name) {
DCHECK(property_name.IsName());
if (!log_->IsEnabled() || !FLAG_log_api) return;
- Log::MessageBuilder msg(log_);
+ Log::MessageBuilder msg(log_.get());
msg << "api" << kNext << tag << kNext << holder.class_name() << kNext
<< Name::cast(property_name);
msg.WriteToLogFile();
@@ -1062,7 +1077,7 @@ void Logger::ApiNamedPropertyAccess(const char* tag, JSObject holder,
void Logger::ApiIndexedPropertyAccess(const char* tag, JSObject holder,
uint32_t index) {
if (!log_->IsEnabled() || !FLAG_log_api) return;
- Log::MessageBuilder msg(log_);
+ Log::MessageBuilder msg(log_.get());
msg << "api" << kNext << tag << kNext << holder.class_name() << kNext
<< index;
msg.WriteToLogFile();
@@ -1070,21 +1085,21 @@ void Logger::ApiIndexedPropertyAccess(const char* tag, JSObject holder,
void Logger::ApiObjectAccess(const char* tag, JSObject object) {
if (!log_->IsEnabled() || !FLAG_log_api) return;
- Log::MessageBuilder msg(log_);
+ Log::MessageBuilder msg(log_.get());
msg << "api" << kNext << tag << kNext << object.class_name();
msg.WriteToLogFile();
}
void Logger::ApiEntryCall(const char* name) {
if (!log_->IsEnabled() || !FLAG_log_api) return;
- Log::MessageBuilder msg(log_);
+ Log::MessageBuilder msg(log_.get());
msg << "api" << kNext << name;
msg.WriteToLogFile();
}
void Logger::NewEvent(const char* name, void* object, size_t size) {
if (!log_->IsEnabled() || !FLAG_log) return;
- Log::MessageBuilder msg(log_);
+ Log::MessageBuilder msg(log_.get());
msg << "new" << kNext << name << kNext << object << kNext
<< static_cast<unsigned int>(size);
msg.WriteToLogFile();
@@ -1092,7 +1107,7 @@ void Logger::NewEvent(const char* name, void* object, size_t size) {
void Logger::DeleteEvent(const char* name, void* object) {
if (!log_->IsEnabled() || !FLAG_log) return;
- Log::MessageBuilder msg(log_);
+ Log::MessageBuilder msg(log_.get());
msg << "delete" << kNext << name << kNext << object;
msg.WriteToLogFile();
}
@@ -1100,7 +1115,7 @@ void Logger::DeleteEvent(const char* name, void* object) {
void Logger::CallbackEventInternal(const char* prefix, Name name,
Address entry_point) {
if (!FLAG_log_code || !log_->IsEnabled()) return;
- Log::MessageBuilder msg(log_);
+ Log::MessageBuilder msg(log_.get());
msg << kLogEventsNames[CodeEventListener::CODE_CREATION_EVENT] << kNext
<< kLogEventsNames[CodeEventListener::CALLBACK_TAG] << kNext << -2
<< kNext << timer_.Elapsed().InMicroseconds() << kNext
@@ -1149,7 +1164,7 @@ void Logger::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
AbstractCode code, const char* comment) {
if (!is_listening_to_code_events()) return;
if (!FLAG_log_code || !log_->IsEnabled()) return;
- Log::MessageBuilder msg(log_);
+ Log::MessageBuilder msg(log_.get());
AppendCodeCreateHeader(msg, tag, code, &timer_);
msg << comment;
msg.WriteToLogFile();
@@ -1159,7 +1174,7 @@ void Logger::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
AbstractCode code, Name name) {
if (!is_listening_to_code_events()) return;
if (!FLAG_log_code || !log_->IsEnabled()) return;
- Log::MessageBuilder msg(log_);
+ Log::MessageBuilder msg(log_.get());
AppendCodeCreateHeader(msg, tag, code, &timer_);
msg << name;
msg.WriteToLogFile();
@@ -1175,7 +1190,7 @@ void Logger::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
return;
}
- Log::MessageBuilder msg(log_);
+ Log::MessageBuilder msg(log_.get());
AppendCodeCreateHeader(msg, tag, code, &timer_);
msg << name << kNext << reinterpret_cast<void*>(shared.address()) << kNext
<< ComputeMarker(shared, code);
@@ -1186,7 +1201,7 @@ void Logger::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
const wasm::WasmCode* code, wasm::WasmName name) {
if (!is_listening_to_code_events()) return;
if (!FLAG_log_code || !log_->IsEnabled()) return;
- Log::MessageBuilder msg(log_);
+ Log::MessageBuilder msg(log_.get());
AppendCodeCreateHeader(msg, tag, AbstractCode::Kind::WASM_FUNCTION,
code->instructions().begin(),
code->instructions().length(), &timer_);
@@ -1215,7 +1230,7 @@ void Logger::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
if (!is_listening_to_code_events()) return;
if (!FLAG_log_code || !log_->IsEnabled()) return;
{
- Log::MessageBuilder msg(log_);
+ Log::MessageBuilder msg(log_.get());
AppendCodeCreateHeader(msg, tag, code, &timer_);
msg << shared.DebugName() << " " << source << ":" << line << ":" << column
<< kNext << reinterpret_cast<void*>(shared.address()) << kNext
@@ -1250,7 +1265,7 @@ void Logger::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
// <function-id> is an index into the <fns> function table
// <fns> is the function table encoded as a sequence of strings
// S<shared-function-info-address>
- Log::MessageBuilder msg(log_);
+ Log::MessageBuilder msg(log_.get());
msg << "code-source-info" << kNext
<< reinterpret_cast<void*>(code.InstructionStart()) << kNext
<< script.id() << kNext << shared.StartPosition() << kNext
@@ -1307,7 +1322,7 @@ void Logger::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
void Logger::CodeDisableOptEvent(AbstractCode code, SharedFunctionInfo shared) {
if (!is_listening_to_code_events()) return;
if (!FLAG_log_code || !log_->IsEnabled()) return;
- Log::MessageBuilder msg(log_);
+ Log::MessageBuilder msg(log_.get());
msg << kLogEventsNames[CodeEventListener::CODE_DISABLE_OPT_EVENT] << kNext
<< shared.DebugName() << kNext
<< GetBailoutReason(shared.disable_optimization_reason());
@@ -1323,7 +1338,7 @@ void Logger::CodeMovingGCEvent() {
void Logger::RegExpCodeCreateEvent(AbstractCode code, String source) {
if (!is_listening_to_code_events()) return;
if (!FLAG_log_code || !log_->IsEnabled()) return;
- Log::MessageBuilder msg(log_);
+ Log::MessageBuilder msg(log_.get());
AppendCodeCreateHeader(msg, CodeEventListener::REG_EXP_TAG, code, &timer_);
msg << source;
msg.WriteToLogFile();
@@ -1373,7 +1388,7 @@ void Logger::CodeLinePosInfoRecordEvent(
void Logger::CodeNameEvent(Address addr, int pos, const char* code_name) {
if (code_name == nullptr) return; // Not a code object.
- Log::MessageBuilder msg(log_);
+ Log::MessageBuilder msg(log_.get());
msg << kLogEventsNames[CodeEventListener::SNAPSHOT_CODE_NAME_EVENT] << kNext
<< pos << kNext << code_name;
msg.WriteToLogFile();
@@ -1387,7 +1402,7 @@ void Logger::SharedFunctionInfoMoveEvent(Address from, Address to) {
void Logger::MoveEventInternal(CodeEventListener::LogEventsAndTags event,
Address from, Address to) {
if (!FLAG_log_code || !log_->IsEnabled()) return;
- Log::MessageBuilder msg(log_);
+ Log::MessageBuilder msg(log_.get());
msg << kLogEventsNames[event] << kNext << reinterpret_cast<void*>(from)
<< kNext << reinterpret_cast<void*>(to);
msg.WriteToLogFile();
@@ -1395,7 +1410,7 @@ void Logger::MoveEventInternal(CodeEventListener::LogEventsAndTags event,
void Logger::ResourceEvent(const char* name, const char* tag) {
if (!log_->IsEnabled() || !FLAG_log) return;
- Log::MessageBuilder msg(log_);
+ Log::MessageBuilder msg(log_.get());
msg << name << kNext << tag << kNext;
uint32_t sec, usec;
@@ -1409,7 +1424,7 @@ void Logger::ResourceEvent(const char* name, const char* tag) {
void Logger::SuspectReadEvent(Name name, Object obj) {
if (!log_->IsEnabled() || !FLAG_log_suspect) return;
- Log::MessageBuilder msg(log_);
+ Log::MessageBuilder msg(log_.get());
String class_name = obj.IsJSObject() ? JSObject::cast(obj).class_name()
: ReadOnlyRoots(isolate_).empty_string();
msg << "suspect-read" << kNext << class_name << kNext << name;
@@ -1432,7 +1447,7 @@ void Logger::FunctionEvent(const char* reason, int script_id, double time_delta,
int start_position, int end_position,
String function_name) {
if (!log_->IsEnabled() || !FLAG_log_function_events) return;
- Log::MessageBuilder msg(log_);
+ Log::MessageBuilder msg(log_.get());
AppendFunctionMessage(msg, reason, script_id, time_delta, start_position,
end_position, &timer_);
if (!function_name.is_null()) msg << function_name;
@@ -1444,7 +1459,7 @@ void Logger::FunctionEvent(const char* reason, int script_id, double time_delta,
const char* function_name,
size_t function_name_length) {
if (!log_->IsEnabled() || !FLAG_log_function_events) return;
- Log::MessageBuilder msg(log_);
+ Log::MessageBuilder msg(log_.get());
AppendFunctionMessage(msg, reason, script_id, time_delta, start_position,
end_position, &timer_);
if (function_name_length > 0) {
@@ -1456,7 +1471,7 @@ void Logger::FunctionEvent(const char* reason, int script_id, double time_delta,
void Logger::CompilationCacheEvent(const char* action, const char* cache_type,
SharedFunctionInfo sfi) {
if (!log_->IsEnabled() || !FLAG_log_function_events) return;
- Log::MessageBuilder msg(log_);
+ Log::MessageBuilder msg(log_.get());
int script_id = -1;
if (sfi.script().IsScript()) {
script_id = Script::cast(sfi.script()).id();
@@ -1470,7 +1485,7 @@ void Logger::CompilationCacheEvent(const char* action, const char* cache_type,
void Logger::ScriptEvent(ScriptEventType type, int script_id) {
if (!log_->IsEnabled() || !FLAG_log_function_events) return;
- Log::MessageBuilder msg(log_);
+ Log::MessageBuilder msg(log_.get());
msg << "script" << Logger::kNext;
switch (type) {
case ScriptEventType::kReserveId:
@@ -1497,7 +1512,7 @@ void Logger::ScriptEvent(ScriptEventType type, int script_id) {
void Logger::ScriptDetails(Script script) {
if (!log_->IsEnabled() || !FLAG_log_function_events) return;
{
- Log::MessageBuilder msg(log_);
+ Log::MessageBuilder msg(log_.get());
msg << "script-details" << Logger::kNext << script.id() << Logger::kNext;
if (script.name().IsString()) {
msg << String::cast(script.name());
@@ -1514,7 +1529,7 @@ void Logger::ScriptDetails(Script script) {
bool Logger::EnsureLogScriptSource(Script script) {
if (!log_->IsEnabled()) return false;
- Log::MessageBuilder msg(log_);
+ Log::MessageBuilder msg(log_.get());
// Make sure the script is written to the log file.
int script_id = script.id();
if (logged_source_code_.find(script_id) != logged_source_code_.end()) {
@@ -1544,7 +1559,7 @@ void Logger::RuntimeCallTimerEvent() {
RuntimeCallStats* stats = isolate_->counters()->runtime_call_stats();
RuntimeCallCounter* counter = stats->current_counter();
if (counter == nullptr) return;
- Log::MessageBuilder msg(log_);
+ Log::MessageBuilder msg(log_.get());
msg << "active-runtime-timer" << kNext << counter->name();
msg.WriteToLogFile();
}
@@ -1555,7 +1570,7 @@ void Logger::TickEvent(TickSample* sample, bool overflow) {
v8::tracing::TracingCategoryObserver::ENABLED_BY_NATIVE)) {
RuntimeCallTimerEvent();
}
- Log::MessageBuilder msg(log_);
+ Log::MessageBuilder msg(log_.get());
msg << kLogEventsNames[CodeEventListener::TICK_EVENT] << kNext
<< reinterpret_cast<void*>(sample->pc) << kNext
<< timer_.Elapsed().InMicroseconds();
@@ -1577,7 +1592,7 @@ void Logger::ICEvent(const char* type, bool keyed, Map map, Object key,
char old_state, char new_state, const char* modifier,
const char* slow_stub_reason) {
if (!log_->IsEnabled() || !FLAG_trace_ic) return;
- Log::MessageBuilder msg(log_);
+ Log::MessageBuilder msg(log_.get());
if (keyed) msg << "Keyed";
int line;
int column;
@@ -1611,7 +1626,7 @@ void Logger::MapEvent(const char* type, Map from, Map to, const char* reason,
if (!isolate_->bootstrapper()->IsActive()) {
pc = isolate_->GetAbstractPC(&line, &column);
}
- Log::MessageBuilder msg(log_);
+ Log::MessageBuilder msg(log_.get());
msg << "map" << kNext << type << kNext << timer_.Elapsed().InMicroseconds()
<< kNext << AsHex::Address(from.ptr()) << kNext
<< AsHex::Address(to.ptr()) << kNext << AsHex::Address(pc) << kNext
@@ -1634,7 +1649,7 @@ void Logger::MapEvent(const char* type, Map from, Map to, const char* reason,
void Logger::MapCreate(Map map) {
if (!log_->IsEnabled() || !FLAG_trace_maps) return;
DisallowHeapAllocation no_gc;
- Log::MessageBuilder msg(log_);
+ Log::MessageBuilder msg(log_.get());
msg << "map-create" << kNext << timer_.Elapsed().InMicroseconds() << kNext
<< AsHex::Address(map.ptr());
msg.WriteToLogFile();
@@ -1643,7 +1658,7 @@ void Logger::MapCreate(Map map) {
void Logger::MapDetails(Map map) {
if (!log_->IsEnabled() || !FLAG_trace_maps) return;
DisallowHeapAllocation no_gc;
- Log::MessageBuilder msg(log_);
+ Log::MessageBuilder msg(log_.get());
msg << "map-details" << kNext << timer_.Elapsed().InMicroseconds() << kNext
<< AsHex::Address(map.ptr()) << kNext;
if (FLAG_trace_maps_details) {
@@ -1842,24 +1857,25 @@ bool Logger::SetUp(Isolate* isolate) {
std::ostringstream log_file_name;
std::ostringstream source_log_file_name;
PrepareLogFileName(log_file_name, isolate, FLAG_logfile);
- log_ = new Log(this, log_file_name.str().c_str());
+ log_ = std::make_unique<Log>(this, log_file_name.str().c_str());
if (FLAG_perf_basic_prof) {
- perf_basic_logger_.reset(new PerfBasicLogger(isolate));
+ perf_basic_logger_ = std::make_unique<PerfBasicLogger>(isolate);
AddCodeEventListener(perf_basic_logger_.get());
}
if (FLAG_perf_prof) {
- perf_jit_logger_.reset(new PerfJitLogger(isolate));
+ perf_jit_logger_ = std::make_unique<PerfJitLogger>(isolate);
AddCodeEventListener(perf_jit_logger_.get());
}
if (FLAG_ll_prof) {
- ll_logger_.reset(new LowLevelLogger(isolate, log_file_name.str().c_str()));
+ ll_logger_ =
+ std::make_unique<LowLevelLogger>(isolate, log_file_name.str().c_str());
AddCodeEventListener(ll_logger_.get());
}
- ticker_.reset(new Ticker(isolate, FLAG_prof_sampling_interval));
+ ticker_ = std::make_unique<Ticker>(isolate, FLAG_prof_sampling_interval);
if (Log::InitLogAtStart()) {
is_logging_ = true;
@@ -1868,7 +1884,7 @@ bool Logger::SetUp(Isolate* isolate) {
timer_.Start();
if (FLAG_prof_cpp) {
- profiler_.reset(new Profiler(isolate));
+ profiler_ = std::make_unique<Profiler>(isolate);
is_logging_ = true;
profiler_->Engage();
}
@@ -1891,7 +1907,7 @@ void Logger::SetCodeEventHandler(uint32_t options,
if (isolate_->wasm_engine() != nullptr) {
isolate_->wasm_engine()->EnableCodeLogging(isolate_);
}
- jit_logger_.reset(new JitLogger(isolate_, event_handler));
+ jit_logger_ = std::make_unique<JitLogger>(isolate_, event_handler);
AddCodeEventListener(jit_logger_.get());
if (options & kJitCodeEventEnumExisting) {
HandleScope scope(isolate_);
@@ -2042,9 +2058,9 @@ void ExistingCodeLogger::LogCompiledFunctions() {
const int wasm_module_objects_count =
EnumerateWasmModuleObjects(heap, nullptr);
- std::unique_ptr<Handle<WasmModuleObject>[]> module_objects(
- new Handle<WasmModuleObject>[wasm_module_objects_count]);
- EnumerateWasmModuleObjects(heap, module_objects.get());
+ ScopedVector<Handle<WasmModuleObject>> module_objects(
+ wasm_module_objects_count);
+ EnumerateWasmModuleObjects(heap, module_objects.begin());
for (int i = 0; i < wasm_module_objects_count; ++i) {
module_objects[i]->native_module()->LogWasmCodes(isolate_);
}
diff --git a/deps/v8/src/logging/log.h b/deps/v8/src/logging/log.h
index 3c28222982..69760c4c6c 100644
--- a/deps/v8/src/logging/log.h
+++ b/deps/v8/src/logging/log.h
@@ -5,6 +5,7 @@
#ifndef V8_LOGGING_LOG_H_
#define V8_LOGGING_LOG_H_
+#include <memory>
#include <set>
#include <string>
@@ -115,6 +116,9 @@ class Logger : public CodeEventListener {
kStreamingCompile
};
+ explicit Logger(Isolate* isolate);
+ ~Logger();
+
// The separator is used to write an unescaped "," into the log.
static const LogSeparator kNext;
@@ -273,9 +277,6 @@ class Logger : public CodeEventListener {
void LogCodeObject(Object code_object);
private:
- explicit Logger(Isolate* isolate);
- ~Logger() override;
-
// Emits the profiler's first message.
void ProfilerBeginEvent();
@@ -314,21 +315,11 @@ class Logger : public CodeEventListener {
// of samples.
std::unique_ptr<Profiler> profiler_;
- // An array of log events names.
- const char* const* log_events_;
-
- // Internal implementation classes with access to
- // private members.
- friend class EventLog;
- friend class Isolate;
- friend class TimeLog;
+ // Internal implementation classes with access to private members.
friend class Profiler;
- template <StateTag Tag>
- friend class VMState;
- friend class LoggerTestHelper;
bool is_logging_;
- Log* log_;
+ std::unique_ptr<Log> log_;
std::unique_ptr<PerfBasicLogger> perf_basic_logger_;
std::unique_ptr<PerfJitLogger> perf_jit_logger_;
std::unique_ptr<LowLevelLogger> ll_logger_;
@@ -419,7 +410,7 @@ class V8_EXPORT_PRIVATE CodeEventLogger : public CodeEventListener {
virtual void LogRecordedBuffer(const wasm::WasmCode* code, const char* name,
int length) = 0;
- NameBuffer* name_buffer_;
+ std::unique_ptr<NameBuffer> name_buffer_;
};
struct CodeEvent {
@@ -432,6 +423,7 @@ struct CodeEvent {
int script_column;
CodeEventType code_type;
const char* comment;
+ uintptr_t previous_code_start_address;
};
class ExternalCodeEventListener : public CodeEventListener {
@@ -457,7 +449,7 @@ class ExternalCodeEventListener : public CodeEventListener {
void SetterCallbackEvent(Name name, Address entry_point) override {}
void SharedFunctionInfoMoveEvent(Address from, Address to) override {}
void NativeContextMoveEvent(Address from, Address to) override {}
- void CodeMoveEvent(AbstractCode from, AbstractCode to) override {}
+ void CodeMoveEvent(AbstractCode from, AbstractCode to) override;
void CodeDisableOptEvent(AbstractCode code,
SharedFunctionInfo shared) override {}
void CodeMovingGCEvent() override {}
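The many log_ to log_.get() edits in log.cc and the member change in log.h implement one ownership split: Logger owns its Log through a std::unique_ptr (so ~Logger can be defaulted), while Log::MessageBuilder merely borrows a raw pointer for the duration of one message. A minimal sketch of that owner/borrower split, with all real logging machinery elided:

    #include <memory>

    class Log { /* buffered log file, mutex, etc. elided */ };

    class MessageBuilder {
     public:
      explicit MessageBuilder(Log* log) : log_(log) {}  // borrows, never deletes
     private:
      Log* log_;  // non-owning; valid only while the owning Logger is alive
    };

    class Logger {
     public:
      Logger() : log_(std::make_unique<Log>()) {}
      // Destructor can be defaulted; unique_ptr replaces the old `delete log_;`.
      void Emit() { MessageBuilder msg(log_.get()); /* ...build and write... */ }
     private:
      std::unique_ptr<Log> log_;
    };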
diff --git a/deps/v8/src/numbers/OWNERS b/deps/v8/src/numbers/OWNERS
index df62d01730..882d275fe8 100644
--- a/deps/v8/src/numbers/OWNERS
+++ b/deps/v8/src/numbers/OWNERS
@@ -1,4 +1,4 @@
-clemensh@chromium.org
+clemensb@chromium.org
jgruber@chromium.org
jkummerow@chromium.org
sigurds@chromium.org
diff --git a/deps/v8/src/numbers/math-random.cc b/deps/v8/src/numbers/math-random.cc
index dee18788a7..d45b4d0a5f 100644
--- a/deps/v8/src/numbers/math-random.cc
+++ b/deps/v8/src/numbers/math-random.cc
@@ -16,9 +16,8 @@ namespace internal {
void MathRandom::InitializeContext(Isolate* isolate,
Handle<Context> native_context) {
- Handle<FixedDoubleArray> cache =
- Handle<FixedDoubleArray>::cast(isolate->factory()->NewFixedDoubleArray(
- kCacheSize, AllocationType::kOld));
+ Handle<FixedDoubleArray> cache = Handle<FixedDoubleArray>::cast(
+ isolate->factory()->NewFixedDoubleArray(kCacheSize));
for (int i = 0; i < kCacheSize; i++) cache->set(i, 0);
native_context->set_math_random_cache(*cache);
Handle<PodArray<State>> pod =
diff --git a/deps/v8/src/objects/arguments.h b/deps/v8/src/objects/arguments.h
index a306ef592a..0a1e3e4ac9 100644
--- a/deps/v8/src/objects/arguments.h
+++ b/deps/v8/src/objects/arguments.h
@@ -16,7 +16,7 @@
namespace v8 {
namespace internal {
-// Superclass for all objects with instance type {JS_ARGUMENTS_TYPE}
+// Superclass for all objects with instance type {JS_ARGUMENTS_OBJECT_TYPE}
class JSArgumentsObject
: public TorqueGeneratedJSArgumentsObject<JSArgumentsObject, JSObject> {
public:
@@ -25,15 +25,16 @@ class JSArgumentsObject
};
// Common superclass for JSSloppyArgumentsObject and JSStrictArgumentsObject.
-// Note that the instance type {JS_ARGUMENTS_TYPE} does _not_ guarantee the
-// below layout, the in-object properties might have transitioned to dictionary
-// mode already. Only use the below layout with the specific initial maps.
+// Note that the instance type {JS_ARGUMENTS_OBJECT_TYPE} does _not_ guarantee
+// the below layout; the in-object properties might have transitioned to
+// dictionary mode already. Only use the below layout with the specific initial
+// maps.
class JSArgumentsObjectWithLength : public JSArgumentsObject {
public:
// Layout description.
DEFINE_FIELD_OFFSET_CONSTANTS(
JSObject::kHeaderSize,
- TORQUE_GENERATED_JSARGUMENTS_OBJECT_WITH_LENGTH_FIELDS)
+ TORQUE_GENERATED_JS_ARGUMENTS_OBJECT_WITH_LENGTH_FIELDS)
// Indices of in-object properties.
static const int kLengthIndex = 0;
@@ -50,7 +51,7 @@ class JSSloppyArgumentsObject : public JSArgumentsObjectWithLength {
public:
DEFINE_FIELD_OFFSET_CONSTANTS(
JSArgumentsObjectWithLength::kSize,
- TORQUE_GENERATED_JSSLOPPY_ARGUMENTS_OBJECT_FIELDS)
+ TORQUE_GENERATED_JS_SLOPPY_ARGUMENTS_OBJECT_FIELDS)
// Indices of in-object properties.
static const int kCalleeIndex = kLengthIndex + 1;
diff --git a/deps/v8/src/objects/backing-store.cc b/deps/v8/src/objects/backing-store.cc
new file mode 100644
index 0000000000..55957e001b
--- /dev/null
+++ b/deps/v8/src/objects/backing-store.cc
@@ -0,0 +1,648 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/objects/backing-store.h"
+#include "src/execution/isolate.h"
+#include "src/handles/global-handles.h"
+#include "src/logging/counters.h"
+#include "src/wasm/wasm-engine.h"
+#include "src/wasm/wasm-limits.h"
+#include "src/wasm/wasm-objects-inl.h"
+
+#define TRACE_BS(...) \
+ do { \
+ if (FLAG_trace_backing_store) PrintF(__VA_ARGS__); \
+ } while (false)
+
+namespace v8 {
+namespace internal {
+
+namespace {
+#if V8_TARGET_ARCH_64_BIT
+constexpr bool kUseGuardRegions = true;
+#else
+constexpr bool kUseGuardRegions = false;
+#endif
+
+#if V8_TARGET_ARCH_MIPS64
+// MIPS64 has a user space of 2^40 bytes on most processors, so the
+// address space limit needs to be smaller.
+constexpr size_t kAddressSpaceLimit = 0x8000000000L; // 512 GiB
+#elif V8_TARGET_ARCH_64_BIT
+constexpr size_t kAddressSpaceLimit = 0x10100000000L; // 1 TiB + 4 GiB
+#else
+constexpr size_t kAddressSpaceLimit = 0xC0000000; // 3 GiB
+#endif
+
+constexpr uint64_t kOneGiB = 1024 * 1024 * 1024;
+constexpr uint64_t kNegativeGuardSize = 2 * kOneGiB;
+constexpr uint64_t kFullGuardSize = 10 * kOneGiB;
+
+std::atomic<uint64_t> reserved_address_space_{0};
+
+// Allocation results are reported to UMA
+//
+// See wasm_memory_allocation_result in counters.h
+enum class AllocationStatus {
+ kSuccess, // Succeeded on the first try
+
+ kSuccessAfterRetry, // Succeeded after garbage collection
+
+ kAddressSpaceLimitReachedFailure, // Failed because Wasm is at its address
+ // space limit
+
+ kOtherFailure // Failed for an unknown reason
+};
+
+base::AddressRegion GetGuardedRegion(void* buffer_start, size_t byte_length) {
+ // Guard regions always look like this:
+ // |xxx(2GiB)xxx|.......(4GiB)..xxxxx|xxxxxx(4GiB)xxxxxx|
+ // ^ buffer_start
+ // ^ byte_length
+ // ^ negative guard region ^ positive guard region
+
+ Address start = reinterpret_cast<Address>(buffer_start);
+ DCHECK_EQ(8, sizeof(size_t)); // only use on 64-bit
+ DCHECK_EQ(0, start % AllocatePageSize());
+ return base::AddressRegion(start - (2 * kOneGiB),
+ static_cast<size_t>(kFullGuardSize));
+}
+
+void RecordStatus(Isolate* isolate, AllocationStatus status) {
+ isolate->counters()->wasm_memory_allocation_result()->AddSample(
+ static_cast<int>(status));
+}
+
+inline void DebugCheckZero(void* start, size_t byte_length) {
+#if DEBUG
+ // Double check memory is zero-initialized.
+ const byte* bytes = reinterpret_cast<const byte*>(start);
+ for (size_t i = 0; i < byte_length; i++) {
+ DCHECK_EQ(0, bytes[i]);
+ }
+#endif
+}
+} // namespace
+
+bool BackingStore::ReserveAddressSpace(uint64_t num_bytes) {
+ uint64_t reservation_limit = kAddressSpaceLimit;
+ while (true) {
+ uint64_t old_count = reserved_address_space_.load();
+ if (old_count > reservation_limit) return false;
+ if (reservation_limit - old_count < num_bytes) return false;
+ if (reserved_address_space_.compare_exchange_weak(old_count,
+ old_count + num_bytes)) {
+ return true;
+ }
+ }
+}
+
+void BackingStore::ReleaseReservation(uint64_t num_bytes) {
+ uint64_t old_reserved = reserved_address_space_.fetch_sub(num_bytes);
+ USE(old_reserved);
+ DCHECK_LE(num_bytes, old_reserved);
+}
+
+// The backing store for a Wasm shared memory remembers all the isolates
+// with which it has been shared.
+struct SharedWasmMemoryData {
+ std::vector<Isolate*> isolates_;
+};
+
+void BackingStore::Clear() {
+ buffer_start_ = nullptr;
+ byte_length_ = 0;
+ has_guard_regions_ = false;
+ type_specific_data_.v8_api_array_buffer_allocator = nullptr;
+}
+
+BackingStore::~BackingStore() {
+ GlobalBackingStoreRegistry::Unregister(this);
+
+ if (buffer_start_ == nullptr) return; // nothing to deallocate
+
+ if (is_wasm_memory_) {
+ DCHECK(free_on_destruct_);
+ TRACE_BS("BSw:free bs=%p mem=%p (length=%zu, capacity=%zu)\n", this,
+ buffer_start_, byte_length(), byte_capacity_);
+ if (is_shared_) {
+ // Deallocate the list of attached memory objects.
+ SharedWasmMemoryData* shared_data = get_shared_wasm_memory_data();
+ delete shared_data;
+ type_specific_data_.shared_wasm_memory_data = nullptr;
+ }
+
+ // Wasm memories are always allocated through the page allocator.
+ auto region =
+ has_guard_regions_
+ ? GetGuardedRegion(buffer_start_, byte_length_)
+ : base::AddressRegion(reinterpret_cast<Address>(buffer_start_),
+ byte_capacity_);
+ bool pages_were_freed =
+ region.size() == 0 /* no need to free any pages */ ||
+ FreePages(GetPlatformPageAllocator(),
+ reinterpret_cast<void*>(region.begin()), region.size());
+ CHECK(pages_were_freed);
+ BackingStore::ReleaseReservation(has_guard_regions_ ? kFullGuardSize
+ : byte_capacity_);
+ Clear();
+ return;
+ }
+ if (free_on_destruct_) {
+ // JSArrayBuffer backing store. Deallocate through the embedder's allocator.
+ auto allocator = reinterpret_cast<v8::ArrayBuffer::Allocator*>(
+ get_v8_api_array_buffer_allocator());
+ TRACE_BS("BS:free bs=%p mem=%p (length=%zu, capacity=%zu)\n", this,
+ buffer_start_, byte_length(), byte_capacity_);
+ allocator->Free(buffer_start_, byte_length_);
+ }
+ Clear();
+}
+
+// Allocate a backing store using the array buffer allocator from the embedder.
+std::unique_ptr<BackingStore> BackingStore::Allocate(
+ Isolate* isolate, size_t byte_length, SharedFlag shared,
+ InitializedFlag initialized) {
+ void* buffer_start = nullptr;
+ auto allocator = isolate->array_buffer_allocator();
+ CHECK_NOT_NULL(allocator);
+ if (byte_length != 0) {
+ auto counters = isolate->counters();
+ int mb_length = static_cast<int>(byte_length / MB);
+ if (mb_length > 0) {
+ counters->array_buffer_big_allocations()->AddSample(mb_length);
+ }
+ if (shared == SharedFlag::kShared) {
+ counters->shared_array_allocations()->AddSample(mb_length);
+ }
+ auto allocate_buffer = [allocator, initialized](size_t byte_length) {
+ if (initialized == InitializedFlag::kUninitialized) {
+ return allocator->AllocateUninitialized(byte_length);
+ }
+ void* buffer_start = allocator->Allocate(byte_length);
+ if (buffer_start) {
+ // TODO(wasm): node does not implement the zero-initialization API.
+ // Reenable this debug check when node does implement it properly.
+ constexpr bool
+ kDebugCheckZeroDisabledDueToNodeNotImplementingZeroInitAPI = true;
+ if ((!(kDebugCheckZeroDisabledDueToNodeNotImplementingZeroInitAPI)) &&
+ !FLAG_mock_arraybuffer_allocator) {
+ DebugCheckZero(buffer_start, byte_length);
+ }
+ }
+ return buffer_start;
+ };
+
+ buffer_start = isolate->heap()->AllocateExternalBackingStore(
+ allocate_buffer, byte_length);
+
+ if (buffer_start == nullptr) {
+ // Allocation failed.
+ counters->array_buffer_new_size_failures()->AddSample(mb_length);
+ return {};
+ }
+ }
+
+ auto result = new BackingStore(buffer_start, // start
+ byte_length, // length
+ byte_length, // capacity
+ shared, // shared
+ false, // is_wasm_memory
+ true, // free_on_destruct
+ false); // has_guard_regions
+
+ TRACE_BS("BS:alloc bs=%p mem=%p (length=%zu)\n", result,
+ result->buffer_start(), byte_length);
+ result->type_specific_data_.v8_api_array_buffer_allocator = allocator;
+ return std::unique_ptr<BackingStore>(result);
+}
+
+// Allocate a backing store for a Wasm memory. Always use the page allocator
+// and add guard regions.
+std::unique_ptr<BackingStore> BackingStore::TryAllocateWasmMemory(
+ Isolate* isolate, size_t initial_pages, size_t maximum_pages,
+ SharedFlag shared) {
+ // Cannot reserve 0 pages on some OSes.
+ if (maximum_pages == 0) maximum_pages = 1;
+
+ TRACE_BS("BSw:try %zu pages, %zu max\n", initial_pages, maximum_pages);
+
+ bool guards = kUseGuardRegions;
+
+ // For accounting purposes, whether a GC was necessary.
+ bool did_retry = false;
+
+ // A helper to try running a function up to 3 times, executing a GC
+ // if the first and second attempts failed.
+ auto gc_retry = [&](const std::function<bool()>& fn) {
+ for (int i = 0; i < 3; i++) {
+ if (fn()) return true;
+ // Collect garbage and retry.
+ did_retry = true;
+ // TODO(wasm): try Heap::EagerlyFreeExternalMemory() first?
+ isolate->heap()->MemoryPressureNotification(
+ MemoryPressureLevel::kCritical, true);
+ }
+ return false;
+ };
+
+ // Compute size of reserved memory.
+
+ size_t engine_max_pages = wasm::max_mem_pages();
+ size_t byte_capacity =
+ std::min(engine_max_pages, maximum_pages) * wasm::kWasmPageSize;
+ size_t reservation_size =
+ guards ? static_cast<size_t>(kFullGuardSize) : byte_capacity;
+
+ //--------------------------------------------------------------------------
+ // 1. Enforce maximum address space reservation per engine.
+ //--------------------------------------------------------------------------
+ auto reserve_memory_space = [&] {
+ return BackingStore::ReserveAddressSpace(reservation_size);
+ };
+
+ if (!gc_retry(reserve_memory_space)) {
+ // Crash on out-of-memory if the correctness fuzzer is running.
+ if (FLAG_correctness_fuzzer_suppressions) {
+ FATAL("could not allocate wasm memory backing store");
+ }
+ RecordStatus(isolate, AllocationStatus::kAddressSpaceLimitReachedFailure);
+ TRACE_BS("BSw:try failed to reserve address space\n");
+ return {};
+ }
+
+ //--------------------------------------------------------------------------
+ // 2. Allocate pages (inaccessible by default).
+ //--------------------------------------------------------------------------
+ void* allocation_base = nullptr;
+ auto allocate_pages = [&] {
+ allocation_base =
+ AllocatePages(GetPlatformPageAllocator(), nullptr, reservation_size,
+ wasm::kWasmPageSize, PageAllocator::kNoAccess);
+ return allocation_base != nullptr;
+ };
+ if (!gc_retry(allocate_pages)) {
+ // Page allocator could not reserve enough pages.
+ BackingStore::ReleaseReservation(reservation_size);
+ RecordStatus(isolate, AllocationStatus::kOtherFailure);
+ TRACE_BS("BSw:try failed to allocate pages\n");
+ return {};
+ }
+
+  // Get a pointer to the start of the buffer, skipping the negative guard
+  // region if necessary.
+ byte* buffer_start = reinterpret_cast<byte*>(allocation_base) +
+ (guards ? kNegativeGuardSize : 0);
+
+ //--------------------------------------------------------------------------
+ // 3. Commit the initial pages (allow read/write).
+ //--------------------------------------------------------------------------
+ size_t byte_length = initial_pages * wasm::kWasmPageSize;
+ auto commit_memory = [&] {
+ return byte_length == 0 ||
+ SetPermissions(GetPlatformPageAllocator(), buffer_start, byte_length,
+ PageAllocator::kReadWrite);
+ };
+ if (!gc_retry(commit_memory)) {
+ // SetPermissions put us over the process memory limit.
+ V8::FatalProcessOutOfMemory(nullptr, "BackingStore::AllocateWasmMemory()");
+ TRACE_BS("BSw:try failed to set permissions\n");
+ }
+
+ DebugCheckZero(buffer_start, byte_length); // touch the bytes.
+
+ RecordStatus(isolate, did_retry ? AllocationStatus::kSuccessAfterRetry
+ : AllocationStatus::kSuccess);
+
+ auto result = new BackingStore(buffer_start, // start
+ byte_length, // length
+ byte_capacity, // capacity
+ shared, // shared
+ true, // is_wasm_memory
+ true, // free_on_destruct
+ guards); // has_guard_regions
+
+ TRACE_BS("BSw:alloc bs=%p mem=%p (length=%zu, capacity=%zu)\n", result,
+ result->buffer_start(), byte_length, byte_capacity);
+
+ // Shared Wasm memories need an anchor for the memory object list.
+ if (shared == SharedFlag::kShared) {
+ result->type_specific_data_.shared_wasm_memory_data =
+ new SharedWasmMemoryData();
+ }
+
+ return std::unique_ptr<BackingStore>(result);
+}
+
+// Allocate a backing store for a Wasm memory. Always use the page allocator
+// and add guard regions.
+std::unique_ptr<BackingStore> BackingStore::AllocateWasmMemory(
+ Isolate* isolate, size_t initial_pages, size_t maximum_pages,
+ SharedFlag shared) {
+ // Wasm pages must be a multiple of the allocation page size.
+ DCHECK_EQ(0, wasm::kWasmPageSize % AllocatePageSize());
+
+ // Enforce engine limitation on the maximum number of pages.
+ if (initial_pages > wasm::max_mem_pages()) return nullptr;
+
+ auto backing_store =
+ TryAllocateWasmMemory(isolate, initial_pages, maximum_pages, shared);
+ if (!backing_store && maximum_pages > initial_pages) {
+ // If reserving {maximum_pages} failed, try with maximum = initial.
+ backing_store =
+ TryAllocateWasmMemory(isolate, initial_pages, initial_pages, shared);
+ }
+ return backing_store;
+}
+
+std::unique_ptr<BackingStore> BackingStore::CopyWasmMemory(Isolate* isolate,
+ size_t new_pages) {
+ DCHECK_GE(new_pages * wasm::kWasmPageSize, byte_length_);
+ // Note that we could allocate uninitialized to save initialization cost here,
+ // but since Wasm memories are allocated by the page allocator, the zeroing
+ // cost is already built-in.
+ // TODO(titzer): should we use a suitable maximum here?
+ auto new_backing_store = BackingStore::AllocateWasmMemory(
+ isolate, new_pages, new_pages,
+ is_shared() ? SharedFlag::kShared : SharedFlag::kNotShared);
+
+ if (!new_backing_store ||
+ new_backing_store->has_guard_regions() != has_guard_regions_) {
+ return {};
+ }
+
+ if (byte_length_ > 0) {
+ memcpy(new_backing_store->buffer_start(), buffer_start_, byte_length_);
+ }
+
+ return new_backing_store;
+}
+
+// Try to grow the size of a wasm memory in place, without realloc + copy.
+bool BackingStore::GrowWasmMemoryInPlace(Isolate* isolate, size_t delta_pages,
+ size_t max_pages) {
+ DCHECK(is_wasm_memory_);
+ max_pages = std::min(max_pages, byte_capacity_ / wasm::kWasmPageSize);
+
+ if (delta_pages == 0) return true; // degenerate grow.
+ if (delta_pages > max_pages) return false; // would never work.
+
+ // Do a compare-exchange loop, because we also need to adjust page
+  // permissions. Note that multiple racing grows may each try to set page
+  // permissions for the entire range (to be RW), so the operating system
+  // has to deal with that raciness. A grow has succeeded once it manages
+  // to compare-and-swap the old length with the new length.
+ size_t old_length = 0;
+ size_t new_length = 0;
+ while (true) {
+ old_length = byte_length_.load(std::memory_order_acquire);
+ size_t current_pages = old_length / wasm::kWasmPageSize;
+
+    // Check if we have exceeded the supplied maximum.
+ if (current_pages > (max_pages - delta_pages)) return false;
+
+ new_length = (current_pages + delta_pages) * wasm::kWasmPageSize;
+
+ // Try to adjust the permissions on the memory.
+ if (!i::SetPermissions(GetPlatformPageAllocator(), buffer_start_,
+ new_length, PageAllocator::kReadWrite)) {
+ return false;
+ }
+ if (byte_length_.compare_exchange_weak(old_length, new_length,
+ std::memory_order_acq_rel)) {
+ // Successfully updated both the length and permissions.
+ break;
+ }
+ }
+
+ if (!is_shared_) {
+ // Only do per-isolate accounting for non-shared backing stores.
+ reinterpret_cast<v8::Isolate*>(isolate)
+ ->AdjustAmountOfExternalAllocatedMemory(new_length - old_length);
+ }
+ return true;
+}
+
+void BackingStore::AttachSharedWasmMemoryObject(
+ Isolate* isolate, Handle<WasmMemoryObject> memory_object) {
+ DCHECK(is_wasm_memory_);
+ DCHECK(is_shared_);
+ // We need to take the global registry lock for this operation.
+ GlobalBackingStoreRegistry::AddSharedWasmMemoryObject(isolate, this,
+ memory_object);
+}
+
+void BackingStore::BroadcastSharedWasmMemoryGrow(
+ Isolate* isolate, std::shared_ptr<BackingStore> backing_store,
+ size_t new_pages) {
+ GlobalBackingStoreRegistry::BroadcastSharedWasmMemoryGrow(
+ isolate, backing_store, new_pages);
+}
+
+void BackingStore::RemoveSharedWasmMemoryObjects(Isolate* isolate) {
+ GlobalBackingStoreRegistry::Purge(isolate);
+}
+
+void BackingStore::UpdateSharedWasmMemoryObjects(Isolate* isolate) {
+ GlobalBackingStoreRegistry::UpdateSharedWasmMemoryObjects(isolate);
+}
+
+std::unique_ptr<BackingStore> BackingStore::WrapAllocation(
+ Isolate* isolate, void* allocation_base, size_t allocation_length,
+ SharedFlag shared, bool free_on_destruct) {
+ auto result =
+ new BackingStore(allocation_base, allocation_length, allocation_length,
+ shared, false, free_on_destruct, false);
+ result->type_specific_data_.v8_api_array_buffer_allocator =
+ isolate->array_buffer_allocator();
+ TRACE_BS("BS:wrap bs=%p mem=%p (length=%zu)\n", result,
+ result->buffer_start(), result->byte_length());
+ return std::unique_ptr<BackingStore>(result);
+}
+
+std::unique_ptr<BackingStore> BackingStore::EmptyBackingStore(
+ SharedFlag shared) {
+ auto result = new BackingStore(nullptr, // start
+ 0, // length
+ 0, // capacity
+ shared, // shared
+ false, // is_wasm_memory
+ false, // free_on_destruct
+ false); // has_guard_regions
+
+ return std::unique_ptr<BackingStore>(result);
+}
+
+void* BackingStore::get_v8_api_array_buffer_allocator() {
+ CHECK(!is_wasm_memory_);
+ auto array_buffer_allocator =
+ type_specific_data_.v8_api_array_buffer_allocator;
+ CHECK_NOT_NULL(array_buffer_allocator);
+ return array_buffer_allocator;
+}
+
+SharedWasmMemoryData* BackingStore::get_shared_wasm_memory_data() {
+ CHECK(is_wasm_memory_ && is_shared_);
+ auto shared_wasm_memory_data = type_specific_data_.shared_wasm_memory_data;
+ CHECK(shared_wasm_memory_data);
+ return shared_wasm_memory_data;
+}
+
+namespace {
+// Implementation details of GlobalBackingStoreRegistry.
+struct GlobalBackingStoreRegistryImpl {
+ GlobalBackingStoreRegistryImpl() {}
+ base::Mutex mutex_;
+ std::unordered_map<const void*, std::weak_ptr<BackingStore>> map_;
+};
+base::LazyInstance<GlobalBackingStoreRegistryImpl>::type global_registry_impl_ =
+ LAZY_INSTANCE_INITIALIZER;
+inline GlobalBackingStoreRegistryImpl* impl() {
+ return global_registry_impl_.Pointer();
+}
+} // namespace
+
+void GlobalBackingStoreRegistry::Register(
+ std::shared_ptr<BackingStore> backing_store) {
+ if (!backing_store || !backing_store->buffer_start()) return;
+
+ if (!backing_store->free_on_destruct()) {
+ // If the backing store buffer is managed by the embedder,
+    // then we don't have to guarantee that there is a single unique
+    // BackingStore per buffer_start(), because the destructor of
+    // the BackingStore will be a no-op in that case.
+ return;
+ }
+
+ base::MutexGuard scope_lock(&impl()->mutex_);
+ if (backing_store->globally_registered_) return;
+ TRACE_BS("BS:reg bs=%p mem=%p (length=%zu, capacity=%zu)\n",
+ backing_store.get(), backing_store->buffer_start(),
+ backing_store->byte_length(), backing_store->byte_capacity());
+ std::weak_ptr<BackingStore> weak = backing_store;
+ auto result = impl()->map_.insert({backing_store->buffer_start(), weak});
+ CHECK(result.second);
+ backing_store->globally_registered_ = true;
+}
+
+void GlobalBackingStoreRegistry::Unregister(BackingStore* backing_store) {
+ if (!backing_store->globally_registered_) return;
+
+ DCHECK_NOT_NULL(backing_store->buffer_start());
+
+ base::MutexGuard scope_lock(&impl()->mutex_);
+ const auto& result = impl()->map_.find(backing_store->buffer_start());
+ if (result != impl()->map_.end()) {
+ DCHECK(!result->second.lock());
+ impl()->map_.erase(result);
+ }
+ backing_store->globally_registered_ = false;
+}
+
+std::shared_ptr<BackingStore> GlobalBackingStoreRegistry::Lookup(
+ void* buffer_start, size_t length) {
+ base::MutexGuard scope_lock(&impl()->mutex_);
+ TRACE_BS("BS:lookup mem=%p (%zu bytes)\n", buffer_start, length);
+ const auto& result = impl()->map_.find(buffer_start);
+ if (result == impl()->map_.end()) {
+ return std::shared_ptr<BackingStore>();
+ }
+ auto backing_store = result->second.lock();
+ DCHECK_EQ(buffer_start, backing_store->buffer_start());
+ DCHECK_EQ(length, backing_store->byte_length());
+ return backing_store;
+}
+
+void GlobalBackingStoreRegistry::Purge(Isolate* isolate) {
+ // We need to keep a reference to all backing stores that are inspected
+ // in the purging loop below. Otherwise, we might get a deadlock
+ // if the temporary backing store reference created in the loop is
+ // the last reference. In that case the destructor of the backing store
+ // may try to take the &impl()->mutex_ in order to unregister itself.
+ std::vector<std::shared_ptr<BackingStore>> prevent_destruction_under_lock;
+ base::MutexGuard scope_lock(&impl()->mutex_);
+ // Purge all entries in the map that refer to the given isolate.
+ for (auto& entry : impl()->map_) {
+ auto backing_store = entry.second.lock();
+ prevent_destruction_under_lock.emplace_back(backing_store);
+ if (!backing_store) continue; // skip entries where weak ptr is null
+ if (!backing_store->is_wasm_memory()) continue; // skip non-wasm memory
+ if (!backing_store->is_shared()) continue; // skip non-shared memory
+ SharedWasmMemoryData* shared_data =
+ backing_store->get_shared_wasm_memory_data();
+ // Remove this isolate from the isolates list.
+ auto& isolates = shared_data->isolates_;
+ for (size_t i = 0; i < isolates.size(); i++) {
+ if (isolates[i] == isolate) isolates[i] = nullptr;
+ }
+ }
+}
+
+void GlobalBackingStoreRegistry::AddSharedWasmMemoryObject(
+ Isolate* isolate, BackingStore* backing_store,
+ Handle<WasmMemoryObject> memory_object) {
+ // Add to the weak array list of shared memory objects in the isolate.
+ isolate->AddSharedWasmMemory(memory_object);
+
+ // Add the isolate to the list of isolates sharing this backing store.
+ base::MutexGuard scope_lock(&impl()->mutex_);
+ SharedWasmMemoryData* shared_data =
+ backing_store->get_shared_wasm_memory_data();
+ auto& isolates = shared_data->isolates_;
+ int free_entry = -1;
+ for (size_t i = 0; i < isolates.size(); i++) {
+ if (isolates[i] == isolate) return;
+ if (isolates[i] == nullptr) free_entry = static_cast<int>(i);
+ }
+ if (free_entry >= 0)
+ isolates[free_entry] = isolate;
+ else
+ isolates.push_back(isolate);
+}
+
+void GlobalBackingStoreRegistry::BroadcastSharedWasmMemoryGrow(
+ Isolate* isolate, std::shared_ptr<BackingStore> backing_store,
+ size_t new_pages) {
+ {
+ // The global lock protects the list of isolates per backing store.
+ base::MutexGuard scope_lock(&impl()->mutex_);
+ SharedWasmMemoryData* shared_data =
+ backing_store->get_shared_wasm_memory_data();
+ for (Isolate* other : shared_data->isolates_) {
+ if (other && other != isolate) {
+ other->stack_guard()->RequestGrowSharedMemory();
+ }
+ }
+ }
+ // Update memory objects in this isolate.
+ UpdateSharedWasmMemoryObjects(isolate);
+}
+
+void GlobalBackingStoreRegistry::UpdateSharedWasmMemoryObjects(
+ Isolate* isolate) {
+ HandleScope scope(isolate);
+ Handle<WeakArrayList> shared_wasm_memories =
+ isolate->factory()->shared_wasm_memories();
+
+ for (int i = 0; i < shared_wasm_memories->length(); i++) {
+ HeapObject obj;
+ if (!shared_wasm_memories->Get(i).GetHeapObject(&obj)) continue;
+
+ Handle<WasmMemoryObject> memory_object(WasmMemoryObject::cast(obj),
+ isolate);
+ Handle<JSArrayBuffer> old_buffer(memory_object->array_buffer(), isolate);
+ std::shared_ptr<BackingStore> backing_store = old_buffer->GetBackingStore();
+
+ if (old_buffer->byte_length() != backing_store->byte_length()) {
+ Handle<JSArrayBuffer> new_buffer =
+ isolate->factory()->NewJSSharedArrayBuffer(std::move(backing_store));
+ memory_object->update_instances(isolate, new_buffer);
+ }
+ }
+}
+
+} // namespace internal
+} // namespace v8
+
+#undef TRACE_BS
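The trickiest part of the new backing-store code is GrowWasmMemoryInPlace() above: page permissions are widened first, then the atomic byte length is published through a compare-exchange loop so concurrent growers settle on a single winner. A self-contained sketch of just that pattern; MakeReadWrite() is an illustrative stub standing in for SetPermissions(), not a V8 API:

    #include <atomic>
    #include <cstddef>

    constexpr size_t kPageSize = 64 * 1024;  // wasm::kWasmPageSize

    // Stub for SetPermissions(); assume it makes [0, new_length) read/write.
    bool MakeReadWrite(size_t /*new_length*/) { return true; }

    bool GrowInPlace(std::atomic<size_t>& byte_length, size_t delta_pages,
                     size_t max_pages) {
      if (delta_pages == 0) return true;          // degenerate grow
      if (delta_pages > max_pages) return false;  // can never fit
      while (true) {
        size_t old_length = byte_length.load(std::memory_order_acquire);
        size_t current_pages = old_length / kPageSize;
        if (current_pages > max_pages - delta_pages) return false;
        size_t new_length = (current_pages + delta_pages) * kPageSize;
        if (!MakeReadWrite(new_length)) return false;
        // Racing growers may each widen permissions; the CAS picks the winner.
        if (byte_length.compare_exchange_weak(old_length, new_length,
                                              std::memory_order_acq_rel)) {
          return true;
        }
      }
    }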
diff --git a/deps/v8/src/objects/backing-store.h b/deps/v8/src/objects/backing-store.h
new file mode 100644
index 0000000000..2c6ffb28da
--- /dev/null
+++ b/deps/v8/src/objects/backing-store.h
@@ -0,0 +1,206 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_BACKING_STORE_H_
+#define V8_OBJECTS_BACKING_STORE_H_
+
+#include <memory>
+
+#include "include/v8-internal.h"
+#include "src/handles/handles.h"
+
+namespace v8 {
+namespace internal {
+
+class Isolate;
+class WasmMemoryObject;
+
+// Whether the backing store is shared or not.
+enum class SharedFlag : uint8_t { kNotShared, kShared };
+
+// Whether the backing store memory is initialized to zero or not.
+enum class InitializedFlag : uint8_t { kUninitialized, kZeroInitialized };
+
+// Internal information for shared wasm memories; for example, it contains
+// a list of all memory objects (across all isolates) that share this
+// backing store.
+struct SharedWasmMemoryData;
+
+// The {BackingStore} data structure stores all the low-level details about the
+// backing store of an array buffer or Wasm memory, including its base address
+// and length, whether it is shared, provided by the embedder, has guard
+// regions, etc. Instances of this class *own* the underlying memory
+// when they are created through one of the {Allocate()} methods below,
+// and the destructor frees the memory (and page allocation if necessary).
+// Backing stores can also *wrap* embedder-allocated memory. In this case,
+// they do not own the memory, and upon destruction, they do not deallocate it.
+class V8_EXPORT_PRIVATE BackingStore : public BackingStoreBase {
+ public:
+ ~BackingStore();
+
+ // Allocate an array buffer backing store using the default method,
+ // which currently is the embedder-provided array buffer allocator.
+ static std::unique_ptr<BackingStore> Allocate(Isolate* isolate,
+ size_t byte_length,
+ SharedFlag shared,
+ InitializedFlag initialized);
+
+ // Allocate the backing store for a Wasm memory.
+ static std::unique_ptr<BackingStore> AllocateWasmMemory(Isolate* isolate,
+ size_t initial_pages,
+ size_t maximum_pages,
+ SharedFlag shared);
+
+ // Create a backing store that wraps existing allocated memory.
+ // If {free_on_destruct} is {true}, the memory will be freed using the
+ // ArrayBufferAllocator::Free() callback when this backing store is
+ // destructed. Otherwise destructing the backing store will do nothing
+ // to the allocated memory.
+ static std::unique_ptr<BackingStore> WrapAllocation(Isolate* isolate,
+ void* allocation_base,
+ size_t allocation_length,
+ SharedFlag shared,
+ bool free_on_destruct);
+
+ // Create an empty backing store.
+ static std::unique_ptr<BackingStore> EmptyBackingStore(SharedFlag shared);
+
+ // Accessors.
+ void* buffer_start() const { return buffer_start_; }
+ size_t byte_length() const {
+ return byte_length_.load(std::memory_order_relaxed);
+ }
+ size_t byte_capacity() const { return byte_capacity_; }
+ bool is_shared() const { return is_shared_; }
+ bool is_wasm_memory() const { return is_wasm_memory_; }
+ bool has_guard_regions() const { return has_guard_regions_; }
+ bool free_on_destruct() const { return free_on_destruct_; }
+
+ // Attempt to grow this backing store in place.
+ bool GrowWasmMemoryInPlace(Isolate* isolate, size_t delta_pages,
+ size_t max_pages);
+
+ // Allocate a new, larger, backing store for this Wasm memory and copy the
+ // contents of this backing store into it.
+ std::unique_ptr<BackingStore> CopyWasmMemory(Isolate* isolate,
+ size_t new_pages);
+
+ // Attach the given memory object to this backing store. The memory object
+ // will be updated if this backing store is grown.
+ void AttachSharedWasmMemoryObject(Isolate* isolate,
+ Handle<WasmMemoryObject> memory_object);
+
+ // Send asynchronous updates to attached memory objects in other isolates
+ // after the backing store has been grown. Memory objects in this
+ // isolate are updated synchronously.
+ static void BroadcastSharedWasmMemoryGrow(Isolate* isolate,
+ std::shared_ptr<BackingStore>,
+ size_t new_pages);
+
+ // TODO(wasm): address space limitations should be enforced in page alloc.
+ // These methods enforce a limit on the total amount of address space,
+ // which is used for both backing stores and wasm memory.
+ static bool ReserveAddressSpace(uint64_t num_bytes);
+ static void ReleaseReservation(uint64_t num_bytes);
+
+ // Remove all memory objects in the given isolate that refer to this
+ // backing store.
+ static void RemoveSharedWasmMemoryObjects(Isolate* isolate);
+
+ // Update all shared memory objects in this isolate (after a grow operation).
+ static void UpdateSharedWasmMemoryObjects(Isolate* isolate);
+
+ private:
+ friend class GlobalBackingStoreRegistry;
+
+ BackingStore(void* buffer_start, size_t byte_length, size_t byte_capacity,
+ SharedFlag shared, bool is_wasm_memory, bool free_on_destruct,
+ bool has_guard_regions)
+ : buffer_start_(buffer_start),
+ byte_length_(byte_length),
+ byte_capacity_(byte_capacity),
+ is_shared_(shared == SharedFlag::kShared),
+ is_wasm_memory_(is_wasm_memory),
+ free_on_destruct_(free_on_destruct),
+ has_guard_regions_(has_guard_regions),
+ globally_registered_(false) {
+ type_specific_data_.v8_api_array_buffer_allocator = nullptr;
+ }
+
+ void* buffer_start_ = nullptr;
+ std::atomic<size_t> byte_length_{0};
+ size_t byte_capacity_ = 0;
+ bool is_shared_ : 1;
+ bool is_wasm_memory_ : 1;
+ bool free_on_destruct_ : 1;
+ bool has_guard_regions_ : 1;
+ bool globally_registered_ : 1;
+
+ union {
+ // If this backing store was allocated through the ArrayBufferAllocator API,
+ // this is a direct pointer to the API object for freeing the backing
+ // store.
+ // Note: we use {void*} here because we cannot forward-declare an inner
+ // class from the API.
+ void* v8_api_array_buffer_allocator;
+
+ // For shared Wasm memories, this is a list of all the attached memory
+ // objects, which is needed to grow shared backing stores.
+ SharedWasmMemoryData* shared_wasm_memory_data;
+ } type_specific_data_;
+
+ // Accessors for type-specific data.
+ void* get_v8_api_array_buffer_allocator();
+ SharedWasmMemoryData* get_shared_wasm_memory_data();
+
+ void Clear(); // Internally clears fields after deallocation.
+ static std::unique_ptr<BackingStore> TryAllocateWasmMemory(
+ Isolate* isolate, size_t initial_pages, size_t maximum_pages,
+ SharedFlag shared);
+
+ DISALLOW_COPY_AND_ASSIGN(BackingStore);
+};
+
+// A global, per-process mapping from buffer addresses to backing stores.
+// This is generally only used for dealing with an embedder that has not
+// migrated to the new API, which uses proper pointers to manage
+// backing stores.
+class GlobalBackingStoreRegistry {
+ public:
+ // Register a backing store in the global registry. A mapping from the
+ // {buffer_start} to the backing store object will be added. The backing
+ // store will automatically unregister itself upon destruction.
+ static void Register(std::shared_ptr<BackingStore> backing_store);
+
+ // Look up a backing store based on the {buffer_start} pointer.
+ static std::shared_ptr<BackingStore> Lookup(void* buffer_start,
+ size_t length);
+
+ private:
+ friend class BackingStore;
+ // Unregister a backing store in the global registry.
+ static void Unregister(BackingStore* backing_store);
+
+ // Adds the given memory object to the backing store's weak list
+ // of memory objects (under the registry lock).
+ static void AddSharedWasmMemoryObject(Isolate* isolate,
+ BackingStore* backing_store,
+ Handle<WasmMemoryObject> memory_object);
+
+ // Purge any shared wasm memory lists that refer to this isolate.
+ static void Purge(Isolate* isolate);
+
+ // Broadcast updates to all attached memory objects.
+ static void BroadcastSharedWasmMemoryGrow(
+ Isolate* isolate, std::shared_ptr<BackingStore> backing_store,
+ size_t new_pages);
+
+ // Update all shared memory objects in the given isolate.
+ static void UpdateSharedWasmMemoryObjects(Isolate* isolate);
+};
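+
+// A minimal sketch of the legacy path (illustrative only, not part of this
+// change; `store` and `len` are placeholders, and the usual buffer_start()
+// accessor on BackingStore is assumed):
+//
+//   GlobalBackingStoreRegistry::Register(store);
+//   std::shared_ptr<BackingStore> found =
+//       GlobalBackingStoreRegistry::Lookup(store->buffer_start(), len);
+//   DCHECK_EQ(store.get(), found.get());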
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_OBJECTS_BACKING_STORE_H_
diff --git a/deps/v8/src/objects/bigint.cc b/deps/v8/src/objects/bigint.cc
index 2905bb44c6..6cc43a78e7 100644
--- a/deps/v8/src/objects/bigint.cc
+++ b/deps/v8/src/objects/bigint.cc
@@ -1981,14 +1981,13 @@ void BigInt::SerializeDigits(uint8_t* storage) {
// The serialization format MUST NOT CHANGE without updating the format
// version in value-serializer.cc!
MaybeHandle<BigInt> BigInt::FromSerializedDigits(
- Isolate* isolate, uint32_t bitfield, Vector<const uint8_t> digits_storage,
- AllocationType allocation) {
+ Isolate* isolate, uint32_t bitfield, Vector<const uint8_t> digits_storage) {
int bytelength = LengthBits::decode(bitfield);
DCHECK(digits_storage.length() == bytelength);
bool sign = SignBits::decode(bitfield);
int length = (bytelength + kDigitSize - 1) / kDigitSize; // Round up.
Handle<MutableBigInt> result =
- MutableBigInt::Cast(isolate->factory()->NewBigInt(length, allocation));
+ MutableBigInt::Cast(isolate->factory()->NewBigInt(length));
result->initialize_bitfield(sign, length);
void* digits =
reinterpret_cast<void*>(result->ptr() + kDigitsOffset - kHeapObjectTag);
diff --git a/deps/v8/src/objects/bigint.h b/deps/v8/src/objects/bigint.h
index ca80547230..f50e3bcf04 100644
--- a/deps/v8/src/objects/bigint.h
+++ b/deps/v8/src/objects/bigint.h
@@ -6,8 +6,8 @@
#define V8_OBJECTS_BIGINT_H_
#include "src/common/globals.h"
-#include "src/objects/heap-object.h"
#include "src/objects/objects.h"
+#include "src/objects/primitive-heap-object.h"
#include "src/utils/utils.h"
// Has to be the last include (doesn't have include guards):
@@ -28,7 +28,7 @@ class ValueSerializer;
// BigIntBase is just the raw data object underlying a BigInt. Use with care!
// Most code should be using BigInts instead.
-class BigIntBase : public HeapObject {
+class BigIntBase : public PrimitiveHeapObject {
public:
inline int length() const {
int32_t bitfield = RELAXED_READ_INT32_FIELD(*this, kBitfieldOffset);
@@ -69,7 +69,7 @@ class BigIntBase : public HeapObject {
V(kHeaderSize, 0) \
V(kDigitsOffset, 0)
- DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, BIGINT_FIELDS)
+ DEFINE_FIELD_OFFSET_CONSTANTS(PrimitiveHeapObject::kHeaderSize, BIGINT_FIELDS)
#undef BIGINT_FIELDS
static constexpr bool HasOptionalPadding() {
@@ -105,7 +105,7 @@ class BigIntBase : public HeapObject {
// Only serves to make macros happy; other code should use IsBigInt.
bool IsBigIntBase() const { return true; }
- OBJECT_CONSTRUCTORS(BigIntBase, HeapObject);
+ OBJECT_CONSTRUCTORS(BigIntBase, PrimitiveHeapObject);
};
class FreshlyAllocatedBigInt : public BigIntBase {
@@ -263,8 +263,8 @@ class BigInt : public BigIntBase {
// {DigitsByteLengthForBitfield(GetBitfieldForSerialization())}.
void SerializeDigits(uint8_t* storage);
V8_WARN_UNUSED_RESULT static MaybeHandle<BigInt> FromSerializedDigits(
- Isolate* isolate, uint32_t bitfield, Vector<const uint8_t> digits_storage,
- AllocationType allocation);
+ Isolate* isolate, uint32_t bitfield,
+ Vector<const uint8_t> digits_storage);
OBJECT_CONSTRUCTORS(BigInt, BigIntBase);
};
diff --git a/deps/v8/src/objects/code.cc b/deps/v8/src/objects/code.cc
index b416df8878..a477a7da26 100644
--- a/deps/v8/src/objects/code.cc
+++ b/deps/v8/src/objects/code.cc
@@ -101,7 +101,6 @@ void Code::CopyFromNoFlush(Heap* heap, const CodeDesc& desc) {
// Unbox handles and relocate.
Assembler* origin = desc.origin;
- AllowDeferredHandleDereference embedding_raw_address;
const int mode_mask = RelocInfo::PostCodegenRelocationMask();
for (RelocIterator it(*this, mode_mask); !it.done(); it.next()) {
RelocInfo::Mode mode = it.rinfo()->rmode();
@@ -670,8 +669,8 @@ inline void DisassembleCodeRange(Isolate* isolate, std::ostream& os, Code code,
} // namespace
-void Code::Disassemble(const char* name, std::ostream& os, Address current_pc) {
- Isolate* isolate = GetIsolate();
+void Code::Disassemble(const char* name, std::ostream& os, Isolate* isolate,
+ Address current_pc) {
os << "kind = " << Kind2String(kind()) << "\n";
if (name == nullptr) {
name = GetName(isolate);
@@ -683,7 +682,7 @@ void Code::Disassemble(const char* name, std::ostream& os, Address current_pc) {
os << "stack_slots = " << stack_slots() << "\n";
}
os << "compiler = " << (is_turbofanned() ? "turbofan" : "unknown") << "\n";
- os << "address = " << static_cast<const void*>(this) << "\n\n";
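+  // Code is now a tagged value object, so the raw address comes from ptr()
+  // rather than from `this`.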
+ os << "address = " << reinterpret_cast<void*>(ptr()) << "\n\n";
if (is_off_heap_trampoline()) {
int trampoline_size = raw_instruction_size();
@@ -991,8 +990,7 @@ Handle<DependentCode> DependentCode::EnsureSpace(
int capacity = kCodesStartIndex + DependentCode::Grow(entries->count());
int grow_by = capacity - entries->length();
return Handle<DependentCode>::cast(
- isolate->factory()->CopyWeakFixedArrayAndGrow(entries, grow_by,
- AllocationType::kOld));
+ isolate->factory()->CopyWeakFixedArrayAndGrow(entries, grow_by));
}
bool DependentCode::Compact() {
diff --git a/deps/v8/src/objects/code.h b/deps/v8/src/objects/code.h
index 6a5ac9f31a..6f8c378093 100644
--- a/deps/v8/src/objects/code.h
+++ b/deps/v8/src/objects/code.h
@@ -61,6 +61,7 @@ class Code : public HeapObject {
#ifdef ENABLE_DISASSEMBLER
const char* GetName(Isolate* isolate) const;
V8_EXPORT_PRIVATE void Disassemble(const char* name, std::ostream& os,
+ Isolate* isolate,
Address current_pc = kNullAddress);
#endif
diff --git a/deps/v8/src/objects/contexts-inl.h b/deps/v8/src/objects/contexts-inl.h
index 0c566dd081..669e98591f 100644
--- a/deps/v8/src/objects/contexts-inl.h
+++ b/deps/v8/src/objects/contexts-inl.h
@@ -13,6 +13,7 @@
#include "src/objects/js-objects-inl.h"
#include "src/objects/map-inl.h"
#include "src/objects/objects-inl.h"
+#include "src/objects/osr-optimized-code-cache-inl.h"
#include "src/objects/regexp-match-info.h"
#include "src/objects/scope-info.h"
#include "src/objects/shared-function-info.h"
@@ -47,10 +48,29 @@ Context ScriptContextTable::get_context(int i) const {
OBJECT_CONSTRUCTORS_IMPL(Context, HeapObject)
NEVER_READ_ONLY_SPACE_IMPL(Context)
CAST_ACCESSOR(Context)
-SMI_ACCESSORS(Context, length, kLengthOffset)
+
+SMI_ACCESSORS(Context, length_and_extension_flag, kLengthOffset)
+SYNCHRONIZED_SMI_ACCESSORS(Context, length_and_extension_flag, kLengthOffset)
CAST_ACCESSOR(NativeContext)
+int Context::length() const {
+ return LengthField::decode(length_and_extension_flag());
+}
+
+int Context::synchronized_length() const {
+ return LengthField::decode(synchronized_length_and_extension_flag());
+}
+
+void Context::initialize_length_and_extension_bit(int len,
+ Context::HasExtension flag) {
+ DCHECK(LengthField::is_valid(len));
+ int value = 0;
+ value = LengthField::update(value, len);
+ value = HasExtensionField::update(value, flag == Context::HasExtension::kYes);
+ set_length_and_extension_flag(value);
+}
+
Object Context::get(int index) const {
Isolate* isolate = GetIsolateForPtrCompr(*this);
return get(isolate, index);
@@ -94,11 +114,20 @@ void Context::set_previous(Context context) { set(PREVIOUS_INDEX, context); }
Object Context::next_context_link() { return get(Context::NEXT_CONTEXT_LINK); }
-bool Context::has_extension() { return !extension().IsTheHole(); }
+bool Context::has_extension() {
+ return static_cast<bool>(
+ HasExtensionField::decode(length_and_extension_flag())) &&
+ !extension().IsTheHole();
+}
+
HeapObject Context::extension() {
return HeapObject::cast(get(EXTENSION_INDEX));
}
-void Context::set_extension(HeapObject object) { set(EXTENSION_INDEX, object); }
+void Context::set_extension(HeapObject object) {
+ set(EXTENSION_INDEX, object);
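+  // Assumption: the synchronized setter below is a release store, pairing
+  // with the acquire load behind synchronized_length(), so concurrent
+  // readers observe the flag update.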
+ synchronized_set_length_and_extension_flag(
+ HasExtensionField::update(length_and_extension_flag(), true));
+}
NativeContext Context::native_context() const {
Object result = get(NATIVE_CONTEXT_INDEX);
@@ -197,7 +226,7 @@ int Context::FunctionMapIndex(LanguageMode language_mode, FunctionKind kind,
base = IsAsyncFunction(kind) ? ASYNC_GENERATOR_FUNCTION_MAP_INDEX
: GENERATOR_FUNCTION_MAP_INDEX;
- } else if (IsAsyncFunction(kind)) {
+ } else if (IsAsyncFunction(kind) || IsAsyncModule(kind)) {
CHECK_FOLLOWS4(ASYNC_FUNCTION_MAP_INDEX, ASYNC_FUNCTION_WITH_NAME_MAP_INDEX,
ASYNC_FUNCTION_WITH_HOME_OBJECT_MAP_INDEX,
ASYNC_FUNCTION_WITH_NAME_AND_HOME_OBJECT_MAP_INDEX);
@@ -252,6 +281,10 @@ void NativeContext::set_microtask_queue(MicrotaskQueue* microtask_queue) {
reinterpret_cast<Address>(microtask_queue));
}
+OSROptimizedCodeCache NativeContext::GetOSROptimizedCodeCache() {
+ return OSROptimizedCodeCache::cast(osr_code_cache());
+}
+
OBJECT_CONSTRUCTORS_IMPL(NativeContext, Context)
} // namespace internal
diff --git a/deps/v8/src/objects/contexts.cc b/deps/v8/src/objects/contexts.cc
index 74fb4477b1..9dbba06a4d 100644
--- a/deps/v8/src/objects/contexts.cc
+++ b/deps/v8/src/objects/contexts.cc
@@ -39,12 +39,14 @@ Handle<ScriptContextTable> ScriptContextTable::Extend(
bool ScriptContextTable::Lookup(Isolate* isolate, ScriptContextTable table,
String name, LookupResult* result) {
DisallowHeapAllocation no_gc;
+  // Static variables cannot be in script contexts, so the is_static_flag
+  // output parameter is ignored here.
+ IsStaticFlag is_static_flag;
for (int i = 0; i < table.used(); i++) {
Context context = table.get_context(i);
DCHECK(context.IsScriptContext());
int slot_index = ScopeInfo::ContextSlotIndex(
context.scope_info(), name, &result->mode, &result->init_flag,
- &result->maybe_assigned_flag);
+ &result->maybe_assigned_flag, &is_static_flag);
if (slot_index >= 0) {
result->context_index = i;
@@ -129,10 +131,6 @@ JSGlobalProxy Context::global_proxy() {
return native_context().global_proxy_object();
}
-void Context::set_global_proxy(JSGlobalProxy object) {
- native_context().set_global_proxy_object(object);
-}
-
/**
 * Looks up a property in an object environment, taking the unscopables into
 * account. This is used for HasBinding spec algorithms for ObjectEnvironment.
@@ -175,7 +173,6 @@ Handle<Object> Context::Lookup(Handle<Context> context, Handle<String> name,
Isolate* isolate = context->GetIsolate();
bool follow_context_chain = (flags & FOLLOW_CONTEXT_CHAIN) != 0;
- bool failed_whitelist = false;
*index = kNotFound;
*attributes = ABSENT;
*init_flag = kCreatedInitialized;
@@ -287,8 +284,10 @@ Handle<Object> Context::Lookup(Handle<Context> context, Handle<String> name,
VariableMode mode;
InitializationFlag flag;
MaybeAssignedFlag maybe_assigned_flag;
- int slot_index = ScopeInfo::ContextSlotIndex(scope_info, *name, &mode,
- &flag, &maybe_assigned_flag);
+ IsStaticFlag is_static_flag;
+ int slot_index =
+ ScopeInfo::ContextSlotIndex(scope_info, *name, &mode, &flag,
+ &maybe_assigned_flag, &is_static_flag);
DCHECK(slot_index < 0 || slot_index >= MIN_CONTEXT_SLOTS);
if (slot_index >= 0) {
if (FLAG_trace_contexts) {
@@ -357,6 +356,17 @@ Handle<Object> Context::Lookup(Handle<Context> context, Handle<String> name,
return extension;
}
}
+
+  // Check the blacklist. Names that are listed cannot be resolved further.
+ Object blacklist = context->get(BLACK_LIST_INDEX);
+ if (blacklist.IsStringSet() &&
+ StringSet::cast(blacklist).Has(isolate, name)) {
+ if (FLAG_trace_contexts) {
+ PrintF(" - name is blacklisted. Aborting.\n");
+ }
+ break;
+ }
+
// Check the original context, but do not follow its context chain.
Object obj = context->get(WRAPPED_CONTEXT_INDEX);
if (obj.IsContext()) {
@@ -366,26 +376,12 @@ Handle<Object> Context::Lookup(Handle<Context> context, Handle<String> name,
attributes, init_flag, variable_mode);
if (!result.is_null()) return result;
}
- // Check whitelist. Names that do not pass whitelist shall only resolve
- // to with, script or native contexts up the context chain.
- obj = context->get(WHITE_LIST_INDEX);
- if (obj.IsStringSet()) {
- failed_whitelist =
- failed_whitelist || !StringSet::cast(obj).Has(isolate, name);
- }
}
// 3. Prepare to continue with the previous (next outermost) context.
if (context->IsNativeContext()) break;
- do {
- context = Handle<Context>(context->previous(), isolate);
- // If we come across a whitelist context, and the name is not
- // whitelisted, then only consider with, script, module or native
- // contexts.
- } while (failed_whitelist && !context->IsScriptContext() &&
- !context->IsNativeContext() && !context->IsWithContext() &&
- !context->IsModuleContext());
+ context = Handle<Context>(context->previous(), isolate);
} while (follow_context_chain);
if (FLAG_trace_contexts) {
diff --git a/deps/v8/src/objects/contexts.h b/deps/v8/src/objects/contexts.h
index a7b60ff7b9..7fa988be07 100644
--- a/deps/v8/src/objects/contexts.h
+++ b/deps/v8/src/objects/contexts.h
@@ -7,6 +7,7 @@
#include "src/objects/fixed-array.h"
#include "src/objects/function-kind.h"
+#include "src/objects/osr-optimized-code-cache.h"
#include "torque-generated/field-offsets-tq.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -37,21 +38,23 @@ enum ContextLookupFlags {
// must always be allocated via Heap::AllocateContext() or
// Factory::NewContext.
-#define NATIVE_CONTEXT_INTRINSIC_FUNCTIONS(V) \
- V(GENERATOR_NEXT_INTERNAL, JSFunction, generator_next_internal) \
- V(MAKE_ERROR_INDEX, JSFunction, make_error) \
- V(MAKE_RANGE_ERROR_INDEX, JSFunction, make_range_error) \
- V(MAKE_SYNTAX_ERROR_INDEX, JSFunction, make_syntax_error) \
- V(MAKE_TYPE_ERROR_INDEX, JSFunction, make_type_error) \
- V(MAKE_URI_ERROR_INDEX, JSFunction, make_uri_error) \
- V(OBJECT_CREATE, JSFunction, object_create) \
- V(REFLECT_APPLY_INDEX, JSFunction, reflect_apply) \
- V(REFLECT_CONSTRUCT_INDEX, JSFunction, reflect_construct) \
- V(MATH_FLOOR_INDEX, JSFunction, math_floor) \
- V(MATH_POW_INDEX, JSFunction, math_pow) \
- V(PROMISE_INTERNAL_CONSTRUCTOR_INDEX, JSFunction, \
- promise_internal_constructor) \
- V(IS_PROMISE_INDEX, JSFunction, is_promise) \
+#define NATIVE_CONTEXT_INTRINSIC_FUNCTIONS(V) \
+ V(GENERATOR_NEXT_INTERNAL, JSFunction, generator_next_internal) \
+ V(ASYNC_MODULE_EVALUATE_INTERNAL, JSFunction, \
+ async_module_evaluate_internal) \
+ V(MAKE_ERROR_INDEX, JSFunction, make_error) \
+ V(MAKE_RANGE_ERROR_INDEX, JSFunction, make_range_error) \
+ V(MAKE_SYNTAX_ERROR_INDEX, JSFunction, make_syntax_error) \
+ V(MAKE_TYPE_ERROR_INDEX, JSFunction, make_type_error) \
+ V(MAKE_URI_ERROR_INDEX, JSFunction, make_uri_error) \
+ V(OBJECT_CREATE, JSFunction, object_create) \
+ V(REFLECT_APPLY_INDEX, JSFunction, reflect_apply) \
+ V(REFLECT_CONSTRUCT_INDEX, JSFunction, reflect_construct) \
+ V(MATH_FLOOR_INDEX, JSFunction, math_floor) \
+ V(MATH_POW_INDEX, JSFunction, math_pow) \
+ V(PROMISE_INTERNAL_CONSTRUCTOR_INDEX, JSFunction, \
+ promise_internal_constructor) \
+ V(IS_PROMISE_INDEX, JSFunction, is_promise) \
V(PROMISE_THEN_INDEX, JSFunction, promise_then)
#define NATIVE_CONTEXT_FIELDS(V) \
@@ -104,6 +107,8 @@ enum ContextLookupFlags {
V(CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, JSFunction, \
call_as_constructor_delegate) \
V(CALL_AS_FUNCTION_DELEGATE_INDEX, JSFunction, call_as_function_delegate) \
+ V(CALL_ASYNC_MODULE_FULFILLED, JSFunction, call_async_module_fulfilled) \
+ V(CALL_ASYNC_MODULE_REJECTED, JSFunction, call_async_module_rejected) \
V(CALLSITE_FUNCTION_INDEX, JSFunction, callsite_function) \
V(CONTEXT_EXTENSION_FUNCTION_INDEX, JSFunction, context_extension_function) \
V(DATA_PROPERTY_DESCRIPTOR_MAP_INDEX, Map, data_property_descriptor_map) \
@@ -159,6 +164,11 @@ enum ContextLookupFlags {
V(INTL_NUMBER_FORMAT_FUNCTION_INDEX, JSFunction, \
intl_number_format_function) \
V(INTL_LOCALE_FUNCTION_INDEX, JSFunction, intl_locale_function) \
+ V(INTL_LIST_FORMAT_FUNCTION_INDEX, JSFunction, intl_list_format_function) \
+ V(INTL_PLURAL_RULES_FUNCTION_INDEX, JSFunction, intl_plural_rules_function) \
+ V(INTL_RELATIVE_TIME_FORMAT_FUNCTION_INDEX, JSFunction, \
+ intl_relative_time_format_function) \
+ V(INTL_SEGMENTER_FUNCTION_INDEX, JSFunction, intl_segmenter_function) \
V(INTL_SEGMENT_ITERATOR_MAP_INDEX, Map, intl_segment_iterator_map) \
V(ITERATOR_RESULT_MAP_INDEX, Map, iterator_result_map) \
V(JS_ARRAY_PACKED_SMI_ELEMENTS_MAP_INDEX, Map, \
@@ -233,6 +243,7 @@ enum ContextLookupFlags {
V(REGEXP_PROTOTYPE_MAP_INDEX, Map, regexp_prototype_map) \
V(REGEXP_REPLACE_FUNCTION_INDEX, JSFunction, regexp_replace_function) \
V(REGEXP_RESULT_MAP_INDEX, Map, regexp_result_map) \
+ V(REGEXP_RESULT_INDICES_MAP_INDEX, Map, regexp_result_indices_map) \
V(REGEXP_SEARCH_FUNCTION_INDEX, JSFunction, regexp_search_function) \
V(REGEXP_SPLIT_FUNCTION_INDEX, JSFunction, regexp_split_function) \
V(INITIAL_REGEXP_STRING_ITERATOR_PROTOTYPE_MAP_INDEX, Map, \
@@ -345,6 +356,7 @@ enum ContextLookupFlags {
V(WEAKMAP_SET_INDEX, JSFunction, weakmap_set) \
V(WEAKMAP_GET_INDEX, JSFunction, weakmap_get) \
V(WEAKSET_ADD_INDEX, JSFunction, weakset_add) \
+ V(OSR_CODE_CACHE_INDEX, WeakFixedArray, osr_code_cache) \
NATIVE_CONTEXT_INTRINSIC_FUNCTIONS(V)
// A table of all script contexts. Every loaded top-level script with top-level
@@ -443,9 +455,19 @@ class Context : public HeapObject {
DECL_CAST(Context)
+ enum class HasExtension { kYes, kNo };
+
// [length]: length of the context.
V8_INLINE int length() const;
- V8_INLINE void set_length(int value);
+ V8_INLINE int synchronized_length() const;
+ V8_INLINE void initialize_length_and_extension_bit(
+ int len, HasExtension flag = HasExtension::kNo);
+
+  // We use the 30th bit for the extension flag. If we used the 31st bit,
+  // the number could potentially be bigger than a Smi and any
+  // DCHECK(Smi::IsValid(...)) would fail.
+ using LengthField = BitField<int, 0, kSmiValueSize - 2>;
+ using HasExtensionField = BitField<int, kSmiValueSize - 2, 1>;
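+  // For example, with kSmiValueSize == 31: LengthField covers bits 0..28
+  // (29 bits) and HasExtensionField is bit 29, so a context of length 10
+  // with an extension encodes as (1 << 29) | 10, which still fits in a Smi.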
// Setter and getter for elements.
V8_INLINE Object get(int index) const;
@@ -458,18 +480,18 @@ class Context : public HeapObject {
TORQUE_GENERATED_CONTEXT_FIELDS)
// TODO(v8:8989): [torque] Support marker constants.
/* TODO(ishell): remove this fixedArray-like header size. */
- static const int kHeaderSize = kScopeInfoOffset;
+ static const int kFixedArrayLikeHeaderSize = kScopeInfoOffset;
static const int kStartOfTaggedFieldsOffset = kScopeInfoOffset;
/* Header size. */ \
/* TODO(ishell): use this as header size once MIN_CONTEXT_SLOTS */ \
/* is removed in favour of offset-based access to common fields. */ \
- static const int kTodoHeaderSize = kSize;
+ static const int kTodoHeaderSize = kHeaderSize;
// Garbage collection support.
V8_INLINE static constexpr int SizeFor(int length) {
// TODO(ishell): switch to kTodoHeaderSize based approach once we no longer
// reference common Context fields via index
- return kHeaderSize + length * kTaggedSize;
+ return kFixedArrayLikeHeaderSize + length * kTaggedSize;
}
// Code Generation support.
@@ -517,7 +539,7 @@ class Context : public HeapObject {
// These slots hold values in debug evaluate contexts.
WRAPPED_CONTEXT_INDEX = MIN_CONTEXT_SLOTS,
- WHITE_LIST_INDEX = MIN_CONTEXT_SLOTS + 1
+ BLACK_LIST_INDEX = MIN_CONTEXT_SLOTS + 1
};
// A region of native context entries containing maps for functions created
@@ -558,7 +580,6 @@ class Context : public HeapObject {
// Returns a JSGlobalProxy object or null.
V8_EXPORT_PRIVATE JSGlobalProxy global_proxy();
- void set_global_proxy(JSGlobalProxy global);
// Get the JSGlobalObject object.
V8_EXPORT_PRIVATE JSGlobalObject global_object();
@@ -652,6 +673,8 @@ class Context : public HeapObject {
#endif
OBJECT_CONSTRUCTORS(Context, HeapObject);
+ DECL_INT_ACCESSORS(length_and_extension_flag)
+ DECL_SYNCHRONIZED_INT_ACCESSORS(length_and_extension_flag)
};
class NativeContext : public Context {
@@ -696,6 +719,8 @@ class NativeContext : public Context {
void SetDeoptimizedCodeListHead(Object head);
Object DeoptimizedCodeListHead();
+ inline OSROptimizedCodeCache GetOSROptimizedCodeCache();
+
void ResetErrorsThrown();
void IncrementErrorsThrown();
int GetErrorsThrown();
diff --git a/deps/v8/src/objects/data-handler.h b/deps/v8/src/objects/data-handler.h
index 667b19b3d4..c9c0cf4cbc 100644
--- a/deps/v8/src/objects/data-handler.h
+++ b/deps/v8/src/objects/data-handler.h
@@ -41,7 +41,7 @@ class DataHandler : public Struct {
static const int kSizeWithData0 = kData1Offset;
static const int kSizeWithData1 = kData2Offset;
static const int kSizeWithData2 = kData3Offset;
- static const int kSizeWithData3 = kSize;
+ static const int kSizeWithData3 = kHeaderSize;
DECL_CAST(DataHandler)
diff --git a/deps/v8/src/objects/debug-objects-inl.h b/deps/v8/src/objects/debug-objects-inl.h
index 273f710c3b..8189481394 100644
--- a/deps/v8/src/objects/debug-objects-inl.h
+++ b/deps/v8/src/objects/debug-objects-inl.h
@@ -21,24 +21,16 @@ namespace internal {
OBJECT_CONSTRUCTORS_IMPL(BreakPoint, Tuple2)
OBJECT_CONSTRUCTORS_IMPL(BreakPointInfo, Tuple2)
OBJECT_CONSTRUCTORS_IMPL(CoverageInfo, FixedArray)
-OBJECT_CONSTRUCTORS_IMPL(DebugInfo, Struct)
+TQ_OBJECT_CONSTRUCTORS_IMPL(DebugInfo)
NEVER_READ_ONLY_SPACE_IMPL(DebugInfo)
CAST_ACCESSOR(BreakPointInfo)
-CAST_ACCESSOR(DebugInfo)
CAST_ACCESSOR(CoverageInfo)
CAST_ACCESSOR(BreakPoint)
-SMI_ACCESSORS(DebugInfo, flags, kFlagsOffset)
-ACCESSORS(DebugInfo, shared, SharedFunctionInfo, kSharedFunctionInfoOffset)
-SMI_ACCESSORS(DebugInfo, debugger_hints, kDebuggerHintsOffset)
-ACCESSORS(DebugInfo, script, Object, kScriptOffset)
-ACCESSORS(DebugInfo, original_bytecode_array, Object,
- kOriginalBytecodeArrayOffset)
-ACCESSORS(DebugInfo, debug_bytecode_array, Object, kDebugBytecodeArrayOffset)
-ACCESSORS(DebugInfo, break_points, FixedArray, kBreakPointsOffset)
-ACCESSORS(DebugInfo, coverage_info, Object, kCoverageInfoOffset)
+TQ_SMI_ACCESSORS(DebugInfo, flags)
+TQ_SMI_ACCESSORS(DebugInfo, debugger_hints)
BIT_FIELD_ACCESSORS(DebugInfo, debugger_hints, side_effect_state,
DebugInfo::SideEffectStateBits)
diff --git a/deps/v8/src/objects/debug-objects.h b/deps/v8/src/objects/debug-objects.h
index 243caaa526..39f42c1168 100644
--- a/deps/v8/src/objects/debug-objects.h
+++ b/deps/v8/src/objects/debug-objects.h
@@ -5,6 +5,8 @@
#ifndef V8_OBJECTS_DEBUG_OBJECTS_H_
#define V8_OBJECTS_DEBUG_OBJECTS_H_
+#include <memory>
+
#include "src/objects/fixed-array.h"
#include "src/objects/objects.h"
#include "src/objects/struct.h"
@@ -20,7 +22,7 @@ class BytecodeArray;
// The DebugInfo class holds additional information for a function being
// debugged.
-class DebugInfo : public Struct {
+class DebugInfo : public TorqueGeneratedDebugInfo<DebugInfo, Struct> {
public:
NEVER_READ_ONLY_SPACE
enum Flag {
@@ -38,15 +40,9 @@ class DebugInfo : public Struct {
// A bitfield that lists uses of the current instance.
DECL_INT_ACCESSORS(flags)
- // The shared function info for the source being debugged.
- DECL_ACCESSORS(shared, SharedFunctionInfo)
-
// Bit field containing various information collected for debugging.
DECL_INT_ACCESSORS(debugger_hints)
- // Script field from shared function info.
- DECL_ACCESSORS(script, Object)
-
// DebugInfo can be detached from the SharedFunctionInfo iff it is empty.
bool IsEmpty() const;
@@ -83,17 +79,6 @@ class DebugInfo : public Struct {
void ClearBreakAtEntry();
bool BreakAtEntry() const;
- // The original uninstrumented bytecode array for functions with break
- // points - the instrumented bytecode is held in the shared function info.
- DECL_ACCESSORS(original_bytecode_array, Object)
-
- // The debug instrumented bytecode array for functions with break points
- // - also pointed to by the shared function info.
- DECL_ACCESSORS(debug_bytecode_array, Object)
-
- // Fixed array holding status information for each active break point.
- DECL_ACCESSORS(break_points, FixedArray)
-
// Check if there is a break point at a source position.
bool HasBreakPoint(Isolate* isolate, int source_position);
// Attempt to clear a break point. Return true if successful.
@@ -160,17 +145,9 @@ class DebugInfo : public Struct {
// Clears all fields related to block coverage.
void ClearCoverageInfo(Isolate* isolate);
- DECL_ACCESSORS(coverage_info, Object)
-
- DECL_CAST(DebugInfo)
// Dispatched behavior.
DECL_PRINTER(DebugInfo)
- DECL_VERIFIER(DebugInfo)
-
- // Layout description.
- DEFINE_FIELD_OFFSET_CONSTANTS(Struct::kHeaderSize,
- TORQUE_GENERATED_DEBUG_INFO_FIELDS)
static const int kEstimatedNofBreakPointsInFunction = 4;
@@ -178,7 +155,7 @@ class DebugInfo : public Struct {
// Get the break point info object for a source position.
Object GetBreakPointInfo(Isolate* isolate, int source_position);
- OBJECT_CONSTRUCTORS(DebugInfo, Struct);
+ TQ_OBJECT_CONSTRUCTORS(DebugInfo)
};
// The BreakPointInfo class holds information for break points set in a
diff --git a/deps/v8/src/objects/descriptor-array-inl.h b/deps/v8/src/objects/descriptor-array-inl.h
index e2805d795a..5ea14c1e60 100644
--- a/deps/v8/src/objects/descriptor-array-inl.h
+++ b/deps/v8/src/objects/descriptor-array-inl.h
@@ -58,33 +58,35 @@ void DescriptorArray::CopyEnumCacheFrom(DescriptorArray array) {
set_enum_cache(array.enum_cache());
}
-int DescriptorArray::Search(Name name, int valid_descriptors) {
+InternalIndex DescriptorArray::Search(Name name, int valid_descriptors) {
DCHECK(name.IsUniqueName());
- return internal::Search<VALID_ENTRIES>(this, name, valid_descriptors,
- nullptr);
+ return InternalIndex(
+ internal::Search<VALID_ENTRIES>(this, name, valid_descriptors, nullptr));
}
-int DescriptorArray::Search(Name name, Map map) {
+InternalIndex DescriptorArray::Search(Name name, Map map) {
DCHECK(name.IsUniqueName());
int number_of_own_descriptors = map.NumberOfOwnDescriptors();
- if (number_of_own_descriptors == 0) return kNotFound;
+ if (number_of_own_descriptors == 0) return InternalIndex::NotFound();
return Search(name, number_of_own_descriptors);
}
-int DescriptorArray::SearchWithCache(Isolate* isolate, Name name, Map map) {
+InternalIndex DescriptorArray::SearchWithCache(Isolate* isolate, Name name,
+ Map map) {
DCHECK(name.IsUniqueName());
int number_of_own_descriptors = map.NumberOfOwnDescriptors();
- if (number_of_own_descriptors == 0) return kNotFound;
+ if (number_of_own_descriptors == 0) return InternalIndex::NotFound();
DescriptorLookupCache* cache = isolate->descriptor_lookup_cache();
int number = cache->Lookup(map, name);
if (number == DescriptorLookupCache::kAbsent) {
- number = Search(name, number_of_own_descriptors);
+ InternalIndex result = Search(name, number_of_own_descriptors);
+ number = result.is_found() ? result.as_int() : DescriptorArray::kNotFound;
cache->Update(map, name, number);
}
-
- return number;
+ if (number == DescriptorArray::kNotFound) return InternalIndex::NotFound();
+ return InternalIndex(number);
}
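+
+// A minimal sketch of the InternalIndex-based lookup pattern introduced
+// above (illustrative only; `descriptors`, `isolate`, `name`, and `map` are
+// assumed placeholders):
+//
+//   InternalIndex entry = descriptors.SearchWithCache(isolate, name, map);
+//   if (entry.is_found()) {
+//     PropertyDetails details = descriptors.GetDetails(entry);
+//     // ... use `details` ...
+//   }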
ObjectSlot DescriptorArray::GetFirstPointerSlot() {
@@ -102,26 +104,27 @@ ObjectSlot DescriptorArray::GetDescriptorSlot(int descriptor) {
return RawField(OffsetOfDescriptorAt(descriptor));
}
-Name DescriptorArray::GetKey(int descriptor_number) const {
+Name DescriptorArray::GetKey(InternalIndex descriptor_number) const {
Isolate* isolate = GetIsolateForPtrCompr(*this);
return GetKey(isolate, descriptor_number);
}
-Name DescriptorArray::GetKey(Isolate* isolate, int descriptor_number) const {
- DCHECK_LT(descriptor_number, number_of_descriptors());
- int entry_offset = OffsetOfDescriptorAt(descriptor_number);
+Name DescriptorArray::GetKey(Isolate* isolate,
+ InternalIndex descriptor_number) const {
+ DCHECK_LT(descriptor_number.as_int(), number_of_descriptors());
+ int entry_offset = OffsetOfDescriptorAt(descriptor_number.as_int());
return Name::cast(EntryKeyField::Relaxed_Load(isolate, *this, entry_offset));
}
-void DescriptorArray::SetKey(int descriptor_number, Name key) {
- DCHECK_LT(descriptor_number, number_of_descriptors());
- int entry_offset = OffsetOfDescriptorAt(descriptor_number);
+void DescriptorArray::SetKey(InternalIndex descriptor_number, Name key) {
+ DCHECK_LT(descriptor_number.as_int(), number_of_descriptors());
+ int entry_offset = OffsetOfDescriptorAt(descriptor_number.as_int());
EntryKeyField::Relaxed_Store(*this, entry_offset, key);
WRITE_BARRIER(*this, entry_offset + kEntryKeyOffset, key);
}
int DescriptorArray::GetSortedKeyIndex(int descriptor_number) {
- return GetDetails(descriptor_number).pointer();
+ return GetDetails(InternalIndex(descriptor_number)).pointer();
}
Name DescriptorArray::GetSortedKey(int descriptor_number) {
@@ -130,81 +133,83 @@ Name DescriptorArray::GetSortedKey(int descriptor_number) {
}
Name DescriptorArray::GetSortedKey(Isolate* isolate, int descriptor_number) {
- return GetKey(isolate, GetSortedKeyIndex(descriptor_number));
+ return GetKey(isolate, InternalIndex(GetSortedKeyIndex(descriptor_number)));
}
void DescriptorArray::SetSortedKey(int descriptor_number, int pointer) {
- PropertyDetails details = GetDetails(descriptor_number);
- SetDetails(descriptor_number, details.set_pointer(pointer));
+ PropertyDetails details = GetDetails(InternalIndex(descriptor_number));
+ SetDetails(InternalIndex(descriptor_number), details.set_pointer(pointer));
}
-Object DescriptorArray::GetStrongValue(int descriptor_number) {
+Object DescriptorArray::GetStrongValue(InternalIndex descriptor_number) {
Isolate* isolate = GetIsolateForPtrCompr(*this);
return GetStrongValue(isolate, descriptor_number);
}
Object DescriptorArray::GetStrongValue(Isolate* isolate,
- int descriptor_number) {
+ InternalIndex descriptor_number) {
return GetValue(isolate, descriptor_number).cast<Object>();
}
-void DescriptorArray::SetValue(int descriptor_number, MaybeObject value) {
- DCHECK_LT(descriptor_number, number_of_descriptors());
- int entry_offset = OffsetOfDescriptorAt(descriptor_number);
+void DescriptorArray::SetValue(InternalIndex descriptor_number,
+ MaybeObject value) {
+ DCHECK_LT(descriptor_number.as_int(), number_of_descriptors());
+ int entry_offset = OffsetOfDescriptorAt(descriptor_number.as_int());
EntryValueField::Relaxed_Store(*this, entry_offset, value);
WEAK_WRITE_BARRIER(*this, entry_offset + kEntryValueOffset, value);
}
-MaybeObject DescriptorArray::GetValue(int descriptor_number) {
+MaybeObject DescriptorArray::GetValue(InternalIndex descriptor_number) {
Isolate* isolate = GetIsolateForPtrCompr(*this);
return GetValue(isolate, descriptor_number);
}
-MaybeObject DescriptorArray::GetValue(Isolate* isolate, int descriptor_number) {
- DCHECK_LT(descriptor_number, number_of_descriptors());
- int entry_offset = OffsetOfDescriptorAt(descriptor_number);
+MaybeObject DescriptorArray::GetValue(Isolate* isolate,
+ InternalIndex descriptor_number) {
+ DCHECK_LT(descriptor_number.as_int(), number_of_descriptors());
+ int entry_offset = OffsetOfDescriptorAt(descriptor_number.as_int());
return EntryValueField::Relaxed_Load(isolate, *this, entry_offset);
}
-PropertyDetails DescriptorArray::GetDetails(int descriptor_number) {
- DCHECK_LT(descriptor_number, number_of_descriptors());
- int entry_offset = OffsetOfDescriptorAt(descriptor_number);
+PropertyDetails DescriptorArray::GetDetails(InternalIndex descriptor_number) {
+ DCHECK_LT(descriptor_number.as_int(), number_of_descriptors());
+ int entry_offset = OffsetOfDescriptorAt(descriptor_number.as_int());
Smi details = EntryDetailsField::Relaxed_Load(*this, entry_offset);
return PropertyDetails(details);
}
-void DescriptorArray::SetDetails(int descriptor_number,
+void DescriptorArray::SetDetails(InternalIndex descriptor_number,
PropertyDetails details) {
- DCHECK_LT(descriptor_number, number_of_descriptors());
- int entry_offset = OffsetOfDescriptorAt(descriptor_number);
+ DCHECK_LT(descriptor_number.as_int(), number_of_descriptors());
+ int entry_offset = OffsetOfDescriptorAt(descriptor_number.as_int());
EntryDetailsField::Relaxed_Store(*this, entry_offset, details.AsSmi());
}
-int DescriptorArray::GetFieldIndex(int descriptor_number) {
+int DescriptorArray::GetFieldIndex(InternalIndex descriptor_number) {
DCHECK_EQ(GetDetails(descriptor_number).location(), kField);
return GetDetails(descriptor_number).field_index();
}
-FieldType DescriptorArray::GetFieldType(int descriptor_number) {
+FieldType DescriptorArray::GetFieldType(InternalIndex descriptor_number) {
Isolate* isolate = GetIsolateForPtrCompr(*this);
return GetFieldType(isolate, descriptor_number);
}
FieldType DescriptorArray::GetFieldType(Isolate* isolate,
- int descriptor_number) {
+ InternalIndex descriptor_number) {
DCHECK_EQ(GetDetails(descriptor_number).location(), kField);
MaybeObject wrapped_type = GetValue(isolate, descriptor_number);
return Map::UnwrapFieldType(wrapped_type);
}
-void DescriptorArray::Set(int descriptor_number, Name key, MaybeObject value,
- PropertyDetails details) {
+void DescriptorArray::Set(InternalIndex descriptor_number, Name key,
+ MaybeObject value, PropertyDetails details) {
SetKey(descriptor_number, key);
SetDetails(descriptor_number, details);
SetValue(descriptor_number, value);
}
-void DescriptorArray::Set(int descriptor_number, Descriptor* desc) {
+void DescriptorArray::Set(InternalIndex descriptor_number, Descriptor* desc) {
Name key = *desc->GetKey();
MaybeObject value = *desc->GetValue();
Set(descriptor_number, key, value, desc->GetDetails());
@@ -215,7 +220,7 @@ void DescriptorArray::Append(Descriptor* desc) {
int descriptor_number = number_of_descriptors();
DCHECK_LE(descriptor_number + 1, number_of_all_descriptors());
set_number_of_descriptors(descriptor_number + 1);
- Set(descriptor_number, desc);
+ Set(InternalIndex(descriptor_number), desc);
uint32_t hash = desc->GetKey()->Hash();
diff --git a/deps/v8/src/objects/descriptor-array.h b/deps/v8/src/objects/descriptor-array.h
index 0f17cd22ea..73b94b7cfa 100644
--- a/deps/v8/src/objects/descriptor-array.h
+++ b/deps/v8/src/objects/descriptor-array.h
@@ -6,6 +6,8 @@
#define V8_OBJECTS_DESCRIPTOR_ARRAY_H_
#include "src/objects/fixed-array.h"
+// TODO(jkummerow): Consider forward-declaring instead.
+#include "src/objects/internal-index.h"
#include "src/objects/objects.h"
#include "src/objects/struct.h"
#include "src/utils/utils.h"
@@ -62,27 +64,29 @@ class DescriptorArray : public HeapObject {
Handle<FixedArray> indices);
// Accessors for fetching instance descriptor at descriptor number.
- inline Name GetKey(int descriptor_number) const;
- inline Name GetKey(Isolate* isolate, int descriptor_number) const;
- inline Object GetStrongValue(int descriptor_number);
- inline Object GetStrongValue(Isolate* isolate, int descriptor_number);
- inline MaybeObject GetValue(int descriptor_number);
- inline MaybeObject GetValue(Isolate* isolate, int descriptor_number);
- inline PropertyDetails GetDetails(int descriptor_number);
- inline int GetFieldIndex(int descriptor_number);
- inline FieldType GetFieldType(int descriptor_number);
- inline FieldType GetFieldType(Isolate* isolate, int descriptor_number);
+ inline Name GetKey(InternalIndex descriptor_number) const;
+ inline Name GetKey(Isolate* isolate, InternalIndex descriptor_number) const;
+ inline Object GetStrongValue(InternalIndex descriptor_number);
+ inline Object GetStrongValue(Isolate* isolate,
+ InternalIndex descriptor_number);
+ inline MaybeObject GetValue(InternalIndex descriptor_number);
+ inline MaybeObject GetValue(Isolate* isolate,
+ InternalIndex descriptor_number);
+ inline PropertyDetails GetDetails(InternalIndex descriptor_number);
+ inline int GetFieldIndex(InternalIndex descriptor_number);
+ inline FieldType GetFieldType(InternalIndex descriptor_number);
+ inline FieldType GetFieldType(Isolate* isolate,
+ InternalIndex descriptor_number);
inline Name GetSortedKey(int descriptor_number);
inline Name GetSortedKey(Isolate* isolate, int descriptor_number);
inline int GetSortedKeyIndex(int descriptor_number);
- inline void SetSortedKey(int pointer, int descriptor_number);
// Accessor for complete descriptor.
- inline void Set(int descriptor_number, Descriptor* desc);
- inline void Set(int descriptor_number, Name key, MaybeObject value,
+ inline void Set(InternalIndex descriptor_number, Descriptor* desc);
+ inline void Set(InternalIndex descriptor_number, Name key, MaybeObject value,
PropertyDetails details);
- void Replace(int descriptor_number, Descriptor* descriptor);
+ void Replace(InternalIndex descriptor_number, Descriptor* descriptor);
// Generalizes constness, representation and field type of all field
// descriptors.
@@ -109,20 +113,20 @@ class DescriptorArray : public HeapObject {
void Sort();
// Search the instance descriptors for given name.
- V8_INLINE int Search(Name name, int number_of_own_descriptors);
- V8_INLINE int Search(Name name, Map map);
+ V8_INLINE InternalIndex Search(Name name, int number_of_own_descriptors);
+ V8_INLINE InternalIndex Search(Name name, Map map);
// As the above, but uses DescriptorLookupCache and updates it when
// necessary.
- V8_INLINE int SearchWithCache(Isolate* isolate, Name name, Map map);
+ V8_INLINE InternalIndex SearchWithCache(Isolate* isolate, Name name, Map map);
bool IsEqualUpTo(DescriptorArray desc, int nof_descriptors);
// Allocates a DescriptorArray, but returns the singleton
// empty descriptor array object if number_of_descriptors is 0.
- V8_EXPORT_PRIVATE static Handle<DescriptorArray> Allocate(
- Isolate* isolate, int nof_descriptors, int slack,
- AllocationType allocation = AllocationType::kYoung);
+ V8_EXPORT_PRIVATE static Handle<DescriptorArray> Allocate(Isolate* isolate,
+ int nof_descriptors,
+ int slack);
void Initialize(EnumCache enum_cache, HeapObject undefined_value,
int nof_descriptors, int slack);
@@ -176,7 +180,7 @@ class DescriptorArray : public HeapObject {
// Print all the descriptors.
void PrintDescriptors(std::ostream& os);
- void PrintDescriptorDetails(std::ostream& os, int descriptor,
+ void PrintDescriptorDetails(std::ostream& os, InternalIndex descriptor,
PropertyDetails::PrintMode mode);
DECL_PRINTER(DescriptorArray)
@@ -210,13 +214,16 @@ class DescriptorArray : public HeapObject {
private:
DECL_INT16_ACCESSORS(filler16bits)
- inline void SetKey(int descriptor_number, Name key);
- inline void SetValue(int descriptor_number, MaybeObject value);
- inline void SetDetails(int descriptor_number, PropertyDetails details);
+ inline void SetKey(InternalIndex descriptor_number, Name key);
+ inline void SetValue(InternalIndex descriptor_number, MaybeObject value);
+ inline void SetDetails(InternalIndex descriptor_number,
+ PropertyDetails details);
// Transfer a complete descriptor from the src descriptor array to this
// descriptor array.
- void CopyFrom(int index, DescriptorArray src);
+ void CopyFrom(InternalIndex index, DescriptorArray src);
+
+ inline void SetSortedKey(int pointer, int descriptor_number);
// Swap first and second descriptor.
inline void SwapSortedKeys(int first, int second);
diff --git a/deps/v8/src/objects/elements.cc b/deps/v8/src/objects/elements.cc
index 6e5648d2f4..686f1a9b1a 100644
--- a/deps/v8/src/objects/elements.cc
+++ b/deps/v8/src/objects/elements.cc
@@ -8,6 +8,7 @@
#include "src/execution/arguments.h"
#include "src/execution/frames.h"
#include "src/execution/isolate-inl.h"
+#include "src/execution/protectors-inl.h"
#include "src/heap/factory.h"
#include "src/heap/heap-inl.h" // For MaxNumberToStringCacheSize.
#include "src/heap/heap-write-barrier-inl.h"
@@ -509,11 +510,11 @@ Maybe<int64_t> IndexOfValueSlowPath(Isolate* isolate, Handle<JSObject> receiver,
// that take an entry (instead of an index) as an argument.
class InternalElementsAccessor : public ElementsAccessor {
public:
- uint32_t GetEntryForIndex(Isolate* isolate, JSObject holder,
- FixedArrayBase backing_store,
- uint32_t index) override = 0;
+ InternalIndex GetEntryForIndex(Isolate* isolate, JSObject holder,
+ FixedArrayBase backing_store,
+ uint32_t index) override = 0;
- PropertyDetails GetDetails(JSObject holder, uint32_t entry) override = 0;
+ PropertyDetails GetDetails(JSObject holder, InternalIndex entry) override = 0;
};
// Base class for element handler implementations. Contains the
@@ -594,16 +595,17 @@ class ElementsAccessorBase : public InternalElementsAccessor {
FixedArrayBase backing_store,
PropertyFilter filter = ALL_PROPERTIES) {
return Subclass::GetEntryForIndexImpl(isolate, holder, backing_store, index,
- filter) != kMaxUInt32;
+ filter)
+ .is_found();
}
- bool HasEntry(JSObject holder, uint32_t entry) final {
+ bool HasEntry(JSObject holder, InternalIndex entry) final {
return Subclass::HasEntryImpl(holder.GetIsolate(), holder.elements(),
entry);
}
static bool HasEntryImpl(Isolate* isolate, FixedArrayBase backing_store,
- uint32_t entry) {
+ InternalIndex entry) {
UNIMPLEMENTED();
}
@@ -615,33 +617,33 @@ class ElementsAccessorBase : public InternalElementsAccessor {
return false;
}
- Handle<Object> Get(Handle<JSObject> holder, uint32_t entry) final {
+ Handle<Object> Get(Handle<JSObject> holder, InternalIndex entry) final {
return Subclass::GetInternalImpl(holder, entry);
}
static Handle<Object> GetInternalImpl(Handle<JSObject> holder,
- uint32_t entry) {
+ InternalIndex entry) {
return Subclass::GetImpl(holder->GetIsolate(), holder->elements(), entry);
}
static Handle<Object> GetImpl(Isolate* isolate, FixedArrayBase backing_store,
- uint32_t entry) {
+ InternalIndex entry) {
uint32_t index = GetIndexForEntryImpl(backing_store, entry);
return handle(BackingStore::cast(backing_store).get(index), isolate);
}
- void Set(Handle<JSObject> holder, uint32_t entry, Object value) final {
+ void Set(Handle<JSObject> holder, InternalIndex entry, Object value) final {
Subclass::SetImpl(holder, entry, value);
}
void Reconfigure(Handle<JSObject> object, Handle<FixedArrayBase> store,
- uint32_t entry, Handle<Object> value,
+ InternalIndex entry, Handle<Object> value,
PropertyAttributes attributes) final {
Subclass::ReconfigureImpl(object, store, entry, value, attributes);
}
static void ReconfigureImpl(Handle<JSObject> object,
- Handle<FixedArrayBase> store, uint32_t entry,
+ Handle<FixedArrayBase> store, InternalIndex entry,
Handle<Object> value,
PropertyAttributes attributes) {
UNREACHABLE();
@@ -917,7 +919,7 @@ class ElementsAccessorBase : public InternalElementsAccessor {
return true;
}
- void Delete(Handle<JSObject> obj, uint32_t entry) final {
+ void Delete(Handle<JSObject> obj, InternalIndex entry) final {
Subclass::DeleteImpl(obj, entry);
}
@@ -1024,9 +1026,9 @@ class ElementsAccessorBase : public InternalElementsAccessor {
if (!key->ToUint32(&index)) continue;
DCHECK_EQ(object->GetElementsKind(), original_elements_kind);
- uint32_t entry = Subclass::GetEntryForIndexImpl(
+ InternalIndex entry = Subclass::GetEntryForIndexImpl(
isolate, *object, object->elements(), index, filter);
- if (entry == kMaxUInt32) continue;
+ if (entry.is_not_found()) continue;
PropertyDetails details = Subclass::GetDetailsImpl(*object, entry);
Handle<Object> value;
@@ -1053,9 +1055,9 @@ class ElementsAccessorBase : public InternalElementsAccessor {
InternalElementsAccessor* accessor =
reinterpret_cast<InternalElementsAccessor*>(
object->GetElementsAccessor());
- uint32_t entry = accessor->GetEntryForIndex(isolate, *object,
- object->elements(), index);
- if (entry == kMaxUInt32) continue;
+ InternalIndex entry = accessor->GetEntryForIndex(
+ isolate, *object, object->elements(), index);
+ if (entry.is_not_found()) continue;
PropertyDetails details = accessor->GetDetails(*object, entry);
if (!details.IsEnumerable()) continue;
}
@@ -1280,43 +1282,44 @@ class ElementsAccessorBase : public InternalElementsAccessor {
void Reverse(JSObject receiver) final { Subclass::ReverseImpl(receiver); }
static uint32_t GetIndexForEntryImpl(FixedArrayBase backing_store,
- uint32_t entry) {
- return entry;
+ InternalIndex entry) {
+ return entry.as_uint32();
}
- static uint32_t GetEntryForIndexImpl(Isolate* isolate, JSObject holder,
- FixedArrayBase backing_store,
- uint32_t index, PropertyFilter filter) {
+ static InternalIndex GetEntryForIndexImpl(Isolate* isolate, JSObject holder,
+ FixedArrayBase backing_store,
+ uint32_t index,
+ PropertyFilter filter) {
DCHECK(IsFastElementsKind(kind()) ||
IsAnyNonextensibleElementsKind(kind()));
uint32_t length = Subclass::GetMaxIndex(holder, backing_store);
if (IsHoleyElementsKindForRead(kind())) {
return index < length && !BackingStore::cast(backing_store)
.is_the_hole(isolate, index)
- ? index
- : kMaxUInt32;
+ ? InternalIndex(index)
+ : InternalIndex::NotFound();
} else {
- return index < length ? index : kMaxUInt32;
+ return index < length ? InternalIndex(index) : InternalIndex::NotFound();
}
}
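+  // InternalIndex::NotFound() replaces the old kMaxUInt32 sentinel here;
+  // callers now test entry.is_found() / entry.is_not_found() instead of
+  // comparing against kMaxUInt32.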
- uint32_t GetEntryForIndex(Isolate* isolate, JSObject holder,
- FixedArrayBase backing_store,
- uint32_t index) final {
+ InternalIndex GetEntryForIndex(Isolate* isolate, JSObject holder,
+ FixedArrayBase backing_store,
+ uint32_t index) final {
return Subclass::GetEntryForIndexImpl(isolate, holder, backing_store, index,
ALL_PROPERTIES);
}
static PropertyDetails GetDetailsImpl(FixedArrayBase backing_store,
- uint32_t entry) {
+ InternalIndex entry) {
return PropertyDetails(kData, NONE, PropertyCellType::kNoCell);
}
- static PropertyDetails GetDetailsImpl(JSObject holder, uint32_t entry) {
+ static PropertyDetails GetDetailsImpl(JSObject holder, InternalIndex entry) {
return PropertyDetails(kData, NONE, PropertyCellType::kNoCell);
}
- PropertyDetails GetDetails(JSObject holder, uint32_t entry) final {
+ PropertyDetails GetDetails(JSObject holder, InternalIndex entry) final {
return Subclass::GetDetailsImpl(holder, entry);
}
@@ -1419,10 +1422,11 @@ class DictionaryElementsAccessor
UNREACHABLE();
}
- static void DeleteImpl(Handle<JSObject> obj, uint32_t entry) {
+ static void DeleteImpl(Handle<JSObject> obj, InternalIndex entry) {
Handle<NumberDictionary> dict(NumberDictionary::cast(obj->elements()),
obj->GetIsolate());
- dict = NumberDictionary::DeleteEntry(obj->GetIsolate(), dict, entry);
+ dict =
+ NumberDictionary::DeleteEntry(obj->GetIsolate(), dict, entry.as_int());
obj->set_elements(*dict);
}
@@ -1441,38 +1445,38 @@ class DictionaryElementsAccessor
return false;
}
- static Object GetRaw(FixedArrayBase store, uint32_t entry) {
+ static Object GetRaw(FixedArrayBase store, InternalIndex entry) {
NumberDictionary backing_store = NumberDictionary::cast(store);
- return backing_store.ValueAt(entry);
+ return backing_store.ValueAt(entry.as_int());
}
static Handle<Object> GetImpl(Isolate* isolate, FixedArrayBase backing_store,
- uint32_t entry) {
+ InternalIndex entry) {
return handle(GetRaw(backing_store, entry), isolate);
}
- static inline void SetImpl(Handle<JSObject> holder, uint32_t entry,
+ static inline void SetImpl(Handle<JSObject> holder, InternalIndex entry,
Object value) {
SetImpl(holder->elements(), entry, value);
}
- static inline void SetImpl(FixedArrayBase backing_store, uint32_t entry,
+ static inline void SetImpl(FixedArrayBase backing_store, InternalIndex entry,
Object value) {
- NumberDictionary::cast(backing_store).ValueAtPut(entry, value);
+ NumberDictionary::cast(backing_store).ValueAtPut(entry.as_int(), value);
}
static void ReconfigureImpl(Handle<JSObject> object,
- Handle<FixedArrayBase> store, uint32_t entry,
+ Handle<FixedArrayBase> store, InternalIndex entry,
Handle<Object> value,
PropertyAttributes attributes) {
NumberDictionary dictionary = NumberDictionary::cast(*store);
if (attributes != NONE) object->RequireSlowElements(dictionary);
- dictionary.ValueAtPut(entry, *value);
- PropertyDetails details = dictionary.DetailsAt(entry);
+ dictionary.ValueAtPut(entry.as_int(), *value);
+ PropertyDetails details = dictionary.DetailsAt(entry.as_int());
details = PropertyDetails(kData, attributes, PropertyCellType::kNoCell,
details.dictionary_index());
- dictionary.DetailsAtPut(object->GetIsolate(), entry, details);
+ dictionary.DetailsAtPut(object->GetIsolate(), entry.as_int(), details);
}
static void AddImpl(Handle<JSObject> object, uint32_t index,
@@ -1493,43 +1497,47 @@ class DictionaryElementsAccessor
}
static bool HasEntryImpl(Isolate* isolate, FixedArrayBase store,
- uint32_t entry) {
+ InternalIndex entry) {
DisallowHeapAllocation no_gc;
NumberDictionary dict = NumberDictionary::cast(store);
- Object index = dict.KeyAt(entry);
+ Object index = dict.KeyAt(entry.as_int());
return !index.IsTheHole(isolate);
}
- static uint32_t GetIndexForEntryImpl(FixedArrayBase store, uint32_t entry) {
+ static uint32_t GetIndexForEntryImpl(FixedArrayBase store,
+ InternalIndex entry) {
DisallowHeapAllocation no_gc;
NumberDictionary dict = NumberDictionary::cast(store);
uint32_t result = 0;
- CHECK(dict.KeyAt(entry).ToArrayIndex(&result));
+ CHECK(dict.KeyAt(entry.as_int()).ToArrayIndex(&result));
return result;
}
- static uint32_t GetEntryForIndexImpl(Isolate* isolate, JSObject holder,
- FixedArrayBase store, uint32_t index,
- PropertyFilter filter) {
+ static InternalIndex GetEntryForIndexImpl(Isolate* isolate, JSObject holder,
+ FixedArrayBase store,
+ uint32_t index,
+ PropertyFilter filter) {
DisallowHeapAllocation no_gc;
NumberDictionary dictionary = NumberDictionary::cast(store);
int entry = dictionary.FindEntry(isolate, index);
- if (entry == NumberDictionary::kNotFound) return kMaxUInt32;
+ if (entry == NumberDictionary::kNotFound) {
+ return InternalIndex::NotFound();
+ }
if (filter != ALL_PROPERTIES) {
PropertyDetails details = dictionary.DetailsAt(entry);
PropertyAttributes attr = details.attributes();
- if ((attr & filter) != 0) return kMaxUInt32;
+ if ((attr & filter) != 0) return InternalIndex::NotFound();
}
- return static_cast<uint32_t>(entry);
+ return InternalIndex(entry);
}
- static PropertyDetails GetDetailsImpl(JSObject holder, uint32_t entry) {
+ static PropertyDetails GetDetailsImpl(JSObject holder, InternalIndex entry) {
return GetDetailsImpl(holder.elements(), entry);
}
static PropertyDetails GetDetailsImpl(FixedArrayBase backing_store,
- uint32_t entry) {
- return NumberDictionary::cast(backing_store).DetailsAt(entry);
+ InternalIndex entry) {
+ return NumberDictionary::cast(backing_store).DetailsAt(entry.as_int());
}
static uint32_t FilterKey(Handle<NumberDictionary> dictionary, int entry,
@@ -1688,7 +1696,8 @@ class DictionaryElementsAccessor
continue;
}
- PropertyDetails details = GetDetailsImpl(*dictionary, entry);
+ PropertyDetails details =
+ GetDetailsImpl(*dictionary, InternalIndex(entry));
switch (details.kind()) {
case kData: {
Object element_k = dictionary->ValueAt(entry);
@@ -1757,7 +1766,8 @@ class DictionaryElementsAccessor
int entry = dictionary->FindEntry(isolate, k);
if (entry == NumberDictionary::kNotFound) continue;
- PropertyDetails details = GetDetailsImpl(*dictionary, entry);
+ PropertyDetails details =
+ GetDetailsImpl(*dictionary, InternalIndex(entry));
switch (details.kind()) {
case kData: {
Object element_k = dictionary->ValueAt(entry);
@@ -1863,7 +1873,8 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
if (BackingStore::cast(*store).is_the_hole(isolate, i)) continue;
}
max_number_key = i;
- Handle<Object> value = Subclass::GetImpl(isolate, *store, i);
+ Handle<Object> value =
+ Subclass::GetImpl(isolate, *store, InternalIndex(i));
dictionary =
NumberDictionary::Add(isolate, dictionary, i, value, details);
j++;
@@ -1971,11 +1982,12 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
}
static void ReconfigureImpl(Handle<JSObject> object,
- Handle<FixedArrayBase> store, uint32_t entry,
+ Handle<FixedArrayBase> store, InternalIndex entry,
Handle<Object> value,
PropertyAttributes attributes) {
Handle<NumberDictionary> dictionary = JSObject::NormalizeElements(object);
- entry = dictionary->FindEntry(object->GetIsolate(), entry);
+ entry = InternalIndex(
+ dictionary->FindEntry(object->GetIsolate(), entry.as_uint32()));
DictionaryElementsAccessor::ReconfigureImpl(object, dictionary, entry,
value, attributes);
}
@@ -2000,10 +2012,10 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
JSObject::EnsureWritableFastElements(object);
}
}
- Subclass::SetImpl(object, index, *value);
+ Subclass::SetImpl(object, InternalIndex(index), *value);
}
- static void DeleteImpl(Handle<JSObject> obj, uint32_t entry) {
+ static void DeleteImpl(Handle<JSObject> obj, InternalIndex entry) {
ElementsKind kind = KindTraits::Kind;
if (IsFastPackedElementsKind(kind) ||
kind == PACKED_NONEXTENSIBLE_ELEMENTS) {
@@ -2013,12 +2025,14 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
IsNonextensibleElementsKind(kind)) {
JSObject::EnsureWritableFastElements(obj);
}
- DeleteCommon(obj, entry, handle(obj->elements(), obj->GetIsolate()));
+ DeleteCommon(obj, entry.as_uint32(),
+ handle(obj->elements(), obj->GetIsolate()));
}
static bool HasEntryImpl(Isolate* isolate, FixedArrayBase backing_store,
- uint32_t entry) {
- return !BackingStore::cast(backing_store).is_the_hole(isolate, entry);
+ InternalIndex entry) {
+ return !BackingStore::cast(backing_store)
+ .is_the_hole(isolate, entry.as_int());
}
static uint32_t NumberOfElementsImpl(JSObject receiver,
@@ -2028,7 +2042,9 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
Isolate* isolate = receiver.GetIsolate();
uint32_t count = 0;
for (uint32_t i = 0; i < max_index; i++) {
- if (Subclass::HasEntryImpl(isolate, backing_store, i)) count++;
+ if (Subclass::HasEntryImpl(isolate, backing_store, InternalIndex(i))) {
+ count++;
+ }
}
return count;
}
@@ -2041,9 +2057,9 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
uint32_t length = Subclass::GetMaxNumberOfEntries(*receiver, *elements);
for (uint32_t i = 0; i < length; i++) {
if (IsFastPackedElementsKind(KindTraits::Kind) ||
- HasEntryImpl(isolate, *elements, i)) {
+ HasEntryImpl(isolate, *elements, InternalIndex(i))) {
RETURN_FAILURE_IF_NOT_SUCCESSFUL(accumulator->AddKey(
- Subclass::GetImpl(isolate, *elements, i), convert));
+ Subclass::GetImpl(isolate, *elements, InternalIndex(i)), convert));
}
}
return ExceptionStatus::kSuccess;
@@ -2157,7 +2173,7 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
DCHECK_LE(end, Subclass::GetCapacityImpl(*receiver, receiver->elements()));
for (uint32_t index = start; index < end; ++index) {
- Subclass::SetImpl(receiver, index, *obj_value);
+ Subclass::SetImpl(receiver, InternalIndex(index), *obj_value);
}
return *receiver;
}
@@ -2311,9 +2327,10 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
Handle<FixedArray> result = isolate->factory()->NewFixedArray(length);
Handle<FixedArrayBase> elements(object->elements(), isolate);
for (uint32_t i = 0; i < length; i++) {
- if (!Subclass::HasElementImpl(isolate, *object, i, *elements)) continue;
+ InternalIndex entry(i);
+ if (!Subclass::HasEntryImpl(isolate, *elements, entry)) continue;
Handle<Object> value;
- value = Subclass::GetImpl(isolate, *elements, i);
+ value = Subclass::GetImpl(isolate, *elements, entry);
if (value->IsName()) {
value = isolate->factory()->InternalizeName(Handle<Name>::cast(value));
}
@@ -2336,7 +2353,7 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
int new_length = length - 1;
int remove_index = remove_position == AT_START ? 0 : new_length;
Handle<Object> result =
- Subclass::GetImpl(isolate, *backing_store, remove_index);
+ Subclass::GetImpl(isolate, *backing_store, InternalIndex(remove_index));
if (remove_position == AT_START) {
Subclass::MoveElements(isolate, receiver, backing_store, 0, 1, new_length,
0, 0);
@@ -2396,7 +2413,8 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
for (uint32_t i = 0; i < copy_size; i++) {
Object argument = (*args)[src_index + i];
DCHECK(!argument.IsTheHole());
- Subclass::SetImpl(raw_backing_store, dst_index + i, argument, mode);
+ Subclass::SetImpl(raw_backing_store, InternalIndex(dst_index + i),
+ argument, mode);
}
}
};
@@ -2405,22 +2423,22 @@ template <typename Subclass, typename KindTraits>
class FastSmiOrObjectElementsAccessor
: public FastElementsAccessor<Subclass, KindTraits> {
public:
- static inline void SetImpl(Handle<JSObject> holder, uint32_t entry,
+ static inline void SetImpl(Handle<JSObject> holder, InternalIndex entry,
Object value) {
SetImpl(holder->elements(), entry, value);
}
- static inline void SetImpl(FixedArrayBase backing_store, uint32_t entry,
+ static inline void SetImpl(FixedArrayBase backing_store, InternalIndex entry,
Object value) {
- FixedArray::cast(backing_store).set(entry, value);
+ FixedArray::cast(backing_store).set(entry.as_int(), value);
}
- static inline void SetImpl(FixedArrayBase backing_store, uint32_t entry,
+ static inline void SetImpl(FixedArrayBase backing_store, InternalIndex entry,
Object value, WriteBarrierMode mode) {
- FixedArray::cast(backing_store).set(entry, value, mode);
+ FixedArray::cast(backing_store).set(entry.as_int(), value, mode);
}
- static Object GetRaw(FixedArray backing_store, uint32_t entry) {
+ static Object GetRaw(FixedArray backing_store, InternalIndex entry) {
uint32_t index = Subclass::GetIndexForEntryImpl(backing_store, entry);
return backing_store.get(index);
}
@@ -2488,8 +2506,9 @@ class FastSmiOrObjectElementsAccessor
isolate);
uint32_t length = elements->length();
for (uint32_t index = 0; index < length; ++index) {
- if (!Subclass::HasEntryImpl(isolate, *elements, index)) continue;
- Handle<Object> value = Subclass::GetImpl(isolate, *elements, index);
+ InternalIndex entry(index);
+ if (!Subclass::HasEntryImpl(isolate, *elements, entry)) continue;
+ Handle<Object> value = Subclass::GetImpl(isolate, *elements, entry);
value = MakeEntryPair(isolate, index, value);
values_or_entries->set(count++, *value);
}
@@ -2499,8 +2518,9 @@ class FastSmiOrObjectElementsAccessor
FixedArray elements = FixedArray::cast(object->elements());
uint32_t length = elements.length();
for (uint32_t index = 0; index < length; ++index) {
- if (!Subclass::HasEntryImpl(isolate, elements, index)) continue;
- Object value = GetRaw(elements, index);
+ InternalIndex entry(index);
+ if (!Subclass::HasEntryImpl(isolate, elements, entry)) continue;
+ Object value = GetRaw(elements, entry);
values_or_entries->set(count++, value);
}
}
@@ -2641,7 +2661,7 @@ class FastSealedObjectElementsAccessor
UNREACHABLE();
}
- static void DeleteImpl(Handle<JSObject> obj, uint32_t entry) {
+ static void DeleteImpl(Handle<JSObject> obj, InternalIndex entry) {
UNREACHABLE();
}
@@ -2733,17 +2753,17 @@ class FastFrozenObjectElementsAccessor
public:
using BackingStore = typename KindTraits::BackingStore;
- static inline void SetImpl(Handle<JSObject> holder, uint32_t entry,
+ static inline void SetImpl(Handle<JSObject> holder, InternalIndex entry,
Object value) {
UNREACHABLE();
}
- static inline void SetImpl(FixedArrayBase backing_store, uint32_t entry,
+ static inline void SetImpl(FixedArrayBase backing_store, InternalIndex entry,
Object value) {
UNREACHABLE();
}
- static inline void SetImpl(FixedArrayBase backing_store, uint32_t entry,
+ static inline void SetImpl(FixedArrayBase backing_store, InternalIndex entry,
Object value, WriteBarrierMode mode) {
UNREACHABLE();
}
@@ -2753,7 +2773,7 @@ class FastFrozenObjectElementsAccessor
UNREACHABLE();
}
- static void DeleteImpl(Handle<JSObject> obj, uint32_t entry) {
+ static void DeleteImpl(Handle<JSObject> obj, InternalIndex entry) {
UNREACHABLE();
}
@@ -2787,7 +2807,7 @@ class FastFrozenObjectElementsAccessor
}
static void ReconfigureImpl(Handle<JSObject> object,
- Handle<FixedArrayBase> store, uint32_t entry,
+ Handle<FixedArrayBase> store, InternalIndex entry,
Handle<Object> value,
PropertyAttributes attributes) {
UNREACHABLE();
@@ -2816,24 +2836,24 @@ class FastDoubleElementsAccessor
: public FastElementsAccessor<Subclass, KindTraits> {
public:
static Handle<Object> GetImpl(Isolate* isolate, FixedArrayBase backing_store,
- uint32_t entry) {
- return FixedDoubleArray::get(FixedDoubleArray::cast(backing_store), entry,
- isolate);
+ InternalIndex entry) {
+ return FixedDoubleArray::get(FixedDoubleArray::cast(backing_store),
+ entry.as_int(), isolate);
}
- static inline void SetImpl(Handle<JSObject> holder, uint32_t entry,
+ static inline void SetImpl(Handle<JSObject> holder, InternalIndex entry,
Object value) {
SetImpl(holder->elements(), entry, value);
}
- static inline void SetImpl(FixedArrayBase backing_store, uint32_t entry,
+ static inline void SetImpl(FixedArrayBase backing_store, InternalIndex entry,
Object value) {
- FixedDoubleArray::cast(backing_store).set(entry, value.Number());
+ FixedDoubleArray::cast(backing_store).set(entry.as_int(), value.Number());
}
- static inline void SetImpl(FixedArrayBase backing_store, uint32_t entry,
+ static inline void SetImpl(FixedArrayBase backing_store, InternalIndex entry,
Object value, WriteBarrierMode mode) {
- FixedDoubleArray::cast(backing_store).set(entry, value.Number());
+ FixedDoubleArray::cast(backing_store).set(entry.as_int(), value.Number());
}
static void CopyElementsImpl(Isolate* isolate, FixedArrayBase from,
@@ -2890,8 +2910,9 @@ class FastDoubleElementsAccessor
int count = 0;
uint32_t length = elements->length();
for (uint32_t index = 0; index < length; ++index) {
- if (!Subclass::HasEntryImpl(isolate, *elements, index)) continue;
- Handle<Object> value = Subclass::GetImpl(isolate, *elements, index);
+ InternalIndex entry(index);
+ if (!Subclass::HasEntryImpl(isolate, *elements, entry)) continue;
+ Handle<Object> value = Subclass::GetImpl(isolate, *elements, entry);
if (get_entries) {
value = MakeEntryPair(isolate, index, value);
}
@@ -2988,11 +3009,12 @@ class TypedElementsAccessor
// Conversion of scalar value to handlified object.
static Handle<Object> ToHandle(Isolate* isolate, ElementType value);
- static void SetImpl(Handle<JSObject> holder, uint32_t entry, Object value) {
+ static void SetImpl(Handle<JSObject> holder, InternalIndex entry,
+ Object value) {
Handle<JSTypedArray> typed_array = Handle<JSTypedArray>::cast(holder);
- DCHECK_LE(entry, typed_array->length());
- SetImpl(static_cast<ElementType*>(typed_array->DataPtr()), entry,
- FromObject(value));
+ DCHECK_LE(entry.raw_value(), typed_array->length());
+ SetImpl(static_cast<ElementType*>(typed_array->DataPtr()),
+ entry.raw_value(), FromObject(value));
}
static void SetImpl(ElementType* data_ptr, size_t entry, ElementType value) {
@@ -3019,18 +3041,18 @@ class TypedElementsAccessor
}
static Handle<Object> GetInternalImpl(Handle<JSObject> holder,
- uint32_t entry) {
+ InternalIndex entry) {
Handle<JSTypedArray> typed_array = Handle<JSTypedArray>::cast(holder);
Isolate* isolate = typed_array->GetIsolate();
- DCHECK_LE(entry, typed_array->length());
+ DCHECK_LE(entry.raw_value(), typed_array->length());
DCHECK(!typed_array->WasDetached());
- ElementType elem =
- GetImpl(static_cast<ElementType*>(typed_array->DataPtr()), entry);
+ ElementType elem = GetImpl(
+ static_cast<ElementType*>(typed_array->DataPtr()), entry.raw_value());
return ToHandle(isolate, elem);
}
static Handle<Object> GetImpl(Isolate* isolate, FixedArrayBase backing_store,
- uint32_t entry) {
+ InternalIndex entry) {
UNREACHABLE();
}
@@ -3059,12 +3081,12 @@ class TypedElementsAccessor
return result;
}
- static PropertyDetails GetDetailsImpl(JSObject holder, uint32_t entry) {
+ static PropertyDetails GetDetailsImpl(JSObject holder, InternalIndex entry) {
return PropertyDetails(kData, DONT_DELETE, PropertyCellType::kNoCell);
}
static PropertyDetails GetDetailsImpl(FixedArrayBase backing_store,
- uint32_t entry) {
+ InternalIndex entry) {
return PropertyDetails(kData, DONT_DELETE, PropertyCellType::kNoCell);
}
@@ -3085,21 +3107,22 @@ class TypedElementsAccessor
UNREACHABLE();
}
- static void DeleteImpl(Handle<JSObject> obj, uint32_t entry) {
+ static void DeleteImpl(Handle<JSObject> obj, InternalIndex entry) {
UNREACHABLE();
}
static uint32_t GetIndexForEntryImpl(FixedArrayBase backing_store,
- uint32_t entry) {
- return entry;
+ InternalIndex entry) {
+ return entry.as_uint32();
}
- static uint32_t GetEntryForIndexImpl(Isolate* isolate, JSObject holder,
- FixedArrayBase backing_store,
- uint32_t index, PropertyFilter filter) {
+ static InternalIndex GetEntryForIndexImpl(Isolate* isolate, JSObject holder,
+ FixedArrayBase backing_store,
+ uint32_t index,
+ PropertyFilter filter) {
return index < AccessorClass::GetCapacityImpl(holder, backing_store)
- ? index
- : kMaxUInt32;
+ ? InternalIndex(index)
+ : InternalIndex::NotFound();
}
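This hunk also retires the kMaxUInt32 magic value: absence is now the explicit InternalIndex::NotFound() sentinel, and callers test it by name. A hypothetical caller, mirroring the pattern used throughout the rest of the diff:

  // Hypothetical caller sketch: "absent" is now a named sentinel rather
  // than a comparison against kMaxUInt32.
  InternalIndex entry = AccessorClass::GetEntryForIndexImpl(
      isolate, holder, backing_store, index, ALL_PROPERTIES);
  if (entry.is_not_found()) {
    // index is absent; fall back to the prototype chain, hole handling, etc.
  } else {
    Handle<Object> value = AccessorClass::GetInternalImpl(holder_handle, entry);
  }

Note that holder_handle is a stand-in here; the surrounding functions in this file pass whatever Handle<JSObject> they already hold.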
static uint32_t GetCapacityImpl(JSObject holder,
@@ -3122,7 +3145,8 @@ class TypedElementsAccessor
Handle<FixedArrayBase> elements(receiver->elements(), isolate);
uint32_t length = AccessorClass::GetCapacityImpl(*receiver, *elements);
for (uint32_t i = 0; i < length; i++) {
- Handle<Object> value = AccessorClass::GetInternalImpl(receiver, i);
+ Handle<Object> value =
+ AccessorClass::GetInternalImpl(receiver, InternalIndex(i));
RETURN_FAILURE_IF_NOT_SUCCESSFUL(accumulator->AddKey(value, convert));
}
return ExceptionStatus::kSuccess;
@@ -3137,7 +3161,8 @@ class TypedElementsAccessor
Handle<FixedArrayBase> elements(object->elements(), isolate);
uint32_t length = AccessorClass::GetCapacityImpl(*object, *elements);
for (uint32_t index = 0; index < length; ++index) {
- Handle<Object> value = AccessorClass::GetInternalImpl(object, index);
+ Handle<Object> value =
+ AccessorClass::GetInternalImpl(object, InternalIndex(index));
if (get_entries) {
value = MakeEntryPair(isolate, index, value);
}
@@ -3361,7 +3386,8 @@ class TypedElementsAccessor
Handle<JSTypedArray> typed_array = Handle<JSTypedArray>::cast(object);
Handle<FixedArray> result = isolate->factory()->NewFixedArray(length);
for (uint32_t i = 0; i < length; i++) {
- Handle<Object> value = AccessorClass::GetInternalImpl(typed_array, i);
+ Handle<Object> value =
+ AccessorClass::GetInternalImpl(typed_array, InternalIndex(i));
result->set(i, *value);
}
return result;
@@ -3499,7 +3525,7 @@ class TypedElementsAccessor
return true;
}
- return !isolate->IsNoElementsProtectorIntact(context);
+ return !Protectors::IsNoElementsIntact(isolate);
}
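The protector query moves from a method on Isolate (which needed the current Context) to the static Protectors class introduced around this V8 version. A minimal sketch of the new call shape, assuming the protectors header path of this era:

  #include "src/execution/protectors.h"  // assumed include path

  bool CanSkipPrototypeChainLookups(Isolate* isolate) {
    // While the no-elements protector is intact, Array.prototype and
    // Object.prototype carry no indexed properties, so element reads
    // cannot be observed or intercepted via the prototype chain.
    return Protectors::IsNoElementsIntact(isolate);
  }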
static bool TryCopyElementsFastNumber(Context context, JSArray source,
@@ -3539,18 +3565,18 @@ class TypedElementsAccessor
if (kind == PACKED_SMI_ELEMENTS) {
FixedArray source_store = FixedArray::cast(source.elements());
- for (uint32_t i = 0; i < length; i++) {
- Object elem = source_store.get(i);
+ for (size_t i = 0; i < length; i++) {
+ Object elem = source_store.get(static_cast<int>(i));
SetImpl(dest_data, i, FromScalar(Smi::ToInt(elem)));
}
return true;
} else if (kind == HOLEY_SMI_ELEMENTS) {
FixedArray source_store = FixedArray::cast(source.elements());
- for (uint32_t i = 0; i < length; i++) {
- if (source_store.is_the_hole(isolate, i)) {
+ for (size_t i = 0; i < length; i++) {
+ if (source_store.is_the_hole(isolate, static_cast<int>(i))) {
SetImpl(dest_data, i, FromObject(undefined));
} else {
- Object elem = source_store.get(i);
+ Object elem = source_store.get(static_cast<int>(i));
SetImpl(dest_data, i, FromScalar(Smi::ToInt(elem)));
}
}
@@ -3560,20 +3586,20 @@ class TypedElementsAccessor
// unboxing the double here by using get_scalar.
FixedDoubleArray source_store = FixedDoubleArray::cast(source.elements());
- for (uint32_t i = 0; i < length; i++) {
+ for (size_t i = 0; i < length; i++) {
// Use the from_double conversion for this specific TypedArray type,
// rather than relying on C++ to convert elem.
- double elem = source_store.get_scalar(i);
+ double elem = source_store.get_scalar(static_cast<int>(i));
SetImpl(dest_data, i, FromScalar(elem));
}
return true;
} else if (kind == HOLEY_DOUBLE_ELEMENTS) {
FixedDoubleArray source_store = FixedDoubleArray::cast(source.elements());
- for (uint32_t i = 0; i < length; i++) {
- if (source_store.is_the_hole(i)) {
+ for (size_t i = 0; i < length; i++) {
+ if (source_store.is_the_hole(static_cast<int>(i))) {
SetImpl(dest_data, i, FromObject(undefined));
} else {
- double elem = source_store.get_scalar(i);
+ double elem = source_store.get_scalar(static_cast<int>(i));
SetImpl(dest_data, i, FromScalar(elem));
}
}
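These copy loops widen the induction variable from uint32_t to size_t as part of the 64-bit typed-array length work (v8:4153), while FixedArray and FixedDoubleArray accessors remain int-indexed, hence the static_cast<int> on every access. A condensed sketch of the pattern:

  // Sketch: size_t loop over a possibly-64-bit length, int-indexed store.
  void CopySmisToInt32(FixedArray source_store, int32_t* dest_data,
                       size_t length) {
    for (size_t i = 0; i < length; i++) {
      // The narrowing cast is safe only because a FixedArray's length is
      // capped by FixedArray::kMaxLength, which fits in an int.
      Object elem = source_store.get(static_cast<int>(i));
      dest_data[i] = static_cast<int32_t>(Smi::ToInt(elem));
    }
  }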
@@ -3588,7 +3614,8 @@ class TypedElementsAccessor
Isolate* isolate = destination->GetIsolate();
for (size_t i = 0; i < length; i++) {
Handle<Object> elem;
- if (i <= kMaxUInt32) {
+ // TODO(4153): This if-branch will subsume its else-branch.
+ if (i <= JSArray::kMaxArrayIndex) {
LookupIterator it(isolate, source, static_cast<uint32_t>(i));
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, elem,
Object::GetProperty(&it));
@@ -3619,8 +3646,7 @@ class TypedElementsAccessor
}
// The spec says we store the length, then get each element, so we don't
// need to check changes to length.
- // TODO(bmeurer, v8:4153): Remove this static_cast.
- SetImpl(destination, static_cast<uint32_t>(offset + i), *elem);
+ SetImpl(destination, InternalIndex(offset + i), *elem);
}
return *isolate->factory()->undefined_value();
}
@@ -3893,14 +3919,14 @@ class SloppyArgumentsElementsAccessor
}
static Handle<Object> GetImpl(Isolate* isolate, FixedArrayBase parameters,
- uint32_t entry) {
+ InternalIndex entry) {
Handle<SloppyArgumentsElements> elements(
SloppyArgumentsElements::cast(parameters), isolate);
uint32_t length = elements->parameter_map_length();
- if (entry < length) {
+ if (entry.as_uint32() < length) {
// Read context mapped entry.
DisallowHeapAllocation no_gc;
- Object probe = elements->get_mapped_entry(entry);
+ Object probe = elements->get_mapped_entry(entry.as_uint32());
DCHECK(!probe.IsTheHole(isolate));
Context context = elements->context();
int context_entry = Smi::ToInt(probe);
@@ -3909,7 +3935,7 @@ class SloppyArgumentsElementsAccessor
} else {
// Entry is not context mapped, defer to the arguments.
Handle<Object> result = ArgumentsAccessor::GetImpl(
- isolate, elements->arguments(), entry - length);
+ isolate, elements->arguments(), entry.adjust_down(length));
return Subclass::ConvertArgumentsStoreResult(isolate, elements, result);
}
}
@@ -3924,19 +3950,19 @@ class SloppyArgumentsElementsAccessor
UNREACHABLE();
}
- static inline void SetImpl(Handle<JSObject> holder, uint32_t entry,
+ static inline void SetImpl(Handle<JSObject> holder, InternalIndex entry,
Object value) {
SetImpl(holder->elements(), entry, value);
}
- static inline void SetImpl(FixedArrayBase store, uint32_t entry,
+ static inline void SetImpl(FixedArrayBase store, InternalIndex entry,
Object value) {
SloppyArgumentsElements elements = SloppyArgumentsElements::cast(store);
uint32_t length = elements.parameter_map_length();
- if (entry < length) {
+ if (entry.as_uint32() < length) {
// Store context mapped entry.
DisallowHeapAllocation no_gc;
- Object probe = elements.get_mapped_entry(entry);
+ Object probe = elements.get_mapped_entry(entry.as_uint32());
DCHECK(!probe.IsTheHole());
Context context = elements.context();
int context_entry = Smi::ToInt(probe);
@@ -3945,7 +3971,8 @@ class SloppyArgumentsElementsAccessor
} else {
// Entry is not context mapped, defer to arguments.
FixedArray arguments = elements.arguments();
- Object current = ArgumentsAccessor::GetRaw(arguments, entry - length);
+ Object current =
+ ArgumentsAccessor::GetRaw(arguments, entry.adjust_down(length));
if (current.IsAliasedArgumentsEntry()) {
AliasedArgumentsEntry alias = AliasedArgumentsEntry::cast(current);
Context context = elements.context();
@@ -3953,7 +3980,7 @@ class SloppyArgumentsElementsAccessor
DCHECK(!context.get(context_entry).IsTheHole());
context.set(context_entry, value);
} else {
- ArgumentsAccessor::SetImpl(arguments, entry - length, value);
+ ArgumentsAccessor::SetImpl(arguments, entry.adjust_down(length), value);
}
}
}
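The sloppy-arguments accessors view the entry space as two concatenated regions: entries below parameter_map_length() alias context slots, and everything at or above it is forwarded to the plain arguments store after shifting by the boundary. The new adjust_down/adjust_up helpers make that renumbering explicit instead of bare subtraction. A condensed sketch of the read path (mirroring GetImpl above; ArgumentsAccessor is this class's template parameter):

  // Sketch of the two-region dispatch:
  //   [0, length)        -> context-mapped entries (alias context slots)
  //   [length, capacity) -> entries in the backing arguments store
  Handle<Object> GetSloppy(Isolate* isolate, SloppyArgumentsElements elements,
                           InternalIndex entry) {
    uint32_t length = elements.parameter_map_length();
    if (entry.as_uint32() < length) {
      int context_entry =
          Smi::ToInt(elements.get_mapped_entry(entry.as_uint32()));
      return handle(elements.context().get(context_entry), isolate);
    }
    return ArgumentsAccessor::GetImpl(isolate, elements.arguments(),
                                      entry.adjust_down(length));
  }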
@@ -3989,8 +4016,8 @@ class SloppyArgumentsElementsAccessor
FixedArrayBase arguments = elements.arguments();
uint32_t nof_elements = 0;
uint32_t length = elements.parameter_map_length();
- for (uint32_t entry = 0; entry < length; entry++) {
- if (HasParameterMapArg(isolate, elements, entry)) nof_elements++;
+ for (uint32_t index = 0; index < length; index++) {
+ if (HasParameterMapArg(isolate, elements, index)) nof_elements++;
}
return nof_elements +
ArgumentsAccessor::NumberOfElementsImpl(receiver, arguments);
@@ -4002,7 +4029,8 @@ class SloppyArgumentsElementsAccessor
Isolate* isolate = accumulator->isolate();
Handle<FixedArrayBase> elements(receiver->elements(), isolate);
uint32_t length = GetCapacityImpl(*receiver, *elements);
- for (uint32_t entry = 0; entry < length; entry++) {
+ for (uint32_t index = 0; index < length; index++) {
+ InternalIndex entry(index);
if (!HasEntryImpl(isolate, *elements, entry)) continue;
Handle<Object> value = GetImpl(isolate, *elements, entry);
RETURN_FAILURE_IF_NOT_SUCCESSFUL(accumulator->AddKey(value, convert));
@@ -4011,15 +4039,16 @@ class SloppyArgumentsElementsAccessor
}
static bool HasEntryImpl(Isolate* isolate, FixedArrayBase parameters,
- uint32_t entry) {
+ InternalIndex entry) {
SloppyArgumentsElements elements =
SloppyArgumentsElements::cast(parameters);
uint32_t length = elements.parameter_map_length();
- if (entry < length) {
- return HasParameterMapArg(isolate, elements, entry);
+ if (entry.as_uint32() < length) {
+ return HasParameterMapArg(isolate, elements, entry.as_uint32());
}
FixedArrayBase arguments = elements.arguments();
- return ArgumentsAccessor::HasEntryImpl(isolate, arguments, entry - length);
+ return ArgumentsAccessor::HasEntryImpl(isolate, arguments,
+ entry.adjust_down(length));
}
static bool HasAccessorsImpl(JSObject holder, FixedArrayBase backing_store) {
@@ -4030,39 +4059,45 @@ class SloppyArgumentsElementsAccessor
}
static uint32_t GetIndexForEntryImpl(FixedArrayBase parameters,
- uint32_t entry) {
+ InternalIndex entry) {
SloppyArgumentsElements elements =
SloppyArgumentsElements::cast(parameters);
uint32_t length = elements.parameter_map_length();
- if (entry < length) return entry;
+ uint32_t index = entry.as_uint32();
+ if (index < length) return index;
FixedArray arguments = elements.arguments();
- return ArgumentsAccessor::GetIndexForEntryImpl(arguments, entry - length);
+ return ArgumentsAccessor::GetIndexForEntryImpl(arguments,
+ entry.adjust_down(length));
}
- static uint32_t GetEntryForIndexImpl(Isolate* isolate, JSObject holder,
- FixedArrayBase parameters,
- uint32_t index, PropertyFilter filter) {
+ static InternalIndex GetEntryForIndexImpl(Isolate* isolate, JSObject holder,
+ FixedArrayBase parameters,
+ uint32_t index,
+ PropertyFilter filter) {
SloppyArgumentsElements elements =
SloppyArgumentsElements::cast(parameters);
- if (HasParameterMapArg(isolate, elements, index)) return index;
+ if (HasParameterMapArg(isolate, elements, index)) {
+ return InternalIndex(index);
+ }
FixedArray arguments = elements.arguments();
- uint32_t entry = ArgumentsAccessor::GetEntryForIndexImpl(
+ InternalIndex entry = ArgumentsAccessor::GetEntryForIndexImpl(
isolate, holder, arguments, index, filter);
- if (entry == kMaxUInt32) return kMaxUInt32;
+ if (entry.is_not_found()) return entry;
// Arguments entries could overlap with the dictionary entries, hence offset
// them by the number of context mapped entries.
- return elements.parameter_map_length() + entry;
+ return entry.adjust_up(elements.parameter_map_length());
}
- static PropertyDetails GetDetailsImpl(JSObject holder, uint32_t entry) {
+ static PropertyDetails GetDetailsImpl(JSObject holder, InternalIndex entry) {
SloppyArgumentsElements elements =
SloppyArgumentsElements::cast(holder.elements());
uint32_t length = elements.parameter_map_length();
- if (entry < length) {
+ if (entry.as_uint32() < length) {
return PropertyDetails(kData, NONE, PropertyCellType::kNoCell);
}
FixedArray arguments = elements.arguments();
- return ArgumentsAccessor::GetDetailsImpl(arguments, entry - length);
+ return ArgumentsAccessor::GetDetailsImpl(arguments,
+ entry.adjust_down(length));
}
static bool HasParameterMapArg(Isolate* isolate,
@@ -4073,26 +4108,26 @@ class SloppyArgumentsElementsAccessor
return !elements.get_mapped_entry(index).IsTheHole(isolate);
}
- static void DeleteImpl(Handle<JSObject> obj, uint32_t entry) {
+ static void DeleteImpl(Handle<JSObject> obj, InternalIndex entry) {
Handle<SloppyArgumentsElements> elements(
SloppyArgumentsElements::cast(obj->elements()), obj->GetIsolate());
uint32_t length = elements->parameter_map_length();
- uint32_t delete_or_entry = entry;
- if (entry < length) {
- delete_or_entry = kMaxUInt32;
+ InternalIndex delete_or_entry = entry;
+ if (entry.as_uint32() < length) {
+ delete_or_entry = InternalIndex::NotFound();
}
Subclass::SloppyDeleteImpl(obj, elements, delete_or_entry);
// SloppyDeleteImpl allocates a new dictionary elements store. To keep heap
// verification happy, we postpone clearing out the mapped entry.
- if (entry < length) {
- elements->set_mapped_entry(entry,
+ if (entry.as_uint32() < length) {
+ elements->set_mapped_entry(entry.as_uint32(),
obj->GetReadOnlyRoots().the_hole_value());
}
}
static void SloppyDeleteImpl(Handle<JSObject> obj,
Handle<SloppyArgumentsElements> elements,
- uint32_t entry) {
+ InternalIndex entry) {
// Implemented in subclasses.
UNREACHABLE();
}
@@ -4152,9 +4187,9 @@ class SloppyArgumentsElementsAccessor
for (uint32_t k = start_from; k < length; ++k) {
DCHECK_EQ(object->map(), *original_map);
- uint32_t entry =
+ InternalIndex entry =
GetEntryForIndexImpl(isolate, *object, *elements, k, ALL_PROPERTIES);
- if (entry == kMaxUInt32) {
+ if (entry.is_not_found()) {
if (search_for_hole) return Just(true);
continue;
}
@@ -4193,9 +4228,9 @@ class SloppyArgumentsElementsAccessor
for (uint32_t k = start_from; k < length; ++k) {
DCHECK_EQ(object->map(), *original_map);
- uint32_t entry =
+ InternalIndex entry =
GetEntryForIndexImpl(isolate, *object, *elements, k, ALL_PROPERTIES);
- if (entry == kMaxUInt32) {
+ if (entry.is_not_found()) {
continue;
}
@@ -4246,14 +4281,15 @@ class SlowSloppyArgumentsElementsAccessor
}
static void SloppyDeleteImpl(Handle<JSObject> obj,
Handle<SloppyArgumentsElements> elements,
- uint32_t entry) {
+ InternalIndex entry) {
// No need to delete a context mapped entry from the arguments elements.
- if (entry == kMaxUInt32) return;
+ if (entry.is_not_found()) return;
Isolate* isolate = obj->GetIsolate();
Handle<NumberDictionary> dict(NumberDictionary::cast(elements->arguments()),
isolate);
- int length = elements->parameter_map_length();
- dict = NumberDictionary::DeleteEntry(isolate, dict, entry - length);
+ uint32_t length = elements->parameter_map_length();
+ dict = NumberDictionary::DeleteEntry(isolate, dict,
+ entry.as_uint32() - length);
elements->set_arguments(*dict);
}
static void AddImpl(Handle<JSObject> object, uint32_t index,
@@ -4278,15 +4314,15 @@ class SlowSloppyArgumentsElementsAccessor
}
static void ReconfigureImpl(Handle<JSObject> object,
- Handle<FixedArrayBase> store, uint32_t entry,
+ Handle<FixedArrayBase> store, InternalIndex entry,
Handle<Object> value,
PropertyAttributes attributes) {
Isolate* isolate = object->GetIsolate();
Handle<SloppyArgumentsElements> elements =
Handle<SloppyArgumentsElements>::cast(store);
uint32_t length = elements->parameter_map_length();
- if (entry < length) {
- Object probe = elements->get_mapped_entry(entry);
+ if (entry.as_uint32() < length) {
+ Object probe = elements->get_mapped_entry(entry.as_uint32());
DCHECK(!probe.IsTheHole(isolate));
Context context = elements->context();
int context_entry = Smi::ToInt(probe);
@@ -4294,7 +4330,7 @@ class SlowSloppyArgumentsElementsAccessor
context.set(context_entry, *value);
// Redefining attributes of an aliased element destroys fast aliasing.
- elements->set_mapped_entry(entry,
+ elements->set_mapped_entry(entry.as_uint32(),
ReadOnlyRoots(isolate).the_hole_value());
// For elements that are still writable we re-establish slow aliasing.
if ((attributes & READ_ONLY) == 0) {
@@ -4304,8 +4340,8 @@ class SlowSloppyArgumentsElementsAccessor
PropertyDetails details(kData, attributes, PropertyCellType::kNoCell);
Handle<NumberDictionary> arguments(
NumberDictionary::cast(elements->arguments()), isolate);
- arguments =
- NumberDictionary::Add(isolate, arguments, entry, value, details);
+ arguments = NumberDictionary::Add(isolate, arguments, entry.as_uint32(),
+ value, details);
// If the attributes were NONE, we would have called set rather than
// reconfigure.
DCHECK_NE(NONE, attributes);
@@ -4314,7 +4350,7 @@ class SlowSloppyArgumentsElementsAccessor
} else {
Handle<FixedArrayBase> arguments(elements->arguments(), isolate);
DictionaryElementsAccessor::ReconfigureImpl(
- object, arguments, entry - length, value, attributes);
+ object, arguments, entry.adjust_down(length), value, attributes);
}
}
};
@@ -4346,23 +4382,25 @@ class FastSloppyArgumentsElementsAccessor
static Handle<NumberDictionary> NormalizeArgumentsElements(
Handle<JSObject> object, Handle<SloppyArgumentsElements> elements,
- uint32_t* entry) {
+ InternalIndex* entry) {
Handle<NumberDictionary> dictionary = JSObject::NormalizeElements(object);
elements->set_arguments(*dictionary);
// A NotFound entry indicates that a context mapped element got deleted. In
// this case we only normalize the elements (i.e. migrate to SLOW_SLOPPY).
- if (*entry == kMaxUInt32) return dictionary;
+ if (entry->is_not_found()) return dictionary;
uint32_t length = elements->parameter_map_length();
- if (*entry >= length) {
+ if (entry->as_uint32() >= length) {
*entry =
- dictionary->FindEntry(object->GetIsolate(), *entry - length) + length;
+ InternalIndex(dictionary->FindEntry(object->GetIsolate(),
+ entry->as_uint32() - length) +
+ length);
}
return dictionary;
}
static void SloppyDeleteImpl(Handle<JSObject> obj,
Handle<SloppyArgumentsElements> elements,
- uint32_t entry) {
+ InternalIndex entry) {
// Always normalize element on deleting an entry.
NormalizeArgumentsElements(obj, elements, &entry);
SlowSloppyArgumentsElementsAccessor::SloppyDeleteImpl(obj, elements, entry);
@@ -4386,11 +4424,12 @@ class FastSloppyArgumentsElementsAccessor
// index to entry explicitly since the slot still contains the hole, so the
// current EntryForIndex would indicate that it is "absent" by returning
// InternalIndex::NotFound().
- FastHoleyObjectElementsAccessor::SetImpl(arguments, index, *value);
+ FastHoleyObjectElementsAccessor::SetImpl(arguments, InternalIndex(index),
+ *value);
}
static void ReconfigureImpl(Handle<JSObject> object,
- Handle<FixedArrayBase> store, uint32_t entry,
+ Handle<FixedArrayBase> store, InternalIndex entry,
Handle<Object> value,
PropertyAttributes attributes) {
DCHECK_EQ(object->elements(), *store);
@@ -4443,63 +4482,67 @@ class StringWrapperElementsAccessor
: public ElementsAccessorBase<Subclass, KindTraits> {
public:
static Handle<Object> GetInternalImpl(Handle<JSObject> holder,
- uint32_t entry) {
+ InternalIndex entry) {
return GetImpl(holder, entry);
}
- static Handle<Object> GetImpl(Handle<JSObject> holder, uint32_t entry) {
+ static Handle<Object> GetImpl(Handle<JSObject> holder, InternalIndex entry) {
Isolate* isolate = holder->GetIsolate();
Handle<String> string(GetString(*holder), isolate);
uint32_t length = static_cast<uint32_t>(string->length());
- if (entry < length) {
+ if (entry.as_uint32() < length) {
return isolate->factory()->LookupSingleCharacterStringFromCode(
- String::Flatten(isolate, string)->Get(entry));
+ String::Flatten(isolate, string)->Get(entry.as_int()));
}
return BackingStoreAccessor::GetImpl(isolate, holder->elements(),
- entry - length);
+ entry.adjust_down(length));
}
static Handle<Object> GetImpl(Isolate* isolate, FixedArrayBase elements,
- uint32_t entry) {
+ InternalIndex entry) {
UNREACHABLE();
}
- static PropertyDetails GetDetailsImpl(JSObject holder, uint32_t entry) {
+ static PropertyDetails GetDetailsImpl(JSObject holder, InternalIndex entry) {
uint32_t length = static_cast<uint32_t>(GetString(holder).length());
- if (entry < length) {
+ if (entry.as_uint32() < length) {
PropertyAttributes attributes =
static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE);
return PropertyDetails(kData, attributes, PropertyCellType::kNoCell);
}
- return BackingStoreAccessor::GetDetailsImpl(holder, entry - length);
+ return BackingStoreAccessor::GetDetailsImpl(holder,
+ entry.adjust_down(length));
}
- static uint32_t GetEntryForIndexImpl(Isolate* isolate, JSObject holder,
- FixedArrayBase backing_store,
- uint32_t index, PropertyFilter filter) {
+ static InternalIndex GetEntryForIndexImpl(Isolate* isolate, JSObject holder,
+ FixedArrayBase backing_store,
+ uint32_t index,
+ PropertyFilter filter) {
uint32_t length = static_cast<uint32_t>(GetString(holder).length());
- if (index < length) return index;
- uint32_t backing_store_entry = BackingStoreAccessor::GetEntryForIndexImpl(
- isolate, holder, backing_store, index, filter);
- if (backing_store_entry == kMaxUInt32) return kMaxUInt32;
- DCHECK(backing_store_entry < kMaxUInt32 - length);
- return backing_store_entry + length;
+ if (index < length) return InternalIndex(index);
+ InternalIndex backing_store_entry =
+ BackingStoreAccessor::GetEntryForIndexImpl(
+ isolate, holder, backing_store, index, filter);
+ if (backing_store_entry.is_not_found()) return backing_store_entry;
+ return backing_store_entry.adjust_up(length);
}
- static void DeleteImpl(Handle<JSObject> holder, uint32_t entry) {
+ static void DeleteImpl(Handle<JSObject> holder, InternalIndex entry) {
uint32_t length = static_cast<uint32_t>(GetString(*holder).length());
- if (entry < length) {
+ if (entry.as_uint32() < length) {
return; // String contents can't be deleted.
}
- BackingStoreAccessor::DeleteImpl(holder, entry - length);
+ BackingStoreAccessor::DeleteImpl(holder, entry.adjust_down(length));
}
- static void SetImpl(Handle<JSObject> holder, uint32_t entry, Object value) {
+ static void SetImpl(Handle<JSObject> holder, InternalIndex entry,
+ Object value) {
uint32_t length = static_cast<uint32_t>(GetString(*holder).length());
- if (entry < length) {
+ if (entry.as_uint32() < length) {
return; // String contents are read-only.
}
- BackingStoreAccessor::SetImpl(holder->elements(), entry - length, value);
+ BackingStoreAccessor::SetImpl(holder->elements(), entry.adjust_down(length),
+ value);
}
static void AddImpl(Handle<JSObject> object, uint32_t index,
@@ -4519,15 +4562,15 @@ class StringWrapperElementsAccessor
}
static void ReconfigureImpl(Handle<JSObject> object,
- Handle<FixedArrayBase> store, uint32_t entry,
+ Handle<FixedArrayBase> store, InternalIndex entry,
Handle<Object> value,
PropertyAttributes attributes) {
uint32_t length = static_cast<uint32_t>(GetString(*object).length());
- if (entry < length) {
+ if (entry.as_uint32() < length) {
return; // String contents can't be reconfigured.
}
- BackingStoreAccessor::ReconfigureImpl(object, store, entry - length, value,
- attributes);
+ BackingStoreAccessor::ReconfigureImpl(
+ object, store, entry.adjust_down(length), value, attributes);
}
V8_WARN_UNUSED_RESULT static ExceptionStatus AddElementsToKeyAccumulatorImpl(
diff --git a/deps/v8/src/objects/elements.h b/deps/v8/src/objects/elements.h
index b7fcd907a3..219a9ad73a 100644
--- a/deps/v8/src/objects/elements.h
+++ b/deps/v8/src/objects/elements.h
@@ -6,6 +6,7 @@
#define V8_OBJECTS_ELEMENTS_H_
#include "src/objects/elements-kind.h"
+#include "src/objects/internal-index.h"
#include "src/objects/keys.h"
#include "src/objects/objects.h"
@@ -50,11 +51,9 @@ class ElementsAccessor {
// Note: this is currently not implemented for string wrapper and
// typed array elements.
- virtual bool HasEntry(JSObject holder, uint32_t entry) = 0;
+ virtual bool HasEntry(JSObject holder, InternalIndex entry) = 0;
- // TODO(cbruni): HasEntry and Get should not be exposed publicly with the
- // entry parameter.
- virtual Handle<Object> Get(Handle<JSObject> holder, uint32_t entry) = 0;
+ virtual Handle<Object> Get(Handle<JSObject> holder, InternalIndex entry) = 0;
virtual bool HasAccessors(JSObject holder) = 0;
virtual uint32_t NumberOfElements(JSObject holder) = 0;
@@ -105,7 +104,8 @@ class ElementsAccessor {
static void InitializeOncePerProcess();
static void TearDown();
- virtual void Set(Handle<JSObject> holder, uint32_t entry, Object value) = 0;
+ virtual void Set(Handle<JSObject> holder, InternalIndex entry,
+ Object value) = 0;
virtual void Add(Handle<JSObject> object, uint32_t index,
Handle<Object> value, PropertyAttributes attributes,
@@ -178,18 +178,18 @@ class ElementsAccessor {
// indices are equivalent to entries. In the NumberDictionary
// ElementsAccessor, entries are mapped to an index using the KeyAt method on
// the NumberDictionary.
- virtual uint32_t GetEntryForIndex(Isolate* isolate, JSObject holder,
- FixedArrayBase backing_store,
- uint32_t index) = 0;
+ virtual InternalIndex GetEntryForIndex(Isolate* isolate, JSObject holder,
+ FixedArrayBase backing_store,
+ uint32_t index) = 0;
- virtual PropertyDetails GetDetails(JSObject holder, uint32_t entry) = 0;
+ virtual PropertyDetails GetDetails(JSObject holder, InternalIndex entry) = 0;
virtual void Reconfigure(Handle<JSObject> object,
- Handle<FixedArrayBase> backing_store, uint32_t entry,
- Handle<Object> value,
+ Handle<FixedArrayBase> backing_store,
+ InternalIndex entry, Handle<Object> value,
PropertyAttributes attributes) = 0;
// Deletes an element in an object.
- virtual void Delete(Handle<JSObject> holder, uint32_t entry) = 0;
+ virtual void Delete(Handle<JSObject> holder, InternalIndex entry) = 0;
// NOTE: this method violates the handlified function signature convention:
// raw pointer parameter |source_holder| in the function that allocates.
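The public virtual interface in elements.h now encodes the same discipline: GetEntryForIndex returns an InternalIndex and all entry-taking virtuals accept one, so a raw uint32_t index can no longer be passed where an entry is expected. A hypothetical caller chaining the two halves (GetElementsAccessor and the factory access are assumed from surrounding V8 code):

  // Hypothetical read path through the updated public interface.
  Handle<Object> ReadElement(Isolate* isolate, Handle<JSObject> holder,
                             uint32_t index) {
    ElementsAccessor* accessor = holder->GetElementsAccessor();
    InternalIndex entry = accessor->GetEntryForIndex(
        isolate, *holder, holder->elements(), index);
    if (entry.is_not_found()) return isolate->factory()->undefined_value();
    return accessor->Get(holder, entry);
  }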
diff --git a/deps/v8/src/objects/feedback-cell-inl.h b/deps/v8/src/objects/feedback-cell-inl.h
index e06cfce7de..188666d462 100644
--- a/deps/v8/src/objects/feedback-cell-inl.h
+++ b/deps/v8/src/objects/feedback-cell-inl.h
@@ -17,12 +17,7 @@
namespace v8 {
namespace internal {
-OBJECT_CONSTRUCTORS_IMPL(FeedbackCell, Struct)
-
-CAST_ACCESSOR(FeedbackCell)
-
-ACCESSORS(FeedbackCell, value, HeapObject, kValueOffset)
-INT32_ACCESSORS(FeedbackCell, interrupt_budget, kInterruptBudgetOffset)
+TQ_OBJECT_CONSTRUCTORS_IMPL(FeedbackCell)
void FeedbackCell::clear_padding() {
if (FeedbackCell::kAlignedSize == FeedbackCell::kUnalignedSize) return;
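FeedbackCell swaps its hand-written boilerplate for Torque-generated code: the single TQ_OBJECT_CONSTRUCTORS_IMPL macro replaces the constructor, cast and field-accessor macros deleted above, with the accessors now emitted from the class's Torque definition. Roughly (a sketch; the actual .tq file may differ in annotations):

  // Approximate correspondence between the deleted macros and the
  // Torque class definition that now drives code generation:
  //
  //   ACCESSORS(FeedbackCell, value, HeapObject, kValueOffset)
  //   INT32_ACCESSORS(FeedbackCell, interrupt_budget, kInterruptBudgetOffset)
  //
  // are generated from something like (feedback-cell.tq, sketch):
  //
  //   extern class FeedbackCell extends Struct {
  //     value: HeapObject;
  //     interrupt_budget: int32;
  //   }
  //
  // TorqueGeneratedFeedbackCell<FeedbackCell, Struct> then supplies the
  // constructors, cast and field offsets that were previously hand-written.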
diff --git a/deps/v8/src/objects/feedback-cell.h b/deps/v8/src/objects/feedback-cell.h
index 3c085f72d9..669efaeaec 100644
--- a/deps/v8/src/objects/feedback-cell.h
+++ b/deps/v8/src/objects/feedback-cell.h
@@ -18,7 +18,7 @@ namespace internal {
// number of closures created for a certain function per native
// context. There's at most one FeedbackCell for each function in
// a native context.
-class FeedbackCell : public Struct {
+class FeedbackCell : public TorqueGeneratedFeedbackCell<FeedbackCell, Struct> {
public:
static int GetInitialInterruptBudget() {
if (FLAG_lazy_feedback_allocation) {
@@ -27,19 +27,8 @@ class FeedbackCell : public Struct {
return FLAG_interrupt_budget;
}
- // [value]: value of the cell.
- DECL_ACCESSORS(value, HeapObject)
- DECL_INT32_ACCESSORS(interrupt_budget)
-
- DECL_CAST(FeedbackCell)
-
// Dispatched behavior.
DECL_PRINTER(FeedbackCell)
- DECL_VERIFIER(FeedbackCell)
-
- // Layout description.
- DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
- TORQUE_GENERATED_FEEDBACK_CELL_FIELDS)
static const int kUnalignedSize = kSize;
static const int kAlignedSize = RoundUp<kObjectAlignment>(int{kSize});
@@ -50,7 +39,7 @@ class FeedbackCell : public Struct {
using BodyDescriptor =
FixedBodyDescriptor<kValueOffset, kInterruptBudgetOffset, kAlignedSize>;
- OBJECT_CONSTRUCTORS(FeedbackCell, Struct);
+ TQ_OBJECT_CONSTRUCTORS(FeedbackCell)
};
} // namespace internal
diff --git a/deps/v8/src/objects/feedback-vector-inl.h b/deps/v8/src/objects/feedback-vector-inl.h
index 9cdc03b5c2..024b92165d 100644
--- a/deps/v8/src/objects/feedback-vector-inl.h
+++ b/deps/v8/src/objects/feedback-vector-inl.h
@@ -286,10 +286,6 @@ Handle<Symbol> FeedbackVector::MegamorphicSentinel(Isolate* isolate) {
return isolate->factory()->megamorphic_symbol();
}
-Handle<Symbol> FeedbackVector::PremonomorphicSentinel(Isolate* isolate) {
- return isolate->factory()->premonomorphic_symbol();
-}
-
Symbol FeedbackVector::RawUninitializedSentinel(Isolate* isolate) {
return ReadOnlyRoots(isolate).uninitialized_symbol();
}
diff --git a/deps/v8/src/objects/feedback-vector.cc b/deps/v8/src/objects/feedback-vector.cc
index 2fbc48a95e..4fe75ab325 100644
--- a/deps/v8/src/objects/feedback-vector.cc
+++ b/deps/v8/src/objects/feedback-vector.cc
@@ -52,7 +52,6 @@ static bool IsPropertyNameFeedback(MaybeObject feedback) {
Symbol symbol = Symbol::cast(heap_object);
ReadOnlyRoots roots = symbol.GetReadOnlyRoots();
return symbol != roots.uninitialized_symbol() &&
- symbol != roots.premonomorphic_symbol() &&
symbol != roots.megamorphic_symbol();
}
@@ -233,8 +232,8 @@ Handle<FeedbackVector> FeedbackVector::New(
const int slot_count = shared->feedback_metadata().slot_count();
- Handle<FeedbackVector> vector = factory->NewFeedbackVector(
- shared, closure_feedback_cell_array, AllocationType::kOld);
+ Handle<FeedbackVector> vector =
+ factory->NewFeedbackVector(shared, closure_feedback_cell_array);
DCHECK_EQ(vector->length(), slot_count);
@@ -524,12 +523,6 @@ bool FeedbackNexus::Clear() {
return feedback_updated;
}
-void FeedbackNexus::ConfigurePremonomorphic(Handle<Map> receiver_map) {
- SetFeedback(*FeedbackVector::PremonomorphicSentinel(GetIsolate()),
- SKIP_WRITE_BARRIER);
- SetFeedbackExtra(HeapObjectReference::Weak(*receiver_map));
-}
-
bool FeedbackNexus::ConfigureMegamorphic() {
DisallowHeapAllocation no_gc;
Isolate* isolate = GetIsolate();
@@ -585,13 +578,6 @@ InlineCacheState FeedbackNexus::ic_state() const {
case FeedbackSlotKind::kLoadGlobalInsideTypeof: {
if (feedback->IsSmi()) return MONOMORPHIC;
- if (feedback == MaybeObject::FromObject(
- *FeedbackVector::PremonomorphicSentinel(isolate))) {
- DCHECK(kind() == FeedbackSlotKind::kStoreGlobalSloppy ||
- kind() == FeedbackSlotKind::kStoreGlobalStrict);
- return PREMONOMORPHIC;
- }
-
DCHECK(feedback->IsWeakOrCleared());
MaybeObject extra = GetFeedbackExtra();
if (!feedback->IsCleared() ||
@@ -619,10 +605,6 @@ InlineCacheState FeedbackNexus::ic_state() const {
*FeedbackVector::MegamorphicSentinel(isolate))) {
return MEGAMORPHIC;
}
- if (feedback == MaybeObject::FromObject(
- *FeedbackVector::PremonomorphicSentinel(isolate))) {
- return PREMONOMORPHIC;
- }
if (feedback->IsWeakOrCleared()) {
// Don't check if the map is cleared.
return MONOMORPHIC;
@@ -974,14 +956,6 @@ int FeedbackNexus::ExtractMaps(MapHandles* maps) const {
Map map = Map::cast(heap_object);
maps->push_back(handle(map, isolate));
return 1;
- } else if (feedback->GetHeapObjectIfStrong(&heap_object) &&
- heap_object ==
- heap_object.GetReadOnlyRoots().premonomorphic_symbol()) {
- if (GetFeedbackExtra()->GetHeapObjectIfWeak(&heap_object)) {
- Map map = Map::cast(heap_object);
- maps->push_back(handle(map, isolate));
- return 1;
- }
}
return 0;
@@ -1203,9 +1177,11 @@ KeyedAccessStoreMode FeedbackNexus::GetKeyedAccessStoreMode() const {
handler = handle(Code::cast(data_handler->smi_handler()),
vector().GetIsolate());
} else if (maybe_code_handler.object()->IsSmi()) {
- // Skip proxy handlers.
- DCHECK_EQ(*(maybe_code_handler.object()),
- *StoreHandler::StoreProxy(GetIsolate()));
+ // Skip proxy handlers and the slow handler.
+ DCHECK(*(maybe_code_handler.object()) ==
+ *StoreHandler::StoreProxy(GetIsolate()) ||
+ *(maybe_code_handler.object()) ==
+ *StoreHandler::StoreSlow(GetIsolate()));
continue;
} else {
// Element store without prototype chain check.
diff --git a/deps/v8/src/objects/feedback-vector.h b/deps/v8/src/objects/feedback-vector.h
index af03bb4130..1c34266dc8 100644
--- a/deps/v8/src/objects/feedback-vector.h
+++ b/deps/v8/src/objects/feedback-vector.h
@@ -305,9 +305,6 @@ class FeedbackVector : public HeapObject {
// The object that indicates a megamorphic state.
static inline Handle<Symbol> MegamorphicSentinel(Isolate* isolate);
- // The object that indicates a premonomorphic state.
- static inline Handle<Symbol> PremonomorphicSentinel(Isolate* isolate);
-
// A raw version of the uninitialized sentinel that's safe to read during
// garbage collection (e.g., for patching the cache).
static inline Symbol RawUninitializedSentinel(Isolate* isolate);
@@ -567,7 +564,7 @@ class FeedbackMetadata : public HeapObject {
// possibly be confused with a pointer.
// NOLINTNEXTLINE(runtime/references) (false positive)
STATIC_ASSERT((Name::kEmptyHashField & kHeapObjectTag) == kHeapObjectTag);
-STATIC_ASSERT(Name::kEmptyHashField == 0x3);
+STATIC_ASSERT(Name::kEmptyHashField == 0x7);
// Verify that a set hash field will not look like a tagged object.
STATIC_ASSERT(Name::kHashNotComputedMask == kHeapObjectTag);
@@ -657,13 +654,12 @@ class V8_EXPORT_PRIVATE FeedbackNexus final {
bool IsCleared() const {
InlineCacheState state = ic_state();
- return !FLAG_use_ic || state == UNINITIALIZED || state == PREMONOMORPHIC;
+ return !FLAG_use_ic || state == UNINITIALIZED;
}
// Clear() returns true if the state of the underlying vector was changed.
bool Clear();
void ConfigureUninitialized();
- void ConfigurePremonomorphic(Handle<Map> receiver_map);
// ConfigureMegamorphic() returns true if the state of the underlying vector
// was changed. Extra feedback is cleared if the 0 parameter version is used.
bool ConfigureMegamorphic();
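Taken together, these feedback-vector hunks delete the PREMONOMORPHIC IC state end to end: the sentinel symbol, ConfigurePremonomorphic, and every ic_state()/ExtractMaps branch that recognized it. Feedback now moves straight from UNINITIALIZED to MONOMORPHIC. A hedged sketch of the simplified classification that remains:

  // Sketch: slot classification after the change (no PREMONOMORPHIC step).
  InlineCacheState ClassifySlot(MaybeObject feedback, Isolate* isolate) {
    if (feedback == MaybeObject::FromObject(
            *FeedbackVector::MegamorphicSentinel(isolate))) {
      return MEGAMORPHIC;
    }
    if (feedback->IsWeakOrCleared()) return MONOMORPHIC;  // one cached map
    return UNINITIALIZED;
  }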
diff --git a/deps/v8/src/objects/field-index-inl.h b/deps/v8/src/objects/field-index-inl.h
index 997cd68c32..93ffc59c72 100644
--- a/deps/v8/src/objects/field-index-inl.h
+++ b/deps/v8/src/objects/field-index-inl.h
@@ -60,13 +60,13 @@ int FieldIndex::GetLoadByFieldIndex() const {
return is_double() ? (result | 1) : result;
}
-FieldIndex FieldIndex::ForDescriptor(Map map, int descriptor_index) {
+FieldIndex FieldIndex::ForDescriptor(Map map, InternalIndex descriptor_index) {
Isolate* isolate = GetIsolateForPtrCompr(map);
return ForDescriptor(isolate, map, descriptor_index);
}
FieldIndex FieldIndex::ForDescriptor(Isolate* isolate, Map map,
- int descriptor_index) {
+ InternalIndex descriptor_index) {
PropertyDetails details =
map.instance_descriptors(isolate).GetDetails(descriptor_index);
int field_index = details.field_index();
diff --git a/deps/v8/src/objects/field-index.h b/deps/v8/src/objects/field-index.h
index 4fae87774d..fbde0bc609 100644
--- a/deps/v8/src/objects/field-index.h
+++ b/deps/v8/src/objects/field-index.h
@@ -5,6 +5,8 @@
#ifndef V8_OBJECTS_FIELD_INDEX_H_
#define V8_OBJECTS_FIELD_INDEX_H_
+// TODO(jkummerow): Consider forward-declaring instead.
+#include "src/objects/internal-index.h"
#include "src/objects/property-details.h"
#include "src/utils/utils.h"
@@ -27,9 +29,10 @@ class FieldIndex final {
Map map, int index,
Representation representation = Representation::Tagged());
static inline FieldIndex ForInObjectOffset(int offset, Encoding encoding);
- static inline FieldIndex ForDescriptor(Map map, int descriptor_index);
+ static inline FieldIndex ForDescriptor(Map map,
+ InternalIndex descriptor_index);
static inline FieldIndex ForDescriptor(Isolate* isolate, Map map,
- int descriptor_index);
+ InternalIndex descriptor_index);
inline int GetLoadByFieldIndex() const;
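The InternalIndex migration reaches descriptor arrays here: FieldIndex::ForDescriptor now takes an InternalIndex into the map's descriptor array instead of a raw int. A hypothetical iteration sketch using the new signature:

  // Hypothetical caller: walk a map's own data fields with InternalIndex.
  void VisitDataFields(Map map) {
    DescriptorArray descriptors = map.instance_descriptors();
    for (int i = 0; i < map.NumberOfOwnDescriptors(); ++i) {
      InternalIndex descriptor(i);
      PropertyDetails details = descriptors.GetDetails(descriptor);
      if (details.location() != kField) continue;
      FieldIndex index = FieldIndex::ForDescriptor(map, descriptor);
      // ... use index.offset(), index.is_inobject(), etc.
    }
  }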
diff --git a/deps/v8/src/objects/fixed-array-inl.h b/deps/v8/src/objects/fixed-array-inl.h
index 79c29a6eeb..9701f8ef09 100644
--- a/deps/v8/src/objects/fixed-array-inl.h
+++ b/deps/v8/src/objects/fixed-array-inl.h
@@ -240,7 +240,7 @@ int BinarySearch(T* array, Name name, int valid_entries,
for (; low <= limit; ++low) {
int sort_index = array->GetSortedKeyIndex(low);
- Name entry = array->GetKey(sort_index);
+ Name entry = array->GetKey(InternalIndex(sort_index));
uint32_t current_hash = entry.hash_field();
if (current_hash != hash) {
if (search_mode == ALL_ENTRIES && out_insertion_index != nullptr) {
@@ -272,7 +272,7 @@ int LinearSearch(T* array, Name name, int valid_entries,
int len = array->number_of_entries();
for (int number = 0; number < len; number++) {
int sorted_index = array->GetSortedKeyIndex(number);
- Name entry = array->GetKey(sorted_index);
+ Name entry = array->GetKey(InternalIndex(sorted_index));
uint32_t current_hash = entry.hash_field();
if (current_hash > hash) {
*out_insertion_index = sorted_index;
@@ -286,7 +286,7 @@ int LinearSearch(T* array, Name name, int valid_entries,
DCHECK_LE(valid_entries, array->number_of_entries());
DCHECK_NULL(out_insertion_index); // Not supported here.
for (int number = 0; number < valid_entries; number++) {
- if (array->GetKey(number) == name) return number;
+ if (array->GetKey(InternalIndex(number)) == name) return number;
}
return T::kNotFound;
}
diff --git a/deps/v8/src/objects/fixed-array.h b/deps/v8/src/objects/fixed-array.h
index 40290797f7..1963eef728 100644
--- a/deps/v8/src/objects/fixed-array.h
+++ b/deps/v8/src/objects/fixed-array.h
@@ -86,14 +86,14 @@ class FixedArrayBase : public HeapObject {
V8_EXPORT_PRIVATE bool IsCowArray() const;
-// Maximal allowed size, in bytes, of a single FixedArrayBase.
-// Prevents overflowing size computations, as well as extreme memory
-// consumption.
-#ifdef V8_HOST_ARCH_32_BIT
- static const int kMaxSize = 512 * MB;
-#else
- static const int kMaxSize = 1024 * MB;
-#endif // V8_HOST_ARCH_32_BIT
+ // Maximal allowed size, in bytes, of a single FixedArrayBase.
+ // Prevents overflowing size computations, as well as extreme memory
+ // consumption. It's either (512MB - kTaggedSize) or (1024MB - kTaggedSize).
+ // The -kTaggedSize is here to ensure that this max size always fits into a
+ // Smi, which is necessary for being able to create a free space filler for
+ // the whole array of kMaxSize.
+ static const int kMaxSize = 128 * kTaggedSize * MB - kTaggedSize;
+ STATIC_ASSERT(Smi::IsValid(kMaxSize));
// Layout description.
DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
@@ -120,7 +120,7 @@ class FixedArray : public FixedArrayBase {
// Return a grown copy if the index is bigger than the array's length.
V8_EXPORT_PRIVATE static Handle<FixedArray> SetAndGrow(
Isolate* isolate, Handle<FixedArray> array, int index,
- Handle<Object> value, AllocationType allocation = AllocationType::kYoung);
+ Handle<Object> value);
// Setter that uses write barrier.
inline void set(int index, Object value);
@@ -303,7 +303,6 @@ class WeakFixedArray : public HeapObject {
DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
TORQUE_GENERATED_WEAK_FIXED_ARRAY_FIELDS)
- static constexpr int kHeaderSize = kSize;
static const int kMaxLength =
(FixedArray::kMaxSize - kHeaderSize) / kTaggedSize;
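The kMaxSize rewrite above collapses the old 32/64-bit #ifdef into one expression and documents why one kTaggedSize is subtracted. Worked values, as a standalone recomputation (MB here is 1 << 20, matching V8's constant):

  #include <cstdint>

  constexpr int64_t MB = 1 << 20;
  // kTaggedSize == 4 (32-bit, or 64-bit with pointer compression):
  constexpr int64_t kMax32 = 128 * 4 * MB - 4;  // 512 MiB - 4  = 536870908
  // kTaggedSize == 8 (64-bit without pointer compression):
  constexpr int64_t kMax64 = 128 * 8 * MB - 8;  // 1024 MiB - 8 = 1073741816
  static_assert(kMax32 == 536870908, "512 MiB - 4");
  static_assert(kMax64 == 1073741816, "1024 MiB - 8");

For reference, a 31-bit Smi payload tops out at 2^30 - 1 = 1073741823, one short of an even 1024 MiB, which is why the constant cannot simply be 1024 * MB; the STATIC_ASSERT(Smi::IsValid(kMaxSize)) in the hunk verifies the chosen value for each build configuration at compile time.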
diff --git a/deps/v8/src/objects/function-kind.h b/deps/v8/src/objects/function-kind.h
index 8e9c68e426..9b0de76126 100644
--- a/deps/v8/src/objects/function-kind.h
+++ b/deps/v8/src/objects/function-kind.h
@@ -14,6 +14,7 @@ enum FunctionKind : uint8_t {
// BEGIN constructable functions
kNormalFunction,
kModule,
+ kAsyncModule,
// BEGIN class constructors
// BEGIN base constructors
kBaseConstructor,
@@ -61,7 +62,11 @@ inline bool IsArrowFunction(FunctionKind kind) {
}
inline bool IsModule(FunctionKind kind) {
- return kind == FunctionKind::kModule;
+ return IsInRange(kind, FunctionKind::kModule, FunctionKind::kAsyncModule);
+}
+
+inline bool IsAsyncModule(FunctionKind kind) {
+ return kind == FunctionKind::kAsyncModule;
}
inline bool IsAsyncGeneratorFunction(FunctionKind kind) {
@@ -163,6 +168,8 @@ inline const char* FunctionKind2String(FunctionKind kind) {
return "AsyncFunction";
case FunctionKind::kModule:
return "Module";
+ case FunctionKind::kAsyncModule:
+ return "AsyncModule";
case FunctionKind::kClassMembersInitializerFunction:
return "ClassMembersInitializerFunction";
case FunctionKind::kDefaultBaseConstructor:
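kAsyncModule is inserted directly after kModule, so IsModule becomes a single range check covering both while IsAsyncModule stays an equality test. The IsInRange idiom compiles to one unsigned subtraction plus comparison; a self-contained sketch of how such a helper works (V8's own lives in base, this is an illustrative reimplementation):

  #include <cstdint>

  // Range check via unsigned wraparound: values below `lower` wrap to large
  // numbers and fail the comparison. Requires contiguous enum values.
  template <typename T>
  constexpr bool IsInRange(T value, T lower, T upper) {
    using U = uint8_t;  // FunctionKind is declared as uint8_t in this file
    return static_cast<U>(static_cast<U>(value) - static_cast<U>(lower)) <=
           static_cast<U>(static_cast<U>(upper) - static_cast<U>(lower));
  }

The ordering constraint is the price: any future module-like kind must be placed inside the [kModule, kAsyncModule] range (or the upper bound widened) for IsModule to keep working.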
diff --git a/deps/v8/src/objects/heap-number-inl.h b/deps/v8/src/objects/heap-number-inl.h
index 78e65ca231..546b16e93d 100644
--- a/deps/v8/src/objects/heap-number-inl.h
+++ b/deps/v8/src/objects/heap-number-inl.h
@@ -7,8 +7,8 @@
#include "src/objects/heap-number.h"
-#include "src/objects/heap-object-inl.h"
#include "src/objects/objects-inl.h"
+#include "src/objects/primitive-heap-object-inl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -16,7 +16,7 @@
namespace v8 {
namespace internal {
-OBJECT_CONSTRUCTORS_IMPL(HeapNumber, HeapObject)
+OBJECT_CONSTRUCTORS_IMPL(HeapNumber, PrimitiveHeapObject)
CAST_ACCESSOR(HeapNumber)
diff --git a/deps/v8/src/objects/heap-number.h b/deps/v8/src/objects/heap-number.h
index 9063f3d22c..0982cc232e 100644
--- a/deps/v8/src/objects/heap-number.h
+++ b/deps/v8/src/objects/heap-number.h
@@ -5,7 +5,7 @@
#ifndef V8_OBJECTS_HEAP_NUMBER_H_
#define V8_OBJECTS_HEAP_NUMBER_H_
-#include "src/objects/heap-object.h"
+#include "src/objects/primitive-heap-object.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -15,7 +15,7 @@ namespace internal {
// The HeapNumber class describes heap allocated numbers that cannot be
// represented in a Smi (small integer).
-class HeapNumber : public HeapObject {
+class HeapNumber : public PrimitiveHeapObject {
public:
// [value]: number value.
inline double value() const;
@@ -28,7 +28,7 @@ class HeapNumber : public HeapObject {
inline int get_sign();
// Layout description.
- static const int kValueOffset = HeapObject::kHeaderSize;
+ static const int kValueOffset = PrimitiveHeapObject::kHeaderSize;
// IEEE doubles are two 32 bit words. The first is just mantissa, the second
// is a mixture of sign, exponent and mantissa. The offsets of two 32 bit
// words within double numbers are endian dependent and they are set
@@ -59,7 +59,7 @@ class HeapNumber : public HeapObject {
DECL_CAST(HeapNumber)
V8_EXPORT_PRIVATE void HeapNumberPrint(std::ostream& os);
- OBJECT_CONSTRUCTORS(HeapNumber, HeapObject);
+ OBJECT_CONSTRUCTORS(HeapNumber, PrimitiveHeapObject);
};
} // namespace internal
diff --git a/deps/v8/src/objects/instance-type.h b/deps/v8/src/objects/instance-type.h
index 9a855de95b..f993197204 100644
--- a/deps/v8/src/objects/instance-type.h
+++ b/deps/v8/src/objects/instance-type.h
@@ -80,8 +80,8 @@ static inline bool IsShortcutCandidate(int type) {
enum InstanceType : uint16_t {
// String types.
- INTERNALIZED_STRING_TYPE = kTwoByteStringTag | kSeqStringTag |
- kInternalizedTag, // FIRST_PRIMITIVE_TYPE
+ INTERNALIZED_STRING_TYPE =
+ kTwoByteStringTag | kSeqStringTag | kInternalizedTag,
ONE_BYTE_INTERNALIZED_STRING_TYPE =
kOneByteStringTag | kSeqStringTag | kInternalizedTag,
EXTERNAL_INTERNALIZED_STRING_TYPE =
@@ -116,262 +116,41 @@ enum InstanceType : uint16_t {
THIN_ONE_BYTE_STRING_TYPE =
kOneByteStringTag | kThinStringTag | kNotInternalizedTag,
- // Non-string names
- SYMBOL_TYPE =
- 1 + (kIsNotInternalizedMask | kUncachedExternalStringMask |
- kStringEncodingMask |
- kStringRepresentationMask), // FIRST_NONSTRING_TYPE, LAST_NAME_TYPE
-
- // Other primitives (cannot contain non-map-word pointers to heap objects).
- HEAP_NUMBER_TYPE,
- BIGINT_TYPE,
- ODDBALL_TYPE, // LAST_PRIMITIVE_TYPE
-
- // Objects allocated in their own spaces (never in new space).
- MAP_TYPE,
- CODE_TYPE,
-
- // "Data", objects that cannot contain non-map-word pointers to heap
- // objects.
- FOREIGN_TYPE,
- BYTE_ARRAY_TYPE,
- BYTECODE_ARRAY_TYPE,
- FREE_SPACE_TYPE,
- FIXED_DOUBLE_ARRAY_TYPE,
- FEEDBACK_METADATA_TYPE,
- FILLER_TYPE, // LAST_DATA_TYPE
-
- // Structs.
- ACCESS_CHECK_INFO_TYPE,
- ACCESSOR_INFO_TYPE,
- ACCESSOR_PAIR_TYPE,
- ALIASED_ARGUMENTS_ENTRY_TYPE,
- ALLOCATION_MEMENTO_TYPE,
- ARRAY_BOILERPLATE_DESCRIPTION_TYPE,
- ASM_WASM_DATA_TYPE,
- ASYNC_GENERATOR_REQUEST_TYPE,
- CLASS_POSITIONS_TYPE,
- DEBUG_INFO_TYPE,
- ENUM_CACHE_TYPE,
- FUNCTION_TEMPLATE_INFO_TYPE,
- FUNCTION_TEMPLATE_RARE_DATA_TYPE,
- INTERCEPTOR_INFO_TYPE,
- INTERPRETER_DATA_TYPE,
- OBJECT_TEMPLATE_INFO_TYPE,
- PROMISE_CAPABILITY_TYPE,
- PROMISE_REACTION_TYPE,
- PROTOTYPE_INFO_TYPE,
- SCRIPT_TYPE,
- SOURCE_POSITION_TABLE_WITH_FRAME_CACHE_TYPE,
- SOURCE_TEXT_MODULE_INFO_ENTRY_TYPE,
- STACK_FRAME_INFO_TYPE,
- STACK_TRACE_FRAME_TYPE,
- TEMPLATE_OBJECT_DESCRIPTION_TYPE,
- TUPLE2_TYPE,
- TUPLE3_TYPE,
- WASM_CAPI_FUNCTION_DATA_TYPE,
- WASM_DEBUG_INFO_TYPE,
- WASM_EXCEPTION_TAG_TYPE,
- WASM_EXPORTED_FUNCTION_DATA_TYPE,
- WASM_INDIRECT_FUNCTION_TABLE_TYPE,
- WASM_JS_FUNCTION_DATA_TYPE,
-
- CALLABLE_TASK_TYPE, // FIRST_MICROTASK_TYPE
- CALLBACK_TASK_TYPE,
- PROMISE_FULFILL_REACTION_JOB_TASK_TYPE,
- PROMISE_REJECT_REACTION_JOB_TASK_TYPE,
- PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE, // LAST_MICROTASK_TYPE
-
-#define MAKE_TORQUE_INSTANCE_TYPE(V) V,
- TORQUE_DEFINED_INSTANCE_TYPES(MAKE_TORQUE_INSTANCE_TYPE)
+// Most instance types are defined in Torque, with the exception of the string
+// types above. They are ordered by inheritance hierarchy so that we can easily
+// use range checks to determine whether an object is an instance of a subclass
+// of any type. There are a few more constraints specified in the Torque type
+// definitions:
+// - Some instance types are exposed in v8.h, so they are locked to specific
+// values to not unnecessarily change the ABI.
+// - JSSpecialObject and JSCustomElementsObject are aligned with the beginning
+// of the JSObject range, so that we can use a larger range check from
+// FIRST_JS_RECEIVER_TYPE to the end of those ranges and include JSProxy too.
+// - JSFunction is last, meaning we can use a single inequality check to
+// determine whether an instance type is within the range for any class in the
+// inheritance hierarchy of JSFunction. This includes commonly-checked classes
+// JSObject and JSReceiver.
+#define MAKE_TORQUE_INSTANCE_TYPE(TYPE, value) TYPE = value,
+ TORQUE_ASSIGNED_INSTANCE_TYPES(MAKE_TORQUE_INSTANCE_TYPE)
#undef MAKE_TORQUE_INSTANCE_TYPE
- // Modules
- SOURCE_TEXT_MODULE_TYPE, // FIRST_MODULE_TYPE
- SYNTHETIC_MODULE_TYPE, // LAST_MODULE_TYPE
-
- ALLOCATION_SITE_TYPE,
- EMBEDDER_DATA_ARRAY_TYPE,
- // FixedArrays.
- FIXED_ARRAY_TYPE, // FIRST_FIXED_ARRAY_TYPE
- OBJECT_BOILERPLATE_DESCRIPTION_TYPE,
- CLOSURE_FEEDBACK_CELL_ARRAY_TYPE,
- HASH_TABLE_TYPE, // FIRST_HASH_TABLE_TYPE
- ORDERED_HASH_MAP_TYPE,
- ORDERED_HASH_SET_TYPE,
- ORDERED_NAME_DICTIONARY_TYPE,
- NAME_DICTIONARY_TYPE,
- GLOBAL_DICTIONARY_TYPE,
- NUMBER_DICTIONARY_TYPE,
- SIMPLE_NUMBER_DICTIONARY_TYPE,
- STRING_TABLE_TYPE,
- EPHEMERON_HASH_TABLE_TYPE, // LAST_HASH_TABLE_TYPE
- SCOPE_INFO_TYPE,
- SCRIPT_CONTEXT_TABLE_TYPE, // LAST_FIXED_ARRAY_TYPE,
-
- // Contexts.
- AWAIT_CONTEXT_TYPE, // FIRST_CONTEXT_TYPE
- BLOCK_CONTEXT_TYPE,
- CATCH_CONTEXT_TYPE,
- DEBUG_EVALUATE_CONTEXT_TYPE,
- EVAL_CONTEXT_TYPE,
- FUNCTION_CONTEXT_TYPE,
- MODULE_CONTEXT_TYPE,
- NATIVE_CONTEXT_TYPE,
- SCRIPT_CONTEXT_TYPE,
- WITH_CONTEXT_TYPE, // LAST_CONTEXT_TYPE
-
- WEAK_FIXED_ARRAY_TYPE, // FIRST_WEAK_FIXED_ARRAY_TYPE
- TRANSITION_ARRAY_TYPE, // LAST_WEAK_FIXED_ARRAY_TYPE
-
- // Misc.
- CALL_HANDLER_INFO_TYPE,
- CELL_TYPE,
- CODE_DATA_CONTAINER_TYPE,
- DESCRIPTOR_ARRAY_TYPE,
- FEEDBACK_CELL_TYPE,
- FEEDBACK_VECTOR_TYPE,
- LOAD_HANDLER_TYPE,
- PREPARSE_DATA_TYPE,
- PROPERTY_ARRAY_TYPE,
- PROPERTY_CELL_TYPE,
- SHARED_FUNCTION_INFO_TYPE,
- SMALL_ORDERED_HASH_MAP_TYPE,
- SMALL_ORDERED_HASH_SET_TYPE,
- SMALL_ORDERED_NAME_DICTIONARY_TYPE,
- STORE_HANDLER_TYPE,
- UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_TYPE,
- UNCOMPILED_DATA_WITH_PREPARSE_DATA_TYPE,
- WEAK_ARRAY_LIST_TYPE,
- WEAK_CELL_TYPE,
-
- // All the following types are subtypes of JSReceiver, which corresponds to
- // objects in the JS sense. The first and the last type in this range are
- // the two forms of function. This organization enables using the same
- // compares for checking the JS_RECEIVER and the NONCALLABLE_JS_OBJECT range.
- // Some of the following instance types are exposed in v8.h, so to not
- // unnecessarily change the ABI when we introduce new instance types in the
- // future, we leave some space between instance types.
- JS_PROXY_TYPE = 0x0400, // FIRST_JS_RECEIVER_TYPE
- JS_GLOBAL_OBJECT_TYPE, // FIRST_JS_OBJECT_TYPE
- JS_GLOBAL_PROXY_TYPE,
- JS_MODULE_NAMESPACE_TYPE,
- // Like JS_API_OBJECT_TYPE, but requires access checks and/or has
- // interceptors.
- JS_SPECIAL_API_OBJECT_TYPE = 0x0410, // LAST_SPECIAL_RECEIVER_TYPE
- JS_PRIMITIVE_WRAPPER_TYPE, // LAST_CUSTOM_ELEMENTS_RECEIVER
- // Like JS_OBJECT_TYPE, but created from API function.
- JS_API_OBJECT_TYPE = 0x0420,
- JS_OBJECT_TYPE,
- JS_ARGUMENTS_TYPE,
- JS_ARRAY_BUFFER_TYPE,
- JS_ARRAY_ITERATOR_TYPE,
- JS_ARRAY_TYPE,
- JS_ASYNC_FROM_SYNC_ITERATOR_TYPE,
- JS_ASYNC_FUNCTION_OBJECT_TYPE,
- JS_ASYNC_GENERATOR_OBJECT_TYPE,
- JS_CONTEXT_EXTENSION_OBJECT_TYPE,
- JS_DATE_TYPE,
- JS_ERROR_TYPE,
- JS_GENERATOR_OBJECT_TYPE,
- JS_MAP_TYPE,
- JS_MAP_KEY_ITERATOR_TYPE,
- JS_MAP_KEY_VALUE_ITERATOR_TYPE,
- JS_MAP_VALUE_ITERATOR_TYPE,
- JS_MESSAGE_OBJECT_TYPE,
- JS_PROMISE_TYPE,
- JS_REGEXP_TYPE,
- JS_REGEXP_STRING_ITERATOR_TYPE,
- JS_SET_TYPE,
- JS_SET_KEY_VALUE_ITERATOR_TYPE,
- JS_SET_VALUE_ITERATOR_TYPE,
- JS_STRING_ITERATOR_TYPE,
- JS_WEAK_REF_TYPE,
- JS_FINALIZATION_GROUP_CLEANUP_ITERATOR_TYPE,
- JS_FINALIZATION_GROUP_TYPE,
- JS_WEAK_MAP_TYPE,
- JS_WEAK_SET_TYPE,
-
- JS_TYPED_ARRAY_TYPE,
- JS_DATA_VIEW_TYPE,
-
-#ifdef V8_INTL_SUPPORT
- JS_INTL_V8_BREAK_ITERATOR_TYPE,
- JS_INTL_COLLATOR_TYPE,
- JS_INTL_DATE_TIME_FORMAT_TYPE,
- JS_INTL_LIST_FORMAT_TYPE,
- JS_INTL_LOCALE_TYPE,
- JS_INTL_NUMBER_FORMAT_TYPE,
- JS_INTL_PLURAL_RULES_TYPE,
- JS_INTL_RELATIVE_TIME_FORMAT_TYPE,
- JS_INTL_SEGMENT_ITERATOR_TYPE,
- JS_INTL_SEGMENTER_TYPE,
-#endif // V8_INTL_SUPPORT
-
- WASM_EXCEPTION_TYPE,
- WASM_GLOBAL_TYPE,
- WASM_INSTANCE_TYPE,
- WASM_MEMORY_TYPE,
- WASM_MODULE_TYPE,
- WASM_TABLE_TYPE,
- JS_BOUND_FUNCTION_TYPE,
- JS_FUNCTION_TYPE, // LAST_JS_OBJECT_TYPE, LAST_JS_RECEIVER_TYPE
-
// Pseudo-types
- FIRST_TYPE = 0x0,
- LAST_TYPE = JS_FUNCTION_TYPE,
- FIRST_STRING_TYPE = FIRST_TYPE,
- FIRST_NAME_TYPE = FIRST_STRING_TYPE,
- LAST_NAME_TYPE = SYMBOL_TYPE,
FIRST_UNIQUE_NAME_TYPE = INTERNALIZED_STRING_TYPE,
LAST_UNIQUE_NAME_TYPE = SYMBOL_TYPE,
FIRST_NONSTRING_TYPE = SYMBOL_TYPE,
- FIRST_PRIMITIVE_TYPE = FIRST_NAME_TYPE,
- LAST_PRIMITIVE_TYPE = ODDBALL_TYPE,
- FIRST_FUNCTION_TYPE = JS_BOUND_FUNCTION_TYPE,
- LAST_FUNCTION_TYPE = JS_FUNCTION_TYPE,
- // Boundaries for testing if given HeapObject is a subclass of FixedArray.
- FIRST_FIXED_ARRAY_TYPE = FIXED_ARRAY_TYPE,
- LAST_FIXED_ARRAY_TYPE = SCRIPT_CONTEXT_TABLE_TYPE,
- // Boundaries for testing if given HeapObject is a subclass of HashTable
- FIRST_HASH_TABLE_TYPE = HASH_TABLE_TYPE,
- LAST_HASH_TABLE_TYPE = EPHEMERON_HASH_TABLE_TYPE,
- // Boundaries for testing if given HeapObject is a subclass of WeakFixedArray.
- FIRST_WEAK_FIXED_ARRAY_TYPE = WEAK_FIXED_ARRAY_TYPE,
- LAST_WEAK_FIXED_ARRAY_TYPE = TRANSITION_ARRAY_TYPE,
- // Boundaries for testing if given HeapObject is a Context
- FIRST_CONTEXT_TYPE = AWAIT_CONTEXT_TYPE,
- LAST_CONTEXT_TYPE = WITH_CONTEXT_TYPE,
- // Boundaries for testing if given HeapObject is a subclass of Microtask.
- FIRST_MICROTASK_TYPE = CALLABLE_TASK_TYPE,
- LAST_MICROTASK_TYPE = PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE,
- // Boundaries of module record types
- FIRST_MODULE_TYPE = SOURCE_TEXT_MODULE_TYPE,
- LAST_MODULE_TYPE = SYNTHETIC_MODULE_TYPE,
- // Boundary for promotion to old space.
- LAST_DATA_TYPE = FILLER_TYPE,
- // Boundary for objects represented as JSReceiver (i.e. JSObject or JSProxy).
- // Note that there is no range for JSObject or JSProxy, since their subtypes
- // are not continuous in this enum! The enum ranges instead reflect the
- // external class names, where proxies are treated as either ordinary objects,
- // or functions.
- FIRST_JS_RECEIVER_TYPE = JS_PROXY_TYPE,
- LAST_JS_RECEIVER_TYPE = LAST_TYPE,
- // Boundaries for testing the types represented as JSObject
- FIRST_JS_OBJECT_TYPE = JS_GLOBAL_OBJECT_TYPE,
- LAST_JS_OBJECT_TYPE = LAST_TYPE,
// Boundary for testing JSReceivers that need special property lookup handling
- LAST_SPECIAL_RECEIVER_TYPE = JS_SPECIAL_API_OBJECT_TYPE,
+ LAST_SPECIAL_RECEIVER_TYPE = LAST_JS_SPECIAL_OBJECT_TYPE,
// Boundary case for testing JSReceivers that may have elements while having
// an empty fixed array as elements backing store. This is true for string
// wrappers.
- LAST_CUSTOM_ELEMENTS_RECEIVER = JS_PRIMITIVE_WRAPPER_TYPE,
-
- FIRST_SET_ITERATOR_TYPE = JS_SET_KEY_VALUE_ITERATOR_TYPE,
- LAST_SET_ITERATOR_TYPE = JS_SET_VALUE_ITERATOR_TYPE,
-
- FIRST_MAP_ITERATOR_TYPE = JS_MAP_KEY_ITERATOR_TYPE,
- LAST_MAP_ITERATOR_TYPE = JS_MAP_VALUE_ITERATOR_TYPE,
+ LAST_CUSTOM_ELEMENTS_RECEIVER = LAST_JS_CUSTOM_ELEMENTS_OBJECT_TYPE,
+
+ // Convenient names for things where the generated name is awkward:
+ FIRST_TYPE = FIRST_HEAP_OBJECT_TYPE,
+ LAST_TYPE = LAST_HEAP_OBJECT_TYPE,
+ FIRST_FUNCTION_TYPE = FIRST_JS_FUNCTION_OR_BOUND_FUNCTION_TYPE,
+ LAST_FUNCTION_TYPE = LAST_JS_FUNCTION_OR_BOUND_FUNCTION_TYPE,
+ BIGINT_TYPE = BIG_INT_BASE_TYPE,
};
// This constant is defined outside of the InstanceType enum because the
@@ -389,6 +168,40 @@ STATIC_ASSERT(FIRST_NONSTRING_TYPE == Internals::kFirstNonstringType);
STATIC_ASSERT(ODDBALL_TYPE == Internals::kOddballType);
STATIC_ASSERT(FOREIGN_TYPE == Internals::kForeignType);
+// Verify that string types are all less than other types.
+#define CHECK_STRING_RANGE(TYPE, ...) \
+ STATIC_ASSERT(TYPE < FIRST_NONSTRING_TYPE);
+STRING_TYPE_LIST(CHECK_STRING_RANGE)
+#undef CHECK_STRING_RANGE
+#define CHECK_NONSTRING_RANGE(TYPE) STATIC_ASSERT(TYPE >= FIRST_NONSTRING_TYPE);
+TORQUE_ASSIGNED_INSTANCE_TYPE_LIST(CHECK_NONSTRING_RANGE)
+#undef CHECK_NONSTRING_RANGE
+
+// Two ranges don't cleanly follow the inheritance hierarchy. Here we ensure
+// that only expected types fall within these ranges.
+// - From FIRST_JS_RECEIVER_TYPE to LAST_SPECIAL_RECEIVER_TYPE should correspond
+// to the union type JSProxy | JSSpecialObject.
+// - From FIRST_JS_RECEIVER_TYPE to LAST_CUSTOM_ELEMENTS_RECEIVER should
+// correspond to the union type JSProxy | JSCustomElementsObject.
+// Note in particular that these ranges include all subclasses of JSReceiver
+// that are not also subclasses of JSObject (currently only JSProxy).
+#define CHECK_INSTANCE_TYPE(TYPE) \
+ STATIC_ASSERT((TYPE >= FIRST_JS_RECEIVER_TYPE && \
+ TYPE <= LAST_SPECIAL_RECEIVER_TYPE) == \
+ (TYPE == JS_PROXY_TYPE || TYPE == JS_GLOBAL_OBJECT_TYPE || \
+ TYPE == JS_GLOBAL_PROXY_TYPE || \
+ TYPE == JS_MODULE_NAMESPACE_TYPE || \
+ TYPE == JS_SPECIAL_API_OBJECT_TYPE)); \
+ STATIC_ASSERT((TYPE >= FIRST_JS_RECEIVER_TYPE && \
+ TYPE <= LAST_CUSTOM_ELEMENTS_RECEIVER) == \
+ (TYPE == JS_PROXY_TYPE || TYPE == JS_GLOBAL_OBJECT_TYPE || \
+ TYPE == JS_GLOBAL_PROXY_TYPE || \
+ TYPE == JS_MODULE_NAMESPACE_TYPE || \
+ TYPE == JS_SPECIAL_API_OBJECT_TYPE || \
+ TYPE == JS_PRIMITIVE_WRAPPER_TYPE));
+TORQUE_ASSIGNED_INSTANCE_TYPE_LIST(CHECK_INSTANCE_TYPE)
+#undef CHECK_INSTANCE_TYPE
+
// Make sure it doesn't matter whether we sign-extend or zero-extend these
// values, because Torque treats InstanceType as signed.
STATIC_ASSERT(LAST_TYPE < 1 << 15);
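
The asserts above pin down the invariant that every range-based instance-type check depends on: membership in a union of types reduces to two integer comparisons, which is sound only while the enum keeps those members contiguous. A minimal self-contained sketch of the idiom (hypothetical enum, not the real V8 type list):

    #include <cstdint>

    enum SketchType : uint16_t {
      A_TYPE,
      B_TYPE,  // FIRST_BC_TYPE
      C_TYPE,  // LAST_BC_TYPE
      D_TYPE,
      FIRST_BC_TYPE = B_TYPE,
      LAST_BC_TYPE = C_TYPE,
    };

    constexpr bool IsBC(SketchType t) {
      return t >= FIRST_BC_TYPE && t <= LAST_BC_TYPE;
    }

    // The two comparisons agree with explicit membership for every member,
    // which is exactly the property CHECK_INSTANCE_TYPE verifies above for
    // the real JSReceiver ranges.
    static_assert(!IsBC(A_TYPE) && IsBC(B_TYPE) && IsBC(C_TYPE) &&
                      !IsBC(D_TYPE),
                  "range test matches membership");
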
@@ -424,8 +237,8 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
V(FreeSpace, FREE_SPACE_TYPE) \
V(GlobalDictionary, GLOBAL_DICTIONARY_TYPE) \
V(HeapNumber, HEAP_NUMBER_TYPE) \
- V(JSArgumentsObject, JS_ARGUMENTS_TYPE) \
- V(JSArgumentsObjectWithLength, JS_ARGUMENTS_TYPE) \
+ V(JSArgumentsObject, JS_ARGUMENTS_OBJECT_TYPE) \
+ V(JSArgumentsObjectWithLength, JS_ARGUMENTS_OBJECT_TYPE) \
V(JSArray, JS_ARRAY_TYPE) \
V(JSArrayBuffer, JS_ARRAY_BUFFER_TYPE) \
V(JSArrayIterator, JS_ARRAY_ITERATOR_TYPE) \
@@ -449,9 +262,10 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
V(JSPrimitiveWrapper, JS_PRIMITIVE_WRAPPER_TYPE) \
V(JSPromise, JS_PROMISE_TYPE) \
V(JSProxy, JS_PROXY_TYPE) \
- V(JSRegExp, JS_REGEXP_TYPE) \
+ V(JSRegExp, JS_REG_EXP_TYPE) \
V(JSRegExpResult, JS_ARRAY_TYPE) \
- V(JSRegExpStringIterator, JS_REGEXP_STRING_ITERATOR_TYPE) \
+ V(JSRegExpResultIndices, JS_ARRAY_TYPE) \
+ V(JSRegExpStringIterator, JS_REG_EXP_STRING_ITERATOR_TYPE) \
V(JSSet, JS_SET_TYPE) \
V(JSStringIterator, JS_STRING_ITERATOR_TYPE) \
V(JSTypedArray, JS_TYPED_ARRAY_TYPE) \
@@ -487,28 +301,28 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
V(UncompiledDataWithoutPreparseData, \
UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_TYPE) \
V(UncompiledDataWithPreparseData, UNCOMPILED_DATA_WITH_PREPARSE_DATA_TYPE) \
- V(WasmExceptionObject, WASM_EXCEPTION_TYPE) \
- V(WasmGlobalObject, WASM_GLOBAL_TYPE) \
- V(WasmInstanceObject, WASM_INSTANCE_TYPE) \
- V(WasmMemoryObject, WASM_MEMORY_TYPE) \
- V(WasmModuleObject, WASM_MODULE_TYPE) \
- V(WasmTableObject, WASM_TABLE_TYPE) \
+ V(WasmExceptionObject, WASM_EXCEPTION_OBJECT_TYPE) \
+ V(WasmGlobalObject, WASM_GLOBAL_OBJECT_TYPE) \
+ V(WasmInstanceObject, WASM_INSTANCE_OBJECT_TYPE) \
+ V(WasmMemoryObject, WASM_MEMORY_OBJECT_TYPE) \
+ V(WasmModuleObject, WASM_MODULE_OBJECT_TYPE) \
+ V(WasmTableObject, WASM_TABLE_OBJECT_TYPE) \
V(WeakArrayList, WEAK_ARRAY_LIST_TYPE) \
V(WeakCell, WEAK_CELL_TYPE)
#ifdef V8_INTL_SUPPORT
-#define INSTANCE_TYPE_CHECKERS_SINGLE(V) \
- INSTANCE_TYPE_CHECKERS_SINGLE_BASE(V) \
- V(JSV8BreakIterator, JS_INTL_V8_BREAK_ITERATOR_TYPE) \
- V(JSCollator, JS_INTL_COLLATOR_TYPE) \
- V(JSDateTimeFormat, JS_INTL_DATE_TIME_FORMAT_TYPE) \
- V(JSListFormat, JS_INTL_LIST_FORMAT_TYPE) \
- V(JSLocale, JS_INTL_LOCALE_TYPE) \
- V(JSNumberFormat, JS_INTL_NUMBER_FORMAT_TYPE) \
- V(JSPluralRules, JS_INTL_PLURAL_RULES_TYPE) \
- V(JSRelativeTimeFormat, JS_INTL_RELATIVE_TIME_FORMAT_TYPE) \
- V(JSSegmentIterator, JS_INTL_SEGMENT_ITERATOR_TYPE) \
- V(JSSegmenter, JS_INTL_SEGMENTER_TYPE)
+#define INSTANCE_TYPE_CHECKERS_SINGLE(V) \
+ INSTANCE_TYPE_CHECKERS_SINGLE_BASE(V) \
+ V(JSV8BreakIterator, JS_V8_BREAK_ITERATOR_TYPE) \
+ V(JSCollator, JS_COLLATOR_TYPE) \
+ V(JSDateTimeFormat, JS_DATE_TIME_FORMAT_TYPE) \
+ V(JSListFormat, JS_LIST_FORMAT_TYPE) \
+ V(JSLocale, JS_LOCALE_TYPE) \
+ V(JSNumberFormat, JS_NUMBER_FORMAT_TYPE) \
+ V(JSPluralRules, JS_PLURAL_RULES_TYPE) \
+ V(JSRelativeTimeFormat, JS_RELATIVE_TIME_FORMAT_TYPE) \
+ V(JSSegmentIterator, JS_SEGMENT_ITERATOR_TYPE) \
+ V(JSSegmenter, JS_SEGMENTER_TYPE)
#else
@@ -516,16 +330,23 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
#endif // V8_INTL_SUPPORT
-#define INSTANCE_TYPE_CHECKERS_RANGE(V) \
- V(Context, FIRST_CONTEXT_TYPE, LAST_CONTEXT_TYPE) \
- V(FixedArray, FIRST_FIXED_ARRAY_TYPE, LAST_FIXED_ARRAY_TYPE) \
- V(HashTable, FIRST_HASH_TABLE_TYPE, LAST_HASH_TABLE_TYPE) \
- V(JSMapIterator, FIRST_MAP_ITERATOR_TYPE, LAST_MAP_ITERATOR_TYPE) \
- V(JSSetIterator, FIRST_SET_ITERATOR_TYPE, LAST_SET_ITERATOR_TYPE) \
- V(Microtask, FIRST_MICROTASK_TYPE, LAST_MICROTASK_TYPE) \
- V(Module, FIRST_MODULE_TYPE, LAST_MODULE_TYPE) \
- V(Name, FIRST_NAME_TYPE, LAST_NAME_TYPE) \
- V(String, FIRST_STRING_TYPE, LAST_STRING_TYPE) \
+#define INSTANCE_TYPE_CHECKERS_RANGE(V) \
+ V(Context, FIRST_CONTEXT_TYPE, LAST_CONTEXT_TYPE) \
+ V(FixedArray, FIRST_FIXED_ARRAY_TYPE, LAST_FIXED_ARRAY_TYPE) \
+ V(HashTable, FIRST_HASH_TABLE_TYPE, LAST_HASH_TABLE_TYPE) \
+ V(JSCustomElementsObject, FIRST_JS_CUSTOM_ELEMENTS_OBJECT_TYPE, \
+ LAST_JS_CUSTOM_ELEMENTS_OBJECT_TYPE) \
+ V(JSFunctionOrBoundFunction, FIRST_FUNCTION_TYPE, LAST_FUNCTION_TYPE) \
+ V(JSMapIterator, FIRST_JS_MAP_ITERATOR_TYPE, LAST_JS_MAP_ITERATOR_TYPE) \
+ V(JSSetIterator, FIRST_JS_SET_ITERATOR_TYPE, LAST_JS_SET_ITERATOR_TYPE) \
+ V(JSSpecialObject, FIRST_JS_SPECIAL_OBJECT_TYPE, \
+ LAST_JS_SPECIAL_OBJECT_TYPE) \
+ V(Microtask, FIRST_MICROTASK_TYPE, LAST_MICROTASK_TYPE) \
+ V(Module, FIRST_MODULE_TYPE, LAST_MODULE_TYPE) \
+ V(Name, FIRST_NAME_TYPE, LAST_NAME_TYPE) \
+ V(PrimitiveHeapObject, FIRST_PRIMITIVE_HEAP_OBJECT_TYPE, \
+ LAST_PRIMITIVE_HEAP_OBJECT_TYPE) \
+ V(String, FIRST_STRING_TYPE, LAST_STRING_TYPE) \
V(WeakFixedArray, FIRST_WEAK_FIXED_ARRAY_TYPE, LAST_WEAK_FIXED_ARRAY_TYPE)
#define INSTANCE_TYPE_CHECKERS_CUSTOM(V) \
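
Each V(Name, FIRST, LAST) row in the checker lists above is expanded by whatever macro the including code supplies, so a single list can generate predicates, printers, and similar artifacts. A compact sketch of how a range list of this shape is typically consumed (all names hypothetical, not the generated V8 code):

    #include <cstdint>

    enum SketchInstanceType : uint16_t { OTHER_TYPE, W_FIRST_TYPE, W_LAST_TYPE };

    // A one-row range list in the same shape as INSTANCE_TYPE_CHECKERS_RANGE:
    #define SKETCH_CHECKERS_RANGE(V) V(Widget, W_FIRST_TYPE, W_LAST_TYPE)

    // A consumer expands every V(...) row into a predicate:
    #define DEFINE_RANGE_CHECKER(Name, first, last)    \
      constexpr bool Is##Name(SketchInstanceType t) {  \
        return t >= first && t <= last;                \
      }
    SKETCH_CHECKERS_RANGE(DEFINE_RANGE_CHECKER)
    #undef DEFINE_RANGE_CHECKER

    static_assert(IsWidget(W_FIRST_TYPE) && !IsWidget(OTHER_TYPE),
                  "one list row generated one checker");
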
diff --git a/deps/v8/src/objects/internal-index.h b/deps/v8/src/objects/internal-index.h
new file mode 100644
index 0000000000..ce7378a901
--- /dev/null
+++ b/deps/v8/src/objects/internal-index.h
@@ -0,0 +1,79 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_INTERNAL_INDEX_H_
+#define V8_OBJECTS_INTERNAL_INDEX_H_
+
+#include <stdint.h>
+
+#include <limits>
+
+#include "src/base/logging.h"
+
+namespace v8 {
+namespace internal {
+
+// Simple wrapper around an entry (which is notably different from "index" for
+// dictionary backing stores). Most code should treat this as an opaque
+// wrapper: get it via GetEntryForIndex, pass it on to consumers.
+class InternalIndex {
+ public:
+ explicit InternalIndex(size_t raw) : entry_(raw) {}
+ static InternalIndex NotFound() { return InternalIndex(kNotFound); }
+
+ InternalIndex adjust_down(size_t subtract) {
+ DCHECK_GE(entry_, subtract);
+ return InternalIndex(entry_ - subtract);
+ }
+ InternalIndex adjust_up(size_t add) {
+ DCHECK_LT(entry_, std::numeric_limits<size_t>::max() - add);
+ return InternalIndex(entry_ + add);
+ }
+
+ bool is_found() const { return entry_ != kNotFound; }
+ bool is_not_found() const { return entry_ == kNotFound; }
+
+ size_t raw_value() const { return entry_; }
+ uint32_t as_uint32() const {
+ DCHECK_LE(entry_, std::numeric_limits<uint32_t>::max());
+ return static_cast<uint32_t>(entry_);
+ }
+ int as_int() const {
+ DCHECK(entry_ >= 0 && entry_ <= std::numeric_limits<int>::max());
+ return static_cast<int>(entry_);
+ }
+
+ bool operator==(const InternalIndex& other) { return entry_ == other.entry_; }
+
+ // Iteration support.
+ InternalIndex operator*() { return *this; }
+ bool operator!=(const InternalIndex& other) { return entry_ != other.entry_; }
+ InternalIndex& operator++() {
+ entry_++;
+ return *this;
+ }
+
+ class Range {
+ public:
+ explicit Range(size_t max) : min_(0), max_(max) {}
+ Range(size_t min, size_t max) : min_(min), max_(max) {}
+
+ InternalIndex begin() { return InternalIndex(min_); }
+ InternalIndex end() { return InternalIndex(max_); }
+
+ private:
+ size_t min_;
+ size_t max_;
+ };
+
+ private:
+ static const size_t kNotFound = std::numeric_limits<size_t>::max();
+
+ size_t entry_;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_OBJECTS_INTERNAL_INDEX_H_
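
Because InternalIndex implements operator*, operator++ and operator!= on itself, InternalIndex::Range can drive a range-based for loop directly. A minimal usage sketch, assuming only the header above:

    #include "src/objects/internal-index.h"

    namespace v8 {
    namespace internal {

    size_t SumOfEntries() {
      DCHECK(InternalIndex::NotFound().is_not_found());
      size_t sum = 0;
      // Visits entries 1, 2 and 3 (the end bound is exclusive).
      for (InternalIndex i : InternalIndex::Range(1, 4)) {
        sum += i.raw_value();
      }
      return sum;  // 6
    }

    }  // namespace internal
    }  // namespace v8
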
diff --git a/deps/v8/src/objects/intl-objects.cc b/deps/v8/src/objects/intl-objects.cc
index dbf212aaf8..a6a2fdd229 100644
--- a/deps/v8/src/objects/intl-objects.cc
+++ b/deps/v8/src/objects/intl-objects.cc
@@ -20,6 +20,7 @@
#include "src/objects/js-collator-inl.h"
#include "src/objects/js-date-time-format-inl.h"
#include "src/objects/js-locale-inl.h"
+#include "src/objects/js-locale.h"
#include "src/objects/js-number-format-inl.h"
#include "src/objects/objects-inl.h"
#include "src/objects/property-descriptor.h"
@@ -32,6 +33,7 @@
#include "unicode/datefmt.h"
#include "unicode/decimfmt.h"
#include "unicode/formattedvalue.h"
+#include "unicode/localebuilder.h"
#include "unicode/locid.h"
#include "unicode/normalizer2.h"
#include "unicode/numberformatter.h"
@@ -177,12 +179,13 @@ const UChar* GetUCharBufferFromFlat(const String::FlatContent& flat,
template <typename T>
MaybeHandle<T> New(Isolate* isolate, Handle<JSFunction> constructor,
- Handle<Object> locales, Handle<Object> options) {
+ Handle<Object> locales, Handle<Object> options,
+ const char* method) {
Handle<Map> map;
ASSIGN_RETURN_ON_EXCEPTION(
isolate, map,
JSFunction::GetDerivedMap(isolate, constructor, constructor), T);
- return T::New(isolate, map, locales, options);
+ return T::New(isolate, map, locales, options, method);
}
} // namespace
@@ -783,6 +786,11 @@ Maybe<std::string> Intl::CanonicalizeLanguageTag(Isolate* isolate,
}
std::string locale(locale_str->ToCString().get());
+ if (!IsStructurallyValidLanguageTag(locale)) {
+ THROW_NEW_ERROR_RETURN_VALUE(
+ isolate, NewRangeError(MessageTemplate::kLocaleBadParameters),
+ Nothing<std::string>());
+ }
return Intl::CanonicalizeLanguageTag(isolate, locale);
}
@@ -995,11 +1003,9 @@ MaybeHandle<String> Intl::StringLocaleConvertCase(Isolate* isolate,
}
}
-MaybeHandle<Object> Intl::StringLocaleCompare(Isolate* isolate,
- Handle<String> string1,
- Handle<String> string2,
- Handle<Object> locales,
- Handle<Object> options) {
+MaybeHandle<Object> Intl::StringLocaleCompare(
+ Isolate* isolate, Handle<String> string1, Handle<String> string2,
+ Handle<Object> locales, Handle<Object> options, const char* method) {
// We only cache the instance when both locales and options are undefined,
// as that is the only case when the specified side-effects of examining
// those arguments are unobservable.
@@ -1025,7 +1031,7 @@ MaybeHandle<Object> Intl::StringLocaleCompare(Isolate* isolate,
Handle<JSCollator> collator;
ASSIGN_RETURN_ON_EXCEPTION(
isolate, collator,
- New<JSCollator>(isolate, constructor, locales, options), Object);
+ New<JSCollator>(isolate, constructor, locales, options, method), Object);
if (can_cache) {
isolate->set_icu_object_in_cache(
Isolate::ICUObjectCacheType::kDefaultCollator,
@@ -1084,15 +1090,11 @@ Handle<Object> Intl::CompareStrings(Isolate* isolate,
MaybeHandle<String> Intl::NumberToLocaleString(Isolate* isolate,
Handle<Object> num,
Handle<Object> locales,
- Handle<Object> options) {
+ Handle<Object> options,
+ const char* method) {
Handle<Object> numeric_obj;
- if (FLAG_harmony_intl_bigint) {
- ASSIGN_RETURN_ON_EXCEPTION(isolate, numeric_obj,
- Object::ToNumeric(isolate, num), String);
- } else {
- ASSIGN_RETURN_ON_EXCEPTION(isolate, numeric_obj,
- Object::ToNumber(isolate, num), String);
- }
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, numeric_obj,
+ Object::ToNumeric(isolate, num), String);
// We only cache the instance when both locales and options are undefined,
// as that is the only case when the specified side-effects of examining
@@ -1119,7 +1121,8 @@ MaybeHandle<String> Intl::NumberToLocaleString(Isolate* isolate,
// 2. Let numberFormat be ? Construct(%NumberFormat%, « locales, options »).
ASSIGN_RETURN_ON_EXCEPTION(
isolate, number_format,
- New<JSNumberFormat>(isolate, constructor, locales, options), String);
+ New<JSNumberFormat>(isolate, constructor, locales, options, method),
+ String);
if (can_cache) {
isolate->set_icu_object_in_cache(
@@ -1203,40 +1206,18 @@ Maybe<Intl::NumberFormatDigitOptions> Intl::SetNumberFormatDigitOptions(
int mxfd = 0;
Handle<Object> mnfd_obj;
Handle<Object> mxfd_obj;
- if (FLAG_harmony_intl_numberformat_unified) {
- // 6. Let mnfd be ? Get(options, "minimumFractionDigits").
- Handle<String> mnfd_str = factory->minimumFractionDigits_string();
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, mnfd_obj, JSReceiver::GetProperty(isolate, options, mnfd_str),
- Nothing<NumberFormatDigitOptions>());
-
- // 8. Let mnfd be ? Get(options, "maximumFractionDigits").
- Handle<String> mxfd_str = factory->maximumFractionDigits_string();
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, mxfd_obj, JSReceiver::GetProperty(isolate, options, mxfd_str),
- Nothing<NumberFormatDigitOptions>());
- } else {
- // 6. Let mnfd be ? GetNumberOption(options, "minimumFractionDigits", 0, 20,
- // mnfdDefault).
- if (!Intl::GetNumberOption(isolate, options,
- factory->minimumFractionDigits_string(), 0, 20,
- mnfd_default)
- .To(&mnfd)) {
- return Nothing<NumberFormatDigitOptions>();
- }
- // 7. Let mxfdActualDefault be max( mnfd, mxfdDefault ).
- int mxfd_actual_default = std::max(mnfd, mxfd_default);
+ // 6. Let mnfd be ? Get(options, "minimumFractionDigits").
+ Handle<String> mnfd_str = factory->minimumFractionDigits_string();
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, mnfd_obj, JSReceiver::GetProperty(isolate, options, mnfd_str),
+ Nothing<NumberFormatDigitOptions>());
- // 8. Let mxfd be ? GetNumberOption(options,
- // "maximumFractionDigits", mnfd, 20, mxfdActualDefault).
- if (!Intl::GetNumberOption(isolate, options,
- factory->maximumFractionDigits_string(), mnfd,
- 20, mxfd_actual_default)
- .To(&mxfd)) {
- return Nothing<NumberFormatDigitOptions>();
- }
- }
+ // 8. Let mxfd be ? Get(options, "maximumFractionDigits").
+ Handle<String> mxfd_str = factory->maximumFractionDigits_string();
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, mxfd_obj, JSReceiver::GetProperty(isolate, options, mxfd_str),
+ Nothing<NumberFormatDigitOptions>());
// 9. Let mnsd be ? Get(options, "minimumSignificantDigits").
Handle<Object> mnsd_obj;
@@ -1285,47 +1266,44 @@ Maybe<Intl::NumberFormatDigitOptions> Intl::SetNumberFormatDigitOptions(
digit_options.minimum_significant_digits = 0;
digit_options.maximum_significant_digits = 0;
- if (FLAG_harmony_intl_numberformat_unified) {
- // 15. Else If mnfd is not undefined or mxfd is not undefined, then
- if (!mnfd_obj->IsUndefined(isolate) || !mxfd_obj->IsUndefined(isolate)) {
- // 15. b. Let mnfd be ? DefaultNumberOption(mnfd, 0, 20, mnfdDefault).
- Handle<String> mnfd_str = factory->minimumFractionDigits_string();
- if (!DefaultNumberOption(isolate, mnfd_obj, 0, 20, mnfd_default,
- mnfd_str)
- .To(&mnfd)) {
- return Nothing<NumberFormatDigitOptions>();
- }
-
- // 15. c. Let mxfdActualDefault be max( mnfd, mxfdDefault ).
- int mxfd_actual_default = std::max(mnfd, mxfd_default);
+ // 15. Else If mnfd is not undefined or mxfd is not undefined, then
+ if (!mnfd_obj->IsUndefined(isolate) || !mxfd_obj->IsUndefined(isolate)) {
+ // 15. b. Let mnfd be ? DefaultNumberOption(mnfd, 0, 20, mnfdDefault).
+ Handle<String> mnfd_str = factory->minimumFractionDigits_string();
+ if (!DefaultNumberOption(isolate, mnfd_obj, 0, 20, mnfd_default, mnfd_str)
+ .To(&mnfd)) {
+ return Nothing<NumberFormatDigitOptions>();
+ }
- // 15. d. Let mxfd be ? DefaultNumberOption(mxfd, mnfd, 20,
- // mxfdActualDefault).
- Handle<String> mxfd_str = factory->maximumFractionDigits_string();
- if (!DefaultNumberOption(isolate, mxfd_obj, mnfd, 20,
- mxfd_actual_default, mxfd_str)
- .To(&mxfd)) {
- return Nothing<NumberFormatDigitOptions>();
- }
- // 15. e. Set intlObj.[[MinimumFractionDigits]] to mnfd.
- digit_options.minimum_fraction_digits = mnfd;
-
- // 15. f. Set intlObj.[[MaximumFractionDigits]] to mxfd.
- digit_options.maximum_fraction_digits = mxfd;
- // Else If intlObj.[[Notation]] is "compact", then
- } else if (notation_is_compact) {
- // a. Set intlObj.[[RoundingType]] to "compact-rounding".
- // Set minimum_significant_digits to -1 to represent roundingtype is
- // "compact-rounding".
- digit_options.minimum_significant_digits = -1;
- // 17. Else,
- } else {
- // 17. b. Set intlObj.[[MinimumFractionDigits]] to mnfdDefault.
- digit_options.minimum_fraction_digits = mnfd_default;
+ // 15. c. Let mxfdActualDefault be max( mnfd, mxfdDefault ).
+ int mxfd_actual_default = std::max(mnfd, mxfd_default);
- // 17. c. Set intlObj.[[MaximumFractionDigits]] to mxfdDefault.
- digit_options.maximum_fraction_digits = mxfd_default;
+ // 15. d. Let mxfd be ? DefaultNumberOption(mxfd, mnfd, 20,
+ // mxfdActualDefault).
+ Handle<String> mxfd_str = factory->maximumFractionDigits_string();
+ if (!DefaultNumberOption(isolate, mxfd_obj, mnfd, 20, mxfd_actual_default,
+ mxfd_str)
+ .To(&mxfd)) {
+ return Nothing<NumberFormatDigitOptions>();
}
+ // 15. e. Set intlObj.[[MinimumFractionDigits]] to mnfd.
+ digit_options.minimum_fraction_digits = mnfd;
+
+ // 15. f. Set intlObj.[[MaximumFractionDigits]] to mxfd.
+ digit_options.maximum_fraction_digits = mxfd;
+ // Else If intlObj.[[Notation]] is "compact", then
+ } else if (notation_is_compact) {
+ // a. Set intlObj.[[RoundingType]] to "compact-rounding".
+    // Set minimum_significant_digits to -1 to represent that the rounding
+    // type is "compact-rounding".
+ digit_options.minimum_significant_digits = -1;
+ // 17. Else,
+ } else {
+ // 17. b. Set intlObj.[[MinimumFractionDigits]] to mnfdDefault.
+ digit_options.minimum_fraction_digits = mnfd_default;
+
+ // 17. c. Set intlObj.[[MaximumFractionDigits]] to mxfdDefault.
+ digit_options.maximum_fraction_digits = mxfd_default;
}
}
return Just(digit_options);
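
A worked instance of steps 15.b-15.d above: with minimumFractionDigits set to 3 in the options and a default maximum of 2, the actual default maximum is first raised to max(3, 2) = 3, so the resolved maximum can never end up below the resolved minimum. The arithmetic, with the spec helpers elided:

    #include <algorithm>

    void DigitOptionsExample() {
      // Hypothetical inputs: options = { minimumFractionDigits: 3 },
      // mxfdDefault = 2, maximumFractionDigits absent (undefined).
      int mnfd = 3;                                 // step 15.b
      int mxfd_actual_default = std::max(mnfd, 2);  // step 15.c -> 3
      int mxfd = mxfd_actual_default;               // step 15.d fallback -> 3
      // Resolved: [[MinimumFractionDigits]] = 3,
      //           [[MaximumFractionDigits]] = 3.
      (void)mxfd;
    }
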
@@ -1605,14 +1583,16 @@ bool IsValidCollation(const icu::Locale& locale, const std::string& value) {
} // namespace
+bool Intl::IsWellFormedCalendar(const std::string& value) {
+ return JSLocale::Is38AlphaNumList(value);
+}
+
bool Intl::IsValidCalendar(const icu::Locale& locale,
const std::string& value) {
return IsValidExtension<icu::Calendar>(locale, "calendar", value);
}
-namespace {
-
-bool IsValidNumberingSystem(const std::string& value) {
+bool Intl::IsValidNumberingSystem(const std::string& value) {
std::set<std::string> invalid_values = {"native", "traditio", "finance"};
if (invalid_values.find(value) != invalid_values.end()) return false;
UErrorCode status = U_ZERO_ERROR;
@@ -1621,11 +1601,19 @@ bool IsValidNumberingSystem(const std::string& value) {
return U_SUCCESS(status) && numbering_system.get() != nullptr;
}
+namespace {
+
+bool IsWellFormedNumberingSystem(const std::string& value) {
+ return JSLocale::Is38AlphaNumList(value);
+}
+
std::map<std::string, std::string> LookupAndValidateUnicodeExtensions(
icu::Locale* icu_locale, const std::set<std::string>& relevant_keys) {
std::map<std::string, std::string> extensions;
UErrorCode status = U_ZERO_ERROR;
+ icu::LocaleBuilder builder;
+ builder.setLocale(*icu_locale).clearExtensions();
std::unique_ptr<icu::StringEnumeration> keywords(
icu_locale->createKeywords(status));
if (U_FAILURE(status)) return extensions;
@@ -1682,20 +1670,19 @@ std::map<std::string, std::string> LookupAndValidateUnicodeExtensions(
std::set<std::string> valid_values = {"upper", "lower", "false"};
is_valid_value = valid_values.find(bcp47_value) != valid_values.end();
} else if (strcmp("nu", bcp47_key) == 0) {
- is_valid_value = IsValidNumberingSystem(bcp47_value);
+ is_valid_value = Intl::IsValidNumberingSystem(bcp47_value);
}
if (is_valid_value) {
extensions.insert(
std::pair<std::string, std::string>(bcp47_key, bcp47_value));
- continue;
+ builder.setUnicodeLocaleKeyword(bcp47_key, bcp47_value);
}
}
- status = U_ZERO_ERROR;
- icu_locale->setUnicodeKeywordValue(
- bcp47_key == nullptr ? keyword : bcp47_key, nullptr, status);
- CHECK(U_SUCCESS(status));
}
+ status = U_ZERO_ERROR;
+ *icu_locale = builder.build(status);
+
return extensions;
}
@@ -2003,7 +1990,7 @@ Maybe<bool> Intl::GetNumberingSystem(Isolate* isolate,
empty_values, method, result);
MAYBE_RETURN(maybe, Nothing<bool>());
if (maybe.FromJust() && *result != nullptr) {
- if (!IsValidNumberingSystem(result->get())) {
+ if (!IsWellFormedNumberingSystem(result->get())) {
THROW_NEW_ERROR_RETURN_VALUE(
isolate,
NewRangeError(
@@ -2120,5 +2107,9 @@ MaybeHandle<String> Intl::FormattedToString(
return Intl::ToString(isolate, result);
}
+bool Intl::IsStructurallyValidLanguageTag(const std::string& tag) {
+ return JSLocale::StartsWithUnicodeLanguageId(tag);
+}
+
} // namespace internal
} // namespace v8
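
The keyword loop above now collects only validated extensions into an icu::LocaleBuilder and rebuilds the locale once at the end, instead of erasing rejected keywords one by one on the live icu::Locale. The core of that pattern, reduced to a sketch (error handling elided, the extension map is assumed to be pre-validated):

    #include <map>
    #include <string>

    #include "unicode/localebuilder.h"
    #include "unicode/locid.h"

    icu::Locale RebuildWithValidKeywords(
        const icu::Locale& locale,
        const std::map<std::string, std::string>& valid_extensions) {
      icu::LocaleBuilder builder;
      // Start from the base locale with every extension stripped.
      builder.setLocale(locale).clearExtensions();
      // Re-add only the keywords that passed validation.
      for (const auto& entry : valid_extensions) {
        builder.setUnicodeLocaleKeyword(entry.first, entry.second);
      }
      UErrorCode status = U_ZERO_ERROR;
      return builder.build(status);
    }
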
diff --git a/deps/v8/src/objects/intl-objects.h b/deps/v8/src/objects/intl-objects.h
index 4d4d3245fd..0c4a77b745 100644
--- a/deps/v8/src/objects/intl-objects.h
+++ b/deps/v8/src/objects/intl-objects.h
@@ -10,6 +10,7 @@
#define V8_OBJECTS_INTL_OBJECTS_H_
#include <map>
+#include <memory>
#include <set>
#include <string>
@@ -164,7 +165,7 @@ class Intl {
V8_WARN_UNUSED_RESULT static MaybeHandle<Object> StringLocaleCompare(
Isolate* isolate, Handle<String> s1, Handle<String> s2,
- Handle<Object> locales, Handle<Object> options);
+ Handle<Object> locales, Handle<Object> options, const char* method);
V8_WARN_UNUSED_RESULT static Handle<Object> CompareStrings(
Isolate* isolate, const icu::Collator& collator, Handle<String> s1,
@@ -173,7 +174,7 @@ class Intl {
// ecma402/#sup-properties-of-the-number-prototype-object
V8_WARN_UNUSED_RESULT static MaybeHandle<String> NumberToLocaleString(
Isolate* isolate, Handle<Object> num, Handle<Object> locales,
- Handle<Object> options);
+ Handle<Object> options, const char* method);
// ecma402/#sec-setnfdigitoptions
struct NumberFormatDigitOptions {
@@ -239,14 +240,14 @@ class Intl {
Handle<JSFunction> constructor, bool has_initialized_slot);
// enum for "caseFirst" option: shared by Intl.Locale and Intl.Collator.
- enum class CaseFirst { kUpper, kLower, kFalse, kUndefined };
+ enum class CaseFirst { kUndefined, kUpper, kLower, kFalse };
// Shared function to read the "caseFirst" option.
V8_WARN_UNUSED_RESULT static Maybe<CaseFirst> GetCaseFirst(
Isolate* isolate, Handle<JSReceiver> options, const char* method);
// enum for "hourCycle" option: shared by Intl.Locale and Intl.DateTimeFormat.
- enum class HourCycle { kH11, kH12, kH23, kH24, kUndefined };
+ enum class HourCycle { kUndefined, kH11, kH12, kH23, kH24 };
static HourCycle ToHourCycle(const std::string& str);
@@ -270,6 +271,12 @@ class Intl {
static bool IsValidCalendar(const icu::Locale& locale,
const std::string& value);
+ // Check the numberingSystem is valid.
+ static bool IsValidNumberingSystem(const std::string& value);
+
+ // Check the calendar is well formed.
+ static bool IsWellFormedCalendar(const std::string& value);
+
struct ResolvedLocale {
std::string locale;
icu::Locale icu_locale;
@@ -336,6 +343,8 @@ class Intl {
static const std::set<std::string>& GetAvailableLocalesForLocale();
static const std::set<std::string>& GetAvailableLocalesForDateFormat();
+
+ static bool IsStructurallyValidLanguageTag(const std::string& tag);
};
} // namespace internal
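
A side effect of moving kUndefined to the front of CaseFirst and HourCycle is that it becomes each enum's zero value, so zero- or value-initialized storage reads back as "undefined". A sketch, assuming the header above (the enum reordering hunk does not state its motivation; this is an observation):

    #include "src/objects/intl-objects.h"

    namespace v8 {
    namespace internal {

    static_assert(static_cast<int>(Intl::CaseFirst::kUndefined) == 0,
                  "kUndefined is now the zero value");
    static_assert(static_cast<int>(Intl::HourCycle::kUndefined) == 0,
                  "kUndefined is now the zero value");

    }  // namespace internal
    }  // namespace v8
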
diff --git a/deps/v8/src/objects/js-array-buffer-inl.h b/deps/v8/src/objects/js-array-buffer-inl.h
index 9151be6da4..4ed347baa8 100644
--- a/deps/v8/src/objects/js-array-buffer-inl.h
+++ b/deps/v8/src/objects/js-array-buffer-inl.h
@@ -48,14 +48,6 @@ size_t JSArrayBuffer::allocation_length() const {
if (backing_store() == nullptr) {
return 0;
}
- // If this buffer is managed by the WasmMemoryTracker
- if (is_wasm_memory()) {
- const auto* data =
- GetIsolate()->wasm_engine()->memory_tracker()->FindAllocationData(
- backing_store());
- DCHECK_NOT_NULL(data);
- return data->allocation_length;
- }
return byte_length();
}
@@ -63,25 +55,9 @@ void* JSArrayBuffer::allocation_base() const {
if (backing_store() == nullptr) {
return nullptr;
}
- // If this buffer is managed by the WasmMemoryTracker
- if (is_wasm_memory()) {
- const auto* data =
- GetIsolate()->wasm_engine()->memory_tracker()->FindAllocationData(
- backing_store());
- DCHECK_NOT_NULL(data);
- return data->allocation_base;
- }
return backing_store();
}
-bool JSArrayBuffer::is_wasm_memory() const {
- return IsWasmMemoryBit::decode(bit_field());
-}
-
-void JSArrayBuffer::set_is_wasm_memory(bool is_wasm_memory) {
- set_bit_field(IsWasmMemoryBit::update(bit_field(), is_wasm_memory));
-}
-
void JSArrayBuffer::clear_padding() {
if (FIELD_SIZE(kOptionalPaddingOffset) != 0) {
DCHECK_EQ(4, FIELD_SIZE(kOptionalPaddingOffset));
@@ -105,6 +81,8 @@ BIT_FIELD_ACCESSORS(JSArrayBuffer, bit_field, is_detachable,
JSArrayBuffer::IsDetachableBit)
BIT_FIELD_ACCESSORS(JSArrayBuffer, bit_field, was_detached,
JSArrayBuffer::WasDetachedBit)
+BIT_FIELD_ACCESSORS(JSArrayBuffer, bit_field, is_asmjs_memory,
+ JSArrayBuffer::IsAsmJsMemoryBit)
BIT_FIELD_ACCESSORS(JSArrayBuffer, bit_field, is_shared,
JSArrayBuffer::IsSharedBit)
@@ -136,31 +114,61 @@ void JSTypedArray::set_length(size_t value) {
WriteField<size_t>(kLengthOffset, value);
}
-void* JSTypedArray::external_pointer() const {
- return reinterpret_cast<void*>(ReadField<Address>(kExternalPointerOffset));
+Address JSTypedArray::external_pointer() const {
+ return ReadField<Address>(kExternalPointerOffset);
+}
+
+void JSTypedArray::set_external_pointer(Address value) {
+ WriteField<Address>(kExternalPointerOffset, value);
}
-void JSTypedArray::set_external_pointer(void* value) {
- WriteField<Address>(kExternalPointerOffset, reinterpret_cast<Address>(value));
+Address JSTypedArray::ExternalPointerCompensationForOnHeapArray(
+ Isolate* isolate) {
+#ifdef V8_COMPRESS_POINTERS
+ return GetIsolateRoot(isolate);
+#else
+ return 0;
+#endif
+}
+
+void JSTypedArray::RemoveExternalPointerCompensationForSerialization() {
+ DCHECK(is_on_heap());
+ Isolate* isolate = GetIsolateForPtrCompr(*this);
+ set_external_pointer(external_pointer() -
+ ExternalPointerCompensationForOnHeapArray(isolate));
}
ACCESSORS(JSTypedArray, base_pointer, Object, kBasePointerOffset)
void* JSTypedArray::DataPtr() {
- return reinterpret_cast<void*>(
- base_pointer().ptr() + reinterpret_cast<intptr_t>(external_pointer()));
+ // Zero-extend Tagged_t to Address according to current compression scheme
+ // so that the addition with |external_pointer| (which already contains
+ // compensated offset value) will decompress the tagged value.
+ // See JSTypedArray::ExternalPointerCompensationForOnHeapArray() for details.
+ return reinterpret_cast<void*>(external_pointer() +
+ static_cast<Tagged_t>(base_pointer().ptr()));
+}
+
+void JSTypedArray::SetOffHeapDataPtr(void* base, Address offset) {
+ set_base_pointer(Smi::kZero, SKIP_WRITE_BARRIER);
+ Address address = reinterpret_cast<Address>(base) + offset;
+ set_external_pointer(address);
+ DCHECK_EQ(address, reinterpret_cast<Address>(DataPtr()));
+}
+
+void JSTypedArray::SetOnHeapDataPtr(HeapObject base, Address offset) {
+ set_base_pointer(base);
+ Isolate* isolate = GetIsolateForPtrCompr(*this);
+ set_external_pointer(offset +
+ ExternalPointerCompensationForOnHeapArray(isolate));
+ DCHECK_EQ(base.ptr() + offset, reinterpret_cast<Address>(DataPtr()));
}
bool JSTypedArray::is_on_heap() const {
DisallowHeapAllocation no_gc;
// Checking that buffer()->backing_store() is not nullptr is not sufficient;
// it will be nullptr when byte_length is 0 as well.
- return base_pointer().ptr() == elements().ptr();
-}
-
-// static
-void* JSTypedArray::ExternalPointerForOnHeapArray() {
- return reinterpret_cast<void*>(ByteArray::kHeaderSize - kHeapObjectTag);
+ return base_pointer() == elements();
}
// static
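
The arithmetic behind SetOnHeapDataPtr() and DataPtr() above can be traced with concrete numbers: under pointer compression, external_pointer holds the isolate root plus the in-object offset, and adding the zero-extended Tagged_t bits of base_pointer decompresses the base and applies the offset in a single addition. A worked sketch, with all addresses made up:

    #include <cstdint>

    void CompensationArithmetic() {
      // A 4GB-aligned isolate root, and an on-heap array whose compressed
      // tagged pointer bits are 0x1235 (cage offset 0x1234 plus the
      // heap-object tag 1). All numbers are for illustration only.
      const uintptr_t kIsolateRoot = uintptr_t{1} << 32;
      const uint32_t kCompressedBase = 0x1235;
      const uintptr_t kTag = 1;
      const uintptr_t kHeaderSize = 8;  // stand-in for ByteArray::kHeaderSize

      // SetOnHeapDataPtr stores offset + compensation; the offset passed in
      // already subtracts the tag (header size minus tag):
      uintptr_t external_pointer = (kHeaderSize - kTag) + kIsolateRoot;

      // DataPtr(): one addition decompresses the base and applies the offset.
      uintptr_t data = external_pointer + kCompressedBase;

      // Equivalent to decompressing first, then adding the payload offset:
      uintptr_t base = kIsolateRoot + (kCompressedBase - kTag);
      (void)data;
      (void)base;  // data == base + kHeaderSize
    }
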
diff --git a/deps/v8/src/objects/js-array-buffer.cc b/deps/v8/src/objects/js-array-buffer.cc
index a506920f95..d3f5a0a952 100644
--- a/deps/v8/src/objects/js-array-buffer.cc
+++ b/deps/v8/src/objects/js-array-buffer.cc
@@ -5,6 +5,7 @@
#include "src/objects/js-array-buffer.h"
#include "src/objects/js-array-buffer-inl.h"
+#include "src/execution/protectors-inl.h"
#include "src/logging/counters.h"
#include "src/objects/property-descriptor.h"
@@ -31,167 +32,105 @@ bool CanonicalNumericIndexString(Isolate* isolate, Handle<Object> s,
*index = result;
return true;
}
-
-inline int ConvertToMb(size_t size) {
- return static_cast<int>(size / static_cast<size_t>(MB));
-}
-
} // anonymous namespace
-void JSArrayBuffer::Detach() {
- CHECK(is_detachable());
- CHECK(!was_detached());
- CHECK(is_external());
- set_backing_store(nullptr);
- set_byte_length(0);
- set_was_detached(true);
- set_is_detachable(false);
- // Invalidate the detaching protector.
- Isolate* const isolate = GetIsolate();
- if (isolate->IsArrayBufferDetachingIntact()) {
- isolate->InvalidateArrayBufferDetachingProtector();
+void JSArrayBuffer::Setup(SharedFlag shared,
+ std::shared_ptr<BackingStore> backing_store) {
+ clear_padding();
+ set_bit_field(0);
+ set_is_shared(shared == SharedFlag::kShared);
+ set_is_detachable(shared != SharedFlag::kShared);
+ for (int i = 0; i < v8::ArrayBuffer::kEmbedderFieldCount; i++) {
+ SetEmbedderField(i, Smi::kZero);
+ }
+ if (!backing_store) {
+ set_backing_store(nullptr);
+ set_byte_length(0);
+ } else {
+ Attach(std::move(backing_store));
}
}
-void JSArrayBuffer::FreeBackingStoreFromMainThread() {
- if (allocation_base() == nullptr) {
- return;
- }
- FreeBackingStore(GetIsolate(), {allocation_base(), allocation_length(),
- backing_store(), is_wasm_memory()});
- // Zero out the backing store and allocation base to avoid dangling
- // pointers.
- set_backing_store(nullptr);
+void JSArrayBuffer::Attach(std::shared_ptr<BackingStore> backing_store) {
+ DCHECK_NOT_NULL(backing_store);
+ DCHECK_EQ(is_shared(), backing_store->is_shared());
+ set_backing_store(backing_store->buffer_start());
+ set_byte_length(backing_store->byte_length());
+ if (backing_store->is_wasm_memory()) set_is_detachable(false);
+ if (!backing_store->free_on_destruct()) set_is_external(true);
+ GetIsolate()->heap()->RegisterBackingStore(*this, std::move(backing_store));
}
-// static
-void JSArrayBuffer::FreeBackingStore(Isolate* isolate, Allocation allocation) {
- if (allocation.is_wasm_memory) {
- wasm::WasmMemoryTracker* memory_tracker =
- isolate->wasm_engine()->memory_tracker();
- memory_tracker->FreeWasmMemory(isolate, allocation.backing_store);
- } else {
- isolate->array_buffer_allocator()->Free(allocation.allocation_base,
- allocation.length);
+void JSArrayBuffer::Detach(bool force_for_wasm_memory) {
+ if (was_detached()) return;
+
+ if (force_for_wasm_memory) {
+ // Skip the is_detachable() check.
+ } else if (!is_detachable()) {
+ // Not detachable, do nothing.
+ return;
}
-}
-void JSArrayBuffer::Setup(Handle<JSArrayBuffer> array_buffer, Isolate* isolate,
- bool is_external, void* data, size_t byte_length,
- SharedFlag shared_flag, bool is_wasm_memory) {
- DCHECK_EQ(array_buffer->GetEmbedderFieldCount(),
- v8::ArrayBuffer::kEmbedderFieldCount);
- DCHECK_LE(byte_length, JSArrayBuffer::kMaxByteLength);
- for (int i = 0; i < v8::ArrayBuffer::kEmbedderFieldCount; i++) {
- array_buffer->SetEmbedderField(i, Smi::kZero);
+ Isolate* const isolate = GetIsolate();
+ if (backing_store()) {
+ auto backing_store = isolate->heap()->UnregisterBackingStore(*this);
+ CHECK_IMPLIES(force_for_wasm_memory, backing_store->is_wasm_memory());
}
- array_buffer->set_byte_length(byte_length);
- array_buffer->set_bit_field(0);
- array_buffer->clear_padding();
- array_buffer->set_is_external(is_external);
- array_buffer->set_is_detachable(shared_flag == SharedFlag::kNotShared);
- array_buffer->set_is_shared(shared_flag == SharedFlag::kShared);
- array_buffer->set_is_wasm_memory(is_wasm_memory);
- // Initialize backing store at last to avoid handling of |JSArrayBuffers| that
- // are currently being constructed in the |ArrayBufferTracker|. The
- // registration method below handles the case of registering a buffer that has
- // already been promoted.
- array_buffer->set_backing_store(data);
- if (data && !is_external) {
- isolate->heap()->RegisterNewArrayBuffer(*array_buffer);
+ if (Protectors::IsArrayBufferDetachingIntact(isolate)) {
+ Protectors::InvalidateArrayBufferDetaching(isolate);
}
-}
-void JSArrayBuffer::SetupAsEmpty(Handle<JSArrayBuffer> array_buffer,
- Isolate* isolate) {
- Setup(array_buffer, isolate, false, nullptr, 0, SharedFlag::kNotShared);
+ DCHECK(!is_shared());
+ DCHECK(!is_asmjs_memory());
+ set_backing_store(nullptr);
+ set_byte_length(0);
+ set_was_detached(true);
}
-bool JSArrayBuffer::SetupAllocatingData(Handle<JSArrayBuffer> array_buffer,
- Isolate* isolate,
- size_t allocated_length,
- bool initialize,
- SharedFlag shared_flag) {
- void* data;
- CHECK_NOT_NULL(isolate->array_buffer_allocator());
- if (allocated_length != 0) {
- if (allocated_length >= MB)
- isolate->counters()->array_buffer_big_allocations()->AddSample(
- ConvertToMb(allocated_length));
- if (shared_flag == SharedFlag::kShared)
- isolate->counters()->shared_array_allocations()->AddSample(
- ConvertToMb(allocated_length));
- if (initialize) {
- data = isolate->array_buffer_allocator()->Allocate(allocated_length);
- } else {
- data = isolate->array_buffer_allocator()->AllocateUninitialized(
- allocated_length);
- }
- if (data == nullptr) {
- isolate->counters()->array_buffer_new_size_failures()->AddSample(
- ConvertToMb(allocated_length));
- SetupAsEmpty(array_buffer, isolate);
- return false;
- }
- } else {
- data = nullptr;
- }
-
- const bool is_external = false;
- JSArrayBuffer::Setup(array_buffer, isolate, is_external, data,
- allocated_length, shared_flag);
- return true;
+std::shared_ptr<BackingStore> JSArrayBuffer::GetBackingStore() {
+ return GetIsolate()->heap()->LookupBackingStore(*this);
}
-Handle<JSArrayBuffer> JSTypedArray::MaterializeArrayBuffer(
- Handle<JSTypedArray> typed_array) {
- DCHECK(typed_array->is_on_heap());
+Handle<JSArrayBuffer> JSTypedArray::GetBuffer() {
+ Isolate* isolate = GetIsolate();
+ Handle<JSTypedArray> self(*this, isolate);
+ DCHECK(IsTypedArrayElementsKind(self->GetElementsKind()));
+
+ Handle<JSArrayBuffer> array_buffer(JSArrayBuffer::cast(self->buffer()),
+ isolate);
+ if (!is_on_heap()) {
+ // Already is off heap, so return the existing buffer.
+ return array_buffer;
+ }
- Isolate* isolate = typed_array->GetIsolate();
+ // The existing array buffer should be empty.
+ DCHECK_NULL(array_buffer->backing_store());
- DCHECK(IsTypedArrayElementsKind(typed_array->GetElementsKind()));
+ // Allocate a new backing store and attach it to the existing array buffer.
+ size_t byte_length = self->byte_length();
+ auto backing_store =
+ BackingStore::Allocate(isolate, byte_length, SharedFlag::kNotShared,
+ InitializedFlag::kUninitialized);
- Handle<JSArrayBuffer> buffer(JSArrayBuffer::cast(typed_array->buffer()),
- isolate);
- // This code does not know how to materialize from wasm buffers.
- DCHECK(!buffer->is_wasm_memory());
+ if (!backing_store) {
+ isolate->heap()->FatalProcessOutOfMemory("JSTypedArray::GetBuffer");
+ }
- void* backing_store =
- isolate->array_buffer_allocator()->AllocateUninitialized(
- typed_array->byte_length());
- if (backing_store == nullptr) {
- isolate->heap()->FatalProcessOutOfMemory(
- "JSTypedArray::MaterializeArrayBuffer");
+ // Copy the elements into the backing store of the array buffer.
+ if (byte_length > 0) {
+ memcpy(backing_store->buffer_start(), self->DataPtr(), byte_length);
}
- buffer->set_is_external(false);
- DCHECK_EQ(buffer->byte_length(), typed_array->byte_length());
- // Initialize backing store at last to avoid handling of |JSArrayBuffers| that
- // are currently being constructed in the |ArrayBufferTracker|. The
- // registration method below handles the case of registering a buffer that has
- // already been promoted.
- buffer->set_backing_store(backing_store);
- // RegisterNewArrayBuffer expects a valid length for adjusting counters.
- isolate->heap()->RegisterNewArrayBuffer(*buffer);
- memcpy(buffer->backing_store(), typed_array->DataPtr(),
- typed_array->byte_length());
- typed_array->set_elements(ReadOnlyRoots(isolate).empty_byte_array());
- typed_array->set_external_pointer(backing_store);
- typed_array->set_base_pointer(Smi::kZero);
- DCHECK(!typed_array->is_on_heap());
+ // Attach the backing store to the array buffer.
+ array_buffer->Setup(SharedFlag::kNotShared, std::move(backing_store));
- return buffer;
-}
+ // Clear the elements of the typed array.
+ self->set_elements(ReadOnlyRoots(isolate).empty_byte_array());
+ self->SetOffHeapDataPtr(array_buffer->backing_store(), 0);
+ DCHECK(!self->is_on_heap());
-Handle<JSArrayBuffer> JSTypedArray::GetBuffer() {
- if (!is_on_heap()) {
- Handle<JSArrayBuffer> array_buffer(JSArrayBuffer::cast(buffer()),
- GetIsolate());
- return array_buffer;
- }
- Handle<JSTypedArray> self(*this, GetIsolate());
- return MaterializeArrayBuffer(self);
+ return array_buffer;
}
// ES#sec-integer-indexed-exotic-objects-defineownproperty-p-desc
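
Taken together, Setup(), Attach() and Detach() above give JSArrayBuffer a compact lifecycle against the heap's backing-store registry. A driver-style sketch of that lifecycle; the buffer and isolate are assumed to come from factory code outside this diff, and InitializedFlag::kZeroInitialized is assumed alongside the kUninitialized flag used above:

    void BackingStoreLifecycle(Isolate* isolate, Handle<JSArrayBuffer> buffer) {
      auto store = BackingStore::Allocate(isolate, /*byte_length=*/64,
                                          SharedFlag::kNotShared,
                                          InitializedFlag::kZeroInitialized);
      // Setup() wires the store in and registers it with the heap.
      buffer->Setup(SharedFlag::kNotShared, std::move(store));
      DCHECK_NOT_NULL(buffer->backing_store());
      DCHECK_EQ(size_t{64}, buffer->byte_length());

      // Detach() unregisters the store and zeroes pointer and length.
      buffer->Detach();
      DCHECK(buffer->was_detached());
      DCHECK_NULL(buffer->backing_store());
    }
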
diff --git a/deps/v8/src/objects/js-array-buffer.h b/deps/v8/src/objects/js-array-buffer.h
index 7bf2e1ae94..71adb42ae8 100644
--- a/deps/v8/src/objects/js-array-buffer.h
+++ b/deps/v8/src/objects/js-array-buffer.h
@@ -5,6 +5,7 @@
#ifndef V8_OBJECTS_JS_ARRAY_BUFFER_H_
#define V8_OBJECTS_JS_ARRAY_BUFFER_H_
+#include "src/objects/backing-store.h"
#include "src/objects/js-objects.h"
// Has to be the last include (doesn't have include guards):
@@ -13,9 +14,6 @@
namespace v8 {
namespace internal {
-// Whether a JSArrayBuffer is a SharedArrayBuffer or not.
-enum class SharedFlag : uint32_t { kNotShared, kShared };
-
class JSArrayBuffer : public JSObject {
public:
// The maximum length for JSArrayBuffer's supported by V8.
@@ -51,8 +49,8 @@ class JSArrayBuffer : public JSObject {
V(IsExternalBit, bool, 1, _) \
V(IsDetachableBit, bool, 1, _) \
V(WasDetachedBit, bool, 1, _) \
- V(IsSharedBit, bool, 1, _) \
- V(IsWasmMemoryBit, bool, 1, _)
+ V(IsAsmJsMemoryBit, bool, 1, _) \
+ V(IsSharedBit, bool, 1, _)
DEFINE_BIT_FIELDS(JS_ARRAY_BUFFER_BIT_FIELD_FIELDS)
#undef JS_ARRAY_BUFFER_BIT_FIELD_FIELDS
@@ -61,57 +59,45 @@ class JSArrayBuffer : public JSObject {
// memory block once all ArrayBuffers referencing it are collected by the GC.
DECL_BOOLEAN_ACCESSORS(is_external)
- // [is_detachable]: false indicates that this buffer cannot be detached.
+ // [is_detachable]: false => this buffer cannot be detached.
DECL_BOOLEAN_ACCESSORS(is_detachable)
- // [was_detached]: true if the buffer was previously detached.
+ // [was_detached]: true => the buffer was previously detached.
DECL_BOOLEAN_ACCESSORS(was_detached)
+ // [is_asmjs_memory]: true => this buffer was once used as asm.js memory.
+ DECL_BOOLEAN_ACCESSORS(is_asmjs_memory)
+
// [is_shared]: tells whether this is an ArrayBuffer or a SharedArrayBuffer.
DECL_BOOLEAN_ACCESSORS(is_shared)
- // [is_wasm_memory]: whether the buffer is tracked by the WasmMemoryTracker.
- DECL_BOOLEAN_ACCESSORS(is_wasm_memory)
-
DECL_CAST(JSArrayBuffer)
- void Detach();
-
- struct Allocation {
- Allocation(void* allocation_base, size_t length, void* backing_store,
- bool is_wasm_memory)
- : allocation_base(allocation_base),
- length(length),
- backing_store(backing_store),
- is_wasm_memory(is_wasm_memory) {}
-
- void* allocation_base;
- size_t length;
- void* backing_store;
- bool is_wasm_memory;
- };
-
- V8_EXPORT_PRIVATE void FreeBackingStoreFromMainThread();
- V8_EXPORT_PRIVATE static void FreeBackingStore(Isolate* isolate,
- Allocation allocation);
-
- V8_EXPORT_PRIVATE static void Setup(
- Handle<JSArrayBuffer> array_buffer, Isolate* isolate, bool is_external,
- void* data, size_t allocated_length,
- SharedFlag shared_flag = SharedFlag::kNotShared,
- bool is_wasm_memory = false);
-
- // Initialize the object as empty one to avoid confusing heap verifier if
- // the failure happened in the middle of JSArrayBuffer construction.
- V8_EXPORT_PRIVATE static void SetupAsEmpty(Handle<JSArrayBuffer> array_buffer,
- Isolate* isolate);
-
- // Returns false if array buffer contents could not be allocated.
- // In this case, |array_buffer| will not be set up.
- V8_EXPORT_PRIVATE static bool SetupAllocatingData(
- Handle<JSArrayBuffer> array_buffer, Isolate* isolate,
- size_t allocated_length, bool initialize = true,
- SharedFlag shared_flag = SharedFlag::kNotShared) V8_WARN_UNUSED_RESULT;
+ // Initializes the fields of the ArrayBuffer. The provided backing_store can
+ // be nullptr. If it is not nullptr, then the function registers it with
+ // src/heap/array-buffer-tracker.h.
+ V8_EXPORT_PRIVATE void Setup(SharedFlag shared,
+ std::shared_ptr<BackingStore> backing_store);
+
+ // Attaches the backing store to an already constructed empty ArrayBuffer.
+ // This is intended to be used only in ArrayBufferConstructor builtin.
+ V8_EXPORT_PRIVATE void Attach(std::shared_ptr<BackingStore> backing_store);
+ // Detach the backing store from this array buffer if it is detachable.
+ // This sets the internal pointer and length to 0 and unregisters the backing
+ // store from the array buffer tracker. If the array buffer is not detachable,
+ // this is a nop.
+ //
+ // Array buffers that wrap wasm memory objects are special in that they
+ // are normally not detachable, but can become detached as a side effect
+ // of growing the underlying memory object. The {force_for_wasm_memory} flag
+ // is used by the implementation of Wasm memory growth in order to bypass the
+ // non-detachable check.
+ V8_EXPORT_PRIVATE void Detach(bool force_for_wasm_memory = false);
+
+  // Get a reference to the backing store of this array buffer, if there is a
+ // backing store. Returns nullptr if there is no backing store (e.g. detached
+ // or a zero-length array buffer).
+ std::shared_ptr<BackingStore> GetBackingStore();
// Dispatched behavior.
DECL_PRINTER(JSArrayBuffer)
@@ -187,12 +173,6 @@ class JSTypedArray : public JSArrayBufferView {
// [length]: length of typed array in elements.
DECL_PRIMITIVE_ACCESSORS(length, size_t)
- // [external_pointer]: TODO(v8:4153)
- DECL_PRIMITIVE_ACCESSORS(external_pointer, void*)
-
- // [base_pointer]: TODO(v8:4153)
- DECL_ACCESSORS(base_pointer, Object)
-
// ES6 9.4.5.3
V8_WARN_UNUSED_RESULT static Maybe<bool> DefineOwnProperty(
Isolate* isolate, Handle<JSTypedArray> o, Handle<Object> key,
@@ -208,10 +188,26 @@ class JSTypedArray : public JSArrayBufferView {
// Use with care: returns raw pointer into heap.
inline void* DataPtr();
+ inline void SetOffHeapDataPtr(void* base, Address offset);
+ inline void SetOnHeapDataPtr(HeapObject base, Address offset);
+
// Whether the buffer's backing store is on-heap or off-heap.
inline bool is_on_heap() const;
- static inline void* ExternalPointerForOnHeapArray();
+ // Note: this is a pointer compression specific optimization.
+ // Normally, on-heap typed arrays contain HeapObject value in |base_pointer|
+ // field and an offset in |external_pointer|.
+ // When pointer compression is enabled we want to combine decompression with
+ // the offset addition. In order to do that we add an isolate root to the
+  // |external_pointer| value, and therefore the data pointer computation
+  // is a simple addition of a (potentially sign-extended) |base_pointer|
+  // loaded as a Tagged_t value and an |external_pointer| value.
+ // For full-pointer mode the compensation value is zero.
+ static inline Address ExternalPointerCompensationForOnHeapArray(
+ Isolate* isolate);
+
+ // Subtracts external pointer compensation from the external pointer value.
+ inline void RemoveExternalPointerCompensationForSerialization();
static inline MaybeHandle<JSTypedArray> Validate(Isolate* isolate,
Handle<Object> receiver,
@@ -250,8 +246,13 @@ class JSTypedArray : public JSArrayBufferView {
#endif
private:
- static Handle<JSArrayBuffer> MaterializeArrayBuffer(
- Handle<JSTypedArray> typed_array);
+ friend class Deserializer;
+
+ // [base_pointer]: TODO(v8:4153)
+ DECL_ACCESSORS(base_pointer, Object)
+
+ // [external_pointer]: TODO(v8:4153)
+ DECL_PRIMITIVE_ACCESSORS(external_pointer, Address)
OBJECT_CONSTRUCTORS(JSTypedArray, JSArrayBufferView);
};
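
With base_pointer and external_pointer now private and Deserializer the only befriended outsider, on-heap data pointers are managed exclusively through the helpers above. A hypothetical serializer-side sketch of why the compensation must be removable (the real serializer code is not part of this diff):

    // The isolate root baked into external_pointer is only valid in the
    // current process, so it is stripped before snapshotting; the
    // deserializer re-adds the new isolate's root on the other side.
    void PrepareTypedArrayForSnapshot(JSTypedArray array) {
      if (array.is_on_heap()) {
        array.RemoveExternalPointerCompensationForSerialization();
      }
    }
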
diff --git a/deps/v8/src/objects/js-array.h b/deps/v8/src/objects/js-array.h
index eb581c104e..c990151b27 100644
--- a/deps/v8/src/objects/js-array.h
+++ b/deps/v8/src/objects/js-array.h
@@ -108,7 +108,7 @@ class JSArray : public JSObject {
static const int kPreallocatedArrayElements = 4;
DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
- TORQUE_GENERATED_JSARRAY_FIELDS)
+ TORQUE_GENERATED_JS_ARRAY_FIELDS)
static const int kLengthDescriptorIndex = 0;
@@ -178,7 +178,7 @@ class JSArrayIterator : public JSObject {
inline void set_kind(IterationKind kind);
DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
- TORQUE_GENERATED_JSARRAY_ITERATOR_FIELDS)
+ TORQUE_GENERATED_JS_ARRAY_ITERATOR_FIELDS)
private:
DECL_INT_ACCESSORS(raw_kind)
diff --git a/deps/v8/src/objects/js-break-iterator.cc b/deps/v8/src/objects/js-break-iterator.cc
index 31ed3f8611..1a9d096411 100644
--- a/deps/v8/src/objects/js-break-iterator.cc
+++ b/deps/v8/src/objects/js-break-iterator.cc
@@ -17,7 +17,7 @@ namespace internal {
MaybeHandle<JSV8BreakIterator> JSV8BreakIterator::New(
Isolate* isolate, Handle<Map> map, Handle<Object> locales,
- Handle<Object> options_obj) {
+ Handle<Object> options_obj, const char* service) {
Factory* factory = isolate->factory();
// 1. Let requestedLocales be ? CanonicalizeLocaleList(locales).
@@ -31,15 +31,14 @@ MaybeHandle<JSV8BreakIterator> JSV8BreakIterator::New(
if (options_obj->IsUndefined(isolate)) {
options = factory->NewJSObjectWithNullProto();
} else {
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, options,
- Object::ToObject(isolate, options_obj, "Intl.JSV8BreakIterator"),
- JSV8BreakIterator);
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, options,
+ Object::ToObject(isolate, options_obj, service),
+ JSV8BreakIterator);
}
// Extract locale string
Maybe<Intl::MatcherOption> maybe_locale_matcher =
- Intl::GetLocaleMatcher(isolate, options, "Intl.JSV8BreakIterator");
+ Intl::GetLocaleMatcher(isolate, options, service);
MAYBE_RETURN(maybe_locale_matcher, MaybeHandle<JSV8BreakIterator>());
Intl::MatcherOption matcher = maybe_locale_matcher.FromJust();
@@ -49,7 +48,7 @@ MaybeHandle<JSV8BreakIterator> JSV8BreakIterator::New(
// Extract type from options
Maybe<Type> maybe_type = Intl::GetStringOption<Type>(
- isolate, options, "type", "Intl.v8BreakIterator",
+ isolate, options, "type", service,
{"word", "character", "sentence", "line"},
{Type::WORD, Type::CHARACTER, Type::SENTENCE, Type::LINE}, Type::WORD);
MAYBE_RETURN(maybe_type, MaybeHandle<JSV8BreakIterator>());
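
A recurring change in this and the following Intl files threads a caller-supplied service string into New(), so error messages from failed ToObject or GetOption calls report the actual entry point instead of a hard-coded name. A call-shape sketch; the second service name is illustrative, not a real V8 entry point:

    MaybeHandle<JSV8BreakIterator> Make(Isolate* isolate, Handle<Map> map,
                                        Handle<Object> locales,
                                        Handle<Object> options, bool direct) {
      // The service string only changes the method name reported in error
      // messages; the construction logic is shared.
      const char* service =
          direct ? "Intl.v8BreakIterator" : "Some.other.entryPoint";
      return JSV8BreakIterator::New(isolate, map, locales, options, service);
    }
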
diff --git a/deps/v8/src/objects/js-break-iterator.h b/deps/v8/src/objects/js-break-iterator.h
index 4b40192c81..ea66fe6732 100644
--- a/deps/v8/src/objects/js-break-iterator.h
+++ b/deps/v8/src/objects/js-break-iterator.h
@@ -31,7 +31,7 @@ class JSV8BreakIterator : public JSObject {
public:
V8_WARN_UNUSED_RESULT static MaybeHandle<JSV8BreakIterator> New(
Isolate* isolate, Handle<Map> map, Handle<Object> input_locales,
- Handle<Object> input_options);
+ Handle<Object> input_options, const char* service);
static Handle<JSObject> ResolvedOptions(
Isolate* isolate, Handle<JSV8BreakIterator> break_iterator);
@@ -72,7 +72,7 @@ class JSV8BreakIterator : public JSObject {
// Layout description.
DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
- TORQUE_GENERATED_JSV8BREAK_ITERATOR_FIELDS)
+ TORQUE_GENERATED_JS_V8_BREAK_ITERATOR_FIELDS)
private:
DECL_INT_ACCESSORS(raw_type)
diff --git a/deps/v8/src/objects/js-collator.cc b/deps/v8/src/objects/js-collator.cc
index 0413e2acd1..39178b3acf 100644
--- a/deps/v8/src/objects/js-collator.cc
+++ b/deps/v8/src/objects/js-collator.cc
@@ -243,7 +243,8 @@ void SetCaseFirstOption(icu::Collator* icu_collator,
// static
MaybeHandle<JSCollator> JSCollator::New(Isolate* isolate, Handle<Map> map,
Handle<Object> locales,
- Handle<Object> options_obj) {
+ Handle<Object> options_obj,
+ const char* service) {
// 1. Let requestedLocales be ? CanonicalizeLocaleList(locales).
Maybe<std::vector<std::string>> maybe_requested_locales =
Intl::CanonicalizeLocaleList(isolate, locales);
@@ -258,9 +259,9 @@ MaybeHandle<JSCollator> JSCollator::New(Isolate* isolate, Handle<Map> map,
} else {
// 3. Else
// 3. a. Let options be ? ToObject(options).
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, options_obj,
- Object::ToObject(isolate, options_obj, "Intl.Collator"), JSCollator);
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, options_obj,
+ Object::ToObject(isolate, options_obj, service),
+ JSCollator);
}
// At this point, options_obj can either be a JSObject or a JSProxy only.
@@ -269,7 +270,7 @@ MaybeHandle<JSCollator> JSCollator::New(Isolate* isolate, Handle<Map> map,
// 4. Let usage be ? GetOption(options, "usage", "string", « "sort",
// "search" », "sort").
Maybe<Usage> maybe_usage = Intl::GetStringOption<Usage>(
- isolate, options, "usage", "Intl.Collator", {"sort", "search"},
+ isolate, options, "usage", service, {"sort", "search"},
{Usage::SORT, Usage::SEARCH}, Usage::SORT);
MAYBE_RETURN(maybe_usage, MaybeHandle<JSCollator>());
Usage usage = maybe_usage.FromJust();
@@ -278,7 +279,7 @@ MaybeHandle<JSCollator> JSCollator::New(Isolate* isolate, Handle<Map> map,
// « "lookup", "best fit" », "best fit").
// 10. Set opt.[[localeMatcher]] to matcher.
Maybe<Intl::MatcherOption> maybe_locale_matcher =
- Intl::GetLocaleMatcher(isolate, options, "Intl.Collator");
+ Intl::GetLocaleMatcher(isolate, options, service);
MAYBE_RETURN(maybe_locale_matcher, MaybeHandle<JSCollator>());
Intl::MatcherOption matcher = maybe_locale_matcher.FromJust();
@@ -293,14 +294,14 @@ MaybeHandle<JSCollator> JSCollator::New(Isolate* isolate, Handle<Map> map,
//
// 13. Set opt.[[kn]] to numeric.
bool numeric;
- Maybe<bool> found_numeric = Intl::GetBoolOption(isolate, options, "numeric",
- "Intl.Collator", &numeric);
+ Maybe<bool> found_numeric =
+ Intl::GetBoolOption(isolate, options, "numeric", service, &numeric);
MAYBE_RETURN(found_numeric, MaybeHandle<JSCollator>());
// 14. Let caseFirst be ? GetOption(options, "caseFirst", "string",
// « "upper", "lower", "false" », undefined).
Maybe<Intl::CaseFirst> maybe_case_first =
- Intl::GetCaseFirst(isolate, options, "Intl.Collator");
+ Intl::GetCaseFirst(isolate, options, service);
MAYBE_RETURN(maybe_case_first, MaybeHandle<JSCollator>());
Intl::CaseFirst case_first = maybe_case_first.FromJust();
@@ -411,7 +412,7 @@ MaybeHandle<JSCollator> JSCollator::New(Isolate* isolate, Handle<Map> map,
// 24. Let sensitivity be ? GetOption(options, "sensitivity",
// "string", « "base", "accent", "case", "variant" », undefined).
Maybe<Sensitivity> maybe_sensitivity = Intl::GetStringOption<Sensitivity>(
- isolate, options, "sensitivity", "Intl.Collator",
+ isolate, options, "sensitivity", service,
{"base", "accent", "case", "variant"},
{Sensitivity::kBase, Sensitivity::kAccent, Sensitivity::kCase,
Sensitivity::kVariant},
@@ -451,9 +452,8 @@ MaybeHandle<JSCollator> JSCollator::New(Isolate* isolate, Handle<Map> map,
// 27.Let ignorePunctuation be ? GetOption(options,
// "ignorePunctuation", "boolean", undefined, false).
bool ignore_punctuation;
- Maybe<bool> found_ignore_punctuation =
- Intl::GetBoolOption(isolate, options, "ignorePunctuation",
- "Intl.Collator", &ignore_punctuation);
+ Maybe<bool> found_ignore_punctuation = Intl::GetBoolOption(
+ isolate, options, "ignorePunctuation", service, &ignore_punctuation);
MAYBE_RETURN(found_ignore_punctuation, MaybeHandle<JSCollator>());
// 28. Set collator.[[IgnorePunctuation]] to ignorePunctuation.
diff --git a/deps/v8/src/objects/js-collator.h b/deps/v8/src/objects/js-collator.h
index e9114afeb1..0147b80ebb 100644
--- a/deps/v8/src/objects/js-collator.h
+++ b/deps/v8/src/objects/js-collator.h
@@ -34,7 +34,7 @@ class JSCollator : public JSObject {
// ecma402/#sec-initializecollator
V8_WARN_UNUSED_RESULT static MaybeHandle<JSCollator> New(
Isolate* isolate, Handle<Map> map, Handle<Object> locales,
- Handle<Object> options);
+ Handle<Object> options, const char* service);
// ecma402/#sec-intl.collator.prototype.resolvedoptions
static Handle<JSObject> ResolvedOptions(Isolate* isolate,
@@ -48,7 +48,7 @@ class JSCollator : public JSObject {
// Layout description.
DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
- TORQUE_GENERATED_JSCOLLATOR_FIELDS)
+ TORQUE_GENERATED_JS_COLLATOR_FIELDS)
DECL_ACCESSORS(icu_collator, Managed<icu::Collator>)
DECL_ACCESSORS(bound_compare, Object)
diff --git a/deps/v8/src/objects/js-collection-iterator.h b/deps/v8/src/objects/js-collection-iterator.h
index b193aa84cd..0a40837677 100644
--- a/deps/v8/src/objects/js-collection-iterator.h
+++ b/deps/v8/src/objects/js-collection-iterator.h
@@ -22,6 +22,10 @@ class JSCollectionIterator
public:
void JSCollectionIteratorPrint(std::ostream& os, const char* name);
+ // JSCollectionIterator is abstract, but also defines the size for all of its
+ // concrete subclasses.
+ static constexpr int kSize = kHeaderSize;
+
TQ_OBJECT_CONSTRUCTORS(JSCollectionIterator)
};
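
The kSize added above relies on a class that is abstract yet still fixes the instance size shared by every concrete subclass. A simplified sketch of that layout idea, not V8's actual object model:

    #include <cstdint>

    // Sketch: an abstract-style base that nonetheless pins down the size
    // shared by every concrete subclass, so generic code can use kSize.
    struct CollectionIteratorBase {
      static constexpr int kHeaderSize = 3 * sizeof(uintptr_t);
      // Abstract, but defines the size for all of its concrete subclasses.
      static constexpr int kSize = kHeaderSize;
     protected:
      CollectionIteratorBase() = default;  // not directly instantiable
    };

    struct MapIterator : CollectionIteratorBase {};
    struct SetIterator : CollectionIteratorBase {};

    static_assert(MapIterator::kSize == SetIterator::kSize,
                  "concrete iterators share the base's size");

    int main() {}
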
diff --git a/deps/v8/src/objects/js-date-time-format.cc b/deps/v8/src/objects/js-date-time-format.cc
index 29fcfb0d7c..835f3dc43a 100644
--- a/deps/v8/src/objects/js-date-time-format.cc
+++ b/deps/v8/src/objects/js-date-time-format.cc
@@ -79,16 +79,6 @@ static std::vector<PatternItem> BuildPatternItems() {
kNarrowLongShort),
PatternItem("year", {{"yy", "2-digit"}, {"y", "numeric"}},
k2DigitNumeric)};
- if (FLAG_harmony_intl_dateformat_quarter) {
- items.push_back(PatternItem("quarter",
- {{"QQQQQ", "narrow"},
- {"QQQQ", "long"},
- {"QQQ", "short"},
- {"qqqqq", "narrow"},
- {"qqqq", "long"},
- {"qqq", "short"}},
- kNarrowLongShort));
- }
// Sometimes we get L instead of M for month - standalone name.
items.push_back(PatternItem("month",
{{"MMMMM", "narrow"},
@@ -641,7 +631,8 @@ Isolate::ICUObjectCacheType ConvertToCacheType(
MaybeHandle<String> JSDateTimeFormat::ToLocaleDateTime(
Isolate* isolate, Handle<Object> date, Handle<Object> locales,
- Handle<Object> options, RequiredOption required, DefaultsOption defaults) {
+ Handle<Object> options, RequiredOption required, DefaultsOption defaults,
+ const char* method) {
Isolate::ICUObjectCacheType cache_type = ConvertToCacheType(defaults);
Factory* factory = isolate->factory();
@@ -691,7 +682,8 @@ MaybeHandle<String> JSDateTimeFormat::ToLocaleDateTime(
Handle<JSDateTimeFormat> date_time_format;
ASSIGN_RETURN_ON_EXCEPTION(
isolate, date_time_format,
- JSDateTimeFormat::New(isolate, map, locales, internal_options), String);
+ JSDateTimeFormat::New(isolate, map, locales, internal_options, method),
+ String);
if (can_cache) {
isolate->set_icu_object_in_cache(
@@ -775,13 +767,10 @@ MaybeHandle<JSObject> JSDateTimeFormat::ToDateTimeOptions(
// 4. If required is "date" or "any", then
if (required == RequiredOption::kAny || required == RequiredOption::kDate) {
- // a. For each of the property names "weekday", "year", "quarter", "month",
+ // a. For each of the property names "weekday", "year", "month",
// "day", do
std::vector<Handle<String>> list(
{factory->weekday_string(), factory->year_string()});
- if (FLAG_harmony_intl_dateformat_quarter) {
- list.push_back(factory->quarter_string());
- }
list.push_back(factory->month_string());
list.push_back(factory->day_string());
Maybe<bool> maybe_needs_default = NeedsDefault(isolate, options, list);
@@ -941,7 +930,7 @@ icu::Calendar* CreateCalendar(Isolate* isolate, const icu::Locale& icu_locale,
std::unique_ptr<icu::SimpleDateFormat> CreateICUDateFormat(
const icu::Locale& icu_locale, const icu::UnicodeString& skeleton,
- icu::DateTimePatternGenerator& generator) { // NOLINT(runtime/references)
+ icu::DateTimePatternGenerator* generator) {
// See https://github.com/tc39/ecma402/issues/225 . The best pattern
// generation needs to be done in the base locale according to the
// current spec however odd it may be. See also crbug.com/826549 .
@@ -954,8 +943,8 @@ std::unique_ptr<icu::SimpleDateFormat> CreateICUDateFormat(
// has to be discussed. Revisit once the spec is clarified/revised.
icu::UnicodeString pattern;
UErrorCode status = U_ZERO_ERROR;
- pattern = generator.getBestPattern(skeleton, UDATPG_MATCH_HOUR_FIELD_LENGTH,
- status);
+ pattern = generator->getBestPattern(skeleton, UDATPG_MATCH_HOUR_FIELD_LENGTH,
+ status);
CHECK(U_SUCCESS(status));
// Make formatter from skeleton. Calendar and numbering system are added
@@ -971,9 +960,9 @@ std::unique_ptr<icu::SimpleDateFormat> CreateICUDateFormat(
class DateFormatCache {
public:
- icu::SimpleDateFormat* Create(
- const icu::Locale& icu_locale, const icu::UnicodeString& skeleton,
- icu::DateTimePatternGenerator& generator) { // NOLINT(runtime/references)
+ icu::SimpleDateFormat* Create(const icu::Locale& icu_locale,
+ const icu::UnicodeString& skeleton,
+ icu::DateTimePatternGenerator* generator) {
std::string key;
skeleton.toUTF8String<std::string>(key);
key += ":";
@@ -1002,7 +991,7 @@ class DateFormatCache {
std::unique_ptr<icu::SimpleDateFormat> CreateICUDateFormatFromCache(
const icu::Locale& icu_locale, const icu::UnicodeString& skeleton,
- icu::DateTimePatternGenerator& generator) { // NOLINT(runtime/references)
+ icu::DateTimePatternGenerator* generator) {
static base::LazyInstance<DateFormatCache>::type cache =
LAZY_INSTANCE_INITIALIZER;
return std::unique_ptr<icu::SimpleDateFormat>(
@@ -1138,8 +1127,7 @@ icu::UnicodeString ReplaceSkeleton(const icu::UnicodeString input,
std::unique_ptr<icu::SimpleDateFormat> DateTimeStylePattern(
JSDateTimeFormat::DateTimeStyle date_style,
JSDateTimeFormat::DateTimeStyle time_style, const icu::Locale& icu_locale,
- Intl::HourCycle hc,
- icu::DateTimePatternGenerator& generator) { // NOLINT(runtime/references)
+ Intl::HourCycle hc, icu::DateTimePatternGenerator* generator) {
std::unique_ptr<icu::SimpleDateFormat> result;
if (date_style != JSDateTimeFormat::DateTimeStyle::kUndefined) {
if (time_style != JSDateTimeFormat::DateTimeStyle::kUndefined) {
@@ -1164,10 +1152,40 @@ std::unique_ptr<icu::SimpleDateFormat> DateTimeStylePattern(
UNREACHABLE();
}
}
+
+ UErrorCode status = U_ZERO_ERROR;
+ // We somehow failed to create the instance.
+ if (result.get() == nullptr) {
+ icu::Locale modified_locale(icu_locale);
+ // Fallback to the locale without "nu".
+ if (!icu_locale.getUnicodeKeywordValue<std::string>("nu", status).empty()) {
+ status = U_ZERO_ERROR;
+ modified_locale.setUnicodeKeywordValue("nu", nullptr, status);
+ return DateTimeStylePattern(date_style, time_style, modified_locale, hc,
+ generator);
+ }
+ status = U_ZERO_ERROR;
+ // Fallback to the locale without "hc".
+ if (!icu_locale.getUnicodeKeywordValue<std::string>("hc", status).empty()) {
+ status = U_ZERO_ERROR;
+ modified_locale.setUnicodeKeywordValue("hc", nullptr, status);
+ return DateTimeStylePattern(date_style, time_style, modified_locale, hc,
+ generator);
+ }
+ status = U_ZERO_ERROR;
+ // Fallback to the locale without "ca".
+ if (!icu_locale.getUnicodeKeywordValue<std::string>("ca", status).empty()) {
+ status = U_ZERO_ERROR;
+ modified_locale.setUnicodeKeywordValue("ca", nullptr, status);
+ return DateTimeStylePattern(date_style, time_style, modified_locale, hc,
+ generator);
+ }
+ return nullptr;
+ }
icu::UnicodeString pattern;
pattern = result->toPattern(pattern);
- UErrorCode status = U_ZERO_ERROR;
+ status = U_ZERO_ERROR;
icu::UnicodeString skeleton =
icu::DateTimePatternGenerator::staticGetSkeleton(pattern, status);
CHECK(U_SUCCESS(status));
@@ -1185,7 +1203,8 @@ class DateTimePatternGeneratorCache {
public:
// Return a clone copy that the caller have to free.
icu::DateTimePatternGenerator* CreateGenerator(const icu::Locale& locale) {
- std::string key(locale.getBaseName());
+ std::string key(FLAG_harmony_intl_other_calendars ? locale.getName()
+ : locale.getBaseName());
base::MutexGuard guard(&mutex_);
auto it = map_.find(key);
if (it != map_.end()) {
@@ -1193,7 +1212,8 @@ class DateTimePatternGeneratorCache {
}
UErrorCode status = U_ZERO_ERROR;
map_[key].reset(icu::DateTimePatternGenerator::createInstance(
- icu::Locale(key.c_str()), status));
+ FLAG_harmony_intl_other_calendars ? locale : icu::Locale(key.c_str()),
+ status));
// Fallback to use "root".
if (U_FAILURE(status)) {
status = U_ZERO_ERROR;
@@ -1216,7 +1236,7 @@ enum FormatMatcherOption { kBestFit, kBasic };
// ecma402/#sec-initializedatetimeformat
MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::New(
Isolate* isolate, Handle<Map> map, Handle<Object> locales,
- Handle<Object> input_options) {
+ Handle<Object> input_options, const char* service) {
Factory* factory = isolate->factory();
// 1. Let requestedLocales be ? CanonicalizeLocaleList(locales).
Maybe<std::vector<std::string>> maybe_requested_locales =
@@ -1235,6 +1255,10 @@ MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::New(
// 4. Let matcher be ? GetOption(options, "localeMatcher", "string",
// « "lookup", "best fit" », "best fit").
// 5. Set opt.[[localeMatcher]] to matcher.
+ Maybe<Intl::MatcherOption> maybe_locale_matcher =
+ Intl::GetLocaleMatcher(isolate, options, service);
+ MAYBE_RETURN(maybe_locale_matcher, MaybeHandle<JSDateTimeFormat>());
+ Intl::MatcherOption locale_matcher = maybe_locale_matcher.FromJust();
std::unique_ptr<char[]> calendar_str = nullptr;
std::unique_ptr<char[]> numbering_system_str = nullptr;
@@ -1242,13 +1266,12 @@ MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::New(
const std::vector<const char*> empty_values = {};
// 6. Let calendar be ? GetOption(options, "calendar",
// "string", undefined, undefined).
- Maybe<bool> maybe_calendar =
- Intl::GetStringOption(isolate, options, "calendar", empty_values,
- "Intl.NumberFormat", &calendar_str);
+ Maybe<bool> maybe_calendar = Intl::GetStringOption(
+ isolate, options, "calendar", empty_values, service, &calendar_str);
MAYBE_RETURN(maybe_calendar, MaybeHandle<JSDateTimeFormat>());
if (maybe_calendar.FromJust() && calendar_str != nullptr) {
icu::Locale default_locale;
- if (!Intl::IsValidCalendar(default_locale, calendar_str.get())) {
+ if (!Intl::IsWellFormedCalendar(calendar_str.get())) {
THROW_NEW_ERROR(
isolate,
NewRangeError(
@@ -1261,26 +1284,21 @@ MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::New(
// 8. Let numberingSystem be ? GetOption(options, "numberingSystem",
// "string", undefined, undefined).
Maybe<bool> maybe_numberingSystem = Intl::GetNumberingSystem(
- isolate, options, "Intl.NumberFormat", &numbering_system_str);
+ isolate, options, service, &numbering_system_str);
MAYBE_RETURN(maybe_numberingSystem, MaybeHandle<JSDateTimeFormat>());
}
- Maybe<Intl::MatcherOption> maybe_locale_matcher =
- Intl::GetLocaleMatcher(isolate, options, "Intl.DateTimeFormat");
- MAYBE_RETURN(maybe_locale_matcher, MaybeHandle<JSDateTimeFormat>());
- Intl::MatcherOption locale_matcher = maybe_locale_matcher.FromJust();
-
// 6. Let hour12 be ? GetOption(options, "hour12", "boolean", undefined,
// undefined).
bool hour12;
- Maybe<bool> maybe_get_hour12 = Intl::GetBoolOption(
- isolate, options, "hour12", "Intl.DateTimeFormat", &hour12);
+ Maybe<bool> maybe_get_hour12 =
+ Intl::GetBoolOption(isolate, options, "hour12", service, &hour12);
MAYBE_RETURN(maybe_get_hour12, Handle<JSDateTimeFormat>());
// 7. Let hourCycle be ? GetOption(options, "hourCycle", "string", « "h11",
// "h12", "h23", "h24" », undefined).
Maybe<Intl::HourCycle> maybe_hour_cycle =
- Intl::GetHourCycle(isolate, options, "Intl.DateTimeFormat");
+ Intl::GetHourCycle(isolate, options, service);
MAYBE_RETURN(maybe_hour_cycle, MaybeHandle<JSDateTimeFormat>());
Intl::HourCycle hour_cycle = maybe_hour_cycle.FromJust();
@@ -1309,12 +1327,14 @@ MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::New(
DCHECK(!icu_locale.isBogus());
UErrorCode status = U_ZERO_ERROR;
- if (calendar_str != nullptr) {
+ if (calendar_str != nullptr &&
+ Intl::IsValidCalendar(icu_locale, calendar_str.get())) {
icu_locale.setUnicodeKeywordValue("ca", calendar_str.get(), status);
CHECK(U_SUCCESS(status));
}
- if (numbering_system_str != nullptr) {
+ if (numbering_system_str != nullptr &&
+ Intl::IsValidNumberingSystem(numbering_system_str.get())) {
icu_locale.setUnicodeKeywordValue("nu", numbering_system_str.get(), status);
CHECK(U_SUCCESS(status));
}
@@ -1322,9 +1342,8 @@ MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::New(
// 17. Let timeZone be ? Get(options, "timeZone").
const std::vector<const char*> empty_values;
std::unique_ptr<char[]> timezone = nullptr;
- Maybe<bool> maybe_timezone =
- Intl::GetStringOption(isolate, options, "timeZone", empty_values,
- "Intl.DateTimeFormat", &timezone);
+ Maybe<bool> maybe_timezone = Intl::GetStringOption(
+ isolate, options, "timeZone", empty_values, service, &timezone);
MAYBE_RETURN(maybe_timezone, Handle<JSDateTimeFormat>());
std::unique_ptr<icu::TimeZone> tz = CreateTimeZone(isolate, timezone.get());
@@ -1409,43 +1428,40 @@ MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::New(
DateTimeStyle time_style = DateTimeStyle::kUndefined;
std::unique_ptr<icu::SimpleDateFormat> icu_date_format;
- if (FLAG_harmony_intl_datetime_style) {
- // 28. Let dateStyle be ? GetOption(options, "dateStyle", "string", «
- // "full", "long", "medium", "short" », undefined).
- Maybe<DateTimeStyle> maybe_date_style =
- Intl::GetStringOption<DateTimeStyle>(
- isolate, options, "dateStyle", "Intl.DateTimeFormat",
- {"full", "long", "medium", "short"},
- {DateTimeStyle::kFull, DateTimeStyle::kLong, DateTimeStyle::kMedium,
- DateTimeStyle::kShort},
- DateTimeStyle::kUndefined);
- MAYBE_RETURN(maybe_date_style, MaybeHandle<JSDateTimeFormat>());
- // 29. If dateStyle is not undefined, set dateTimeFormat.[[DateStyle]] to
- // dateStyle.
- date_style = maybe_date_style.FromJust();
-
- // 30. Let timeStyle be ? GetOption(options, "timeStyle", "string", «
- // "full", "long", "medium", "short" »).
- Maybe<DateTimeStyle> maybe_time_style =
- Intl::GetStringOption<DateTimeStyle>(
- isolate, options, "timeStyle", "Intl.DateTimeFormat",
- {"full", "long", "medium", "short"},
- {DateTimeStyle::kFull, DateTimeStyle::kLong, DateTimeStyle::kMedium,
- DateTimeStyle::kShort},
- DateTimeStyle::kUndefined);
- MAYBE_RETURN(maybe_time_style, MaybeHandle<JSDateTimeFormat>());
-
- // 31. If timeStyle is not undefined, set dateTimeFormat.[[TimeStyle]] to
- // timeStyle.
- time_style = maybe_time_style.FromJust();
-
- // 32. If dateStyle or timeStyle are not undefined, then
- if (date_style != DateTimeStyle::kUndefined ||
- time_style != DateTimeStyle::kUndefined) {
- icu_date_format = DateTimeStylePattern(date_style, time_style, icu_locale,
- hc, *generator);
- }
+ // 28. Let dateStyle be ? GetOption(options, "dateStyle", "string", «
+ // "full", "long", "medium", "short" », undefined).
+ Maybe<DateTimeStyle> maybe_date_style = Intl::GetStringOption<DateTimeStyle>(
+ isolate, options, "dateStyle", service,
+ {"full", "long", "medium", "short"},
+ {DateTimeStyle::kFull, DateTimeStyle::kLong, DateTimeStyle::kMedium,
+ DateTimeStyle::kShort},
+ DateTimeStyle::kUndefined);
+ MAYBE_RETURN(maybe_date_style, MaybeHandle<JSDateTimeFormat>());
+ // 29. If dateStyle is not undefined, set dateTimeFormat.[[DateStyle]] to
+ // dateStyle.
+ date_style = maybe_date_style.FromJust();
+
+ // 30. Let timeStyle be ? GetOption(options, "timeStyle", "string", «
+ // "full", "long", "medium", "short" »).
+ Maybe<DateTimeStyle> maybe_time_style = Intl::GetStringOption<DateTimeStyle>(
+ isolate, options, "timeStyle", service,
+ {"full", "long", "medium", "short"},
+ {DateTimeStyle::kFull, DateTimeStyle::kLong, DateTimeStyle::kMedium,
+ DateTimeStyle::kShort},
+ DateTimeStyle::kUndefined);
+ MAYBE_RETURN(maybe_time_style, MaybeHandle<JSDateTimeFormat>());
+
+ // 31. If timeStyle is not undefined, set dateTimeFormat.[[TimeStyle]] to
+ // timeStyle.
+ time_style = maybe_time_style.FromJust();
+
+ // 32. If dateStyle or timeStyle are not undefined, then
+ if (date_style != DateTimeStyle::kUndefined ||
+ time_style != DateTimeStyle::kUndefined) {
+ icu_date_format = DateTimeStylePattern(date_style, time_style, icu_locale,
+ hc, generator.get());
}
+
// 33. Else,
if (icu_date_format.get() == nullptr) {
bool has_hour_option = false;
@@ -1456,9 +1472,9 @@ MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::New(
// i. Let prop be the name given in the Property column of the row.
// ii. Let value be ? GetOption(options, prop, "string", « the strings
// given in the Values column of the row », undefined).
- Maybe<bool> maybe_get_option = Intl::GetStringOption(
- isolate, options, item.property.c_str(), item.allowed_values,
- "Intl.DateTimeFormat", &input);
+ Maybe<bool> maybe_get_option =
+ Intl::GetStringOption(isolate, options, item.property.c_str(),
+ item.allowed_values, service, &input);
MAYBE_RETURN(maybe_get_option, Handle<JSDateTimeFormat>());
if (maybe_get_option.FromJust()) {
if (item.property == "hour") {
@@ -1487,8 +1503,7 @@ MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::New(
// « "basic", "best fit" », "best fit").
Maybe<FormatMatcherOption> maybe_format_matcher =
Intl::GetStringOption<FormatMatcherOption>(
- isolate, options, "formatMatcher", "Intl.DateTimeFormat",
- {"best fit", "basic"},
+ isolate, options, "formatMatcher", service, {"best fit", "basic"},
{FormatMatcherOption::kBestFit, FormatMatcherOption::kBasic},
FormatMatcherOption::kBestFit);
MAYBE_RETURN(maybe_format_matcher, MaybeHandle<JSDateTimeFormat>());
@@ -1496,13 +1511,13 @@ MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::New(
// FormatMatcherOption format_matcher = maybe_format_matcher.FromJust();
icu::UnicodeString skeleton_ustr(skeleton.c_str());
- icu_date_format =
- CreateICUDateFormatFromCache(icu_locale, skeleton_ustr, *generator);
+ icu_date_format = CreateICUDateFormatFromCache(icu_locale, skeleton_ustr,
+ generator.get());
if (icu_date_format.get() == nullptr) {
// Remove extensions and try again.
icu_locale = icu::Locale(icu_locale.getBaseName());
- icu_date_format =
- CreateICUDateFormatFromCache(icu_locale, skeleton_ustr, *generator);
+ icu_date_format = CreateICUDateFormatFromCache(icu_locale, skeleton_ustr,
+ generator.get());
if (icu_date_format.get() == nullptr) {
FATAL("Failed to create ICU date format, are ICU data files missing?");
}
@@ -1561,12 +1576,16 @@ MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::New(
isolate->factory()->NewFastOrSlowJSObjectFromMap(map));
DisallowHeapAllocation no_gc;
date_time_format->set_flags(0);
- date_time_format->set_hour_cycle(hc);
if (date_style != DateTimeStyle::kUndefined) {
date_time_format->set_date_style(date_style);
}
if (time_style != DateTimeStyle::kUndefined) {
date_time_format->set_time_style(time_style);
+ date_time_format->set_hour_cycle(hc);
+ }
+ if ((date_style == DateTimeStyle::kUndefined) &&
+ (time_style == DateTimeStyle::kUndefined)) {
+ date_time_format->set_hour_cycle(hc);
}
date_time_format->set_icu_locale(*managed_locale);
date_time_format->set_icu_simple_date_format(*managed_format);
@@ -1585,11 +1604,9 @@ Handle<String> IcuDateFieldIdToDateType(int32_t field_id, Isolate* isolate) {
return isolate->factory()->literal_string();
case UDAT_YEAR_FIELD:
case UDAT_EXTENDED_YEAR_FIELD:
- case UDAT_YEAR_NAME_FIELD:
return isolate->factory()->year_string();
- case UDAT_QUARTER_FIELD:
- case UDAT_STANDALONE_QUARTER_FIELD:
- return isolate->factory()->quarter_string();
+ case UDAT_YEAR_NAME_FIELD:
+ return isolate->factory()->yearName_string();
case UDAT_MONTH_FIELD:
case UDAT_STANDALONE_MONTH_FIELD:
return isolate->factory()->month_string();
@@ -1624,6 +1641,11 @@ Handle<String> IcuDateFieldIdToDateType(int32_t field_id, Isolate* isolate) {
return isolate->factory()->era_string();
case UDAT_FRACTIONAL_SECOND_FIELD:
return isolate->factory()->fractionalSecond_string();
+ case UDAT_RELATED_YEAR_FIELD:
+ return isolate->factory()->relatedYear_string();
+
+ case UDAT_QUARTER_FIELD:
+ case UDAT_STANDALONE_QUARTER_FIELD:
default:
// Other UDAT_*_FIELD's cannot show up because there is no way to specify
// them via options of Intl.DateTimeFormat.
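
The new fallback in DateTimeStylePattern above retries with the "nu", "hc", and "ca" Unicode extension keywords stripped one at a time before giving up. A hedged standalone sketch of the same cascade against the ICU C++ API; the try_create callback is a stand-in for the real formatter construction, and an ICU build is assumed:

    #include <unicode/locid.h>
    #include <functional>
    #include <iostream>
    #include <string>

    // Strip "nu", then "hc", then "ca" from the locale until creation
    // succeeds, mirroring the fallback order added above.
    bool CreateWithKeywordFallback(
        const icu::Locale& locale,
        const std::function<bool(const icu::Locale&)>& try_create) {
      if (try_create(locale)) return true;
      for (const char* key : {"nu", "hc", "ca"}) {
        UErrorCode status = U_ZERO_ERROR;
        if (locale.getUnicodeKeywordValue<std::string>(key, status).empty()) {
          continue;  // keyword not present; nothing to strip
        }
        icu::Locale modified(locale);
        status = U_ZERO_ERROR;
        modified.setUnicodeKeywordValue(key, nullptr, status);  // drop it
        if (CreateWithKeywordFallback(modified, try_create)) return true;
      }
      return false;
    }

    int main() {
      icu::Locale loc("hi-u-nu-bogus");
      // Pretend creation fails while any "nu" keyword is present:
      auto try_create = [](const icu::Locale& l) {
        UErrorCode status = U_ZERO_ERROR;
        return l.getUnicodeKeywordValue<std::string>("nu", status).empty();
      };
      std::cout << CreateWithKeywordFallback(loc, try_create) << "\n";  // 1
    }
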
diff --git a/deps/v8/src/objects/js-date-time-format.h b/deps/v8/src/objects/js-date-time-format.h
index f4a8ccc8f5..acf99b3618 100644
--- a/deps/v8/src/objects/js-date-time-format.h
+++ b/deps/v8/src/objects/js-date-time-format.h
@@ -34,7 +34,7 @@ class JSDateTimeFormat : public JSObject {
public:
V8_WARN_UNUSED_RESULT static MaybeHandle<JSDateTimeFormat> New(
Isolate* isolate, Handle<Map> map, Handle<Object> locales,
- Handle<Object> options);
+ Handle<Object> options, const char* service);
V8_WARN_UNUSED_RESULT static MaybeHandle<JSObject> ResolvedOptions(
Isolate* isolate, Handle<JSDateTimeFormat> date_time_format);
@@ -82,7 +82,8 @@ class JSDateTimeFormat : public JSObject {
V8_WARN_UNUSED_RESULT static MaybeHandle<String> ToLocaleDateTime(
Isolate* isolate, Handle<Object> date, Handle<Object> locales,
- Handle<Object> options, RequiredOption required, DefaultsOption defaults);
+ Handle<Object> options, RequiredOption required, DefaultsOption defaults,
+ const char* method);
V8_EXPORT_PRIVATE static const std::set<std::string>& GetAvailableLocales();
@@ -94,7 +95,7 @@ class JSDateTimeFormat : public JSObject {
// Layout description.
DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
- TORQUE_GENERATED_JSDATE_TIME_FORMAT_FIELDS)
+ TORQUE_GENERATED_JS_DATE_TIME_FORMAT_FIELDS)
inline void set_hour_cycle(Intl::HourCycle hour_cycle);
inline Intl::HourCycle hour_cycle() const;
diff --git a/deps/v8/src/objects/js-list-format.cc b/deps/v8/src/objects/js-list-format.cc
index 4f303b1874..90b93e308a 100644
--- a/deps/v8/src/objects/js-list-format.cc
+++ b/deps/v8/src/objects/js-list-format.cc
@@ -252,40 +252,22 @@ namespace {
// Extract String from JSArray into array of UnicodeString
Maybe<std::vector<icu::UnicodeString>> ToUnicodeStringArray(
Isolate* isolate, Handle<JSArray> array) {
- Factory* factory = isolate->factory();
- // In general, ElementsAccessor::Get actually isn't guaranteed to give us the
- // elements in order. But if it is a holey array, it will cause the exception
- // with the IsString check.
+ // Thanks to iterable-to-list preprocessing, we never see dictionary-mode
+ // arrays here, so the loop below can construct an entry from the index.
+ DCHECK(array->HasFastElements(isolate));
auto* accessor = array->GetElementsAccessor();
uint32_t length = accessor->NumberOfElements(*array);
- // ecma402 #sec-createpartsfromlist
- // 2. If list contains any element value such that Type(value) is not String,
- // throw a TypeError exception.
- //
- // Per spec it looks like we're supposed to throw a TypeError exception if the
- // item isn't already a string, rather than coercing to a string.
std::vector<icu::UnicodeString> result;
for (uint32_t i = 0; i < length; i++) {
- DCHECK(accessor->HasElement(*array, i));
- Handle<Object> item = accessor->Get(array, i);
- DCHECK(!item.is_null());
- if (!item->IsString()) {
- THROW_NEW_ERROR_RETURN_VALUE(
- isolate,
- NewTypeError(MessageTemplate::kArrayItemNotType,
- factory->list_string(),
- // TODO(ftang): For dictionary-mode arrays, i isn't
- // actually the index in the array but the index in the
- // dictionary.
- factory->NewNumber(i), factory->String_string()),
- Nothing<std::vector<icu::UnicodeString>>());
- }
+ InternalIndex entry(i);
+ DCHECK(accessor->HasEntry(*array, entry));
+ Handle<Object> item = accessor->Get(array, entry);
+ DCHECK(item->IsString());
Handle<String> item_str = Handle<String>::cast(item);
if (!item_str->IsFlat()) item_str = String::Flatten(isolate, item_str);
result.push_back(Intl::ToICUUnicodeString(isolate, item_str));
}
- DCHECK(!array->HasDictionaryElements());
return Just(result);
}
@@ -294,9 +276,6 @@ MaybeHandle<T> FormatListCommon(
Isolate* isolate, Handle<JSListFormat> format, Handle<JSArray> list,
MaybeHandle<T> (*formatToResult)(Isolate*, const icu::FormattedValue&)) {
DCHECK(!list->IsUndefined());
- // ecma402 #sec-createpartsfromlist
- // 2. If list contains any element value such that Type(value) is not String,
- // throw a TypeError exception.
Maybe<std::vector<icu::UnicodeString>> maybe_array =
ToUnicodeStringArray(isolate, list);
MAYBE_RETURN(maybe_array, Handle<T>());
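
With the type and element-kind checks hoisted into iterable-to-list preprocessing, ToUnicodeStringArray reduces to a straight conversion loop. A hedged standalone analogue of that loop over plain UTF-8 strings; V8 additionally flattens each string so its contents are contiguous before handing it to ICU:

    #include <unicode/unistr.h>
    #include <cassert>
    #include <string>
    #include <vector>

    // Sketch: with the "all elements are strings" invariant enforced
    // upstream, conversion is just a per-element UTF-8 decode.
    std::vector<icu::UnicodeString> ToUnicodeStringArray(
        const std::vector<std::string>& list) {
      std::vector<icu::UnicodeString> result;
      result.reserve(list.size());
      for (const std::string& item : list) {
        result.push_back(icu::UnicodeString::fromUTF8(item));
      }
      return result;
    }

    int main() {
      auto v = ToUnicodeStringArray({"Motorcycle", "Bus", "Car"});
      assert(v.size() == 3);
    }
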
diff --git a/deps/v8/src/objects/js-list-format.h b/deps/v8/src/objects/js-list-format.h
index df937722e6..1ff76790f9 100644
--- a/deps/v8/src/objects/js-list-format.h
+++ b/deps/v8/src/objects/js-list-format.h
@@ -104,7 +104,7 @@ class JSListFormat : public JSObject {
// Layout description.
DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
- TORQUE_GENERATED_JSLIST_FORMAT_FIELDS)
+ TORQUE_GENERATED_JS_LIST_FORMAT_FIELDS)
OBJECT_CONSTRUCTORS(JSListFormat, JSObject);
};
diff --git a/deps/v8/src/objects/js-locale.cc b/deps/v8/src/objects/js-locale.cc
index 4a66ea9eca..9e8053b1dc 100644
--- a/deps/v8/src/objects/js-locale.cc
+++ b/deps/v8/src/objects/js-locale.cc
@@ -168,10 +168,20 @@ bool IsUnicodeVariantSubtag(const std::string& value) {
bool IsExtensionSingleton(const std::string& value) {
return IsAlphanum(value, 1, 1);
}
+} // namespace
+
+bool JSLocale::Is38AlphaNumList(const std::string& value) {
+ std::size_t found = value.find("-");
+ if (found == std::string::npos) {
+ return IsAlphanum(value, 3, 8);
+ }
+ return IsAlphanum(value.substr(0, found), 3, 8) &&
+ JSLocale::Is38AlphaNumList(value.substr(found + 1));
+}
// TODO(ftang) Replace the following check w/ icu::LocaleBuilder
// once ICU64 land in March 2019.
-bool StartsWithUnicodeLanguageId(const std::string& value) {
+bool JSLocale::StartsWithUnicodeLanguageId(const std::string& value) {
// unicode_language_id =
// unicode_language_subtag (sep unicode_script_subtag)?
// (sep unicode_region_subtag)? (sep unicode_variant_subtag)* ;
@@ -207,6 +217,7 @@ bool StartsWithUnicodeLanguageId(const std::string& value) {
return true;
}
+namespace {
Maybe<bool> ApplyOptionsToTag(Isolate* isolate, Handle<String> tag,
Handle<JSReceiver> options,
icu::LocaleBuilder* builder) {
@@ -223,7 +234,7 @@ Maybe<bool> ApplyOptionsToTag(Isolate* isolate, Handle<String> tag,
CHECK_NOT_NULL(*bcp47_tag);
// 2. If IsStructurallyValidLanguageTag(tag) is false, throw a RangeError
// exception.
- if (!StartsWithUnicodeLanguageId(*bcp47_tag)) {
+ if (!JSLocale::StartsWithUnicodeLanguageId(*bcp47_tag)) {
return Just(false);
}
UErrorCode status = U_ZERO_ERROR;
diff --git a/deps/v8/src/objects/js-locale.h b/deps/v8/src/objects/js-locale.h
index e1806e6b7f..f2fca3ce14 100644
--- a/deps/v8/src/objects/js-locale.h
+++ b/deps/v8/src/objects/js-locale.h
@@ -49,6 +49,13 @@ class JSLocale : public JSObject {
static Handle<String> ToString(Isolate* isolate, Handle<JSLocale> locale);
static std::string ToString(Handle<JSLocale> locale);
+ // Helper function used by other Intl objects to validate locales.
+ static bool StartsWithUnicodeLanguageId(const std::string& value);
+
+ // Helper function to check for a well-formed
+ // "(3*8alphanum) *("-" (3*8alphanum))" sequence.
+ static bool Is38AlphaNumList(const std::string& value);
+
DECL_CAST(JSLocale)
DECL_ACCESSORS(icu_locale, Managed<icu::Locale>)
@@ -58,7 +65,7 @@ class JSLocale : public JSObject {
// Layout description.
DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
- TORQUE_GENERATED_JSLOCALE_FIELDS)
+ TORQUE_GENERATED_JS_LOCALE_FIELDS)
OBJECT_CONSTRUCTORS(JSLocale, JSObject);
};
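
Is38AlphaNumList checks the "(3*8alphanum) *("-" (3*8alphanum))" shape that ecma402 requires of values such as numberingSystem. A standalone re-implementation with sample inputs, for illustration only; V8's version reuses its internal IsAlphanum helper:

    #include <cassert>
    #include <cctype>
    #include <string>

    // Each "-"-separated segment must be 3 to 8 alphanumeric characters.
    bool Is38AlphaNum(const std::string& s) {
      if (s.size() < 3 || s.size() > 8) return false;
      for (unsigned char c : s) {
        if (!std::isalnum(c)) return false;
      }
      return true;
    }

    bool Is38AlphaNumList(const std::string& value) {
      std::size_t found = value.find('-');
      if (found == std::string::npos) return Is38AlphaNum(value);
      return Is38AlphaNum(value.substr(0, found)) &&
             Is38AlphaNumList(value.substr(found + 1));
    }

    int main() {
      assert(Is38AlphaNumList("arab"));       // single well-formed segment
      assert(Is38AlphaNumList("latn-thai"));  // list of segments
      assert(!Is38AlphaNumList("ab"));        // too short
      assert(!Is38AlphaNumList("latn-"));     // empty trailing segment
    }
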
diff --git a/deps/v8/src/objects/js-number-format-inl.h b/deps/v8/src/objects/js-number-format-inl.h
index afdfef89f2..f68252ab0b 100644
--- a/deps/v8/src/objects/js-number-format-inl.h
+++ b/deps/v8/src/objects/js-number-format-inl.h
@@ -26,46 +26,8 @@ ACCESSORS(JSNumberFormat, icu_number_formatter,
kIcuNumberFormatterOffset)
ACCESSORS(JSNumberFormat, bound_format, Object, kBoundFormatOffset)
-// Currenct ECMA 402 spec mandate to record (Min|Max)imumFractionDigits
-// uncondictionally while the unified number proposal eventually will only
-// record either (Min|Max)imumFractionDigits or (Min|Max)imumSignaficantDigits
-// Since LocalizedNumberFormatter can only remember one set, and during
-// 2019-1-17 ECMA402 meeting that the committee decide not to take a PR to
-// address that prior to the unified number proposal, we have to add these two
-// 5 bits int into flags to remember the (Min|Max)imumFractionDigits while
-// (Min|Max)imumSignaficantDigits is present.
-// TODO(ftang) remove the following once we ship int-number-format-unified
-// * SMI_ACCESSORS of flags
-// * Four inline functions: (set_)?(min|max)imum_fraction_digits
-
SMI_ACCESSORS(JSNumberFormat, flags, kFlagsOffset)
-inline int JSNumberFormat::minimum_fraction_digits() const {
- return MinimumFractionDigitsBits::decode(flags());
-}
-
-inline void JSNumberFormat::set_minimum_fraction_digits(int digits) {
- DCHECK_GE(MinimumFractionDigitsBits::kMax, digits);
- DCHECK_LE(0, digits);
- DCHECK_GE(20, digits);
- int hints = flags();
- hints = MinimumFractionDigitsBits::update(hints, digits);
- set_flags(hints);
-}
-
-inline int JSNumberFormat::maximum_fraction_digits() const {
- return MaximumFractionDigitsBits::decode(flags());
-}
-
-inline void JSNumberFormat::set_maximum_fraction_digits(int digits) {
- DCHECK_GE(MaximumFractionDigitsBits::kMax, digits);
- DCHECK_LE(0, digits);
- DCHECK_GE(20, digits);
- int hints = flags();
- hints = MaximumFractionDigitsBits::update(hints, digits);
- set_flags(hints);
-}
-
inline void JSNumberFormat::set_style(Style style) {
DCHECK_GE(StyleBits::kMax, style);
int hints = flags();
diff --git a/deps/v8/src/objects/js-number-format.cc b/deps/v8/src/objects/js-number-format.cc
index ff564975d6..c065a3f725 100644
--- a/deps/v8/src/objects/js-number-format.cc
+++ b/deps/v8/src/objects/js-number-format.cc
@@ -33,7 +33,6 @@ namespace {
// [[CurrencyDisplay]] is one of the values "code", "symbol", "name",
// or "narrowSymbol" identifying the display of the currency number format.
-// Note: "narrowSymbol" is added in proposal-unified-intl-numberformat
enum class CurrencyDisplay {
CODE,
SYMBOL,
@@ -621,12 +620,11 @@ JSNumberFormat::SetDigitOptionsToFormatter(
result = result.integerWidth(icu::number::IntegerWidth::zeroFillTo(
digit_options.minimum_integer_digits));
}
- if (FLAG_harmony_intl_numberformat_unified) {
- // Value -1 of minimum_significant_digits represent the roundingtype is
- // "compact-rounding".
- if (digit_options.minimum_significant_digits < 0) {
- return result;
- }
+
+ // A minimum_significant_digits value of -1 indicates that the rounding
+ // type is "compact-rounding".
+ if (digit_options.minimum_significant_digits < 0) {
+ return result;
}
icu::number::Precision precision =
(digit_options.minimum_significant_digits > 0)
@@ -704,15 +702,12 @@ Handle<JSObject> JSNumberFormat::ResolvedOptions(
isolate, options, factory->currencyDisplay_string(),
CurrencyDisplayString(isolate, skeleton), Just(kDontThrow))
.FromJust());
- if (FLAG_harmony_intl_numberformat_unified) {
- CHECK(JSReceiver::CreateDataProperty(
- isolate, options, factory->currencySign_string(),
- CurrencySignString(isolate, skeleton), Just(kDontThrow))
- .FromJust());
- }
+ CHECK(JSReceiver::CreateDataProperty(
+ isolate, options, factory->currencySign_string(),
+ CurrencySignString(isolate, skeleton), Just(kDontThrow))
+ .FromJust());
}
- if (FLAG_harmony_intl_numberformat_unified) {
if (style == JSNumberFormat::Style::UNIT) {
std::string unit = UnitFromSkeleton(skeleton);
if (!unit.empty()) {
@@ -727,7 +722,6 @@ Handle<JSObject> JSNumberFormat::ResolvedOptions(
UnitDisplayString(isolate, skeleton), Just(kDontThrow))
.FromJust());
}
- }
CHECK(
JSReceiver::CreateDataProperty(
@@ -735,45 +729,25 @@ Handle<JSObject> JSNumberFormat::ResolvedOptions(
factory->NewNumberFromInt(MinimumIntegerDigitsFromSkeleton(skeleton)),
Just(kDontThrow))
.FromJust());
+
int32_t minimum = 0, maximum = 0;
- bool output_fraction =
- FractionDigitsFromSkeleton(skeleton, &minimum, &maximum);
-
- if (!FLAG_harmony_intl_numberformat_unified && !output_fraction) {
- // Currenct ECMA 402 spec mandate to record (Min|Max)imumFractionDigits
- // uncondictionally while the unified number proposal eventually will only
- // record either (Min|Max)imumFractionDigits or
- // (Min|Max)imumSignaficantDigits Since LocalizedNumberFormatter can only
- // remember one set, and during 2019-1-17 ECMA402 meeting that the committee
- // decide not to take a PR to address that prior to the unified number
- // proposal, we have to add these two 5 bits int into flags to remember the
- // (Min|Max)imumFractionDigits while (Min|Max)imumSignaficantDigits is
- // present.
- // TODO(ftang) remove the following two lines once we ship
- // int-number-format-unified
- output_fraction = true;
- minimum = number_format->minimum_fraction_digits();
- maximum = number_format->maximum_fraction_digits();
- }
- if (output_fraction) {
+ if (SignificantDigitsFromSkeleton(skeleton, &minimum, &maximum)) {
CHECK(JSReceiver::CreateDataProperty(
- isolate, options, factory->minimumFractionDigits_string(),
+ isolate, options, factory->minimumSignificantDigits_string(),
factory->NewNumberFromInt(minimum), Just(kDontThrow))
.FromJust());
CHECK(JSReceiver::CreateDataProperty(
- isolate, options, factory->maximumFractionDigits_string(),
+ isolate, options, factory->maximumSignificantDigits_string(),
factory->NewNumberFromInt(maximum), Just(kDontThrow))
.FromJust());
- }
- minimum = 0;
- maximum = 0;
- if (SignificantDigitsFromSkeleton(skeleton, &minimum, &maximum)) {
+ } else {
+ FractionDigitsFromSkeleton(skeleton, &minimum, &maximum);
CHECK(JSReceiver::CreateDataProperty(
- isolate, options, factory->minimumSignificantDigits_string(),
+ isolate, options, factory->minimumFractionDigits_string(),
factory->NewNumberFromInt(minimum), Just(kDontThrow))
.FromJust());
CHECK(JSReceiver::CreateDataProperty(
- isolate, options, factory->maximumSignificantDigits_string(),
+ isolate, options, factory->maximumFractionDigits_string(),
factory->NewNumberFromInt(maximum), Just(kDontThrow))
.FromJust());
}
@@ -783,24 +757,22 @@ Handle<JSObject> JSNumberFormat::ResolvedOptions(
factory->ToBoolean(UseGroupingFromSkeleton(skeleton)),
Just(kDontThrow))
.FromJust());
- if (FLAG_harmony_intl_numberformat_unified) {
- Notation notation = NotationFromSkeleton(skeleton);
- CHECK(JSReceiver::CreateDataProperty(
- isolate, options, factory->notation_string(),
- NotationAsString(isolate, notation), Just(kDontThrow))
- .FromJust());
- // Only output compactDisplay when notation is compact.
- if (notation == Notation::COMPACT) {
- CHECK(JSReceiver::CreateDataProperty(
- isolate, options, factory->compactDisplay_string(),
- CompactDisplayString(isolate, skeleton), Just(kDontThrow))
- .FromJust());
- }
+ Notation notation = NotationFromSkeleton(skeleton);
+ CHECK(JSReceiver::CreateDataProperty(
+ isolate, options, factory->notation_string(),
+ NotationAsString(isolate, notation), Just(kDontThrow))
+ .FromJust());
+ // Only output compactDisplay when notation is compact.
+ if (notation == Notation::COMPACT) {
CHECK(JSReceiver::CreateDataProperty(
- isolate, options, factory->signDisplay_string(),
- SignDisplayString(isolate, skeleton), Just(kDontThrow))
+ isolate, options, factory->compactDisplay_string(),
+ CompactDisplayString(isolate, skeleton), Just(kDontThrow))
.FromJust());
}
+ CHECK(JSReceiver::CreateDataProperty(
+ isolate, options, factory->signDisplay_string(),
+ SignDisplayString(isolate, skeleton), Just(kDontThrow))
+ .FromJust());
return options;
}
@@ -837,7 +809,8 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::UnwrapNumberFormat(
MaybeHandle<JSNumberFormat> JSNumberFormat::New(Isolate* isolate,
Handle<Map> map,
Handle<Object> locales,
- Handle<Object> options_obj) {
+ Handle<Object> options_obj,
+ const char* service) {
Factory* factory = isolate->factory();
// 1. Let requestedLocales be ? CanonicalizeLocaleList(locales).
@@ -854,10 +827,9 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::New(Isolate* isolate,
} else {
// 3. Else
// 3. a. Let options be ? ToObject(options).
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, options_obj,
- Object::ToObject(isolate, options_obj, "Intl.NumberFormat"),
- JSNumberFormat);
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, options_obj,
+ Object::ToObject(isolate, options_obj, service),
+ JSNumberFormat);
}
// At this point, options_obj can either be a JSObject or a JSProxy only.
@@ -868,7 +840,7 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::New(Isolate* isolate,
// "lookup", "best fit" », "best fit").
// 6. Set opt.[[localeMatcher]] to matcher.
Maybe<Intl::MatcherOption> maybe_locale_matcher =
- Intl::GetLocaleMatcher(isolate, options, "Intl.NumberFormat");
+ Intl::GetLocaleMatcher(isolate, options, service);
MAYBE_RETURN(maybe_locale_matcher, MaybeHandle<JSNumberFormat>());
Intl::MatcherOption matcher = maybe_locale_matcher.FromJust();
@@ -877,7 +849,7 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::New(Isolate* isolate,
// 7. Let _numberingSystem_ be ? GetOption(_options_, `"numberingSystem"`,
// `"string"`, *undefined*, *undefined*).
Maybe<bool> maybe_numberingSystem = Intl::GetNumberingSystem(
- isolate, options, "Intl.RelativeTimeFormat", &numbering_system_str);
+ isolate, options, service, &numbering_system_str);
// 8. If _numberingSystem_ is not *undefined*, then
// a. If _numberingSystem_ does not match the
// `(3*8alphanum) *("-" (3*8alphanum))` sequence, throw a *RangeError*
@@ -895,7 +867,8 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::New(Isolate* isolate,
requested_locales, matcher, relevant_extension_keys);
UErrorCode status = U_ZERO_ERROR;
- if (numbering_system_str != nullptr) {
+ if (numbering_system_str != nullptr &&
+ Intl::IsValidNumberingSystem(numbering_system_str.get())) {
r.icu_locale.setUnicodeKeywordValue("nu", numbering_system_str.get(),
status);
CHECK(U_SUCCESS(status));
@@ -913,21 +886,15 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::New(Isolate* isolate,
.roundingMode(UNUM_ROUND_HALFUP);
// 12. Let style be ? GetOption(options, "style", "string", « "decimal",
- // "percent", "currency" », "decimal").
- const char* service = "Intl.NumberFormat";
+ // "percent", "currency", "unit" », "decimal").
- std::vector<const char*> style_str_values({"decimal", "percent", "currency"});
- std::vector<JSNumberFormat::Style> style_enum_values(
- {JSNumberFormat::Style::DECIMAL, JSNumberFormat::Style::PERCENT,
- JSNumberFormat::Style::CURRENCY});
- if (FLAG_harmony_intl_numberformat_unified) {
- style_str_values.push_back("unit");
- style_enum_values.push_back(JSNumberFormat::Style::UNIT);
- }
Maybe<JSNumberFormat::Style> maybe_style =
Intl::GetStringOption<JSNumberFormat::Style>(
- isolate, options, "style", service, style_str_values,
- style_enum_values, JSNumberFormat::Style::DECIMAL);
+ isolate, options, "style", service,
+ {"decimal", "percent", "currency", "unit"},
+ {JSNumberFormat::Style::DECIMAL, JSNumberFormat::Style::PERCENT,
+ JSNumberFormat::Style::CURRENCY, JSNumberFormat::Style::UNIT},
+ JSNumberFormat::Style::DECIMAL);
MAYBE_RETURN(maybe_style, MaybeHandle<JSNumberFormat>());
JSNumberFormat::Style style = maybe_style.FromJust();
@@ -977,99 +944,87 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::New(Isolate* isolate,
}
// 18. Let currencyDisplay be ? GetOption(options, "currencyDisplay",
- // "string", « "code", "symbol", "name" », "symbol").
- std::vector<const char*> currency_display_str_values(
- {"code", "symbol", "name"});
- std::vector<CurrencyDisplay> currency_display_enum_values(
- {CurrencyDisplay::CODE, CurrencyDisplay::SYMBOL, CurrencyDisplay::NAME});
- if (FLAG_harmony_intl_numberformat_unified) {
- currency_display_str_values.push_back("narrowSymbol");
- currency_display_enum_values.push_back(CurrencyDisplay::NARROW_SYMBOL);
- }
+ // "string", « "code", "symbol", "name", "narrowSymbol" », "symbol").
Maybe<CurrencyDisplay> maybe_currency_display =
Intl::GetStringOption<CurrencyDisplay>(
isolate, options, "currencyDisplay", service,
- currency_display_str_values, currency_display_enum_values,
+ {"code", "symbol", "name", "narrowSymbol"},
+ {CurrencyDisplay::CODE, CurrencyDisplay::SYMBOL,
+ CurrencyDisplay::NAME, CurrencyDisplay::NARROW_SYMBOL},
CurrencyDisplay::SYMBOL);
MAYBE_RETURN(maybe_currency_display, MaybeHandle<JSNumberFormat>());
CurrencyDisplay currency_display = maybe_currency_display.FromJust();
CurrencySign currency_sign = CurrencySign::STANDARD;
- if (FLAG_harmony_intl_numberformat_unified) {
- // Let currencySign be ? GetOption(options, "currencySign", "string", «
- // "standard", "accounting" », "standard").
- Maybe<CurrencySign> maybe_currency_sign =
- Intl::GetStringOption<CurrencySign>(
- isolate, options, "currencySign", service,
- {"standard", "accounting"},
- {CurrencySign::STANDARD, CurrencySign::ACCOUNTING},
- CurrencySign::STANDARD);
- MAYBE_RETURN(maybe_currency_sign, MaybeHandle<JSNumberFormat>());
- currency_sign = maybe_currency_sign.FromJust();
-
- // Let unit be ? GetOption(options, "unit", "string", undefined, undefined).
- std::unique_ptr<char[]> unit_cstr;
- Maybe<bool> found_unit = Intl::GetStringOption(
- isolate, options, "unit", empty_values, service, &unit_cstr);
- MAYBE_RETURN(found_unit, MaybeHandle<JSNumberFormat>());
-
- std::string unit;
- if (found_unit.FromJust()) {
- DCHECK_NOT_NULL(unit_cstr.get());
- unit = unit_cstr.get();
+ // Let currencySign be ? GetOption(options, "currencySign", "string", «
+ // "standard", "accounting" », "standard").
+ Maybe<CurrencySign> maybe_currency_sign = Intl::GetStringOption<CurrencySign>(
+ isolate, options, "currencySign", service, {"standard", "accounting"},
+ {CurrencySign::STANDARD, CurrencySign::ACCOUNTING},
+ CurrencySign::STANDARD);
+ MAYBE_RETURN(maybe_currency_sign, MaybeHandle<JSNumberFormat>());
+ currency_sign = maybe_currency_sign.FromJust();
+
+ // Let unit be ? GetOption(options, "unit", "string", undefined, undefined).
+ std::unique_ptr<char[]> unit_cstr;
+ Maybe<bool> found_unit = Intl::GetStringOption(
+ isolate, options, "unit", empty_values, service, &unit_cstr);
+ MAYBE_RETURN(found_unit, MaybeHandle<JSNumberFormat>());
+
+ std::string unit;
+ if (found_unit.FromJust()) {
+ DCHECK_NOT_NULL(unit_cstr.get());
+ unit = unit_cstr.get();
+ }
+
+ // Let unitDisplay be ? GetOption(options, "unitDisplay", "string", «
+ // "short", "narrow", "long" », "short").
+ Maybe<UnitDisplay> maybe_unit_display = Intl::GetStringOption<UnitDisplay>(
+ isolate, options, "unitDisplay", service, {"short", "narrow", "long"},
+ {UnitDisplay::SHORT, UnitDisplay::NARROW, UnitDisplay::LONG},
+ UnitDisplay::SHORT);
+ MAYBE_RETURN(maybe_unit_display, MaybeHandle<JSNumberFormat>());
+ UnitDisplay unit_display = maybe_unit_display.FromJust();
+
+ // If style is "unit", then
+ if (style == JSNumberFormat::Style::UNIT) {
+ // If unit is undefined, throw a TypeError exception.
+ if (unit == "") {
+ THROW_NEW_ERROR(isolate,
+ NewTypeError(MessageTemplate::kInvalidUnit,
+ factory->NewStringFromAsciiChecked(service),
+ factory->empty_string()),
+ JSNumberFormat);
}
- // Let unitDisplay be ? GetOption(options, "unitDisplay", "string", «
- // "short", "narrow", "long" », "short").
- Maybe<UnitDisplay> maybe_unit_display = Intl::GetStringOption<UnitDisplay>(
- isolate, options, "unitDisplay", service, {"short", "narrow", "long"},
- {UnitDisplay::SHORT, UnitDisplay::NARROW, UnitDisplay::LONG},
- UnitDisplay::SHORT);
- MAYBE_RETURN(maybe_unit_display, MaybeHandle<JSNumberFormat>());
- UnitDisplay unit_display = maybe_unit_display.FromJust();
-
- // If style is "unit", then
- if (style == JSNumberFormat::Style::UNIT) {
- // If unit is undefined, throw a TypeError exception.
- if (unit == "") {
- THROW_NEW_ERROR(
- isolate,
- NewTypeError(MessageTemplate::kInvalidUnit,
- factory->NewStringFromStaticChars("Intl.NumberFormat"),
- factory->empty_string()),
- JSNumberFormat);
- }
-
- // If the result of IsWellFormedUnitIdentifier(unit) is false, throw a
- // RangeError exception.
- Maybe<std::pair<icu::MeasureUnit, icu::MeasureUnit>> maybe_wellformed =
- IsWellFormedUnitIdentifier(isolate, unit);
- if (maybe_wellformed.IsNothing()) {
- THROW_NEW_ERROR(
- isolate,
- NewRangeError(
- MessageTemplate::kInvalidUnit,
- factory->NewStringFromStaticChars("Intl.NumberFormat"),
- factory->NewStringFromAsciiChecked(unit.c_str())),
- JSNumberFormat);
- }
- std::pair<icu::MeasureUnit, icu::MeasureUnit> unit_pair =
- maybe_wellformed.FromJust();
+ // If the result of IsWellFormedUnitIdentifier(unit) is false, throw a
+ // RangeError exception.
+ Maybe<std::pair<icu::MeasureUnit, icu::MeasureUnit>> maybe_wellformed =
+ IsWellFormedUnitIdentifier(isolate, unit);
+ if (maybe_wellformed.IsNothing()) {
+ THROW_NEW_ERROR(
+ isolate,
+ NewRangeError(MessageTemplate::kInvalidUnit,
+ factory->NewStringFromAsciiChecked(service),
+ factory->NewStringFromAsciiChecked(unit.c_str())),
+ JSNumberFormat);
+ }
+ std::pair<icu::MeasureUnit, icu::MeasureUnit> unit_pair =
+ maybe_wellformed.FromJust();
- // Set intlObj.[[Unit]] to unit.
- if (unit_pair.first != icu::NoUnit::base()) {
- icu_number_formatter = icu_number_formatter.unit(unit_pair.first);
- }
- if (unit_pair.second != icu::NoUnit::base()) {
- icu_number_formatter = icu_number_formatter.perUnit(unit_pair.second);
- }
+ // Set intlObj.[[Unit]] to unit.
+ if (unit_pair.first != icu::NoUnit::base()) {
+ icu_number_formatter = icu_number_formatter.unit(unit_pair.first);
+ }
+ if (unit_pair.second != icu::NoUnit::base()) {
+ icu_number_formatter = icu_number_formatter.perUnit(unit_pair.second);
+ }
- // The default unitWidth is SHORT in ICU and that mapped from
- // Symbol so we can skip the setting for optimization.
- if (unit_display != UnitDisplay::SHORT) {
- icu_number_formatter =
- icu_number_formatter.unitWidth(ToUNumberUnitWidth(unit_display));
- }
+ // The default unitWidth in ICU is SHORT, which is what UnitDisplay::SHORT
+ // maps to, so we can skip setting it as an optimization.
+ if (unit_display != UnitDisplay::SHORT) {
+ icu_number_formatter =
+ icu_number_formatter.unitWidth(ToUNumberUnitWidth(unit_display));
}
}
@@ -1125,18 +1080,16 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::New(Isolate* isolate,
}
Notation notation = Notation::STANDARD;
- if (FLAG_harmony_intl_numberformat_unified) {
- // 25. Let notation be ? GetOption(options, "notation", "string", «
- // "standard", "scientific", "engineering", "compact" », "standard").
- Maybe<Notation> maybe_notation = Intl::GetStringOption<Notation>(
- isolate, options, "notation", service,
- {"standard", "scientific", "engineering", "compact"},
- {Notation::STANDARD, Notation::SCIENTIFIC, Notation::ENGINEERING,
- Notation::COMPACT},
- Notation::STANDARD);
- MAYBE_RETURN(maybe_notation, MaybeHandle<JSNumberFormat>());
- notation = maybe_notation.FromJust();
- }
+ // 25. Let notation be ? GetOption(options, "notation", "string", «
+ // "standard", "scientific", "engineering", "compact" », "standard").
+ Maybe<Notation> maybe_notation = Intl::GetStringOption<Notation>(
+ isolate, options, "notation", service,
+ {"standard", "scientific", "engineering", "compact"},
+ {Notation::STANDARD, Notation::SCIENTIFIC, Notation::ENGINEERING,
+ Notation::COMPACT},
+ Notation::STANDARD);
+ MAYBE_RETURN(maybe_notation, MaybeHandle<JSNumberFormat>());
+ notation = maybe_notation.FromJust();
// 27. Perform ? SetNumberFormatDigitOptions(numberFormat, options,
// mnfdDefault, mxfdDefault).
@@ -1149,24 +1102,21 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::New(Isolate* isolate,
icu_number_formatter = JSNumberFormat::SetDigitOptionsToFormatter(
icu_number_formatter, digit_options);
- if (FLAG_harmony_intl_numberformat_unified) {
- // 28. Let compactDisplay be ? GetOption(options, "compactDisplay",
- // "string", « "short", "long" », "short").
- Maybe<CompactDisplay> maybe_compact_display =
- Intl::GetStringOption<CompactDisplay>(
- isolate, options, "compactDisplay", service, {"short", "long"},
- {CompactDisplay::SHORT, CompactDisplay::LONG},
- CompactDisplay::SHORT);
- MAYBE_RETURN(maybe_compact_display, MaybeHandle<JSNumberFormat>());
- CompactDisplay compact_display = maybe_compact_display.FromJust();
-
- // 26. Set numberFormat.[[Notation]] to notation.
- // The default notation in ICU is Simple, which mapped from STANDARD
- // so we can skip setting it.
- if (notation != Notation::STANDARD) {
- icu_number_formatter = icu_number_formatter.notation(
- ToICUNotation(notation, compact_display));
- }
+ // 28. Let compactDisplay be ? GetOption(options, "compactDisplay",
+ // "string", « "short", "long" », "short").
+ Maybe<CompactDisplay> maybe_compact_display =
+ Intl::GetStringOption<CompactDisplay>(
+ isolate, options, "compactDisplay", service, {"short", "long"},
+ {CompactDisplay::SHORT, CompactDisplay::LONG}, CompactDisplay::SHORT);
+ MAYBE_RETURN(maybe_compact_display, MaybeHandle<JSNumberFormat>());
+ CompactDisplay compact_display = maybe_compact_display.FromJust();
+
+ // 26. Set numberFormat.[[Notation]] to notation.
+ // The default notation in ICU is Simple, which is mapped from STANDARD,
+ // so we can skip setting it.
+ if (notation != Notation::STANDARD) {
+ icu_number_formatter =
+ icu_number_formatter.notation(ToICUNotation(notation, compact_display));
}
// 30. Let useGrouping be ? GetOption(options, "useGrouping", "boolean",
// undefined, true).
@@ -1180,27 +1130,25 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::New(Isolate* isolate,
UNumberGroupingStrategy::UNUM_GROUPING_OFF);
}
- if (FLAG_harmony_intl_numberformat_unified) {
- // 32. Let signDisplay be ? GetOption(options, "signDisplay", "string", «
- // "auto", "never", "always", "exceptZero" », "auto").
- Maybe<SignDisplay> maybe_sign_display = Intl::GetStringOption<SignDisplay>(
- isolate, options, "signDisplay", service,
- {"auto", "never", "always", "exceptZero"},
- {SignDisplay::AUTO, SignDisplay::NEVER, SignDisplay::ALWAYS,
- SignDisplay::EXCEPT_ZERO},
- SignDisplay::AUTO);
- MAYBE_RETURN(maybe_sign_display, MaybeHandle<JSNumberFormat>());
- SignDisplay sign_display = maybe_sign_display.FromJust();
-
- // 33. Set numberFormat.[[SignDisplay]] to signDisplay.
- // The default sign in ICU is UNUM_SIGN_AUTO which is mapped from
- // SignDisplay::AUTO and CurrencySign::STANDARD so we can skip setting
- // under that values for optimization.
- if (sign_display != SignDisplay::AUTO ||
- currency_sign != CurrencySign::STANDARD) {
- icu_number_formatter = icu_number_formatter.sign(
- ToUNumberSignDisplay(sign_display, currency_sign));
- }
+ // 32. Let signDisplay be ? GetOption(options, "signDisplay", "string", «
+ // "auto", "never", "always", "exceptZero" », "auto").
+ Maybe<SignDisplay> maybe_sign_display = Intl::GetStringOption<SignDisplay>(
+ isolate, options, "signDisplay", service,
+ {"auto", "never", "always", "exceptZero"},
+ {SignDisplay::AUTO, SignDisplay::NEVER, SignDisplay::ALWAYS,
+ SignDisplay::EXCEPT_ZERO},
+ SignDisplay::AUTO);
+ MAYBE_RETURN(maybe_sign_display, MaybeHandle<JSNumberFormat>());
+ SignDisplay sign_display = maybe_sign_display.FromJust();
+
+ // 33. Set numberFormat.[[SignDisplay]] to signDisplay.
+ // The default sign in ICU is UNUM_SIGN_AUTO, which is mapped from
+ // SignDisplay::AUTO with CurrencySign::STANDARD, so we can skip setting it
+ // for those values as an optimization.
+ if (sign_display != SignDisplay::AUTO ||
+ currency_sign != CurrencySign::STANDARD) {
+ icu_number_formatter = icu_number_formatter.sign(
+ ToUNumberSignDisplay(sign_display, currency_sign));
}
// 25. Let dataLocaleData be localeData.[[<dataLocale>]].
@@ -1231,24 +1179,6 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::New(Isolate* isolate,
number_format->set_style(style);
number_format->set_locale(*locale_str);
- if (digit_options.minimum_significant_digits > 0) {
- // The current ECMA 402 spec mandates recording (Min|Max)imumFractionDigits
- // unconditionally, while the unified number proposal eventually will only
- // record either (Min|Max)imumFractionDigits or
- // (Min|Max)imumSignificantDigits. Since LocalizedNumberFormatter can only
- // remember one set, and during 2019-1-17 ECMA402 meeting the committee
- // decided not to take a PR to address that prior to the unified number
- // proposal, we have to add these two 5-bit ints into flags to remember the
- // (Min|Max)imumFractionDigits while (Min|Max)imumSignificantDigits is
- // present.
- // TODO(ftang) remove the following two lines once we ship
- // int-number-format-unified
- number_format->set_minimum_fraction_digits(
- digit_options.minimum_fraction_digits);
- number_format->set_maximum_fraction_digits(
- digit_options.maximum_fraction_digits);
- }
-
number_format->set_icu_number_formatter(*managed_number_formatter);
number_format->set_bound_format(*factory->undefined_value());
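
With the unified-numberformat behavior now unconditional, resolvedOptions reports either significant-digit or fraction-digit settings, reflecting that the underlying ICU formatter keeps only one precision. A hedged sketch of that exclusivity against the ICU C++ API, assuming an ICU 64+ build:

    #include <unicode/numberformatter.h>
    #include <unicode/ustream.h>
    #include <iostream>

    int main() {
      UErrorCode status = U_ZERO_ERROR;
      // Once significant digits are set, they replace any fraction-digit
      // precision: the formatter holds a single Precision, which is why the
      // extra fraction-digit flags removed above are no longer needed.
      icu::number::LocalizedNumberFormatter f =
          icu::number::NumberFormatter::withLocale(icu::Locale::getUS())
              .precision(icu::number::Precision::minMaxSignificantDigits(1, 3));
      std::cout << f.formatDouble(1234.567, status).toString(status) << "\n";
      // Prints "1,230": three significant digits win, and no separate
      // fraction-digit setting is tracked.
    }
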
diff --git a/deps/v8/src/objects/js-number-format.h b/deps/v8/src/objects/js-number-format.h
index 2979ab10f4..a5196f8d51 100644
--- a/deps/v8/src/objects/js-number-format.h
+++ b/deps/v8/src/objects/js-number-format.h
@@ -36,7 +36,7 @@ class JSNumberFormat : public JSObject {
// ecma402/#sec-initializenumberformat
V8_WARN_UNUSED_RESULT static MaybeHandle<JSNumberFormat> New(
Isolate* isolate, Handle<Map> map, Handle<Object> locales,
- Handle<Object> options);
+ Handle<Object> options, const char* service);
// ecma402/#sec-unwrapnumberformat
V8_WARN_UNUSED_RESULT static MaybeHandle<JSNumberFormat> UnwrapNumberFormat(
@@ -72,26 +72,6 @@ class JSNumberFormat : public JSObject {
DECL_PRINTER(JSNumberFormat)
DECL_VERIFIER(JSNumberFormat)
- // Current ECMA 402 spec mandates to record (Min|Max)imumFractionDigits
- // unconditionally while the unified number proposal eventually will only
- // record either (Min|Max)imumFractionDigits or (Min|Max)imumSignaficantDigits
- // Since LocalizedNumberFormatter can only remember one set, and during
- // 2019-1-17 ECMA402 meeting that the committee decide not to take a PR to
- // address that prior to the unified number proposal, we have to add these two
- // 5 bits int into flags to remember the (Min|Max)imumFractionDigits while
- // (Min|Max)imumSignaficantDigits is present.
- // TODO(ftang) remove the following once we ship int-number-format-unified
- // * Four inline functions: (set_)?(min|max)imum_fraction_digits
- // * kFlagsOffset
- // * #define FLAGS_BIT_FIELDS
- // * DECL_INT_ACCESSORS(flags)
-
- inline int minimum_fraction_digits() const;
- inline void set_minimum_fraction_digits(int digits);
-
- inline int maximum_fraction_digits() const;
- inline void set_maximum_fraction_digits(int digits);
-
// [[Style]] is one of the values "decimal", "percent", "currency",
// or "unit" identifying the style of the number format.
// Note: "unit" is added in proposal-unified-intl-numberformat
@@ -102,19 +82,15 @@ class JSNumberFormat : public JSObject {
// Layout description.
DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
- TORQUE_GENERATED_JSNUMBER_FORMAT_FIELDS)
+ TORQUE_GENERATED_JS_NUMBER_FORMAT_FIELDS)
// Bit positions in |flags|.
#define FLAGS_BIT_FIELDS(V, _) \
- V(MinimumFractionDigitsBits, int, 5, _) \
- V(MaximumFractionDigitsBits, int, 5, _) \
V(StyleBits, Style, 2, _)
DEFINE_BIT_FIELDS(FLAGS_BIT_FIELDS)
#undef FLAGS_BIT_FIELDS
- STATIC_ASSERT(20 <= MinimumFractionDigitsBits::kMax);
- STATIC_ASSERT(20 <= MaximumFractionDigitsBits::kMax);
STATIC_ASSERT(Style::DECIMAL <= StyleBits::kMax);
STATIC_ASSERT(Style::PERCENT <= StyleBits::kMax);
STATIC_ASSERT(Style::CURRENCY <= StyleBits::kMax);
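
The flags word now carries only the 2-bit style. A simplified sketch of the encode/decode pattern that DEFINE_BIT_FIELDS expands to; the real macro also generates the kMax used by the STATIC_ASSERTs above:

    #include <cassert>

    // Simplified analogue of a V8 BitField: StyleBits occupies bits [0, 2).
    enum class Style { DECIMAL = 0, PERCENT = 1, CURRENCY = 2, UNIT = 3 };

    struct StyleBits {
      static constexpr int kShift = 0;
      static constexpr int kSize = 2;
      static constexpr int kMask = ((1 << kSize) - 1) << kShift;
      static constexpr int kMax = (1 << kSize) - 1;
      static int encode(Style s) { return static_cast<int>(s) << kShift; }
      static Style decode(int flags) {
        return static_cast<Style>((flags & kMask) >> kShift);
      }
      static int update(int flags, Style s) {
        return (flags & ~kMask) | encode(s);
      }
    };

    int main() {
      int flags = 0;
      flags = StyleBits::update(flags, Style::UNIT);
      assert(StyleBits::decode(flags) == Style::UNIT);
    }
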
diff --git a/deps/v8/src/objects/js-objects-inl.h b/deps/v8/src/objects/js-objects-inl.h
index f8fe069d3d..a6b9e9ad83 100644
--- a/deps/v8/src/objects/js-objects-inl.h
+++ b/deps/v8/src/objects/js-objects-inl.h
@@ -31,16 +31,19 @@ namespace internal {
OBJECT_CONSTRUCTORS_IMPL(JSReceiver, HeapObject)
TQ_OBJECT_CONSTRUCTORS_IMPL(JSObject)
+TQ_OBJECT_CONSTRUCTORS_IMPL(JSCustomElementsObject)
+TQ_OBJECT_CONSTRUCTORS_IMPL(JSSpecialObject)
TQ_OBJECT_CONSTRUCTORS_IMPL(JSAsyncFromSyncIterator)
+TQ_OBJECT_CONSTRUCTORS_IMPL(JSFunctionOrBoundFunction)
TQ_OBJECT_CONSTRUCTORS_IMPL(JSBoundFunction)
TQ_OBJECT_CONSTRUCTORS_IMPL(JSDate)
-OBJECT_CONSTRUCTORS_IMPL(JSFunction, JSObject)
-OBJECT_CONSTRUCTORS_IMPL(JSGlobalObject, JSObject)
+OBJECT_CONSTRUCTORS_IMPL(JSFunction, JSFunctionOrBoundFunction)
+OBJECT_CONSTRUCTORS_IMPL(JSGlobalObject, JSSpecialObject)
TQ_OBJECT_CONSTRUCTORS_IMPL(JSGlobalProxy)
JSIteratorResult::JSIteratorResult(Address ptr) : JSObject(ptr) {}
OBJECT_CONSTRUCTORS_IMPL(JSMessageObject, JSObject)
TQ_OBJECT_CONSTRUCTORS_IMPL(JSPrimitiveWrapper)
-OBJECT_CONSTRUCTORS_IMPL(JSStringIterator, JSObject)
+TQ_OBJECT_CONSTRUCTORS_IMPL(JSStringIterator)
NEVER_READ_ONLY_SPACE_IMPL(JSReceiver)
@@ -49,7 +52,6 @@ CAST_ACCESSOR(JSGlobalObject)
CAST_ACCESSOR(JSIteratorResult)
CAST_ACCESSOR(JSMessageObject)
CAST_ACCESSOR(JSReceiver)
-CAST_ACCESSOR(JSStringIterator)
MaybeHandle<Object> JSReceiver::GetProperty(Isolate* isolate,
Handle<JSReceiver> receiver,
@@ -375,7 +377,7 @@ void JSObject::FastPropertyAtPut(FieldIndex index, Object value) {
}
}
-void JSObject::WriteToField(int descriptor, PropertyDetails details,
+void JSObject::WriteToField(InternalIndex descriptor, PropertyDetails details,
Object value) {
DCHECK_EQ(kField, details.location());
DCHECK_EQ(kData, details.kind());
@@ -540,7 +542,9 @@ Code JSFunction::code() const {
void JSFunction::set_code(Code value) {
DCHECK(!ObjectInYoungGeneration(value));
RELAXED_WRITE_FIELD(*this, kCodeOffset, value);
+#ifndef V8_DISABLE_WRITE_BARRIERS
MarkingBarrier(*this, RawField(kCodeOffset), value);
+#endif
}
void JSFunction::set_code_no_write_barrier(Code value) {
@@ -1007,8 +1011,7 @@ inline int JSGlobalProxy::SizeWithEmbedderFields(int embedder_field_count) {
ACCESSORS(JSIteratorResult, value, Object, kValueOffset)
ACCESSORS(JSIteratorResult, done, Object, kDoneOffset)
-ACCESSORS(JSStringIterator, string, String, kStringOffset)
-SMI_ACCESSORS(JSStringIterator, index, kNextIndexOffset)
+TQ_SMI_ACCESSORS(JSStringIterator, index)
// If the fast-case backing storage takes up much more memory than a dictionary
// backing storage would, the object should have slow elements.
diff --git a/deps/v8/src/objects/js-objects.cc b/deps/v8/src/objects/js-objects.cc
index 3666f5afbe..ea0917f18f 100644
--- a/deps/v8/src/objects/js-objects.cc
+++ b/deps/v8/src/objects/js-objects.cc
@@ -216,15 +216,19 @@ V8_WARN_UNUSED_RESULT Maybe<bool> FastAssign(
}
Handle<DescriptorArray> descriptors(map->instance_descriptors(), isolate);
- int length = map->NumberOfOwnDescriptors();
bool stable = true;
- for (int i = 0; i < length; i++) {
+ for (InternalIndex i : map->IterateOwnDescriptors()) {
+ HandleScope inner_scope(isolate);
+
Handle<Name> next_key(descriptors->GetKey(i), isolate);
Handle<Object> prop_value;
// Directly decode from the descriptor array if |from| did not change shape.
if (stable) {
+ DCHECK_EQ(from->map(), *map);
+ DCHECK_EQ(*descriptors, map->instance_descriptors());
+
PropertyDetails details = descriptors->GetDetails(i);
if (!details.IsEnumerable()) continue;
if (details.kind() == kData) {
@@ -232,7 +236,8 @@ V8_WARN_UNUSED_RESULT Maybe<bool> FastAssign(
prop_value = handle(descriptors->GetStrongValue(i), isolate);
} else {
Representation representation = details.representation();
- FieldIndex index = FieldIndex::ForDescriptor(*map, i);
+ FieldIndex index = FieldIndex::ForPropertyIndex(
+ *map, details.field_index(), representation);
prop_value = JSObject::FastPropertyAt(from, representation, index);
}
} else {
@@ -240,6 +245,7 @@ V8_WARN_UNUSED_RESULT Maybe<bool> FastAssign(
isolate, prop_value,
JSReceiver::GetProperty(isolate, from, next_key), Nothing<bool>());
stable = from->map() == *map;
+ *descriptors.location() = map->instance_descriptors().ptr();
}
} else {
// If the map did change, do a slower lookup. We are still guaranteed that
@@ -260,7 +266,10 @@ V8_WARN_UNUSED_RESULT Maybe<bool> FastAssign(
Object::SetProperty(&it, prop_value, StoreOrigin::kNamed,
Just(ShouldThrow::kThrowOnError));
if (result.IsNothing()) return result;
- if (stable) stable = from->map() == *map;
+ if (stable) {
+ stable = from->map() == *map;
+ *descriptors.location() = map->instance_descriptors().ptr();
+ }
} else {
if (excluded_properties != nullptr &&
HasExcludedProperty(excluded_properties, next_key)) {
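Both FastAssign fixes follow the same discipline: a per-iteration HandleScope keeps handle growth bounded, and after any call that can run user code the cached map is re-checked and the |descriptors| handle is refreshed in place, presumably because the map's descriptor array can be replaced even when the map pointer itself is unchanged. A minimal standalone model of the stability check (model types only, not V8 code):

#include <cassert>
#include <cstddef>
#include <vector>

// Model types: |Shape| stands in for the map, |ObjectModel| for a JS object.
struct Shape { std::size_t field_count; };
struct ObjectModel {
  const Shape* shape;
  std::vector<int> fields;
};

// Copies fields, staying on the fast path only while the object's shape
// is provably unchanged, like FastAssign above.
std::vector<int> SnapshotFields(ObjectModel& obj,
                                int (*getter)(ObjectModel&, int)) {
  const Shape* shape = obj.shape;     // cached |map|
  bool stable = true;
  std::vector<int> out;
  const std::size_t n = shape->field_count;
  for (std::size_t i = 0; i < n; i++) {
    if (stable) {
      assert(obj.shape == shape);     // mirrors the new DCHECK_EQs
      out.push_back(obj.fields[i]);   // direct read: no user code runs
    } else {
      // Generic path: the getter may run arbitrary code and change the
      // shape, so stability is re-checked after every call.
      out.push_back(getter(obj, static_cast<int>(i)));
      stable = obj.shape == shape;
    }
  }
  return out;
}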
@@ -1094,8 +1103,7 @@ Maybe<bool> SetPropertyWithInterceptorInternal(
Maybe<bool> DefinePropertyWithInterceptorInternal(
LookupIterator* it, Handle<InterceptorInfo> interceptor,
- Maybe<ShouldThrow> should_throw,
- PropertyDescriptor& desc) { // NOLINT(runtime/references)
+ Maybe<ShouldThrow> should_throw, PropertyDescriptor* desc) {
Isolate* isolate = it->isolate();
// Make sure that the top context does not change when doing callbacks or
// interceptor calls.
@@ -1116,23 +1124,23 @@ Maybe<bool> DefinePropertyWithInterceptorInternal(
std::unique_ptr<v8::PropertyDescriptor> descriptor(
new v8::PropertyDescriptor());
- if (PropertyDescriptor::IsAccessorDescriptor(&desc)) {
+ if (PropertyDescriptor::IsAccessorDescriptor(desc)) {
descriptor.reset(new v8::PropertyDescriptor(
- v8::Utils::ToLocal(desc.get()), v8::Utils::ToLocal(desc.set())));
- } else if (PropertyDescriptor::IsDataDescriptor(&desc)) {
- if (desc.has_writable()) {
+ v8::Utils::ToLocal(desc->get()), v8::Utils::ToLocal(desc->set())));
+ } else if (PropertyDescriptor::IsDataDescriptor(desc)) {
+ if (desc->has_writable()) {
descriptor.reset(new v8::PropertyDescriptor(
- v8::Utils::ToLocal(desc.value()), desc.writable()));
+ v8::Utils::ToLocal(desc->value()), desc->writable()));
} else {
descriptor.reset(
- new v8::PropertyDescriptor(v8::Utils::ToLocal(desc.value())));
+ new v8::PropertyDescriptor(v8::Utils::ToLocal(desc->value())));
}
}
- if (desc.has_enumerable()) {
- descriptor->set_enumerable(desc.enumerable());
+ if (desc->has_enumerable()) {
+ descriptor->set_enumerable(desc->enumerable());
}
- if (desc.has_configurable()) {
- descriptor->set_configurable(desc.configurable());
+ if (desc->has_configurable()) {
+ descriptor->set_configurable(desc->configurable());
}
if (it->IsElement()) {
@@ -1166,7 +1174,7 @@ Maybe<bool> JSReceiver::OrdinaryDefineOwnProperty(
if (it->state() == LookupIterator::INTERCEPTOR) {
if (it->HolderIsReceiverOrHiddenPrototype()) {
Maybe<bool> result = DefinePropertyWithInterceptorInternal(
- it, it->GetInterceptor(), should_throw, *desc);
+ it, it->GetInterceptor(), should_throw, desc);
if (result.IsNothing() || result.FromJust()) {
return result;
}
@@ -1834,8 +1842,8 @@ V8_WARN_UNUSED_RESULT Maybe<bool> FastGetOwnValuesOrEntries(
if (!map->OnlyHasSimpleProperties()) return Just(false);
Handle<JSObject> object(JSObject::cast(*receiver), isolate);
-
Handle<DescriptorArray> descriptors(map->instance_descriptors(), isolate);
+
int number_of_own_descriptors = map->NumberOfOwnDescriptors();
int number_of_own_elements =
object->GetElementsAccessor()->GetCapacity(*object, object->elements());
@@ -1857,15 +1865,25 @@ V8_WARN_UNUSED_RESULT Maybe<bool> FastGetOwnValuesOrEntries(
Nothing<bool>());
}
- bool stable = object->map() == *map;
+ // We may have already lost stability if CollectValuesOrEntries had
+ // side effects.
+ bool stable = *map == object->map();
+ if (stable) {
+ *descriptors.location() = map->instance_descriptors().ptr();
+ }
+
+ for (InternalIndex index : InternalIndex::Range(number_of_own_descriptors)) {
+ HandleScope inner_scope(isolate);
- for (int index = 0; index < number_of_own_descriptors; index++) {
Handle<Name> next_key(descriptors->GetKey(index), isolate);
if (!next_key->IsString()) continue;
Handle<Object> prop_value;
// Directly decode from the descriptor array if |object| did not change shape.
if (stable) {
+ DCHECK_EQ(object->map(), *map);
+ DCHECK_EQ(*descriptors, map->instance_descriptors());
+
PropertyDetails details = descriptors->GetDetails(index);
if (!details.IsEnumerable()) continue;
if (details.kind() == kData) {
@@ -1873,7 +1891,8 @@ V8_WARN_UNUSED_RESULT Maybe<bool> FastGetOwnValuesOrEntries(
prop_value = handle(descriptors->GetStrongValue(index), isolate);
} else {
Representation representation = details.representation();
- FieldIndex field_index = FieldIndex::ForDescriptor(*map, index);
+ FieldIndex field_index = FieldIndex::ForPropertyIndex(
+ *map, details.field_index(), representation);
prop_value =
JSObject::FastPropertyAt(object, representation, field_index);
}
@@ -1883,6 +1902,7 @@ V8_WARN_UNUSED_RESULT Maybe<bool> FastGetOwnValuesOrEntries(
JSReceiver::GetProperty(isolate, object, next_key),
Nothing<bool>());
stable = object->map() == *map;
+ *descriptors.location() = map->instance_descriptors().ptr();
}
} else {
// If the map did change, do a slower lookup. We are still guaranteed that
@@ -2121,15 +2141,15 @@ int JSObject::GetHeaderSize(InstanceType type,
return JSWeakSet::kSize;
case JS_PROMISE_TYPE:
return JSPromise::kSize;
- case JS_REGEXP_TYPE:
+ case JS_REG_EXP_TYPE:
return JSRegExp::kSize;
- case JS_REGEXP_STRING_ITERATOR_TYPE:
+ case JS_REG_EXP_STRING_ITERATOR_TYPE:
return JSRegExpStringIterator::kSize;
case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
return JSObject::kHeaderSize;
case JS_MESSAGE_OBJECT_TYPE:
return JSMessageObject::kSize;
- case JS_ARGUMENTS_TYPE:
+ case JS_ARGUMENTS_OBJECT_TYPE:
return JSObject::kHeaderSize;
case JS_ERROR_TYPE:
return JSObject::kHeaderSize;
@@ -2138,38 +2158,38 @@ int JSObject::GetHeaderSize(InstanceType type,
case JS_MODULE_NAMESPACE_TYPE:
return JSModuleNamespace::kHeaderSize;
#ifdef V8_INTL_SUPPORT
- case JS_INTL_V8_BREAK_ITERATOR_TYPE:
+ case JS_V8_BREAK_ITERATOR_TYPE:
return JSV8BreakIterator::kSize;
- case JS_INTL_COLLATOR_TYPE:
+ case JS_COLLATOR_TYPE:
return JSCollator::kSize;
- case JS_INTL_DATE_TIME_FORMAT_TYPE:
+ case JS_DATE_TIME_FORMAT_TYPE:
return JSDateTimeFormat::kSize;
- case JS_INTL_LIST_FORMAT_TYPE:
+ case JS_LIST_FORMAT_TYPE:
return JSListFormat::kSize;
- case JS_INTL_LOCALE_TYPE:
+ case JS_LOCALE_TYPE:
return JSLocale::kSize;
- case JS_INTL_NUMBER_FORMAT_TYPE:
+ case JS_NUMBER_FORMAT_TYPE:
return JSNumberFormat::kSize;
- case JS_INTL_PLURAL_RULES_TYPE:
+ case JS_PLURAL_RULES_TYPE:
return JSPluralRules::kSize;
- case JS_INTL_RELATIVE_TIME_FORMAT_TYPE:
+ case JS_RELATIVE_TIME_FORMAT_TYPE:
return JSRelativeTimeFormat::kSize;
- case JS_INTL_SEGMENT_ITERATOR_TYPE:
+ case JS_SEGMENT_ITERATOR_TYPE:
return JSSegmentIterator::kSize;
- case JS_INTL_SEGMENTER_TYPE:
+ case JS_SEGMENTER_TYPE:
return JSSegmenter::kSize;
#endif // V8_INTL_SUPPORT
- case WASM_GLOBAL_TYPE:
+ case WASM_GLOBAL_OBJECT_TYPE:
return WasmGlobalObject::kSize;
- case WASM_INSTANCE_TYPE:
+ case WASM_INSTANCE_OBJECT_TYPE:
return WasmInstanceObject::kSize;
- case WASM_MEMORY_TYPE:
+ case WASM_MEMORY_OBJECT_TYPE:
return WasmMemoryObject::kSize;
- case WASM_MODULE_TYPE:
+ case WASM_MODULE_OBJECT_TYPE:
return WasmModuleObject::kSize;
- case WASM_TABLE_TYPE:
+ case WASM_TABLE_OBJECT_TYPE:
return WasmTableObject::kSize;
- case WASM_EXCEPTION_TYPE:
+ case WASM_EXCEPTION_OBJECT_TYPE:
return WasmExceptionObject::kSize;
default:
UNREACHABLE();
@@ -2377,7 +2397,7 @@ void JSObject::JSObjectShortPrint(StringStream* accumulator) {
accumulator->Add("<JSWeakSet>");
break;
}
- case JS_REGEXP_TYPE: {
+ case JS_REG_EXP_TYPE: {
accumulator->Add("<JSRegExp");
JSRegExp regexp = JSRegExp::cast(*this);
if (regexp.source().IsString()) {
@@ -2506,7 +2526,7 @@ void JSObject::PrintInstanceMigration(FILE* file, Map original_map,
PrintF(file, "[migrating]");
DescriptorArray o = original_map.instance_descriptors();
DescriptorArray n = new_map.instance_descriptors();
- for (int i = 0; i < original_map.NumberOfOwnDescriptors(); i++) {
+ for (InternalIndex i : original_map.IterateOwnDescriptors()) {
Representation o_r = o.GetDetails(i).representation();
Representation n_r = n.GetDetails(i).representation();
if (!o_r.Equals(n_r)) {
@@ -2703,7 +2723,7 @@ void MigrateFastToFast(Isolate* isolate, Handle<JSObject> object,
// number of properties.
DCHECK(old_nof <= new_nof);
- for (int i = 0; i < old_nof; i++) {
+ for (InternalIndex i : InternalIndex::Range(old_nof)) {
PropertyDetails details = new_descriptors->GetDetails(i);
if (details.location() != kField) continue;
DCHECK_EQ(kData, details.kind());
@@ -2753,7 +2773,7 @@ void MigrateFastToFast(Isolate* isolate, Handle<JSObject> object,
}
}
- for (int i = old_nof; i < new_nof; i++) {
+ for (InternalIndex i : InternalIndex::Range(old_nof, new_nof)) {
PropertyDetails details = new_descriptors->GetDetails(i);
if (details.location() != kField) continue;
DCHECK_EQ(kData, details.kind());
@@ -2776,9 +2796,10 @@ void MigrateFastToFast(Isolate* isolate, Handle<JSObject> object,
Heap* heap = isolate->heap();
- int old_instance_size = old_map->instance_size();
-
- heap->NotifyObjectLayoutChange(*object, old_instance_size, no_allocation);
+ // Invalidate slots manually later in case of a tagged-to-untagged
+ // translation. In all other cases the recorded slot remains dereferenceable.
+ heap->NotifyObjectLayoutChange(*object, no_allocation,
+ InvalidateRecordedSlots::kNo);
// Copy (real) inobject properties. If necessary, stop at number_of_fields to
// avoid overwriting |one_pointer_filler_map|.
@@ -2795,7 +2816,8 @@ void MigrateFastToFast(Isolate* isolate, Handle<JSObject> object,
index, HeapNumber::cast(value).value_as_bits());
if (i < old_number_of_fields && !old_map->IsUnboxedDoubleField(index)) {
// Transition from tagged to untagged slot.
- heap->ClearRecordedSlot(*object, object->RawField(index.offset()));
+ MemoryChunk* chunk = MemoryChunk::FromHeapObject(*object);
+ chunk->InvalidateRecordedSlots(*object);
} else {
#ifdef DEBUG
heap->VerifyClearedSlot(*object, object->RawField(index.offset()));
@@ -2809,6 +2831,7 @@ void MigrateFastToFast(Isolate* isolate, Handle<JSObject> object,
object->SetProperties(*array);
// Create filler object past the new instance size.
+ int old_instance_size = old_map->instance_size();
int new_instance_size = new_map->instance_size();
int instance_size_delta = old_instance_size - new_instance_size;
DCHECK_GE(instance_size_delta, 0);
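The NotifyObjectLayoutChange change defers slot invalidation to the one case that needs it: when a tagged slot is reused for raw double bits, any recorded slot for that address must be dropped before the store, or a later GC could dereference the raw bits as a pointer. A standalone model of that remembered-set invalidation (hypothetical types, not the real Heap/MemoryChunk API):

#include <cstdint>
#include <unordered_set>

struct RememberedSetModel {
  std::unordered_set<uintptr_t> slots;

  void Record(uintptr_t slot) { slots.insert(slot); }

  // Like MemoryChunk::InvalidateRecordedSlots(object): forget every slot
  // in [start, end) so the GC never dereferences what is now raw data.
  void InvalidateRange(uintptr_t start, uintptr_t end) {
    for (auto it = slots.begin(); it != slots.end();) {
      if (*it >= start && *it < end) {
        it = slots.erase(it);
      } else {
        ++it;
      }
    }
  }
};

struct BoxModel {
  uintptr_t base() const { return reinterpret_cast<uintptr_t>(this); }
  uint64_t payload = 0;  // meaning flips: tagged pointer -> double bits
};

void StoreUntagged(BoxModel& box, uint64_t double_bits,
                   RememberedSetModel& rs) {
  // Invalidate before the store; afterwards the bits are not a pointer.
  rs.InvalidateRange(box.base(), box.base() + sizeof(BoxModel));
  box.payload = double_bits;
}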
@@ -2851,7 +2874,7 @@ void MigrateFastToSlow(Isolate* isolate, Handle<JSObject> object,
NameDictionary::New(isolate, property_count);
Handle<DescriptorArray> descs(map->instance_descriptors(isolate), isolate);
- for (int i = 0; i < real_size; i++) {
+ for (InternalIndex i : InternalIndex::Range(real_size)) {
PropertyDetails details = descs->GetDetails(i);
Handle<Name> key(descs->GetKey(isolate, i), isolate);
Handle<Object> value;
@@ -2891,10 +2914,15 @@ void MigrateFastToSlow(Isolate* isolate, Handle<JSObject> object,
DisallowHeapAllocation no_allocation;
Heap* heap = isolate->heap();
- int old_instance_size = map->instance_size();
- heap->NotifyObjectLayoutChange(*object, old_instance_size, no_allocation);
+
+ // Invalidate slots manually later in case the new map has in-object
+ // properties. If it has none, it is not possible to store an untagged
+ // value in a recorded slot.
+ heap->NotifyObjectLayoutChange(*object, no_allocation,
+ InvalidateRecordedSlots::kNo);
// Resize the object in the heap if necessary.
+ int old_instance_size = map->instance_size();
int new_instance_size = new_map->instance_size();
int instance_size_delta = old_instance_size - new_instance_size;
DCHECK_GE(instance_size_delta, 0);
@@ -2914,10 +2942,8 @@ void MigrateFastToSlow(Isolate* isolate, Handle<JSObject> object,
// garbage.
int inobject_properties = new_map->GetInObjectProperties();
if (inobject_properties) {
- Heap* heap = isolate->heap();
- heap->ClearRecordedSlotRange(
- object->address() + map->GetInObjectPropertyOffset(0),
- object->address() + new_instance_size);
+ MemoryChunk* chunk = MemoryChunk::FromHeapObject(*object);
+ chunk->InvalidateRecordedSlots(*object);
for (int i = 0; i < inobject_properties; i++) {
FieldIndex index = FieldIndex::ForPropertyIndex(*new_map, i);
@@ -3047,7 +3073,7 @@ void JSObject::AllocateStorageForMap(Handle<JSObject> object, Handle<Map> map) {
Handle<PropertyArray> array =
isolate->factory()->NewPropertyArray(external);
- for (int i = 0; i < map->NumberOfOwnDescriptors(); i++) {
+ for (InternalIndex i : map->IterateOwnDescriptors()) {
PropertyDetails details = descriptors->GetDetails(i);
Representation representation = details.representation();
if (!representation.IsDouble()) continue;
@@ -3344,8 +3370,8 @@ void JSObject::MigrateSlowToFast(Handle<JSObject> object,
}
// Allocate the instance descriptor.
- Handle<DescriptorArray> descriptors = DescriptorArray::Allocate(
- isolate, instance_descriptor_length, 0, AllocationType::kOld);
+ Handle<DescriptorArray> descriptors =
+ DescriptorArray::Allocate(isolate, instance_descriptor_length, 0);
int number_of_allocated_fields =
number_of_fields + unused_property_fields - inobject_props;
@@ -3410,7 +3436,7 @@ void JSObject::MigrateSlowToFast(Handle<JSObject> object,
}
current_offset += details.field_width_in_words();
}
- descriptors->Set(i, &d);
+ descriptors->Set(InternalIndex(i), &d);
}
DCHECK(current_offset == number_of_fields);
@@ -3441,6 +3467,8 @@ void JSObject::MigrateSlowToFast(Handle<JSObject> object,
}
void JSObject::RequireSlowElements(NumberDictionary dictionary) {
+ DCHECK_NE(dictionary,
+ ReadOnlyRoots(GetIsolate()).empty_slow_element_dictionary());
if (dictionary.requires_slow_elements()) return;
dictionary.set_requires_slow_elements();
if (map().is_prototype_map()) {
@@ -3603,8 +3631,7 @@ bool TestFastPropertiesIntegrityLevel(Map map, PropertyAttributes level) {
DCHECK(!map.is_dictionary_map());
DescriptorArray descriptors = map.instance_descriptors();
- int number_of_own_descriptors = map.NumberOfOwnDescriptors();
- for (int i = 0; i < number_of_own_descriptors; i++) {
+ for (InternalIndex i : map.IterateOwnDescriptors()) {
if (descriptors.GetKey(i).IsPrivate()) continue;
PropertyDetails details = descriptors.GetDetails(i);
if (details.IsConfigurable()) return false;
@@ -3709,7 +3736,9 @@ Maybe<bool> JSObject::PreventExtensions(Handle<JSObject> object,
object->HasSlowArgumentsElements());
// Make sure that we never go back to fast case.
- object->RequireSlowElements(*dictionary);
+ if (*dictionary != ReadOnlyRoots(isolate).empty_slow_element_dictionary()) {
+ object->RequireSlowElements(*dictionary);
+ }
}
// Do a map transition, other objects with this map may still
@@ -4136,10 +4165,9 @@ MaybeHandle<Object> JSObject::SetAccessor(Handle<JSObject> object,
Object JSObject::SlowReverseLookup(Object value) {
if (HasFastProperties()) {
- int number_of_own_descriptors = map().NumberOfOwnDescriptors();
DescriptorArray descs = map().instance_descriptors();
bool value_is_number = value.IsNumber();
- for (int i = 0; i < number_of_own_descriptors; i++) {
+ for (InternalIndex i : map().IterateOwnDescriptors()) {
PropertyDetails details = descs.GetDetails(i);
if (details.location() == kField) {
DCHECK_EQ(kData, details.kind());
@@ -5187,16 +5215,16 @@ bool CanSubclassHaveInobjectProperties(InstanceType instance_type) {
case JS_FUNCTION_TYPE:
case JS_GENERATOR_OBJECT_TYPE:
#ifdef V8_INTL_SUPPORT
- case JS_INTL_COLLATOR_TYPE:
- case JS_INTL_DATE_TIME_FORMAT_TYPE:
- case JS_INTL_LIST_FORMAT_TYPE:
- case JS_INTL_LOCALE_TYPE:
- case JS_INTL_NUMBER_FORMAT_TYPE:
- case JS_INTL_PLURAL_RULES_TYPE:
- case JS_INTL_RELATIVE_TIME_FORMAT_TYPE:
- case JS_INTL_SEGMENT_ITERATOR_TYPE:
- case JS_INTL_SEGMENTER_TYPE:
- case JS_INTL_V8_BREAK_ITERATOR_TYPE:
+ case JS_COLLATOR_TYPE:
+ case JS_DATE_TIME_FORMAT_TYPE:
+ case JS_LIST_FORMAT_TYPE:
+ case JS_LOCALE_TYPE:
+ case JS_NUMBER_FORMAT_TYPE:
+ case JS_PLURAL_RULES_TYPE:
+ case JS_RELATIVE_TIME_FORMAT_TYPE:
+ case JS_SEGMENT_ITERATOR_TYPE:
+ case JS_SEGMENTER_TYPE:
+ case JS_V8_BREAK_ITERATOR_TYPE:
#endif
case JS_ASYNC_FUNCTION_OBJECT_TYPE:
case JS_ASYNC_GENERATOR_OBJECT_TYPE:
@@ -5205,9 +5233,9 @@ bool CanSubclassHaveInobjectProperties(InstanceType instance_type) {
case JS_OBJECT_TYPE:
case JS_ERROR_TYPE:
case JS_FINALIZATION_GROUP_TYPE:
- case JS_ARGUMENTS_TYPE:
+ case JS_ARGUMENTS_OBJECT_TYPE:
case JS_PROMISE_TYPE:
- case JS_REGEXP_TYPE:
+ case JS_REG_EXP_TYPE:
case JS_SET_TYPE:
case JS_SPECIAL_API_OBJECT_TYPE:
case JS_TYPED_ARRAY_TYPE:
@@ -5215,11 +5243,11 @@ bool CanSubclassHaveInobjectProperties(InstanceType instance_type) {
case JS_WEAK_MAP_TYPE:
case JS_WEAK_REF_TYPE:
case JS_WEAK_SET_TYPE:
- case WASM_GLOBAL_TYPE:
- case WASM_INSTANCE_TYPE:
- case WASM_MEMORY_TYPE:
- case WASM_MODULE_TYPE:
- case WASM_TABLE_TYPE:
+ case WASM_GLOBAL_OBJECT_TYPE:
+ case WASM_INSTANCE_OBJECT_TYPE:
+ case WASM_MEMORY_OBJECT_TYPE:
+ case WASM_MODULE_OBJECT_TYPE:
+ case WASM_TABLE_OBJECT_TYPE:
return true;
case BIGINT_TYPE:
diff --git a/deps/v8/src/objects/js-objects.h b/deps/v8/src/objects/js-objects.h
index a9510642f1..f38cbe16e6 100644
--- a/deps/v8/src/objects/js-objects.h
+++ b/deps/v8/src/objects/js-objects.h
@@ -6,6 +6,8 @@
#define V8_OBJECTS_JS_OBJECTS_H_
#include "src/objects/embedder-data-slot.h"
+// TODO(jkummerow): Consider forward-declaring instead.
+#include "src/objects/internal-index.h"
#include "src/objects/objects.h"
#include "src/objects/property-array.h"
#include "torque-generated/class-definitions-tq.h"
@@ -264,7 +266,7 @@ class JSReceiver : public HeapObject {
static const int kHashMask = PropertyArray::HashField::kMask;
DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
- TORQUE_GENERATED_JSRECEIVER_FIELDS)
+ TORQUE_GENERATED_JS_RECEIVER_FIELDS)
bool HasProxyInPrototype(Isolate* isolate);
V8_WARN_UNUSED_RESULT static MaybeHandle<FixedArray> GetPrivateEntries(
@@ -631,7 +633,7 @@ class JSObject : public TorqueGeneratedJSObject<JSObject, JSReceiver> {
FieldIndex index, Object value,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
inline void RawFastDoublePropertyAsBitsAtPut(FieldIndex index, uint64_t bits);
- inline void WriteToField(int descriptor, PropertyDetails details,
+ inline void WriteToField(InternalIndex descriptor, PropertyDetails details,
Object value);
// Access to in object properties.
@@ -806,6 +808,29 @@ class JSObject : public TorqueGeneratedJSObject<JSObject, JSReceiver> {
TQ_OBJECT_CONSTRUCTORS(JSObject)
};
+// An abstract superclass for JSObjects that may have elements while having an
+// empty fixed array as elements backing store. It doesn't carry any
+// functionality but allows such classes to be identified in the type
+// system.
+class JSCustomElementsObject
+ : public TorqueGeneratedJSCustomElementsObject<JSCustomElementsObject,
+ JSObject> {
+ public:
+ STATIC_ASSERT(kHeaderSize == JSObject::kHeaderSize);
+ TQ_OBJECT_CONSTRUCTORS(JSCustomElementsObject)
+};
+
+// An abstract superclass for JSObjects that require non-standard element
+// access. It doesn't carry any functionality but allows such classes to be
+// identified in the type system.
+class JSSpecialObject
+ : public TorqueGeneratedJSSpecialObject<JSSpecialObject,
+ JSCustomElementsObject> {
+ public:
+ STATIC_ASSERT(kHeaderSize == JSObject::kHeaderSize);
+ TQ_OBJECT_CONSTRUCTORS(JSSpecialObject)
+};
+
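Both abstract superclasses are deliberately field-free: the STATIC_ASSERTs pin kHeaderSize to JSObject's, so they change only the type hierarchy, not the object layout. A standalone illustration of such zero-storage tag classes (my own model, not the Torque-generated hierarchy):

#include <type_traits>

struct JSObjectModel { void* map; void* properties; void* elements; };
struct JSCustomElementsObjectModel : JSObjectModel {};  // adds no fields
struct JSSpecialObjectModel : JSCustomElementsObjectModel {};

// Counterpart of STATIC_ASSERT(kHeaderSize == JSObject::kHeaderSize);
// sizeof equality holds here because nothing was added.
static_assert(sizeof(JSSpecialObjectModel) == sizeof(JSObjectModel),
              "tag classes must not change the object layout");
static_assert(std::is_base_of<JSCustomElementsObjectModel,
                              JSSpecialObjectModel>::value,
              "a special object is also a custom-elements object");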
// JSAccessorPropertyDescriptor is just a JSObject with a specific initial
// map. This initial map adds in-object properties for "get", "set",
// "enumerable" and "configurable" properties, as assigned by the
@@ -893,9 +918,21 @@ class JSIteratorResult : public JSObject {
OBJECT_CONSTRUCTORS(JSIteratorResult, JSObject);
};
+// An abstract superclass for classes representing JavaScript function values.
+// It doesn't carry any functionality but allows function classes to be
+// identified in the type system.
+class JSFunctionOrBoundFunction
+ : public TorqueGeneratedJSFunctionOrBoundFunction<JSFunctionOrBoundFunction,
+ JSObject> {
+ public:
+ STATIC_ASSERT(kHeaderSize == JSObject::kHeaderSize);
+ TQ_OBJECT_CONSTRUCTORS(JSFunctionOrBoundFunction)
+};
+
// JSBoundFunction describes a bound function exotic object.
class JSBoundFunction
- : public TorqueGeneratedJSBoundFunction<JSBoundFunction, JSObject> {
+ : public TorqueGeneratedJSBoundFunction<JSBoundFunction,
+ JSFunctionOrBoundFunction> {
public:
static MaybeHandle<String> GetName(Isolate* isolate,
Handle<JSBoundFunction> function);
@@ -916,7 +953,7 @@ class JSBoundFunction
};
// JSFunction describes JavaScript functions.
-class JSFunction : public JSObject {
+class JSFunction : public JSFunctionOrBoundFunction {
public:
// [prototype_or_initial_map]:
DECL_ACCESSORS(prototype_or_initial_map, HeapObject)
@@ -1119,13 +1156,13 @@ class JSFunction : public JSObject {
// ES6 section 19.2.3.5 Function.prototype.toString ( ).
static Handle<String> ToString(Handle<JSFunction> function);
- DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
- TORQUE_GENERATED_JSFUNCTION_FIELDS)
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSFunctionOrBoundFunction::kHeaderSize,
+ TORQUE_GENERATED_JS_FUNCTION_FIELDS)
static constexpr int kSizeWithoutPrototype = kPrototypeOrInitialMapOffset;
static constexpr int kSizeWithPrototype = kSize;
- OBJECT_CONSTRUCTORS(JSFunction, JSObject);
+ OBJECT_CONSTRUCTORS(JSFunction, JSFunctionOrBoundFunction);
};
// JSGlobalProxy's prototype must be a JSGlobalObject or null,
@@ -1137,7 +1174,7 @@ class JSFunction : public JSObject {
// Accessing a JSGlobalProxy requires security check.
class JSGlobalProxy
- : public TorqueGeneratedJSGlobalProxy<JSGlobalProxy, JSObject> {
+ : public TorqueGeneratedJSGlobalProxy<JSGlobalProxy, JSSpecialObject> {
public:
inline bool IsDetachedFrom(JSGlobalObject global) const;
@@ -1151,7 +1188,7 @@ class JSGlobalProxy
};
// JavaScript global object.
-class JSGlobalObject : public JSObject {
+class JSGlobalObject : public JSSpecialObject {
public:
// [native context]: the natives corresponding to this global object.
DECL_ACCESSORS(native_context, NativeContext)
@@ -1179,15 +1216,16 @@ class JSGlobalObject : public JSObject {
DECL_VERIFIER(JSGlobalObject)
// Layout description.
- DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
- TORQUE_GENERATED_JSGLOBAL_OBJECT_FIELDS)
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSSpecialObject::kHeaderSize,
+ TORQUE_GENERATED_JS_GLOBAL_OBJECT_FIELDS)
- OBJECT_CONSTRUCTORS(JSGlobalObject, JSObject);
+ OBJECT_CONSTRUCTORS(JSGlobalObject, JSSpecialObject);
};
// Representation for JS Wrapper objects, String, Number, Boolean, etc.
class JSPrimitiveWrapper
- : public TorqueGeneratedJSPrimitiveWrapper<JSPrimitiveWrapper, JSObject> {
+ : public TorqueGeneratedJSPrimitiveWrapper<JSPrimitiveWrapper,
+ JSCustomElementsObject> {
public:
// Dispatched behavior.
DECL_PRINTER(JSPrimitiveWrapper)
@@ -1319,7 +1357,7 @@ class JSMessageObject : public JSObject {
DECL_VERIFIER(JSMessageObject)
DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
- TORQUE_GENERATED_JSMESSAGE_OBJECT_FIELDS)
+ TORQUE_GENERATED_JS_MESSAGE_OBJECT_FIELDS)
// TODO(v8:8989): [torque] Support marker constants.
static const int kPointerFieldsEndOffset = kStartPositionOffset;
@@ -1370,25 +1408,18 @@ class JSAsyncFromSyncIterator
TQ_OBJECT_CONSTRUCTORS(JSAsyncFromSyncIterator)
};
-class JSStringIterator : public JSObject {
+class JSStringIterator
+ : public TorqueGeneratedJSStringIterator<JSStringIterator, JSObject> {
public:
// Dispatched behavior.
DECL_PRINTER(JSStringIterator)
DECL_VERIFIER(JSStringIterator)
- DECL_CAST(JSStringIterator)
-
- // [string]: the [[IteratedString]] inobject property.
- DECL_ACCESSORS(string, String)
-
// [index]: The [[StringIteratorNextIndex]] inobject property.
inline int index() const;
inline void set_index(int value);
- DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
- TORQUE_GENERATED_JSSTRING_ITERATOR_FIELDS)
-
- OBJECT_CONSTRUCTORS(JSStringIterator, JSObject);
+ TQ_OBJECT_CONSTRUCTORS(JSStringIterator)
};
} // namespace internal
diff --git a/deps/v8/src/objects/js-plural-rules.cc b/deps/v8/src/objects/js-plural-rules.cc
index 84fe9b6d52..bf928416f4 100644
--- a/deps/v8/src/objects/js-plural-rules.cc
+++ b/deps/v8/src/objects/js-plural-rules.cc
@@ -241,17 +241,18 @@ Handle<JSObject> JSPluralRules::ResolvedOptions(
JSNumberFormat::MinimumIntegerDigitsFromSkeleton(skeleton),
"minimumIntegerDigits");
int32_t min = 0, max = 0;
- JSNumberFormat::FractionDigitsFromSkeleton(skeleton, &min, &max);
-
- CreateDataPropertyForOptions(isolate, options, min, "minimumFractionDigits");
-
- CreateDataPropertyForOptions(isolate, options, max, "maximumFractionDigits");
if (JSNumberFormat::SignificantDigitsFromSkeleton(skeleton, &min, &max)) {
CreateDataPropertyForOptions(isolate, options, min,
"minimumSignificantDigits");
CreateDataPropertyForOptions(isolate, options, max,
"maximumSignificantDigits");
+ } else {
+ JSNumberFormat::FractionDigitsFromSkeleton(skeleton, &min, &max);
+ CreateDataPropertyForOptions(isolate, options, min,
+ "minimumFractionDigits");
+ CreateDataPropertyForOptions(isolate, options, max,
+ "maximumFractionDigits");
}
// 6. Let pluralCategories be a List of Strings representing the
diff --git a/deps/v8/src/objects/js-plural-rules.h b/deps/v8/src/objects/js-plural-rules.h
index 840efb07ed..0303266894 100644
--- a/deps/v8/src/objects/js-plural-rules.h
+++ b/deps/v8/src/objects/js-plural-rules.h
@@ -68,7 +68,7 @@ class JSPluralRules : public JSObject {
// Layout description.
DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
- TORQUE_GENERATED_JSPLURAL_RULES_FIELDS)
+ TORQUE_GENERATED_JS_PLURAL_RULES_FIELDS)
DECL_ACCESSORS(locale, String)
DECL_INT_ACCESSORS(flags)
diff --git a/deps/v8/src/objects/js-proxy.h b/deps/v8/src/objects/js-proxy.h
index 8e29c08bc1..c6bb844fe5 100644
--- a/deps/v8/src/objects/js-proxy.h
+++ b/deps/v8/src/objects/js-proxy.h
@@ -128,7 +128,7 @@ class JSProxyRevocableResult : public JSObject {
public:
// Layout description.
DEFINE_FIELD_OFFSET_CONSTANTS(
- JSObject::kHeaderSize, TORQUE_GENERATED_JSPROXY_REVOCABLE_RESULT_FIELDS)
+ JSObject::kHeaderSize, TORQUE_GENERATED_JS_PROXY_REVOCABLE_RESULT_FIELDS)
// Indices of in-object properties.
static const int kProxyIndex = 0;
diff --git a/deps/v8/src/objects/js-regexp-inl.h b/deps/v8/src/objects/js-regexp-inl.h
index b69d1cca97..885bc4804d 100644
--- a/deps/v8/src/objects/js-regexp-inl.h
+++ b/deps/v8/src/objects/js-regexp-inl.h
@@ -7,6 +7,7 @@
#include "src/objects/js-regexp.h"
+#include "src/objects/js-array-inl.h"
#include "src/objects/objects-inl.h" // Needed for write barriers
#include "src/objects/smi.h"
#include "src/objects/string.h"
@@ -18,9 +19,18 @@ namespace v8 {
namespace internal {
TQ_OBJECT_CONSTRUCTORS_IMPL(JSRegExp)
+OBJECT_CONSTRUCTORS_IMPL(JSRegExpResult, JSArray)
+OBJECT_CONSTRUCTORS_IMPL(JSRegExpResultIndices, JSArray)
+
+CAST_ACCESSOR(JSRegExpResult)
+CAST_ACCESSOR(JSRegExpResultIndices)
ACCESSORS(JSRegExp, last_index, Object, kLastIndexOffset)
+ACCESSORS(JSRegExpResult, cached_indices_or_match_info, Object,
+ kCachedIndicesOrMatchInfoOffset)
+ACCESSORS(JSRegExpResult, names, Object, kNamesOffset)
+
JSRegExp::Type JSRegExp::TypeTag() const {
Object data = this->data();
if (data.IsUndefined()) return JSRegExp::NOT_COMPILED;
diff --git a/deps/v8/src/objects/js-regexp.cc b/deps/v8/src/objects/js-regexp.cc
new file mode 100644
index 0000000000..c7f96fe278
--- /dev/null
+++ b/deps/v8/src/objects/js-regexp.cc
@@ -0,0 +1,118 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/objects/js-regexp.h"
+
+#include "src/objects/js-array-inl.h"
+#include "src/objects/js-regexp-inl.h"
+
+namespace v8 {
+namespace internal {
+Handle<JSArray> JSRegExpResult::GetAndCacheIndices(
+ Isolate* isolate, Handle<JSRegExpResult> regexp_result) {
+ // Check for cached indices.
+ Handle<Object> indices_or_match_info(
+ regexp_result->cached_indices_or_match_info(), isolate);
+ if (indices_or_match_info->IsRegExpMatchInfo()) {
+ // Build and cache indices for next lookup.
+ // TODO(joshualitt): Instead of caching the indices, we could call
+ // ReconfigureToDataProperty on 'indices' setting its value to this
+ // newly created array. However, care would have to be taken to ensure
+ // a new map is not created each time.
+ Handle<RegExpMatchInfo> match_info(
+ RegExpMatchInfo::cast(regexp_result->cached_indices_or_match_info()),
+ isolate);
+ Handle<Object> maybe_names(regexp_result->names(), isolate);
+ indices_or_match_info =
+ JSRegExpResultIndices::BuildIndices(isolate, match_info, maybe_names);
+
+ // Cache the result and clear the names array.
+ regexp_result->set_cached_indices_or_match_info(*indices_or_match_info);
+ regexp_result->set_names(ReadOnlyRoots(isolate).undefined_value());
+ }
+ return Handle<JSArray>::cast(indices_or_match_info);
+}
+
+Handle<JSRegExpResultIndices> JSRegExpResultIndices::BuildIndices(
+ Isolate* isolate, Handle<RegExpMatchInfo> match_info,
+ Handle<Object> maybe_names) {
+ Handle<JSRegExpResultIndices> indices(Handle<JSRegExpResultIndices>::cast(
+ isolate->factory()->NewJSObjectFromMap(
+ isolate->regexp_result_indices_map())));
+
+ // Initialize the indices' length up front to avoid a partially initialized
+ // object should a GC be triggered by the NewFixedArray allocation below.
+ indices->set_length(Smi::kZero);
+
+ // Build indices array from RegExpMatchInfo.
+ int num_indices = match_info->NumberOfCaptureRegisters();
+ int num_results = num_indices >> 1;
+ Handle<FixedArray> indices_array =
+ isolate->factory()->NewFixedArray(num_results);
+ JSArray::SetContent(indices, indices_array);
+
+ for (int i = 0; i < num_results; i++) {
+ int base_offset = i * 2;
+ int start_offset = match_info->Capture(base_offset);
+ int end_offset = match_info->Capture(base_offset + 1);
+
+ // Any unmatched captures are set to undefined; otherwise we set them to a
+ // subarray of the indices.
+ if (start_offset == -1) {
+ indices_array->set(i, ReadOnlyRoots(isolate).undefined_value());
+ } else {
+ Handle<FixedArray> indices_sub_array(
+ isolate->factory()->NewFixedArray(2));
+ indices_sub_array->set(0, Smi::FromInt(start_offset));
+ indices_sub_array->set(1, Smi::FromInt(end_offset));
+ Handle<JSArray> indices_sub_jsarray =
+ isolate->factory()->NewJSArrayWithElements(indices_sub_array,
+ PACKED_SMI_ELEMENTS, 2);
+ indices_array->set(i, *indices_sub_jsarray);
+ }
+ }
+
+ // If there are no capture groups, set the groups property to undefined.
+ FieldIndex groups_index = FieldIndex::ForDescriptor(
+ indices->map(), InternalIndex(kGroupsDescriptorIndex));
+ if (maybe_names->IsUndefined(isolate)) {
+ indices->RawFastPropertyAtPut(groups_index,
+ ReadOnlyRoots(isolate).undefined_value());
+ return indices;
+ }
+
+ // Create a groups property that maps each named capture to its
+ // corresponding capture indices.
+ Handle<FixedArray> names(Handle<FixedArray>::cast(maybe_names));
+ int num_names = names->length() >> 1;
+ Handle<NameDictionary> group_names = NameDictionary::New(isolate, num_names);
+ for (int i = 0; i < num_names; i++) {
+ int base_offset = i * 2;
+ int name_offset = base_offset;
+ int index_offset = base_offset + 1;
+ Handle<String> name(String::cast(names->get(name_offset)), isolate);
+ Handle<Smi> smi_index(Smi::cast(names->get(index_offset)), isolate);
+ Handle<Object> capture_indices(indices_array->get(smi_index->value()),
+ isolate);
+ if (!capture_indices->IsUndefined(isolate)) {
+ capture_indices = Handle<JSArray>::cast(capture_indices);
+ }
+ group_names = NameDictionary::Add(
+ isolate, group_names, name, capture_indices, PropertyDetails::Empty());
+ }
+
+ // Convert group_names to a JSObject and store it at the groups property of
+ // the result indices.
+ Handle<FixedArrayBase> elements = isolate->factory()->empty_fixed_array();
+ Handle<HeapObject> null =
+ Handle<HeapObject>::cast(isolate->factory()->null_value());
+ Handle<JSObject> js_group_names =
+ isolate->factory()->NewSlowJSObjectWithPropertiesAndElements(
+ null, group_names, elements);
+ indices->RawFastPropertyAtPut(groups_index, *js_group_names);
+ return indices;
+}
+
+} // namespace internal
+} // namespace v8
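GetAndCacheIndices stores either the raw RegExpMatchInfo or the built indices in a single field, building the JSArray only on first access and clearing |names| afterwards. A standalone model of that one-slot lazy cache (hypothetical types, not V8 API; the single cached_indices_or_match_info field is modelled with two members here for clarity):

#include <memory>
#include <utility>
#include <vector>

struct MatchInfoModel { std::vector<std::pair<int, int>> captures; };
struct IndicesModel { std::vector<std::pair<int, int>> ranges; };

struct RegExpResultModel {
  std::shared_ptr<MatchInfoModel> raw;  // input, present until first use
  std::shared_ptr<IndicesModel> built;  // cached result, built on demand

  const IndicesModel& GetAndCacheIndices() {
    if (!built) {
      built = std::make_shared<IndicesModel>();
      for (const auto& c : raw->captures) {
        if (c.first != -1) built->ranges.push_back(c);  // skip unmatched
      }
      raw.reset();  // like clearing |names|: the inputs are spent
    }
    return *built;
  }
};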
diff --git a/deps/v8/src/objects/js-regexp.h b/deps/v8/src/objects/js-regexp.h
index b3ef06bd5c..03efd4913c 100644
--- a/deps/v8/src/objects/js-regexp.h
+++ b/deps/v8/src/objects/js-regexp.h
@@ -96,7 +96,8 @@ class JSRegExp : public TorqueGeneratedJSRegExp<JSRegExp, JSObject> {
Handle<String> flags_string);
bool MarkedForTierUp();
- void ResetTierUp();
+ void ResetLastTierUpTick();
+ void TierUpTick();
void MarkTierUpForNextExec();
inline Type TypeTag() const;
@@ -176,9 +177,13 @@ class JSRegExp : public TorqueGeneratedJSRegExp<JSRegExp, JSObject> {
// Maps names of named capture groups (at indices 2i) to their corresponding
// (1-based) capture group indices (at indices 2i + 1).
static const int kIrregexpCaptureNameMapIndex = kDataIndex + 6;
- static const int kIrregexpTierUpTicksIndex = kDataIndex + 7;
+ // Tier-up ticks are set to the value of the tier-up ticks flag. The value is
+ // decremented on each execution of the bytecode, so that the tier-up
+ // happens once the ticks reach zero.
+ // This value is ignored if the regexp-tier-up flag isn't turned on.
+ static const int kIrregexpTicksUntilTierUpIndex = kDataIndex + 7;
- static const int kIrregexpDataSize = kIrregexpTierUpTicksIndex + 1;
+ static const int kIrregexpDataSize = kIrregexpTicksUntilTierUpIndex + 1;
// In-object fields.
static const int kLastIndexFieldIndex = 0;
@@ -195,6 +200,10 @@ class JSRegExp : public TorqueGeneratedJSRegExp<JSRegExp, JSObject> {
// The uninitialized value for a regexp code object.
static const int kUninitializedValue = -1;
+ // The heuristic value for the length of the subject string above which we
+ // tier up to the compiler immediately, instead of using the interpreter.
+ static constexpr int kTierUpForSubjectLengthValue = 1000;
+
TQ_OBJECT_CONSTRUCTORS(JSRegExp)
};
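Taken together, the renamed kIrregexpTicksUntilTierUpIndex and the new kTierUpForSubjectLengthValue describe a simple heuristic: count down once per interpreted execution and compile when the counter hits zero, or immediately for long subjects. A standalone model (not the real RegExp pipeline):

struct RegExpModel {
  static constexpr int kTierUpForSubjectLengthValue = 1000;  // from above

  int ticks_until_tier_up;  // initialized from the tier-up ticks flag
  bool compiled = false;

  void Execute(int subject_length) {
    if (compiled) return;  // already tiered up
    if (subject_length > kTierUpForSubjectLengthValue ||
        --ticks_until_tier_up <= 0) {
      compiled = true;     // stand-in for compiling the bytecode
      return;
    }
    // Otherwise: interpret this execution and try again next time.
  }
};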
@@ -208,18 +217,63 @@ DEFINE_OPERATORS_FOR_FLAGS(JSRegExp::Flags)
// After creation the result must be treated as a JSArray in all regards.
class JSRegExpResult : public JSArray {
public:
+ DECL_CAST(JSRegExpResult)
+
+ // TODO(joshualitt): We would like to add printers and verifiers to
+ // JSRegExpResult, and maybe JSRegExpResultIndices, but both have the same
+ // instance type as JSArray.
+
+ // cached_indices_or_match_info and names are used to construct the
+ // JSRegExpResultIndices returned from the indices property lazily.
+ DECL_ACCESSORS(cached_indices_or_match_info, Object)
+ DECL_ACCESSORS(names, Object)
+
// Layout description.
DEFINE_FIELD_OFFSET_CONSTANTS(JSArray::kSize,
- TORQUE_GENERATED_JSREG_EXP_RESULT_FIELDS)
+ TORQUE_GENERATED_JS_REG_EXP_RESULT_FIELDS)
+
+ static Handle<JSArray> GetAndCacheIndices(
+ Isolate* isolate, Handle<JSRegExpResult> regexp_result);
// Indices of in-object properties.
static const int kIndexIndex = 0;
static const int kInputIndex = 1;
static const int kGroupsIndex = 2;
- static const int kInObjectPropertyCount = 3;
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSRegExpResult);
+ // Private internal only fields.
+ static const int kCachedIndicesOrMatchInfoIndex = 3;
+ static const int kNamesIndex = 4;
+ static const int kInObjectPropertyCount = 5;
+
+ OBJECT_CONSTRUCTORS(JSRegExpResult, JSArray);
+};
+
+// JSRegExpResultIndices is just a JSArray with a specific initial map.
+// This initial map adds an in-object "groups" property, as assigned by
+// RegExp.prototype.exec, which allows faster creation of RegExp exec
+// results.
+// This class just holds constants used when creating the result.
+// After creation the result must be treated as a JSArray in all regards.
+class JSRegExpResultIndices : public JSArray {
+ public:
+ DECL_CAST(JSRegExpResultIndices)
+
+ // Layout description.
+ DEFINE_FIELD_OFFSET_CONSTANTS(
+ JSArray::kSize, TORQUE_GENERATED_JS_REG_EXP_RESULT_INDICES_FIELDS)
+
+ static Handle<JSRegExpResultIndices> BuildIndices(
+ Isolate* isolate, Handle<RegExpMatchInfo> match_info,
+ Handle<Object> maybe_names);
+
+ // Indices of in-object properties.
+ static const int kGroupsIndex = 0;
+ static const int kInObjectPropertyCount = 1;
+
+ // Descriptor index of groups.
+ static const int kGroupsDescriptorIndex = 1;
+
+ OBJECT_CONSTRUCTORS(JSRegExpResultIndices, JSArray);
};
} // namespace internal
diff --git a/deps/v8/src/objects/js-relative-time-format.cc b/deps/v8/src/objects/js-relative-time-format.cc
index 28f8c757ee..edf3e26c22 100644
--- a/deps/v8/src/objects/js-relative-time-format.cc
+++ b/deps/v8/src/objects/js-relative-time-format.cc
@@ -112,7 +112,8 @@ MaybeHandle<JSRelativeTimeFormat> JSRelativeTimeFormat::New(
// 14. Let dataLocale be r.[[DataLocale]].
icu::Locale icu_locale = r.icu_locale;
UErrorCode status = U_ZERO_ERROR;
- if (numbering_system_str != nullptr) {
+ if (numbering_system_str != nullptr &&
+ Intl::IsValidNumberingSystem(numbering_system_str.get())) {
icu_locale.setUnicodeKeywordValue("nu", numbering_system_str.get(), status);
CHECK(U_SUCCESS(status));
}
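The added Intl::IsValidNumberingSystem guard ensures the CHECK(U_SUCCESS(status)) above can no longer be tripped by a malformed "nu" value. A standalone sketch of the validate-before-set shape (the allow-list is a hypothetical stand-in; the real check consults ICU data):

#include <set>
#include <string>

static bool IsValidNumberingSystemModel(const std::string& nu) {
  static const std::set<std::string> known = {"latn", "arab", "deva"};
  return known.count(nu) > 0;
}

void SetNumberingSystem(std::string* keyword, const char* nu) {
  // Only forward well-formed values, so the success check that follows
  // the real setUnicodeKeywordValue call cannot fail on bad input.
  if (nu != nullptr && IsValidNumberingSystemModel(nu)) *keyword = nu;
}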
diff --git a/deps/v8/src/objects/js-relative-time-format.h b/deps/v8/src/objects/js-relative-time-format.h
index 6e405e345e..c642367988 100644
--- a/deps/v8/src/objects/js-relative-time-format.h
+++ b/deps/v8/src/objects/js-relative-time-format.h
@@ -107,7 +107,7 @@ class JSRelativeTimeFormat : public JSObject {
// Layout description.
DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
- TORQUE_GENERATED_JSRELATIVE_TIME_FORMAT_FIELDS)
+ TORQUE_GENERATED_JS_RELATIVE_TIME_FORMAT_FIELDS)
private:
static Style getStyle(const char* str);
diff --git a/deps/v8/src/objects/js-segment-iterator.h b/deps/v8/src/objects/js-segment-iterator.h
index cadb99e79d..1c71af8864 100644
--- a/deps/v8/src/objects/js-segment-iterator.h
+++ b/deps/v8/src/objects/js-segment-iterator.h
@@ -91,7 +91,7 @@ class JSSegmentIterator : public JSObject {
// Layout description.
DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
- TORQUE_GENERATED_JSSEGMENT_ITERATOR_FIELDS)
+ TORQUE_GENERATED_JS_SEGMENT_ITERATOR_FIELDS)
OBJECT_CONSTRUCTORS(JSSegmentIterator, JSObject);
};
diff --git a/deps/v8/src/objects/js-segmenter.h b/deps/v8/src/objects/js-segmenter.h
index 641cf106fb..209c4682b3 100644
--- a/deps/v8/src/objects/js-segmenter.h
+++ b/deps/v8/src/objects/js-segmenter.h
@@ -78,7 +78,7 @@ class JSSegmenter : public JSObject {
// Layout description.
DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
- TORQUE_GENERATED_JSSEGMENTER_FIELDS)
+ TORQUE_GENERATED_JS_SEGMENTER_FIELDS)
private:
static Granularity GetGranularity(const char* str);
diff --git a/deps/v8/src/objects/js-weak-refs-inl.h b/deps/v8/src/objects/js-weak-refs-inl.h
index 004ffd6d79..0635153611 100644
--- a/deps/v8/src/objects/js-weak-refs-inl.h
+++ b/deps/v8/src/objects/js-weak-refs-inl.h
@@ -17,38 +17,21 @@
namespace v8 {
namespace internal {
-OBJECT_CONSTRUCTORS_IMPL(WeakCell, HeapObject)
-OBJECT_CONSTRUCTORS_IMPL(JSWeakRef, JSObject)
+TQ_OBJECT_CONSTRUCTORS_IMPL(WeakCell)
+TQ_OBJECT_CONSTRUCTORS_IMPL(JSWeakRef)
OBJECT_CONSTRUCTORS_IMPL(JSFinalizationGroup, JSObject)
-OBJECT_CONSTRUCTORS_IMPL(JSFinalizationGroupCleanupIterator, JSObject)
+TQ_OBJECT_CONSTRUCTORS_IMPL(JSFinalizationGroupCleanupIterator)
ACCESSORS(JSFinalizationGroup, native_context, NativeContext,
kNativeContextOffset)
ACCESSORS(JSFinalizationGroup, cleanup, Object, kCleanupOffset)
-ACCESSORS(JSFinalizationGroup, active_cells, Object, kActiveCellsOffset)
-ACCESSORS(JSFinalizationGroup, cleared_cells, Object, kClearedCellsOffset)
+ACCESSORS(JSFinalizationGroup, active_cells, HeapObject, kActiveCellsOffset)
+ACCESSORS(JSFinalizationGroup, cleared_cells, HeapObject, kClearedCellsOffset)
ACCESSORS(JSFinalizationGroup, key_map, Object, kKeyMapOffset)
SMI_ACCESSORS(JSFinalizationGroup, flags, kFlagsOffset)
ACCESSORS(JSFinalizationGroup, next, Object, kNextOffset)
CAST_ACCESSOR(JSFinalizationGroup)
-ACCESSORS(WeakCell, finalization_group, Object, kFinalizationGroupOffset)
-ACCESSORS(WeakCell, target, HeapObject, kTargetOffset)
-ACCESSORS(WeakCell, holdings, Object, kHoldingsOffset)
-ACCESSORS(WeakCell, next, Object, kNextOffset)
-ACCESSORS(WeakCell, prev, Object, kPrevOffset)
-ACCESSORS(WeakCell, key, Object, kKeyOffset)
-ACCESSORS(WeakCell, key_list_next, Object, kKeyListNextOffset)
-ACCESSORS(WeakCell, key_list_prev, Object, kKeyListPrevOffset)
-CAST_ACCESSOR(WeakCell)
-
-CAST_ACCESSOR(JSWeakRef)
-ACCESSORS(JSWeakRef, target, HeapObject, kTargetOffset)
-
-ACCESSORS(JSFinalizationGroupCleanupIterator, finalization_group,
- JSFinalizationGroup, kFinalizationGroupOffset)
-CAST_ACCESSOR(JSFinalizationGroupCleanupIterator)
-
void JSFinalizationGroup::Register(
Handle<JSFinalizationGroup> finalization_group, Handle<JSReceiver> target,
Handle<Object> holdings, Handle<Object> key, Isolate* isolate) {
@@ -101,7 +84,7 @@ bool JSFinalizationGroup::Unregister(
Handle<ObjectHashTable> key_map =
handle(ObjectHashTable::cast(finalization_group->key_map()), isolate);
Object value = key_map->Lookup(unregister_token);
- Object undefined = ReadOnlyRoots(isolate).undefined_value();
+ HeapObject undefined = ReadOnlyRoots(isolate).undefined_value();
while (value.IsWeakCell()) {
WeakCell weak_cell = WeakCell::cast(value);
weak_cell.RemoveFromFinalizationGroupCells(isolate);
diff --git a/deps/v8/src/objects/js-weak-refs.h b/deps/v8/src/objects/js-weak-refs.h
index 723e0e3135..8d61b125a1 100644
--- a/deps/v8/src/objects/js-weak-refs.h
+++ b/deps/v8/src/objects/js-weak-refs.h
@@ -28,8 +28,8 @@ class JSFinalizationGroup : public JSObject {
DECL_ACCESSORS(native_context, NativeContext)
DECL_ACCESSORS(cleanup, Object)
- DECL_ACCESSORS(active_cells, Object)
- DECL_ACCESSORS(cleared_cells, Object)
+ DECL_ACCESSORS(active_cells, HeapObject)
+ DECL_ACCESSORS(cleared_cells, HeapObject)
DECL_ACCESSORS(key_map, Object)
// For storing a list of JSFinalizationGroup objects in NativeContext.
@@ -66,7 +66,7 @@ class JSFinalizationGroup : public JSObject {
// Layout description.
DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
- TORQUE_GENERATED_JSFINALIZATION_GROUP_FIELDS)
+ TORQUE_GENERATED_JS_FINALIZATION_GROUP_FIELDS)
// Bitfields in flags.
using ScheduledForCleanupField = BitField<bool, 0, 1>;
@@ -75,32 +75,10 @@ class JSFinalizationGroup : public JSObject {
};
// Internal object for storing weak references in JSFinalizationGroup.
-class WeakCell : public HeapObject {
+class WeakCell : public TorqueGeneratedWeakCell<WeakCell, HeapObject> {
public:
DECL_PRINTER(WeakCell)
EXPORT_DECL_VERIFIER(WeakCell)
- DECL_CAST(WeakCell)
-
- DECL_ACCESSORS(finalization_group, Object)
- DECL_ACCESSORS(target, HeapObject)
- DECL_ACCESSORS(holdings, Object)
-
- // For storing doubly linked lists of WeakCells in JSFinalizationGroup's
- // "active_cells" and "cleared_cells" lists.
- DECL_ACCESSORS(prev, Object)
- DECL_ACCESSORS(next, Object)
-
- // For storing doubly linked lists of WeakCells per key in
- // JSFinalizationGroup's key-based hashmap. WeakCell also needs to know its
- // key, so that we can remove the key from the key_map when we remove the last
- // WeakCell associated with it.
- DECL_ACCESSORS(key, Object)
- DECL_ACCESSORS(key_list_prev, Object)
- DECL_ACCESSORS(key_list_next, Object)
-
- // Layout description.
- DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
- TORQUE_GENERATED_WEAK_CELL_FIELDS)
class BodyDescriptor;
@@ -115,40 +93,27 @@ class WeakCell : public HeapObject {
inline void RemoveFromFinalizationGroupCells(Isolate* isolate);
- OBJECT_CONSTRUCTORS(WeakCell, HeapObject);
+ TQ_OBJECT_CONSTRUCTORS(WeakCell)
};
-class JSWeakRef : public JSObject {
+class JSWeakRef : public TorqueGeneratedJSWeakRef<JSWeakRef, JSObject> {
public:
DECL_PRINTER(JSWeakRef)
EXPORT_DECL_VERIFIER(JSWeakRef)
- DECL_CAST(JSWeakRef)
-
- DECL_ACCESSORS(target, HeapObject)
-
- // Layout description.
- DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
- TORQUE_GENERATED_JSWEAK_REF_FIELDS)
class BodyDescriptor;
- OBJECT_CONSTRUCTORS(JSWeakRef, JSObject);
+ TQ_OBJECT_CONSTRUCTORS(JSWeakRef)
};
-class JSFinalizationGroupCleanupIterator : public JSObject {
+class JSFinalizationGroupCleanupIterator
+ : public TorqueGeneratedJSFinalizationGroupCleanupIterator<
+ JSFinalizationGroupCleanupIterator, JSObject> {
public:
DECL_PRINTER(JSFinalizationGroupCleanupIterator)
DECL_VERIFIER(JSFinalizationGroupCleanupIterator)
- DECL_CAST(JSFinalizationGroupCleanupIterator)
-
- DECL_ACCESSORS(finalization_group, JSFinalizationGroup)
-
- // Layout description.
- DEFINE_FIELD_OFFSET_CONSTANTS(
- JSObject::kHeaderSize,
- TORQUE_GENERATED_JSFINALIZATION_GROUP_CLEANUP_ITERATOR_FIELDS)
- OBJECT_CONSTRUCTORS(JSFinalizationGroupCleanupIterator, JSObject);
+ TQ_OBJECT_CONSTRUCTORS(JSFinalizationGroupCleanupIterator)
};
} // namespace internal
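The js-weak-refs migration swaps hand-written DECL_ACCESSORS and offset boilerplate for TorqueGenerated base classes, leaving only printers, verifiers, and body descriptors hand-written. A rough sketch of the CRTP shape such a generated base takes (my own reconstruction, not the actual generated code):

template <class Derived, class Parent>
class TorqueGeneratedWeakCellModel : public Parent {
 public:
  void* target() const { return target_; }  // "generated" accessor
  void set_target(void* v) { target_ = v; }

 private:
  void* target_ = nullptr;                  // "generated" field
};

class HeapObjectModel {};

class WeakCellModel
    : public TorqueGeneratedWeakCellModel<WeakCellModel, HeapObjectModel> {
 public:
  // Mirrors the patch: only behavior that Torque cannot express stays.
  bool HasTarget() const { return target() != nullptr; }
};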
diff --git a/deps/v8/src/objects/keys.cc b/deps/v8/src/objects/keys.cc
index 7496399cad..0231df18d3 100644
--- a/deps/v8/src/objects/keys.cc
+++ b/deps/v8/src/objects/keys.cc
@@ -279,9 +279,13 @@ void FastKeyAccumulator::Prepare() {
is_receiver_simple_enum_ = false;
has_empty_prototype_ = true;
JSReceiver last_prototype;
+ may_have_elements_ = MayHaveElements(*receiver_);
for (PrototypeIterator iter(isolate_, *receiver_); !iter.IsAtEnd();
iter.Advance()) {
JSReceiver current = iter.GetCurrent<JSReceiver>();
+ if (!may_have_elements_) {
+ may_have_elements_ = MayHaveElements(current);
+ }
bool has_no_properties = CheckAndInitalizeEmptyEnumCache(current);
if (has_no_properties) continue;
last_prototype = current;
@@ -338,13 +342,12 @@ Handle<FixedArray> GetFastEnumPropertyKeys(Isolate* isolate,
Handle<DescriptorArray> descriptors =
Handle<DescriptorArray>(map->instance_descriptors(), isolate);
isolate->counters()->enum_cache_misses()->Increment();
- int nod = map->NumberOfOwnDescriptors();
// Create the keys array.
int index = 0;
bool fields_only = true;
keys = isolate->factory()->NewFixedArray(enum_length);
- for (int i = 0; i < nod; i++) {
+ for (InternalIndex i : map->IterateOwnDescriptors()) {
DisallowHeapAllocation no_gc;
PropertyDetails details = descriptors->GetDetails(i);
if (details.IsDontEnum()) continue;
@@ -361,7 +364,7 @@ Handle<FixedArray> GetFastEnumPropertyKeys(Isolate* isolate,
if (fields_only) {
indices = isolate->factory()->NewFixedArray(enum_length);
index = 0;
- for (int i = 0; i < nod; i++) {
+ for (InternalIndex i : map->IterateOwnDescriptors()) {
DisallowHeapAllocation no_gc;
PropertyDetails details = descriptors->GetDetails(i);
if (details.IsDontEnum()) continue;
@@ -499,12 +502,21 @@ MaybeHandle<FixedArray> FastKeyAccumulator::GetKeysSlow(
accumulator.set_is_for_in(is_for_in_);
accumulator.set_skip_indices(skip_indices_);
accumulator.set_last_non_empty_prototype(last_non_empty_prototype_);
+ accumulator.set_may_have_elements(may_have_elements_);
MAYBE_RETURN(accumulator.CollectKeys(receiver_, receiver_),
MaybeHandle<FixedArray>());
return accumulator.GetKeys(keys_conversion);
}
+bool FastKeyAccumulator::MayHaveElements(JSReceiver receiver) {
+ if (!receiver.IsJSObject()) return true;
+ JSObject object = JSObject::cast(receiver);
+ if (object.HasEnumerableElements()) return true;
+ if (object.HasIndexedInterceptor()) return true;
+ return false;
+}
+
namespace {
enum IndexedOrNamed { kIndexed, kNamed };
@@ -518,13 +530,14 @@ V8_WARN_UNUSED_RESULT ExceptionStatus FilterForEnumerableProperties(
uint32_t length = accessor->GetCapacity(*result, result->elements());
for (uint32_t i = 0; i < length; i++) {
- if (!accessor->HasEntry(*result, i)) continue;
+ InternalIndex entry(i);
+ if (!accessor->HasEntry(*result, entry)) continue;
// args are invalid after args.Call(); create a new one in every iteration.
PropertyCallbackArguments args(accumulator->isolate(), interceptor->data(),
*receiver, *object, Just(kDontThrow));
- Handle<Object> element = accessor->Get(result, i);
+ Handle<Object> element = accessor->Get(result, entry);
Handle<Object> attributes;
if (type == kIndexed) {
uint32_t number;
@@ -624,7 +637,7 @@ base::Optional<int> CollectOwnPropertyNamesInternal(
int first_skipped = -1;
PropertyFilter filter = keys->filter();
KeyCollectionMode mode = keys->mode();
- for (int i = start_index; i < limit; i++) {
+ for (InternalIndex i : InternalIndex::Range(start_index, limit)) {
bool is_shadowing_key = false;
PropertyDetails details = descs->GetDetails(i);
@@ -645,7 +658,7 @@ base::Optional<int> CollectOwnPropertyNamesInternal(
Name key = descs->GetKey(i);
if (skip_symbols == key.IsSymbol()) {
- if (first_skipped == -1) first_skipped = i;
+ if (first_skipped == -1) first_skipped = i.as_int();
continue;
}
if (key.FilterKey(keys->filter())) continue;
@@ -689,13 +702,15 @@ Maybe<bool> KeyAccumulator::CollectOwnPropertyNames(Handle<JSReceiver> receiver,
Map map = object->map();
int nof_descriptors = map.NumberOfOwnDescriptors();
if (enum_keys->length() != nof_descriptors) {
- Handle<DescriptorArray> descs =
- Handle<DescriptorArray>(map.instance_descriptors(), isolate_);
- for (int i = 0; i < nof_descriptors; i++) {
- PropertyDetails details = descs->GetDetails(i);
- if (!details.IsDontEnum()) continue;
- Object key = descs->GetKey(i);
- this->AddShadowingKey(key);
+ if (map.prototype(isolate_) != ReadOnlyRoots(isolate_).null_value()) {
+ Handle<DescriptorArray> descs =
+ Handle<DescriptorArray>(map.instance_descriptors(), isolate_);
+ for (InternalIndex i : InternalIndex::Range(nof_descriptors)) {
+ PropertyDetails details = descs->GetDetails(i);
+ if (!details.IsDontEnum()) continue;
+ Object key = descs->GetKey(i);
+ this->AddShadowingKey(key);
+ }
}
}
} else if (object->IsJSGlobalObject()) {
@@ -823,7 +838,9 @@ Maybe<bool> KeyAccumulator::CollectOwnKeys(Handle<JSReceiver> receiver,
return Just(true);
}
- MAYBE_RETURN(CollectOwnElementIndices(receiver, object), Nothing<bool>());
+ if (may_have_elements_) {
+ MAYBE_RETURN(CollectOwnElementIndices(receiver, object), Nothing<bool>());
+ }
MAYBE_RETURN(CollectOwnPropertyNames(receiver, object), Nothing<bool>());
return Just(true);
}
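The thread through keys.cc: FastKeyAccumulator::Prepare records, during the prototype walk it already performs, whether any object on the chain can contribute indexed keys, and CollectOwnKeys then skips element collection entirely when none can. A standalone model of that fast path (hypothetical types, not V8 API):

#include <vector>

struct ReceiverModel {
  bool has_enumerable_elements = false;
  bool has_indexed_interceptor = false;
  ReceiverModel* prototype = nullptr;
};

static bool MayHaveElements(const ReceiverModel& o) {
  return o.has_enumerable_elements || o.has_indexed_interceptor;
}

std::vector<int> CollectOwnKeysModel(ReceiverModel& receiver) {
  bool may_have_elements = false;
  for (ReceiverModel* o = &receiver; o != nullptr; o = o->prototype) {
    if (MayHaveElements(*o)) {
      may_have_elements = true;
      break;
    }
  }
  std::vector<int> keys;
  if (may_have_elements) {
    // ...walk the elements backing store / query interceptors...
  }
  // ...named properties are collected as before...
  return keys;
}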
diff --git a/deps/v8/src/objects/keys.h b/deps/v8/src/objects/keys.h
index 5d8632e2a7..4c2307a20b 100644
--- a/deps/v8/src/objects/keys.h
+++ b/deps/v8/src/objects/keys.h
@@ -93,6 +93,7 @@ class KeyAccumulator final {
void set_last_non_empty_prototype(Handle<JSReceiver> object) {
last_non_empty_prototype_ = object;
}
+ void set_may_have_elements(bool value) { may_have_elements_ = value; }
// Shadowing keys are used to filter keys. This happens when non-enumerable
// keys appear again on the prototype chain.
void AddShadowingKey(Object key);
@@ -125,6 +126,7 @@ class KeyAccumulator final {
// For all the keys on the first receiver adding a shadowing key we can skip
// the shadow check.
bool skip_shadow_check_ = true;
+ bool may_have_elements_ = true;
DISALLOW_COPY_AND_ASSIGN(KeyAccumulator);
};
@@ -149,6 +151,7 @@ class FastKeyAccumulator {
bool is_receiver_simple_enum() { return is_receiver_simple_enum_; }
bool has_empty_prototype() { return has_empty_prototype_; }
+ bool may_have_elements() { return may_have_elements_; }
MaybeHandle<FixedArray> GetKeys(
GetKeysConversion convert = GetKeysConversion::kKeepNumbers);
@@ -160,6 +163,8 @@ class FastKeyAccumulator {
MaybeHandle<FixedArray> GetOwnKeysWithUninitializedEnumCache();
+ bool MayHaveElements(JSReceiver receiver);
+
Isolate* isolate_;
Handle<JSReceiver> receiver_;
Handle<JSReceiver> last_non_empty_prototype_;
@@ -169,6 +174,7 @@ class FastKeyAccumulator {
bool skip_indices_ = false;
bool is_receiver_simple_enum_ = false;
bool has_empty_prototype_ = false;
+ bool may_have_elements_ = true;
DISALLOW_COPY_AND_ASSIGN(FastKeyAccumulator);
};
diff --git a/deps/v8/src/objects/layout-descriptor-inl.h b/deps/v8/src/objects/layout-descriptor-inl.h
index ad0a058a92..30fe132129 100644
--- a/deps/v8/src/objects/layout-descriptor-inl.h
+++ b/deps/v8/src/objects/layout-descriptor-inl.h
@@ -169,7 +169,7 @@ int LayoutDescriptor::CalculateCapacity(Map map, DescriptorArray descriptors,
} else {
layout_descriptor_length = 0;
- for (int i = 0; i < num_descriptors; i++) {
+ for (InternalIndex i : InternalIndex::Range(num_descriptors)) {
PropertyDetails details = descriptors.GetDetails(i);
if (!InobjectUnboxedField(inobject_properties, details)) continue;
int field_index = details.field_index();
@@ -188,7 +188,7 @@ LayoutDescriptor LayoutDescriptor::Initialize(
DisallowHeapAllocation no_allocation;
int inobject_properties = map.GetInObjectProperties();
- for (int i = 0; i < num_descriptors; i++) {
+ for (InternalIndex i : InternalIndex::Range(num_descriptors)) {
PropertyDetails details = descriptors.GetDetails(i);
if (!InobjectUnboxedField(inobject_properties, details)) {
DCHECK(details.location() != kField ||
diff --git a/deps/v8/src/objects/layout-descriptor.cc b/deps/v8/src/objects/layout-descriptor.cc
index 76421aaf4f..2b588a58bf 100644
--- a/deps/v8/src/objects/layout-descriptor.cc
+++ b/deps/v8/src/objects/layout-descriptor.cc
@@ -258,9 +258,8 @@ LayoutDescriptor LayoutDescriptor::Trim(Heap* heap, Map map,
bool LayoutDescriptor::IsConsistentWithMap(Map map, bool check_tail) {
if (FLAG_unbox_double_fields) {
DescriptorArray descriptors = map.instance_descriptors();
- int nof_descriptors = map.NumberOfOwnDescriptors();
int last_field_index = 0;
- for (int i = 0; i < nof_descriptors; i++) {
+ for (InternalIndex i : map.IterateOwnDescriptors()) {
PropertyDetails details = descriptors.GetDetails(i);
if (details.location() != kField) continue;
FieldIndex field_index = FieldIndex::ForDescriptor(map, i);
diff --git a/deps/v8/src/objects/literal-objects.cc b/deps/v8/src/objects/literal-objects.cc
index 95beb6cbdb..98c41cbfb5 100644
--- a/deps/v8/src/objects/literal-objects.cc
+++ b/deps/v8/src/objects/literal-objects.cc
@@ -31,11 +31,11 @@ void AddToDescriptorArrayTemplate(
Isolate* isolate, Handle<DescriptorArray> descriptor_array_template,
Handle<Name> name, ClassBoilerplate::ValueKind value_kind,
Handle<Object> value) {
- int entry = descriptor_array_template->Search(
+ InternalIndex entry = descriptor_array_template->Search(
*name, descriptor_array_template->number_of_descriptors());
// TODO(ishell): deduplicate properties at AST level, this will allow us to
// avoid creation of closures that will be overwritten anyway.
- if (entry == DescriptorArray::kNotFound) {
+ if (entry.is_not_found()) {
// Entry not found, add new one.
Descriptor d;
if (value_kind == ClassBoilerplate::kData) {
@@ -412,8 +412,8 @@ Handle<ClassBoilerplate> ClassBoilerplate::BuildClassBoilerplate(
ObjectDescriptor static_desc(kMinimumClassPropertiesCount);
ObjectDescriptor instance_desc(kMinimumPrototypePropertiesCount);
- for (int i = 0; i < expr->properties()->length(); i++) {
- ClassLiteral::Property* property = expr->properties()->at(i);
+ for (int i = 0; i < expr->public_members()->length(); i++) {
+ ClassLiteral::Property* property = expr->public_members()->at(i);
ObjectDescriptor& desc =
property->is_static() ? static_desc : instance_desc;
if (property->is_computed_name()) {
@@ -477,14 +477,8 @@ Handle<ClassBoilerplate> ClassBoilerplate::BuildClassBoilerplate(
//
int dynamic_argument_index = ClassBoilerplate::kFirstDynamicArgumentIndex;
- for (int i = 0; i < expr->properties()->length(); i++) {
- ClassLiteral::Property* property = expr->properties()->at(i);
-
- // Private members are not processed using the class boilerplate.
- if (property->is_private()) {
- continue;
- }
-
+ for (int i = 0; i < expr->public_members()->length(); i++) {
+ ClassLiteral::Property* property = expr->public_members()->at(i);
ClassBoilerplate::ValueKind value_kind;
switch (property->kind()) {
case ClassLiteral::Property::METHOD:
diff --git a/deps/v8/src/objects/lookup-inl.h b/deps/v8/src/objects/lookup-inl.h
index 648398be5e..49a42e7131 100644
--- a/deps/v8/src/objects/lookup-inl.h
+++ b/deps/v8/src/objects/lookup-inl.h
@@ -10,6 +10,7 @@
#include "src/handles/handles-inl.h"
#include "src/heap/factory-inl.h"
#include "src/objects/api-callbacks.h"
+#include "src/objects/internal-index.h"
#include "src/objects/map-inl.h"
#include "src/objects/name-inl.h"
#include "src/objects/objects-inl.h"
@@ -136,11 +137,11 @@ void LookupIterator::UpdateProtector() {
}
}
-int LookupIterator::descriptor_number() const {
+InternalIndex LookupIterator::descriptor_number() const {
DCHECK(!IsElement());
DCHECK(has_property_);
DCHECK(holder_->HasFastProperties(isolate_));
- return number_;
+ return InternalIndex(number_);
}
int LookupIterator::dictionary_entry() const {
diff --git a/deps/v8/src/objects/lookup.cc b/deps/v8/src/objects/lookup.cc
index 4646b71a9e..7f626cc223 100644
--- a/deps/v8/src/objects/lookup.cc
+++ b/deps/v8/src/objects/lookup.cc
@@ -249,10 +249,10 @@ void LookupIterator::InternalUpdateProtector() {
}
if (!Protectors::IsArraySpeciesLookupChainIntact(isolate_) &&
- !isolate_->IsPromiseSpeciesLookupChainIntact() &&
+ !Protectors::IsPromiseSpeciesLookupChainIntact(isolate_) &&
!Protectors::IsRegExpSpeciesLookupChainProtectorIntact(
native_context) &&
- !isolate_->IsTypedArraySpeciesLookupChainIntact()) {
+ !Protectors::IsTypedArraySpeciesLookupChainIntact(isolate_)) {
return;
}
// Setting the constructor property could change an instance's @@species
@@ -263,8 +263,8 @@ void LookupIterator::InternalUpdateProtector() {
Protectors::InvalidateArraySpeciesLookupChain(isolate_);
return;
} else if (receiver->IsJSPromise(isolate_)) {
- if (!isolate_->IsPromiseSpeciesLookupChainIntact()) return;
- isolate_->InvalidatePromiseSpeciesProtector();
+ if (!Protectors::IsPromiseSpeciesLookupChainIntact(isolate_)) return;
+ Protectors::InvalidatePromiseSpeciesLookupChain(isolate_);
return;
} else if (receiver->IsJSRegExp(isolate_)) {
if (!Protectors::IsRegExpSpeciesLookupChainProtectorIntact(
@@ -275,8 +275,8 @@ void LookupIterator::InternalUpdateProtector() {
native_context);
return;
} else if (receiver->IsJSTypedArray(isolate_)) {
- if (!isolate_->IsTypedArraySpeciesLookupChainIntact()) return;
- isolate_->InvalidateTypedArraySpeciesProtector();
+ if (!Protectors::IsTypedArraySpeciesLookupChainIntact(isolate_)) return;
+ Protectors::InvalidateTypedArraySpeciesLookupChain(isolate_);
return;
}
if (receiver->map(isolate_).is_prototype_map()) {
@@ -294,8 +294,8 @@ void LookupIterator::InternalUpdateProtector() {
Protectors::InvalidateArraySpeciesLookupChain(isolate_);
} else if (isolate_->IsInAnyContext(*receiver,
Context::PROMISE_PROTOTYPE_INDEX)) {
- if (!isolate_->IsPromiseSpeciesLookupChainIntact()) return;
- isolate_->InvalidatePromiseSpeciesProtector();
+ if (!Protectors::IsPromiseSpeciesLookupChainIntact(isolate_)) return;
+ Protectors::InvalidatePromiseSpeciesLookupChain(isolate_);
} else if (isolate_->IsInAnyContext(*receiver,
Context::REGEXP_PROTOTYPE_INDEX)) {
if (!Protectors::IsRegExpSpeciesLookupChainProtectorIntact(
@@ -307,8 +307,8 @@ void LookupIterator::InternalUpdateProtector() {
} else if (isolate_->IsInAnyContext(
receiver->map(isolate_).prototype(isolate_),
Context::TYPED_ARRAY_PROTOTYPE_INDEX)) {
- if (!isolate_->IsTypedArraySpeciesLookupChainIntact()) return;
- isolate_->InvalidateTypedArraySpeciesProtector();
+ if (!Protectors::IsTypedArraySpeciesLookupChainIntact(isolate_)) return;
+ Protectors::InvalidateTypedArraySpeciesLookupChain(isolate_);
}
}
} else if (*name_ == roots.next_string()) {
@@ -317,26 +317,26 @@ void LookupIterator::InternalUpdateProtector() {
*receiver, Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_INDEX)) {
// Setting the next property of %ArrayIteratorPrototype% also needs to
// invalidate the array iterator protector.
- if (!isolate_->IsArrayIteratorLookupChainIntact()) return;
- isolate_->InvalidateArrayIteratorProtector();
+ if (!Protectors::IsArrayIteratorLookupChainIntact(isolate_)) return;
+ Protectors::InvalidateArrayIteratorLookupChain(isolate_);
} else if (receiver->IsJSMapIterator() ||
isolate_->IsInAnyContext(
*receiver, Context::INITIAL_MAP_ITERATOR_PROTOTYPE_INDEX)) {
- if (!isolate_->IsMapIteratorLookupChainIntact()) return;
- isolate_->InvalidateMapIteratorProtector();
+ if (!Protectors::IsMapIteratorLookupChainIntact(isolate_)) return;
+ Protectors::InvalidateMapIteratorLookupChain(isolate_);
} else if (receiver->IsJSSetIterator() ||
isolate_->IsInAnyContext(
*receiver, Context::INITIAL_SET_ITERATOR_PROTOTYPE_INDEX)) {
- if (!isolate_->IsSetIteratorLookupChainIntact()) return;
- isolate_->InvalidateSetIteratorProtector();
+ if (!Protectors::IsSetIteratorLookupChainIntact(isolate_)) return;
+ Protectors::InvalidateSetIteratorLookupChain(isolate_);
} else if (receiver->IsJSStringIterator() ||
isolate_->IsInAnyContext(
*receiver,
Context::INITIAL_STRING_ITERATOR_PROTOTYPE_INDEX)) {
// Setting the next property of %StringIteratorPrototype% invalidates the
// string iterator protector.
- if (!isolate_->IsStringIteratorLookupChainIntact()) return;
- isolate_->InvalidateStringIteratorProtector();
+ if (!Protectors::IsStringIteratorLookupChainIntact(isolate_)) return;
+ Protectors::InvalidateStringIteratorLookupChain(isolate_);
}
} else if (*name_ == roots.species_symbol()) {
// Fetching the context in here since the operation is rather expensive.
@@ -345,10 +345,10 @@ void LookupIterator::InternalUpdateProtector() {
}
if (!Protectors::IsArraySpeciesLookupChainIntact(isolate_) &&
- !isolate_->IsPromiseSpeciesLookupChainIntact() &&
+ !Protectors::IsPromiseSpeciesLookupChainIntact(isolate_) &&
!Protectors::IsRegExpSpeciesLookupChainProtectorIntact(
native_context) &&
- !isolate_->IsTypedArraySpeciesLookupChainIntact()) {
+ !Protectors::IsTypedArraySpeciesLookupChainIntact(isolate_)) {
return;
}
// Setting the Symbol.species property of any Array, Promise or TypedArray
@@ -360,8 +360,8 @@ void LookupIterator::InternalUpdateProtector() {
Protectors::InvalidateArraySpeciesLookupChain(isolate_);
} else if (isolate_->IsInAnyContext(*receiver,
Context::PROMISE_FUNCTION_INDEX)) {
- if (!isolate_->IsPromiseSpeciesLookupChainIntact()) return;
- isolate_->InvalidatePromiseSpeciesProtector();
+ if (!Protectors::IsPromiseSpeciesLookupChainIntact(isolate_)) return;
+ Protectors::InvalidatePromiseSpeciesLookupChain(isolate_);
} else if (isolate_->IsInAnyContext(*receiver,
Context::REGEXP_FUNCTION_INDEX)) {
if (!Protectors::IsRegExpSpeciesLookupChainProtectorIntact(
@@ -371,37 +371,37 @@ void LookupIterator::InternalUpdateProtector() {
Protectors::InvalidateRegExpSpeciesLookupChainProtector(isolate_,
native_context);
} else if (IsTypedArrayFunctionInAnyContext(isolate_, *receiver)) {
- if (!isolate_->IsTypedArraySpeciesLookupChainIntact()) return;
- isolate_->InvalidateTypedArraySpeciesProtector();
+ if (!Protectors::IsTypedArraySpeciesLookupChainIntact(isolate_)) return;
+ Protectors::InvalidateTypedArraySpeciesLookupChain(isolate_);
}
} else if (*name_ == roots.is_concat_spreadable_symbol()) {
- if (!isolate_->IsIsConcatSpreadableLookupChainIntact()) return;
- isolate_->InvalidateIsConcatSpreadableProtector();
+ if (!Protectors::IsIsConcatSpreadableLookupChainIntact(isolate_)) return;
+ Protectors::InvalidateIsConcatSpreadableLookupChain(isolate_);
} else if (*name_ == roots.iterator_symbol()) {
if (receiver->IsJSArray(isolate_)) {
- if (!isolate_->IsArrayIteratorLookupChainIntact()) return;
- isolate_->InvalidateArrayIteratorProtector();
+ if (!Protectors::IsArrayIteratorLookupChainIntact(isolate_)) return;
+ Protectors::InvalidateArrayIteratorLookupChain(isolate_);
} else if (receiver->IsJSSet(isolate_) || receiver->IsJSSetIterator() ||
isolate_->IsInAnyContext(
*receiver, Context::INITIAL_SET_ITERATOR_PROTOTYPE_INDEX) ||
isolate_->IsInAnyContext(*receiver,
Context::INITIAL_SET_PROTOTYPE_INDEX)) {
- if (isolate_->IsSetIteratorLookupChainIntact()) {
- isolate_->InvalidateSetIteratorProtector();
+ if (Protectors::IsSetIteratorLookupChainIntact(isolate_)) {
+ Protectors::InvalidateSetIteratorLookupChain(isolate_);
}
} else if (receiver->IsJSMapIterator() ||
isolate_->IsInAnyContext(
*receiver, Context::INITIAL_MAP_ITERATOR_PROTOTYPE_INDEX)) {
- if (isolate_->IsMapIteratorLookupChainIntact()) {
- isolate_->InvalidateMapIteratorProtector();
+ if (Protectors::IsMapIteratorLookupChainIntact(isolate_)) {
+ Protectors::InvalidateMapIteratorLookupChain(isolate_);
}
} else if (isolate_->IsInAnyContext(
*receiver, Context::INITIAL_ITERATOR_PROTOTYPE_INDEX)) {
- if (isolate_->IsMapIteratorLookupChainIntact()) {
- isolate_->InvalidateMapIteratorProtector();
+ if (Protectors::IsMapIteratorLookupChainIntact(isolate_)) {
+ Protectors::InvalidateMapIteratorLookupChain(isolate_);
}
- if (isolate_->IsSetIteratorLookupChainIntact()) {
- isolate_->InvalidateSetIteratorProtector();
+ if (Protectors::IsSetIteratorLookupChainIntact(isolate_)) {
+ Protectors::InvalidateSetIteratorLookupChain(isolate_);
}
} else if (isolate_->IsInAnyContext(
*receiver, Context::INITIAL_STRING_PROTOTYPE_INDEX)) {
@@ -409,18 +409,18 @@ void LookupIterator::InternalUpdateProtector() {
// the string iterator protector. Symbol.iterator can also be set on a
      // String wrapper, but not on a primitive string. We only support the
      // protector for primitive strings.
- if (!isolate_->IsStringIteratorLookupChainIntact()) return;
- isolate_->InvalidateStringIteratorProtector();
+ if (!Protectors::IsStringIteratorLookupChainIntact(isolate_)) return;
+ Protectors::InvalidateStringIteratorLookupChain(isolate_);
}
} else if (*name_ == roots.resolve_string()) {
- if (!isolate_->IsPromiseResolveLookupChainIntact()) return;
+ if (!Protectors::IsPromiseResolveLookupChainIntact(isolate_)) return;
// Setting the "resolve" property on any %Promise% intrinsic object
// invalidates the Promise.resolve protector.
if (isolate_->IsInAnyContext(*receiver, Context::PROMISE_FUNCTION_INDEX)) {
- isolate_->InvalidatePromiseResolveProtector();
+ Protectors::InvalidatePromiseResolveLookupChain(isolate_);
}
} else if (*name_ == roots.then_string()) {
- if (!isolate_->IsPromiseThenLookupChainIntact()) return;
+ if (!Protectors::IsPromiseThenLookupChainIntact(isolate_)) return;
// Setting the "then" property on any JSPromise instance or on the
// initial %PromisePrototype% invalidates the Promise#then protector.
// Also setting the "then" property on the initial %ObjectPrototype%
@@ -432,7 +432,7 @@ void LookupIterator::InternalUpdateProtector() {
isolate_->IsInAnyContext(*receiver,
Context::INITIAL_OBJECT_PROTOTYPE_INDEX) ||
isolate_->IsInAnyContext(*receiver, Context::PROMISE_PROTOTYPE_INDEX)) {
- isolate_->InvalidatePromiseThenProtector();
+ Protectors::InvalidatePromiseThenLookupChain(isolate_);
}
}
}
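// The hunks above migrate protector checks from per-Isolate methods to the
// static Protectors class. Judging from these call sites, each protector
// exposes a check/invalidate pair along the following lines (a sketch, not
// the actual src/execution/protectors.h declarations):
//
//   class Protectors : public AllStatic {
//    public:
//     static bool IsPromiseSpeciesLookupChainIntact(Isolate* isolate);
//     static void InvalidatePromiseSpeciesLookupChain(Isolate* isolate);
//     // ...analogous Is*Intact / Invalidate*LookupChain pairs for the
//     // array/map/set/string iterator and other protectors used above.
//   };
//
// Every call site keeps the same idiom: return early if the protector is
// already invalidated, otherwise invalidate it exactly once.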
@@ -534,7 +534,7 @@ void LookupIterator::ReconfigureDataProperty(Handle<Object> value,
DCHECK(attributes != NONE || !holder_obj->HasFastElements(isolate_));
Handle<FixedArrayBase> elements(holder_obj->elements(isolate_), isolate());
holder_obj->GetElementsAccessor(isolate_)->Reconfigure(
- holder_obj, elements, number_, value, attributes);
+ holder_obj, elements, InternalIndex(number_), value, attributes);
ReloadPropertyInformation<true>();
} else if (holder_obj->HasFastProperties(isolate_)) {
Handle<Map> old_map(holder_obj->map(isolate_), isolate_);
@@ -699,8 +699,7 @@ void LookupIterator::ApplyTransitionToDataProperty(
}
if (simple_transition) {
- int number = transition->LastAdded();
- number_ = static_cast<uint32_t>(number);
+ number_ = transition->LastAdded().as_uint32();
property_details_ = transition->GetLastDescriptorDetails(isolate_);
state_ = DATA;
} else if (receiver->map(isolate_).is_dictionary_map()) {
@@ -731,7 +730,7 @@ void LookupIterator::Delete() {
if (IsElement()) {
Handle<JSObject> object = Handle<JSObject>::cast(holder);
ElementsAccessor* accessor = object->GetElementsAccessor(isolate_);
- accessor->Delete(object, number_);
+ accessor->Delete(object, InternalIndex(number_));
} else {
DCHECK(!name()->IsPrivateName(isolate_));
bool is_prototype_map = holder->map(isolate_).is_prototype_map();
@@ -777,8 +776,11 @@ void LookupIterator::TransitionToAccessorProperty(
} else if (state_ == INTERCEPTOR) {
LookupInRegularHolder<false>(*old_map, *holder_);
}
- int descriptor =
- IsFound() ? static_cast<int>(number_) : DescriptorArray::kNotFound;
+ // TODO(jkummerow): {IsFound()} should be enough once {number_} has type
+ // {InternalIndex}.
+ InternalIndex descriptor = (IsFound() && number_ != kMaxUInt32)
+ ? InternalIndex(number_)
+ : InternalIndex::NotFound();
Handle<Map> new_map = Map::TransitionToAccessorProperty(
isolate_, old_map, name_, descriptor, getter, setter, attributes);
@@ -787,8 +789,7 @@ void LookupIterator::TransitionToAccessorProperty(
JSObject::MigrateToMap(isolate_, receiver, new_map);
if (simple_transition) {
- int number = new_map->LastAdded();
- number_ = static_cast<uint32_t>(number);
+ number_ = new_map->LastAdded().as_uint32();
property_details_ = new_map->GetLastDescriptorDetails(isolate_);
state_ = ACCESSOR;
return;
@@ -894,23 +895,24 @@ Handle<Object> LookupIterator::FetchValue() const {
if (IsElement()) {
Handle<JSObject> holder = GetHolder<JSObject>();
ElementsAccessor* accessor = holder->GetElementsAccessor(isolate_);
- return accessor->Get(holder, number_);
+ return accessor->Get(holder, InternalIndex(number_));
} else if (holder_->IsJSGlobalObject(isolate_)) {
Handle<JSGlobalObject> holder = GetHolder<JSGlobalObject>();
result = holder->global_dictionary(isolate_).ValueAt(isolate_, number_);
} else if (!holder_->HasFastProperties(isolate_)) {
- result = holder_->property_dictionary(isolate_).ValueAt(isolate_, number_);
+ result = holder_->property_dictionary(isolate_).ValueAt(isolate_,
+ dictionary_entry());
} else if (property_details_.location() == kField) {
DCHECK_EQ(kData, property_details_.kind());
Handle<JSObject> holder = GetHolder<JSObject>();
FieldIndex field_index =
- FieldIndex::ForDescriptor(holder->map(isolate_), number_);
+ FieldIndex::ForDescriptor(holder->map(isolate_), descriptor_number());
return JSObject::FastPropertyAt(holder, property_details_.representation(),
field_index);
} else {
result =
holder_->map(isolate_).instance_descriptors(isolate_).GetStrongValue(
- isolate_, number_);
+ isolate_, descriptor_number());
}
return handle(result, isolate_);
}
@@ -922,7 +924,7 @@ bool LookupIterator::IsConstFieldValueEqualTo(Object value) const {
DCHECK_EQ(PropertyConstness::kConst, property_details_.constness());
Handle<JSObject> holder = GetHolder<JSObject>();
FieldIndex field_index =
- FieldIndex::ForDescriptor(holder->map(isolate_), number_);
+ FieldIndex::ForDescriptor(holder->map(isolate_), descriptor_number());
if (property_details_.representation().IsDouble()) {
if (!value.IsNumber(isolate_)) return false;
uint64_t bits;
@@ -958,7 +960,8 @@ int LookupIterator::GetFieldDescriptorIndex() const {
DCHECK(holder_->HasFastProperties());
DCHECK_EQ(kField, property_details_.location());
DCHECK_EQ(kData, property_details_.kind());
- return descriptor_number();
+ // TODO(jkummerow): Propagate InternalIndex further.
+ return descriptor_number().as_int();
}
int LookupIterator::GetAccessorIndex() const {
@@ -966,7 +969,7 @@ int LookupIterator::GetAccessorIndex() const {
DCHECK(holder_->HasFastProperties(isolate_));
DCHECK_EQ(kDescriptor, property_details_.location());
DCHECK_EQ(kAccessor, property_details_.kind());
- return descriptor_number();
+ return descriptor_number().as_int();
}
Handle<Map> LookupIterator::GetFieldOwnerMap() const {
@@ -1028,7 +1031,7 @@ void LookupIterator::WriteDataValue(Handle<Object> value,
if (IsElement()) {
Handle<JSObject> object = Handle<JSObject>::cast(holder);
ElementsAccessor* accessor = object->GetElementsAccessor(isolate_);
- accessor->Set(object, number_, *value);
+ accessor->Set(object, InternalIndex(number_), *value);
} else if (holder->HasFastProperties(isolate_)) {
if (property_details_.location() == kField) {
// Check that in case of VariableMode::kConst field the existing value is
@@ -1164,13 +1167,15 @@ LookupIterator::State LookupIterator::LookupInRegularHolder(
JSObject js_object = JSObject::cast(holder);
ElementsAccessor* accessor = js_object.GetElementsAccessor(isolate_);
FixedArrayBase backing_store = js_object.elements(isolate_);
- number_ =
+ // TODO(jkummerow): {number_} should have type InternalIndex.
+ InternalIndex entry =
accessor->GetEntryForIndex(isolate_, js_object, backing_store, index_);
+ number_ = entry.is_found() ? entry.as_uint32() : kMaxUInt32;
if (number_ == kMaxUInt32) {
return holder.IsJSTypedArray(isolate_) ? INTEGER_INDEXED_EXOTIC
: NOT_FOUND;
}
- property_details_ = accessor->GetDetails(js_object, number_);
+ property_details_ = accessor->GetDetails(js_object, InternalIndex(number_));
if (map.has_frozen_elements()) {
property_details_ = property_details_.CopyAddAttributes(FROZEN);
} else if (map.has_sealed_elements()) {
@@ -1178,10 +1183,10 @@ LookupIterator::State LookupIterator::LookupInRegularHolder(
}
} else if (!map.is_dictionary_map()) {
DescriptorArray descriptors = map.instance_descriptors(isolate_);
- int number = descriptors.SearchWithCache(isolate_, *name_, map);
- if (number == DescriptorArray::kNotFound) return NotFound(holder);
- number_ = static_cast<uint32_t>(number);
- property_details_ = descriptors.GetDetails(number_);
+ InternalIndex number = descriptors.SearchWithCache(isolate_, *name_, map);
+ if (number.is_not_found()) return NotFound(holder);
+ number_ = number.as_uint32();
+ property_details_ = descriptors.GetDetails(InternalIndex(number_));
} else {
DCHECK_IMPLIES(holder.IsJSProxy(isolate_), name()->IsPrivate(isolate_));
NameDictionary dict = holder.property_dictionary(isolate_);
diff --git a/deps/v8/src/objects/lookup.h b/deps/v8/src/objects/lookup.h
index 565ea4bb75..2a1f0e2f1b 100644
--- a/deps/v8/src/objects/lookup.h
+++ b/deps/v8/src/objects/lookup.h
@@ -241,7 +241,7 @@ class V8_EXPORT_PRIVATE LookupIterator final {
bool check_interceptor() const {
return (configuration_ & kInterceptor) != 0;
}
- inline int descriptor_number() const;
+ inline InternalIndex descriptor_number() const;
inline int dictionary_entry() const;
static inline Configuration ComputeConfiguration(Isolate* isolate,
diff --git a/deps/v8/src/objects/map-inl.h b/deps/v8/src/objects/map-inl.h
index 48bb86e2da..557c004401 100644
--- a/deps/v8/src/objects/map-inl.h
+++ b/deps/v8/src/objects/map-inl.h
@@ -112,7 +112,7 @@ bool Map::IsMostGeneralFieldType(Representation representation,
bool Map::CanHaveFastTransitionableElementsKind(InstanceType instance_type) {
return instance_type == JS_ARRAY_TYPE ||
instance_type == JS_PRIMITIVE_WRAPPER_TYPE ||
- instance_type == JS_ARGUMENTS_TYPE;
+ instance_type == JS_ARGUMENTS_OBJECT_TYPE;
}
bool Map::CanHaveFastTransitionableElementsKind() const {
@@ -177,10 +177,10 @@ PropertyDetails Map::GetLastDescriptorDetails(Isolate* isolate) const {
return instance_descriptors(isolate).GetDetails(LastAdded());
}
-int Map::LastAdded() const {
+InternalIndex Map::LastAdded() const {
int number_of_own_descriptors = NumberOfOwnDescriptors();
DCHECK_GT(number_of_own_descriptors, 0);
- return number_of_own_descriptors - 1;
+ return InternalIndex(number_of_own_descriptors - 1);
}
int Map::NumberOfOwnDescriptors() const {
@@ -194,6 +194,10 @@ void Map::SetNumberOfOwnDescriptors(int number) {
set_bit_field3(NumberOfOwnDescriptorsBits::update(bit_field3(), number));
}
+InternalIndex::Range Map::IterateOwnDescriptors() const {
+ return InternalIndex::Range(NumberOfOwnDescriptors());
+}
+
int Map::EnumLength() const { return EnumLengthBits::decode(bit_field3()); }
void Map::SetEnumLength(int length) {
@@ -207,7 +211,8 @@ void Map::SetEnumLength(int length) {
FixedArrayBase Map::GetInitialElements() const {
FixedArrayBase result;
- if (has_fast_elements() || has_fast_string_wrapper_elements()) {
+ if (has_fast_elements() || has_fast_string_wrapper_elements() ||
+ has_any_nonextensible_elements()) {
result = GetReadOnlyRoots().empty_fixed_array();
} else if (has_fast_sloppy_arguments_elements()) {
result = GetReadOnlyRoots().empty_sloppy_arguments_elements();
@@ -540,12 +545,12 @@ void Map::mark_unstable() {
bool Map::is_stable() const { return !IsUnstableBit::decode(bit_field3()); }
bool Map::CanBeDeprecated() const {
- int descriptor = LastAdded();
- for (int i = 0; i <= descriptor; i++) {
+ for (InternalIndex i : IterateOwnDescriptors()) {
PropertyDetails details = instance_descriptors().GetDetails(i);
if (details.representation().IsNone()) return true;
if (details.representation().IsSmi()) return true;
- if (details.representation().IsDouble()) return true;
+ if (details.representation().IsDouble() && FLAG_unbox_double_fields)
+ return true;
if (details.representation().IsHeapObject()) return true;
if (details.kind() == kData && details.location() == kDescriptor) {
return true;
@@ -584,7 +589,7 @@ bool Map::IsNullOrUndefinedMap() const {
}
bool Map::IsPrimitiveMap() const {
- return instance_type() <= LAST_PRIMITIVE_TYPE;
+ return instance_type() <= LAST_PRIMITIVE_HEAP_OBJECT_TYPE;
}
LayoutDescriptor Map::layout_descriptor_gc_safe() const {
@@ -675,8 +680,10 @@ void Map::AppendDescriptor(Isolate* isolate, Descriptor* desc) {
// barrier.
descriptors.Append(desc);
SetNumberOfOwnDescriptors(number_of_own_descriptors + 1);
+#ifndef V8_DISABLE_WRITE_BARRIERS
MarkingBarrierForDescriptorArray(isolate->heap(), *this, descriptors,
number_of_own_descriptors + 1);
+#endif
}
// Properly mark the map if the {desc} is an "interesting symbol".
if (desc->GetKey()->IsInterestingSymbol()) {
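// IterateOwnDescriptors() (added above) wraps NumberOfOwnDescriptors() in an
// InternalIndex::Range, which is what turns the descriptor loops elsewhere
// in this patch into range-for loops. A representative before/after, with a
// Map `map` and DescriptorArray `descriptors` as in the surrounding hunks:
//
//   // Before: raw int loop bounded by the descriptor count.
//   for (int i = 0; i < map.NumberOfOwnDescriptors(); i++) {
//     PropertyDetails details = descriptors.GetDetails(i);
//   }
//
//   // After: typed indices, same iteration order.
//   for (InternalIndex i : map.IterateOwnDescriptors()) {
//     PropertyDetails details = descriptors.GetDetails(i);
//   }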
diff --git a/deps/v8/src/objects/map-updater.cc b/deps/v8/src/objects/map-updater.cc
index 49b9ccea91..8c9b94014f 100644
--- a/deps/v8/src/objects/map-updater.cc
+++ b/deps/v8/src/objects/map-updater.cc
@@ -38,12 +38,12 @@ MapUpdater::MapUpdater(Isolate* isolate, Handle<Map> old_map)
!old_map->FindRootMap(isolate).GetConstructor().IsFunctionTemplateInfo());
}
-Name MapUpdater::GetKey(int descriptor) const {
+Name MapUpdater::GetKey(InternalIndex descriptor) const {
return old_descriptors_->GetKey(descriptor);
}
-PropertyDetails MapUpdater::GetDetails(int descriptor) const {
- DCHECK_LE(0, descriptor);
+PropertyDetails MapUpdater::GetDetails(InternalIndex descriptor) const {
+ DCHECK(descriptor.is_found());
if (descriptor == modified_descriptor_) {
PropertyAttributes attributes = new_attributes_;
      // If the original map was sealed or frozen, let us use the old
@@ -59,8 +59,8 @@ PropertyDetails MapUpdater::GetDetails(int descriptor) const {
return old_descriptors_->GetDetails(descriptor);
}
-Object MapUpdater::GetValue(int descriptor) const {
- DCHECK_LE(0, descriptor);
+Object MapUpdater::GetValue(InternalIndex descriptor) const {
+ DCHECK(descriptor.is_found());
if (descriptor == modified_descriptor_) {
DCHECK_EQ(kDescriptor, new_location_);
return *new_value_;
@@ -69,8 +69,8 @@ Object MapUpdater::GetValue(int descriptor) const {
return old_descriptors_->GetStrongValue(descriptor);
}
-FieldType MapUpdater::GetFieldType(int descriptor) const {
- DCHECK_LE(0, descriptor);
+FieldType MapUpdater::GetFieldType(InternalIndex descriptor) const {
+ DCHECK(descriptor.is_found());
if (descriptor == modified_descriptor_) {
DCHECK_EQ(kField, new_location_);
return *new_field_type_;
@@ -80,9 +80,9 @@ FieldType MapUpdater::GetFieldType(int descriptor) const {
}
Handle<FieldType> MapUpdater::GetOrComputeFieldType(
- int descriptor, PropertyLocation location,
+ InternalIndex descriptor, PropertyLocation location,
Representation representation) const {
- DCHECK_LE(0, descriptor);
+ DCHECK(descriptor.is_found());
// |location| is just a pre-fetched GetDetails(descriptor).location().
DCHECK_EQ(location, GetDetails(descriptor).location());
if (location == kField) {
@@ -93,7 +93,7 @@ Handle<FieldType> MapUpdater::GetOrComputeFieldType(
}
Handle<FieldType> MapUpdater::GetOrComputeFieldType(
- Handle<DescriptorArray> descriptors, int descriptor,
+ Handle<DescriptorArray> descriptors, InternalIndex descriptor,
PropertyLocation location, Representation representation) {
// |location| is just a pre-fetched GetDetails(descriptor).location().
DCHECK_EQ(descriptors->GetDetails(descriptor).location(), location);
@@ -105,13 +105,13 @@ Handle<FieldType> MapUpdater::GetOrComputeFieldType(
}
}
-Handle<Map> MapUpdater::ReconfigureToDataField(int descriptor,
+Handle<Map> MapUpdater::ReconfigureToDataField(InternalIndex descriptor,
PropertyAttributes attributes,
PropertyConstness constness,
Representation representation,
Handle<FieldType> field_type) {
DCHECK_EQ(kInitialized, state_);
- DCHECK_LE(0, descriptor);
+ DCHECK(descriptor.is_found());
DCHECK(!old_map_->is_dictionary_map());
modified_descriptor_ = descriptor;
new_kind_ = kData;
@@ -190,7 +190,7 @@ Handle<Map> MapUpdater::Update() {
return result_map_;
}
-void MapUpdater::GeneralizeField(Handle<Map> map, int modify_index,
+void MapUpdater::GeneralizeField(Handle<Map> map, InternalIndex modify_index,
PropertyConstness new_constness,
Representation new_representation,
Handle<FieldType> new_field_type) {
@@ -338,7 +338,8 @@ MapUpdater::State MapUpdater::FindRootMap() {
}
int root_nof = root_map_->NumberOfOwnDescriptors();
- if (modified_descriptor_ >= 0 && modified_descriptor_ < root_nof) {
+ if (modified_descriptor_.is_found() &&
+ modified_descriptor_.as_int() < root_nof) {
PropertyDetails old_details =
old_descriptors_->GetDetails(modified_descriptor_);
if (old_details.kind() != new_kind_ ||
@@ -374,7 +375,7 @@ MapUpdater::State MapUpdater::FindTargetMap() {
target_map_ = root_map_;
int root_nof = root_map_->NumberOfOwnDescriptors();
- for (int i = root_nof; i < old_nof_; ++i) {
+ for (InternalIndex i : InternalIndex::Range(root_nof, old_nof_)) {
PropertyDetails old_details = GetDetails(i);
Map transition = TransitionsAccessor(isolate_, target_map_)
.SearchTransition(GetKey(i), old_details.kind(),
@@ -423,7 +424,7 @@ MapUpdater::State MapUpdater::FindTargetMap() {
int target_nof = target_map_->NumberOfOwnDescriptors();
if (target_nof == old_nof_) {
#ifdef DEBUG
- if (modified_descriptor_ >= 0) {
+ if (modified_descriptor_.is_found()) {
DescriptorArray target_descriptors = target_map_->instance_descriptors();
PropertyDetails details =
target_descriptors.GetDetails(modified_descriptor_);
@@ -465,7 +466,7 @@ MapUpdater::State MapUpdater::FindTargetMap() {
}
// Find the last compatible target map in the transition tree.
- for (int i = target_nof; i < old_nof_; ++i) {
+ for (InternalIndex i : InternalIndex::Range(target_nof, old_nof_)) {
PropertyDetails old_details = GetDetails(i);
Map transition = TransitionsAccessor(isolate_, target_map_)
.SearchTransition(GetKey(i), old_details.kind(),
@@ -521,7 +522,7 @@ Handle<DescriptorArray> MapUpdater::BuildDescriptorArray() {
// general than we requested. Take |root_nof| entries as is.
// 0 -> |root_nof|
int current_offset = 0;
- for (int i = 0; i < root_nof; ++i) {
+ for (InternalIndex i : InternalIndex::Range(root_nof)) {
PropertyDetails old_details = old_descriptors_->GetDetails(i);
if (old_details.location() == kField) {
current_offset += old_details.field_width_in_words();
@@ -534,7 +535,7 @@ Handle<DescriptorArray> MapUpdater::BuildDescriptorArray() {
// Merge "updated" old_descriptor entries with target_descriptor entries.
// |root_nof| -> |target_nof|
- for (int i = root_nof; i < target_nof; ++i) {
+ for (InternalIndex i : InternalIndex::Range(root_nof, target_nof)) {
Handle<Name> key(GetKey(i), isolate_);
PropertyDetails old_details = GetDetails(i);
PropertyDetails target_details = target_descriptors->GetDetails(i);
@@ -606,7 +607,7 @@ Handle<DescriptorArray> MapUpdater::BuildDescriptorArray() {
// Take "updated" old_descriptor entries.
// |target_nof| -> |old_nof|
- for (int i = target_nof; i < old_nof_; ++i) {
+ for (InternalIndex i : InternalIndex::Range(target_nof, old_nof_)) {
PropertyDetails old_details = GetDetails(i);
Handle<Name> key(GetKey(i), isolate_);
@@ -665,7 +666,7 @@ Handle<Map> MapUpdater::FindSplitMap(Handle<DescriptorArray> descriptors) {
int root_nof = root_map_->NumberOfOwnDescriptors();
Map current = *root_map_;
- for (int i = root_nof; i < old_nof_; i++) {
+ for (InternalIndex i : InternalIndex::Range(root_nof, old_nof_)) {
Name name = descriptors->GetKey(i);
PropertyDetails details = descriptors->GetDetails(i);
Map next =
@@ -707,13 +708,13 @@ MapUpdater::State MapUpdater::ConstructNewMap() {
state_ = kAtIntegrityLevelSource;
return state_;
}
-
- PropertyDetails split_details = GetDetails(split_nof);
+ InternalIndex split_index(split_nof);
+ PropertyDetails split_details = GetDetails(split_index);
TransitionsAccessor transitions(isolate_, split_map);
// Invalidate a transition target at |key|.
Map maybe_transition = transitions.SearchTransition(
- GetKey(split_nof), split_details.kind(), split_details.attributes());
+ GetKey(split_index), split_details.kind(), split_details.attributes());
if (!maybe_transition.is_null()) {
maybe_transition.DeprecateTransitionTree(isolate_);
}
@@ -727,7 +728,7 @@ MapUpdater::State MapUpdater::ConstructNewMap() {
old_map_->NotifyLeafMapLayoutChange(isolate_);
- if (FLAG_trace_generalization && modified_descriptor_ >= 0) {
+ if (FLAG_trace_generalization && modified_descriptor_.is_found()) {
PropertyDetails old_details =
old_descriptors_->GetDetails(modified_descriptor_);
PropertyDetails new_details =
diff --git a/deps/v8/src/objects/map-updater.h b/deps/v8/src/objects/map-updater.h
index 6ee373cbdf..11bdd0859f 100644
--- a/deps/v8/src/objects/map-updater.h
+++ b/deps/v8/src/objects/map-updater.h
@@ -54,7 +54,7 @@ class MapUpdater {
// Prepares for reconfiguring of a property at |descriptor| to data field
// with given |attributes| and |representation|/|field_type| and
// performs the steps 1-5.
- Handle<Map> ReconfigureToDataField(int descriptor,
+ Handle<Map> ReconfigureToDataField(InternalIndex descriptor,
PropertyAttributes attributes,
PropertyConstness constness,
Representation representation,
@@ -127,26 +127,26 @@ class MapUpdater {
State Normalize(const char* reason);
// Returns name of a |descriptor| property.
- inline Name GetKey(int descriptor) const;
+ inline Name GetKey(InternalIndex descriptor) const;
  // Returns property details of a |descriptor| in "updated" |old_descriptors_|
// array.
- inline PropertyDetails GetDetails(int descriptor) const;
+ inline PropertyDetails GetDetails(InternalIndex descriptor) const;
// Returns value of a |descriptor| with kDescriptor location in "updated"
  // |old_descriptors_| array.
- inline Object GetValue(int descriptor) const;
+ inline Object GetValue(InternalIndex descriptor) const;
// Returns field type for a |descriptor| with kField location in "updated"
  // |old_descriptors_| array.
- inline FieldType GetFieldType(int descriptor) const;
+ inline FieldType GetFieldType(InternalIndex descriptor) const;
// If a |descriptor| property in "updated" |old_descriptors_| has kField
  // location then returns its field type; otherwise computes the optimal field
// type for the descriptor's value and |representation|. The |location|
// value must be a pre-fetched location for |descriptor|.
inline Handle<FieldType> GetOrComputeFieldType(
- int descriptor, PropertyLocation location,
+ InternalIndex descriptor, PropertyLocation location,
Representation representation) const;
// If a |descriptor| property in given |descriptors| array has kField
@@ -154,10 +154,10 @@ class MapUpdater {
// type for the descriptor's value and |representation|.
// The |location| value must be a pre-fetched location for |descriptor|.
inline Handle<FieldType> GetOrComputeFieldType(
- Handle<DescriptorArray> descriptors, int descriptor,
+ Handle<DescriptorArray> descriptors, InternalIndex descriptor,
PropertyLocation location, Representation representation);
- void GeneralizeField(Handle<Map> map, int modify_index,
+ void GeneralizeField(Handle<Map> map, InternalIndex modify_index,
PropertyConstness new_constness,
Representation new_representation,
Handle<FieldType> new_field_type);
@@ -182,9 +182,9 @@ class MapUpdater {
ElementsKind new_elements_kind_;
bool is_transitionable_fast_elements_kind_;
- // If |modified_descriptor_| is not equal to -1 then the fields below form
+ // If |modified_descriptor_.is_found()|, then the fields below form
// an "update" of the |old_map_|'s descriptors.
- int modified_descriptor_ = -1;
+ InternalIndex modified_descriptor_ = InternalIndex::NotFound();
PropertyKind new_kind_ = kData;
PropertyAttributes new_attributes_ = NONE;
PropertyConstness new_constness_ = PropertyConstness::kMutable;
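// With modified_descriptor_ typed as InternalIndex, the -1 sentinel is gone
// and its guards are rewritten as in the map-updater.cc hunks above:
//
//   // Before: magic value comparison.
//   if (modified_descriptor_ >= 0 && modified_descriptor_ < root_nof) { ... }
//
//   // After: explicit found/not-found state; the int comparison happens
//   // only once the index is known to be found.
//   if (modified_descriptor_.is_found() &&
//       modified_descriptor_.as_int() < root_nof) { ... }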
diff --git a/deps/v8/src/objects/map.cc b/deps/v8/src/objects/map.cc
index a672d6580a..0f448922eb 100644
--- a/deps/v8/src/objects/map.cc
+++ b/deps/v8/src/objects/map.cc
@@ -56,20 +56,8 @@ MaybeHandle<JSFunction> Map::GetConstructorFunction(
return MaybeHandle<JSFunction>();
}
-bool Map::IsMapOfGlobalProxy(Handle<NativeContext> native_context) const {
- DisallowHeapAllocation no_gc;
- if (IsJSGlobalProxyMap()) {
- Object maybe_constructor = GetConstructor();
- // Detached global proxies have |null| as their constructor.
- return maybe_constructor.IsJSFunction() &&
- JSFunction::cast(maybe_constructor).native_context() ==
- *native_context;
- }
- return false;
-}
-
-void Map::PrintReconfiguration(Isolate* isolate, FILE* file, int modify_index,
- PropertyKind kind,
+void Map::PrintReconfiguration(Isolate* isolate, FILE* file,
+ InternalIndex modify_index, PropertyKind kind,
PropertyAttributes attributes) {
OFStream os(file);
os << "[reconfiguring]";
@@ -256,7 +244,7 @@ VisitorId Map::GetVisitorId(Map map) {
case CODE_DATA_CONTAINER_TYPE:
return kVisitCodeDataContainer;
- case WASM_INSTANCE_TYPE:
+ case WASM_INSTANCE_OBJECT_TYPE:
return kVisitWasmInstanceObject;
case PREPARSE_DATA_TYPE:
@@ -270,7 +258,7 @@ VisitorId Map::GetVisitorId(Map map) {
case JS_OBJECT_TYPE:
case JS_ERROR_TYPE:
- case JS_ARGUMENTS_TYPE:
+ case JS_ARGUMENTS_OBJECT_TYPE:
case JS_ASYNC_FROM_SYNC_ITERATOR_TYPE:
case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
case JS_GENERATOR_OBJECT_TYPE:
@@ -291,27 +279,27 @@ VisitorId Map::GetVisitorId(Map map) {
case JS_MAP_VALUE_ITERATOR_TYPE:
case JS_STRING_ITERATOR_TYPE:
case JS_PROMISE_TYPE:
- case JS_REGEXP_TYPE:
- case JS_REGEXP_STRING_ITERATOR_TYPE:
+ case JS_REG_EXP_TYPE:
+ case JS_REG_EXP_STRING_ITERATOR_TYPE:
case JS_FINALIZATION_GROUP_CLEANUP_ITERATOR_TYPE:
case JS_FINALIZATION_GROUP_TYPE:
#ifdef V8_INTL_SUPPORT
- case JS_INTL_V8_BREAK_ITERATOR_TYPE:
- case JS_INTL_COLLATOR_TYPE:
- case JS_INTL_DATE_TIME_FORMAT_TYPE:
- case JS_INTL_LIST_FORMAT_TYPE:
- case JS_INTL_LOCALE_TYPE:
- case JS_INTL_NUMBER_FORMAT_TYPE:
- case JS_INTL_PLURAL_RULES_TYPE:
- case JS_INTL_RELATIVE_TIME_FORMAT_TYPE:
- case JS_INTL_SEGMENT_ITERATOR_TYPE:
- case JS_INTL_SEGMENTER_TYPE:
+ case JS_V8_BREAK_ITERATOR_TYPE:
+ case JS_COLLATOR_TYPE:
+ case JS_DATE_TIME_FORMAT_TYPE:
+ case JS_LIST_FORMAT_TYPE:
+ case JS_LOCALE_TYPE:
+ case JS_NUMBER_FORMAT_TYPE:
+ case JS_PLURAL_RULES_TYPE:
+ case JS_RELATIVE_TIME_FORMAT_TYPE:
+ case JS_SEGMENT_ITERATOR_TYPE:
+ case JS_SEGMENTER_TYPE:
#endif // V8_INTL_SUPPORT
- case WASM_EXCEPTION_TYPE:
- case WASM_GLOBAL_TYPE:
- case WASM_MEMORY_TYPE:
- case WASM_MODULE_TYPE:
- case WASM_TABLE_TYPE:
+ case WASM_EXCEPTION_OBJECT_TYPE:
+ case WASM_GLOBAL_OBJECT_TYPE:
+ case WASM_MEMORY_OBJECT_TYPE:
+ case WASM_MODULE_OBJECT_TYPE:
+ case WASM_TABLE_OBJECT_TYPE:
case JS_BOUND_FUNCTION_TYPE: {
const bool has_raw_data_fields =
(FLAG_unbox_double_fields && !map.HasFastPointerLayout()) ||
@@ -371,12 +359,13 @@ VisitorId Map::GetVisitorId(Map map) {
}
void Map::PrintGeneralization(
- Isolate* isolate, FILE* file, const char* reason, int modify_index,
- int split, int descriptors, bool descriptor_to_field,
- Representation old_representation, Representation new_representation,
- PropertyConstness old_constness, PropertyConstness new_constness,
- MaybeHandle<FieldType> old_field_type, MaybeHandle<Object> old_value,
- MaybeHandle<FieldType> new_field_type, MaybeHandle<Object> new_value) {
+ Isolate* isolate, FILE* file, const char* reason,
+ InternalIndex modify_index, int split, int descriptors,
+ bool descriptor_to_field, Representation old_representation,
+ Representation new_representation, PropertyConstness old_constness,
+ PropertyConstness new_constness, MaybeHandle<FieldType> old_field_type,
+ MaybeHandle<Object> old_value, MaybeHandle<FieldType> new_field_type,
+ MaybeHandle<Object> new_value) {
OFStream os(file);
os << "[generalizing]";
Name name = instance_descriptors().GetKey(modify_index);
@@ -440,9 +429,9 @@ MaybeHandle<Map> Map::CopyWithField(Isolate* isolate, Handle<Map> map,
PropertyConstness constness,
Representation representation,
TransitionFlag flag) {
- DCHECK(
- DescriptorArray::kNotFound ==
- map->instance_descriptors().Search(*name, map->NumberOfOwnDescriptors()));
+ DCHECK(map->instance_descriptors()
+ .Search(*name, map->NumberOfOwnDescriptors())
+ .is_not_found());
// Ensure the descriptor array does not get too big.
if (map->NumberOfOwnDescriptors() >= kMaxNumberOfDescriptors) {
@@ -536,8 +525,7 @@ bool Map::InstancesNeedRewriting(Map target, int target_number_of_fields,
// If smi descriptors were replaced by double descriptors, rewrite.
DescriptorArray old_desc = instance_descriptors();
DescriptorArray new_desc = target.instance_descriptors();
- int limit = NumberOfOwnDescriptors();
- for (int i = 0; i < limit; i++) {
+ for (InternalIndex i : IterateOwnDescriptors()) {
if (new_desc.GetDetails(i).representation().IsDouble() !=
old_desc.GetDetails(i).representation().IsDouble()) {
return true;
@@ -562,7 +550,7 @@ bool Map::InstancesNeedRewriting(Map target, int target_number_of_fields,
int Map::NumberOfFields() const {
DescriptorArray descriptors = instance_descriptors();
int result = 0;
- for (int i = 0; i < NumberOfOwnDescriptors(); i++) {
+ for (InternalIndex i : IterateOwnDescriptors()) {
if (descriptors.GetDetails(i).location() == kField) result++;
}
return result;
@@ -572,7 +560,7 @@ Map::FieldCounts Map::GetFieldCounts() const {
DescriptorArray descriptors = instance_descriptors();
int mutable_count = 0;
int const_count = 0;
- for (int i = 0; i < NumberOfOwnDescriptors(); i++) {
+ for (InternalIndex i : IterateOwnDescriptors()) {
PropertyDetails details = descriptors.GetDetails(i);
if (details.location() == kField) {
switch (details.constness()) {
@@ -625,8 +613,10 @@ void Map::ReplaceDescriptors(Isolate* isolate, DescriptorArray new_descriptors,
// descriptors will not be trimmed in the mark-compactor, we need to mark
// all its elements.
Map current = *this;
+#ifndef V8_DISABLE_WRITE_BARRIERS
MarkingBarrierForDescriptorArray(isolate->heap(), current, to_replace,
to_replace.number_of_descriptors());
+#endif
while (current.instance_descriptors(isolate) == to_replace) {
Object next = current.GetBackPointer(isolate);
if (next.IsUndefined(isolate)) break; // Stop overwriting at initial map.
@@ -654,7 +644,7 @@ Map Map::FindRootMap(Isolate* isolate) const {
}
}
-Map Map::FindFieldOwner(Isolate* isolate, int descriptor) const {
+Map Map::FindFieldOwner(Isolate* isolate, InternalIndex descriptor) const {
DisallowHeapAllocation no_allocation;
DCHECK_EQ(kField,
instance_descriptors(isolate).GetDetails(descriptor).location());
@@ -663,14 +653,14 @@ Map Map::FindFieldOwner(Isolate* isolate, int descriptor) const {
Object back = result.GetBackPointer(isolate);
if (back.IsUndefined(isolate)) break;
const Map parent = Map::cast(back);
- if (parent.NumberOfOwnDescriptors() <= descriptor) break;
+ if (parent.NumberOfOwnDescriptors() <= descriptor.as_int()) break;
result = parent;
}
return result;
}
-void Map::UpdateFieldType(Isolate* isolate, int descriptor, Handle<Name> name,
- PropertyConstness new_constness,
+void Map::UpdateFieldType(Isolate* isolate, InternalIndex descriptor,
+ Handle<Name> name, PropertyConstness new_constness,
Representation new_representation,
const MaybeObjectHandle& new_wrapped_type) {
DCHECK(new_wrapped_type->IsSmi() || new_wrapped_type->IsWeak());
@@ -740,7 +730,8 @@ Handle<FieldType> Map::GeneralizeFieldType(Representation rep1,
}
// static
-void Map::GeneralizeField(Isolate* isolate, Handle<Map> map, int modify_index,
+void Map::GeneralizeField(Isolate* isolate, Handle<Map> map,
+ InternalIndex modify_index,
PropertyConstness new_constness,
Representation new_representation,
Handle<FieldType> new_field_type) {
@@ -791,7 +782,8 @@ void Map::GeneralizeField(Isolate* isolate, Handle<Map> map, int modify_index,
map->PrintGeneralization(
isolate, stdout, "field type generalization", modify_index,
map->NumberOfOwnDescriptors(), map->NumberOfOwnDescriptors(), false,
- details.representation(), details.representation(), old_constness,
+ details.representation(),
+ descriptors->GetDetails(modify_index).representation(), old_constness,
new_constness, old_field_type, MaybeHandle<Object>(), new_field_type,
MaybeHandle<Object>());
}
@@ -800,7 +792,8 @@ void Map::GeneralizeField(Isolate* isolate, Handle<Map> map, int modify_index,
// TODO(ishell): remove.
// static
Handle<Map> Map::ReconfigureProperty(Isolate* isolate, Handle<Map> map,
- int modify_index, PropertyKind new_kind,
+ InternalIndex modify_index,
+ PropertyKind new_kind,
PropertyAttributes new_attributes,
Representation new_representation,
Handle<FieldType> new_field_type) {
@@ -840,9 +833,8 @@ Map SearchMigrationTarget(Isolate* isolate, Map old_map) {
// types instead of old_map's types.
// Go to slow map updating if the old_map has fast properties with cleared
// field types.
- int old_nof = old_map.NumberOfOwnDescriptors();
DescriptorArray old_descriptors = old_map.instance_descriptors();
- for (int i = 0; i < old_nof; i++) {
+ for (InternalIndex i : old_map.IterateOwnDescriptors()) {
PropertyDetails old_details = old_descriptors.GetDetails(i);
if (old_details.location() == kField && old_details.kind() == kData) {
FieldType old_type = old_descriptors.GetFieldType(i);
@@ -1007,7 +999,7 @@ Map Map::TryReplayPropertyTransitions(Isolate* isolate, Map old_map) {
DescriptorArray old_descriptors = old_map.instance_descriptors();
Map new_map = *this;
- for (int i = root_nof; i < old_nof; ++i) {
+ for (InternalIndex i : InternalIndex::Range(root_nof, old_nof)) {
PropertyDetails old_details = old_descriptors.GetDetails(i);
Map transition =
TransitionsAccessor(isolate, new_map, &no_allocation)
@@ -1107,8 +1099,10 @@ void Map::EnsureDescriptorSlack(Isolate* isolate, Handle<Map> map, int slack) {
// Replace descriptors by new_descriptors in all maps that share it. The old
// descriptors will not be trimmed in the mark-compactor, we need to mark
// all its elements.
+#ifndef V8_DISABLE_WRITE_BARRIERS
MarkingBarrierForDescriptorArray(isolate->heap(), *map, *descriptors,
descriptors->number_of_descriptors());
+#endif
Map current = *map;
while (current.instance_descriptors() == *descriptors) {
@@ -1363,8 +1357,7 @@ Handle<Map> Map::AsElementsKind(Isolate* isolate, Handle<Map> map,
int Map::NumberOfEnumerableProperties() const {
int result = 0;
DescriptorArray descs = instance_descriptors();
- int limit = NumberOfOwnDescriptors();
- for (int i = 0; i < limit; i++) {
+ for (InternalIndex i : IterateOwnDescriptors()) {
if ((descs.GetDetails(i).attributes() & ONLY_ENUMERABLE) == 0 &&
!descs.GetKey(i).FilterKey(ENUMERABLE_STRINGS)) {
result++;
@@ -1378,7 +1371,7 @@ int Map::NextFreePropertyIndex() const {
DescriptorArray descs = instance_descriptors();
// Search properties backwards to find the last field.
for (int i = number_of_own_descriptors - 1; i >= 0; --i) {
- PropertyDetails details = descs.GetDetails(i);
+ PropertyDetails details = descs.GetDetails(InternalIndex(i));
if (details.location() == kField) {
return details.field_index() + details.field_width_in_words();
}
@@ -1788,7 +1781,7 @@ Handle<Map> Map::AddMissingTransitions(
// if there are no dead transitions from that map and this is exactly the
// case for all the intermediate maps we create here.
Handle<Map> map = split_map;
- for (int i = split_nof; i < nof_descriptors - 1; ++i) {
+ for (InternalIndex i : InternalIndex::Range(split_nof, nof_descriptors - 1)) {
Handle<Map> new_map = CopyDropDescriptors(isolate, map);
InstallDescriptors(isolate, map, new_map, i, descriptors,
full_layout_descriptor);
@@ -1797,20 +1790,21 @@ Handle<Map> Map::AddMissingTransitions(
}
map->NotifyLeafMapLayoutChange(isolate);
last_map->set_may_have_interesting_symbols(false);
- InstallDescriptors(isolate, map, last_map, nof_descriptors - 1, descriptors,
- full_layout_descriptor);
+ InstallDescriptors(isolate, map, last_map, InternalIndex(nof_descriptors - 1),
+ descriptors, full_layout_descriptor);
return last_map;
}
// Since this method is used to rewrite an existing transition tree, it can
// always insert transitions without checking.
void Map::InstallDescriptors(Isolate* isolate, Handle<Map> parent,
- Handle<Map> child, int new_descriptor,
+ Handle<Map> child, InternalIndex new_descriptor,
Handle<DescriptorArray> descriptors,
Handle<LayoutDescriptor> full_layout_descriptor) {
DCHECK(descriptors->IsSortedNoDuplicates());
- child->SetInstanceDescriptors(isolate, *descriptors, new_descriptor + 1);
+ child->SetInstanceDescriptors(isolate, *descriptors,
+ new_descriptor.as_int() + 1);
child->CopyUnusedPropertyFields(*parent);
PropertyDetails details = descriptors->GetDetails(new_descriptor);
if (details.location() == kField) {
@@ -2063,7 +2057,7 @@ Handle<Map> Map::CopyForPreventExtensions(
namespace {
-bool CanHoldValue(DescriptorArray descriptors, int descriptor,
+bool CanHoldValue(DescriptorArray descriptors, InternalIndex descriptor,
PropertyConstness constness, Object value) {
PropertyDetails details = descriptors.GetDetails(descriptor);
if (details.location() == kField) {
@@ -2086,7 +2080,7 @@ bool CanHoldValue(DescriptorArray descriptors, int descriptor,
}
Handle<Map> UpdateDescriptorForValue(Isolate* isolate, Handle<Map> map,
- int descriptor,
+ InternalIndex descriptor,
PropertyConstness constness,
Handle<Object> value) {
if (CanHoldValue(map->instance_descriptors(), descriptor, constness,
@@ -2108,7 +2102,7 @@ Handle<Map> UpdateDescriptorForValue(Isolate* isolate, Handle<Map> map,
// static
Handle<Map> Map::PrepareForDataProperty(Isolate* isolate, Handle<Map> map,
- int descriptor,
+ InternalIndex descriptor,
PropertyConstness constness,
Handle<Object> value) {
// Update to the newest map before storing the property.
@@ -2140,7 +2134,7 @@ Handle<Map> Map::TransitionToDataProperty(Isolate* isolate, Handle<Map> map,
.SearchTransition(*name, kData, attributes);
if (!maybe_transition.is_null()) {
Handle<Map> transition(maybe_transition, isolate);
- int descriptor = transition->LastAdded();
+ InternalIndex descriptor = transition->LastAdded();
DCHECK_EQ(
attributes,
@@ -2206,7 +2200,8 @@ Handle<Map> Map::TransitionToDataProperty(Isolate* isolate, Handle<Map> map,
}
Handle<Map> Map::ReconfigureExistingProperty(Isolate* isolate, Handle<Map> map,
- int descriptor, PropertyKind kind,
+ InternalIndex descriptor,
+ PropertyKind kind,
PropertyAttributes attributes,
PropertyConstness constness) {
// Dictionaries have to be reconfigured in-place.
@@ -2232,7 +2227,8 @@ Handle<Map> Map::ReconfigureExistingProperty(Isolate* isolate, Handle<Map> map,
}
Handle<Map> Map::TransitionToAccessorProperty(Isolate* isolate, Handle<Map> map,
- Handle<Name> name, int descriptor,
+ Handle<Name> name,
+ InternalIndex descriptor,
Handle<Object> getter,
Handle<Object> setter,
PropertyAttributes attributes) {
@@ -2261,7 +2257,7 @@ Handle<Map> Map::TransitionToAccessorProperty(Isolate* isolate, Handle<Map> map,
if (!maybe_transition.is_null()) {
Handle<Map> transition(maybe_transition, isolate);
DescriptorArray descriptors = transition->instance_descriptors();
- int descriptor = transition->LastAdded();
+ InternalIndex descriptor = transition->LastAdded();
DCHECK(descriptors.GetKey(descriptor).Equals(*name));
DCHECK_EQ(kAccessor, descriptors.GetDetails(descriptor).kind());
@@ -2284,7 +2280,7 @@ Handle<Map> Map::TransitionToAccessorProperty(Isolate* isolate, Handle<Map> map,
Handle<AccessorPair> pair;
DescriptorArray old_descriptors = map->instance_descriptors();
- if (descriptor != DescriptorArray::kNotFound) {
+ if (descriptor.is_found()) {
if (descriptor != map->LastAdded()) {
return Map::Normalize(isolate, map, mode, "AccessorsOverwritingNonLast");
}
@@ -2374,9 +2370,9 @@ Handle<Map> Map::CopyInsertDescriptor(Isolate* isolate, Handle<Map> map,
Handle<DescriptorArray> old_descriptors(map->instance_descriptors(), isolate);
// We replace the key if it is already present.
- int index =
+ InternalIndex index =
old_descriptors->SearchWithCache(isolate, *descriptor->GetKey(), *map);
- if (index != DescriptorArray::kNotFound) {
+ if (index.is_found()) {
return CopyReplaceDescriptor(isolate, map, old_descriptors, descriptor,
index, flag);
}
@@ -2386,7 +2382,7 @@ Handle<Map> Map::CopyInsertDescriptor(Isolate* isolate, Handle<Map> map,
Handle<Map> Map::CopyReplaceDescriptor(Isolate* isolate, Handle<Map> map,
Handle<DescriptorArray> descriptors,
Descriptor* descriptor,
- int insertion_index,
+ InternalIndex insertion_index,
TransitionFlag flag) {
Handle<Name> key = descriptor->GetKey();
DCHECK_EQ(*key, descriptors->GetKey(insertion_index));
@@ -2403,7 +2399,7 @@ Handle<Map> Map::CopyReplaceDescriptor(Isolate* isolate, Handle<Map> map,
isolate, map, new_descriptors, new_descriptors->number_of_descriptors());
SimpleTransitionFlag simple_flag =
- (insertion_index == descriptors->number_of_descriptors() - 1)
+ (insertion_index.as_int() == descriptors->number_of_descriptors() - 1)
? SIMPLE_PROPERTY_TRANSITION
: PROPERTY_TRANSITION;
return CopyReplaceDescriptors(isolate, map, new_descriptors,
@@ -2465,8 +2461,7 @@ bool Map::EquivalentToForElementsKindTransition(const Map other) const {
// with fields that may be generalized in-place. This must already be handled
// during addition of a new field.
DescriptorArray descriptors = instance_descriptors();
- int nof = NumberOfOwnDescriptors();
- for (int i = 0; i < nof; i++) {
+ for (InternalIndex i : IterateOwnDescriptors()) {
PropertyDetails details = descriptors.GetDetails(i);
if (details.location() == kField) {
DCHECK(IsMostGeneralFieldType(details.representation(),
@@ -2547,8 +2542,10 @@ void Map::SetInstanceDescriptors(Isolate* isolate, DescriptorArray descriptors,
int number_of_own_descriptors) {
set_synchronized_instance_descriptors(descriptors);
SetNumberOfOwnDescriptors(number_of_own_descriptors);
+#ifndef V8_DISABLE_WRITE_BARRIERS
MarkingBarrierForDescriptorArray(isolate->heap(), *this, descriptors,
number_of_own_descriptors);
+#endif
}
// static
diff --git a/deps/v8/src/objects/map.h b/deps/v8/src/objects/map.h
index ef16019685..0daadbee08 100644
--- a/deps/v8/src/objects/map.h
+++ b/deps/v8/src/objects/map.h
@@ -8,6 +8,7 @@
#include "src/common/globals.h"
#include "src/objects/code.h"
#include "src/objects/heap-object.h"
+#include "src/objects/internal-index.h"
#include "src/objects/objects.h"
#include "torque-generated/field-offsets-tq.h"
@@ -470,7 +471,8 @@ class Map : public HeapObject {
Map GetPrototypeChainRootMap(Isolate* isolate) const;
V8_EXPORT_PRIVATE Map FindRootMap(Isolate* isolate) const;
- V8_EXPORT_PRIVATE Map FindFieldOwner(Isolate* isolate, int descriptor) const;
+ V8_EXPORT_PRIVATE Map FindFieldOwner(Isolate* isolate,
+ InternalIndex descriptor) const;
inline int GetInObjectPropertyOffset(int index) const;
@@ -513,7 +515,8 @@ class Map : public HeapObject {
Representation rep1, Handle<FieldType> type1, Representation rep2,
Handle<FieldType> type2, Isolate* isolate);
static void GeneralizeField(Isolate* isolate, Handle<Map> map,
- int modify_index, PropertyConstness new_constness,
+ InternalIndex modify_index,
+ PropertyConstness new_constness,
Representation new_representation,
Handle<FieldType> new_field_type);
// Returns true if the |field_type| is the most general one for
@@ -533,7 +536,7 @@ class Map : public HeapObject {
Representation* representation, Handle<FieldType>* field_type);
V8_EXPORT_PRIVATE static Handle<Map> ReconfigureProperty(
- Isolate* isolate, Handle<Map> map, int modify_index,
+ Isolate* isolate, Handle<Map> map, InternalIndex modify_index,
PropertyKind new_kind, PropertyAttributes new_attributes,
Representation new_representation, Handle<FieldType> new_field_type);
@@ -541,7 +544,7 @@ class Map : public HeapObject {
Isolate* isolate, Handle<Map> map, ElementsKind new_elements_kind);
V8_EXPORT_PRIVATE static Handle<Map> PrepareForDataProperty(
- Isolate* isolate, Handle<Map> old_map, int descriptor_number,
+ Isolate* isolate, Handle<Map> old_map, InternalIndex descriptor_number,
PropertyConstness constness, Handle<Object> value);
V8_EXPORT_PRIVATE static Handle<Map> Normalize(Isolate* isolate,
@@ -636,10 +639,11 @@ class Map : public HeapObject {
inline PropertyDetails GetLastDescriptorDetails(Isolate* isolate) const;
- inline int LastAdded() const;
+ inline InternalIndex LastAdded() const;
inline int NumberOfOwnDescriptors() const;
inline void SetNumberOfOwnDescriptors(int number);
+ inline InternalIndex::Range IterateOwnDescriptors() const;
inline Cell RetrieveDescriptorsPointer();
@@ -742,12 +746,13 @@ class Map : public HeapObject {
Handle<Object> value, PropertyAttributes attributes,
PropertyConstness constness, StoreOrigin store_origin);
V8_EXPORT_PRIVATE static Handle<Map> TransitionToAccessorProperty(
- Isolate* isolate, Handle<Map> map, Handle<Name> name, int descriptor,
- Handle<Object> getter, Handle<Object> setter,
+ Isolate* isolate, Handle<Map> map, Handle<Name> name,
+ InternalIndex descriptor, Handle<Object> getter, Handle<Object> setter,
PropertyAttributes attributes);
V8_EXPORT_PRIVATE static Handle<Map> ReconfigureExistingProperty(
- Isolate* isolate, Handle<Map> map, int descriptor, PropertyKind kind,
- PropertyAttributes attributes, PropertyConstness constness);
+ Isolate* isolate, Handle<Map> map, InternalIndex descriptor,
+ PropertyKind kind, PropertyAttributes attributes,
+ PropertyConstness constness);
inline void AppendDescriptor(Isolate* isolate, Descriptor* desc);
@@ -881,9 +886,6 @@ class Map : public HeapObject {
InstanceType instance_type);
inline bool CanHaveFastTransitionableElementsKind() const;
- // Whether this is the map of the given native context's global proxy.
- bool IsMapOfGlobalProxy(Handle<NativeContext> native_context) const;
-
private:
// This byte encodes either the instance size without the in-object slack or
// the slack size in properties backing store.
@@ -925,7 +927,7 @@ class Map : public HeapObject {
Handle<LayoutDescriptor> full_layout_descriptor);
static void InstallDescriptors(
Isolate* isolate, Handle<Map> parent_map, Handle<Map> child_map,
- int new_descriptor, Handle<DescriptorArray> descriptors,
+ InternalIndex new_descriptor, Handle<DescriptorArray> descriptors,
Handle<LayoutDescriptor> full_layout_descriptor);
static Handle<Map> CopyAddDescriptor(Isolate* isolate, Handle<Map> map,
Descriptor* descriptor,
@@ -938,7 +940,8 @@ class Map : public HeapObject {
static Handle<Map> CopyReplaceDescriptor(Isolate* isolate, Handle<Map> map,
Handle<DescriptorArray> descriptors,
- Descriptor* descriptor, int index,
+ Descriptor* descriptor,
+ InternalIndex index,
TransitionFlag flag);
static Handle<Map> CopyNormalized(Isolate* isolate, Handle<Map> map,
PropertyNormalizationMode mode);
@@ -951,22 +954,24 @@ class Map : public HeapObject {
// Update field type of the given descriptor to new representation and new
  // type. The type must be prepared for storing in the descriptor array:
// it must be either a simple type or a map wrapped in a weak cell.
- void UpdateFieldType(Isolate* isolate, int descriptor_number,
+ void UpdateFieldType(Isolate* isolate, InternalIndex descriptor_number,
Handle<Name> name, PropertyConstness new_constness,
Representation new_representation,
const MaybeObjectHandle& new_wrapped_type);
// TODO(ishell): Move to MapUpdater.
- void PrintReconfiguration(Isolate* isolate, FILE* file, int modify_index,
- PropertyKind kind, PropertyAttributes attributes);
+ void PrintReconfiguration(Isolate* isolate, FILE* file,
+ InternalIndex modify_index, PropertyKind kind,
+ PropertyAttributes attributes);
// TODO(ishell): Move to MapUpdater.
void PrintGeneralization(
- Isolate* isolate, FILE* file, const char* reason, int modify_index,
- int split, int descriptors, bool constant_to_field,
- Representation old_representation, Representation new_representation,
- PropertyConstness old_constness, PropertyConstness new_constness,
- MaybeHandle<FieldType> old_field_type, MaybeHandle<Object> old_value,
- MaybeHandle<FieldType> new_field_type, MaybeHandle<Object> new_value);
+ Isolate* isolate, FILE* file, const char* reason,
+ InternalIndex modify_index, int split, int descriptors,
+ bool constant_to_field, Representation old_representation,
+ Representation new_representation, PropertyConstness old_constness,
+ PropertyConstness new_constness, MaybeHandle<FieldType> old_field_type,
+ MaybeHandle<Object> old_value, MaybeHandle<FieldType> new_field_type,
+ MaybeHandle<Object> new_value);
// Use the high-level instance_descriptors/SetInstanceDescriptors instead.
DECL_ACCESSORS(synchronized_instance_descriptors, DescriptorArray)
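
The recurring change in this file replaces raw int descriptor indices with InternalIndex. Its definition is not shown in this diff; the following minimal sketch is an assumption (only as_int() and InternalIndex::Range() are confirmed by the hunks, the rest is inferred). The idea is a distinct index type that cannot be silently mixed with plain ints but still supports range-style loops:

    #include <cstddef>

    // Minimal sketch of a typed descriptor index (assumed shape; the real
    // class lives elsewhere in src/objects/ and is richer than this).
    class InternalIndex {
     public:
      explicit InternalIndex(size_t raw) : raw_(raw) {}
      int as_int() const { return static_cast<int>(raw_); }

      // Enables `for (InternalIndex i : InternalIndex::Range(n))`, the loop
      // form used throughout this patch.
      class Range {
       public:
        explicit Range(size_t size) : size_(size) {}
        class Iterator {
         public:
          explicit Iterator(size_t i) : i_(i) {}
          InternalIndex operator*() const { return InternalIndex(i_); }
          Iterator& operator++() { ++i_; return *this; }
          bool operator!=(const Iterator& other) const { return i_ != other.i_; }
         private:
          size_t i_;
        };
        Iterator begin() const { return Iterator(0); }
        Iterator end() const { return Iterator(size_); }
       private:
        size_t size_;
      };

     private:
      size_t raw_;
    };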
diff --git a/deps/v8/src/objects/module-inl.h b/deps/v8/src/objects/module-inl.h
index ac54516376..aaf790cc8a 100644
--- a/deps/v8/src/objects/module-inl.h
+++ b/deps/v8/src/objects/module-inl.h
@@ -38,9 +38,17 @@ SMI_ACCESSORS(Module, hash, kHashOffset)
TQ_SMI_ACCESSORS(SourceTextModule, dfs_index)
TQ_SMI_ACCESSORS(SourceTextModule, dfs_ancestor_index)
+TQ_SMI_ACCESSORS(SourceTextModule, flags)
+BOOL_ACCESSORS(SourceTextModule, flags, async, kAsyncBit)
+BOOL_ACCESSORS(SourceTextModule, flags, async_evaluating, kAsyncEvaluatingBit)
+TQ_SMI_ACCESSORS(SourceTextModule, pending_async_dependencies)
+ACCESSORS(SourceTextModule, async_parent_modules, ArrayList,
+ kAsyncParentModulesOffset)
+ACCESSORS(SourceTextModule, top_level_capability, HeapObject,
+ kTopLevelCapabilityOffset)
SourceTextModuleInfo SourceTextModule::info() const {
- return (status() >= kEvaluating)
+ return status() == kErrored
? SourceTextModuleInfo::cast(code())
: GetSharedFunctionInfo().scope_info().ModuleDescriptorInfo();
}
@@ -112,6 +120,37 @@ class UnorderedModuleSet
ZoneAllocator<Handle<Module>>(zone)) {}
};
+void SourceTextModule::AddAsyncParentModule(Isolate* isolate,
+ Handle<SourceTextModule> module) {
+ Handle<ArrayList> new_array_list =
+ ArrayList::Add(isolate, handle(async_parent_modules(), isolate), module);
+ set_async_parent_modules(*new_array_list);
+}
+
+Handle<SourceTextModule> SourceTextModule::GetAsyncParentModule(
+ Isolate* isolate, int index) {
+ Handle<SourceTextModule> module(
+ SourceTextModule::cast(async_parent_modules().Get(index)), isolate);
+ return module;
+}
+
+int SourceTextModule::AsyncParentModuleCount() {
+ return async_parent_modules().Length();
+}
+
+bool SourceTextModule::HasPendingAsyncDependencies() {
+ DCHECK_GE(pending_async_dependencies(), 0);
+ return pending_async_dependencies() > 0;
+}
+
+void SourceTextModule::IncrementPendingAsyncDependencies() {
+ set_pending_async_dependencies(pending_async_dependencies() + 1);
+}
+
+void SourceTextModule::DecrementPendingAsyncDependencies() {
+ set_pending_async_dependencies(pending_async_dependencies() - 1);
+}
+
} // namespace internal
} // namespace v8
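
The inline accessors above implement the bookkeeping that top-level await relies on: an async module records the parents waiting on it, and each parent counts how many async dependencies are still pending. A rough standalone model of the protocol, using plain STL containers in place of the on-heap ArrayList and Smi counter (names here are illustrative, not V8's):

    #include <vector>

    // Rough model of the pending-async-dependency protocol (sketch only).
    struct ModuleNode {
      std::vector<ModuleNode*> async_parent_modules;
      int pending_async_dependencies = 0;

      bool HasPendingAsyncDependencies() const {
        return pending_async_dependencies > 0;
      }
    };

    // A parent waiting on an async child registers itself with the child and
    // bumps its own pending counter.
    void RegisterAsyncDependency(ModuleNode* parent, ModuleNode* child) {
      child->async_parent_modules.push_back(parent);
      parent->pending_async_dependencies++;
    }

    // When the child finishes evaluating, each recorded parent is decremented;
    // a parent that reaches zero can resume (via its promise capability in V8).
    void OnChildEvaluated(ModuleNode* child) {
      for (ModuleNode* parent : child->async_parent_modules) {
        parent->pending_async_dependencies--;
      }
    }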
diff --git a/deps/v8/src/objects/module.cc b/deps/v8/src/objects/module.cc
index 60b9145d10..9c37de0c85 100644
--- a/deps/v8/src/objects/module.cc
+++ b/deps/v8/src/objects/module.cc
@@ -10,6 +10,7 @@
#include "src/api/api-inl.h"
#include "src/ast/modules.h"
#include "src/builtins/accessors.h"
+#include "src/heap/heap-inl.h"
#include "src/objects/cell-inl.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/js-generator-inl.h"
@@ -50,12 +51,14 @@ void Module::SetStatus(Status new_status) {
set_status(new_status);
}
-void Module::RecordError(Isolate* isolate) {
- DisallowHeapAllocation no_alloc;
- DCHECK(exception().IsTheHole(isolate));
- Object the_exception = isolate->pending_exception();
- DCHECK(!the_exception.IsTheHole(isolate));
+void Module::RecordErrorUsingPendingException(Isolate* isolate) {
+ Handle<Object> the_exception(isolate->pending_exception(), isolate);
+ RecordError(isolate, the_exception);
+}
+void Module::RecordError(Isolate* isolate, Handle<Object> error) {
+ DCHECK(exception().IsTheHole(isolate));
+ DCHECK(!error->IsTheHole(isolate));
if (this->IsSourceTextModule()) {
Handle<SourceTextModule> self(SourceTextModule::cast(*this), GetIsolate());
self->set_code(self->info());
@@ -64,7 +67,7 @@ void Module::RecordError(Isolate* isolate) {
PrintStatusTransition(Module::kErrored);
#endif // DEBUG
set_status(Module::kErrored);
- set_exception(the_exception);
+ set_exception(*error);
}
void Module::ResetGraph(Isolate* isolate, Handle<Module> module) {
@@ -244,46 +247,35 @@ MaybeHandle<Object> Module::Evaluate(Isolate* isolate, Handle<Module> module) {
#endif // OBJECT_PRINT
}
#endif // DEBUG
- if (module->status() == kErrored) {
- isolate->Throw(module->GetException());
- return MaybeHandle<Object>();
- }
- DCHECK_NE(module->status(), kEvaluating);
- DCHECK_GE(module->status(), kInstantiated);
- Zone zone(isolate->allocator(), ZONE_NAME);
-
- ZoneForwardList<Handle<SourceTextModule>> stack(&zone);
- unsigned dfs_index = 0;
- Handle<Object> result;
- if (!Evaluate(isolate, module, &stack, &dfs_index).ToHandle(&result)) {
- for (auto& descendant : stack) {
- DCHECK_EQ(descendant->status(), kEvaluating);
- descendant->RecordError(isolate);
- }
- DCHECK_EQ(module->GetException(), isolate->pending_exception());
- return MaybeHandle<Object>();
+ STACK_CHECK(isolate, MaybeHandle<Object>());
+ if (FLAG_harmony_top_level_await && module->IsSourceTextModule()) {
+ return SourceTextModule::EvaluateMaybeAsync(
+ isolate, Handle<SourceTextModule>::cast(module));
+ } else {
+ return Module::InnerEvaluate(isolate, module);
}
- DCHECK_EQ(module->status(), kEvaluated);
- DCHECK(stack.empty());
- return result;
}
-MaybeHandle<Object> Module::Evaluate(
- Isolate* isolate, Handle<Module> module,
- ZoneForwardList<Handle<SourceTextModule>>* stack, unsigned* dfs_index) {
+MaybeHandle<Object> Module::InnerEvaluate(Isolate* isolate,
+ Handle<Module> module) {
if (module->status() == kErrored) {
isolate->Throw(module->GetException());
return MaybeHandle<Object>();
- }
- if (module->status() >= kEvaluating) {
+ } else if (module->status() == kEvaluated) {
return isolate->factory()->undefined_value();
}
- DCHECK_EQ(module->status(), kInstantiated);
- STACK_CHECK(isolate, MaybeHandle<Object>());
+
+  // InnerEvaluate can be called both to evaluate top-level modules without
+  // the harmony_top_level_await flag and recursively to evaluate
+ // SyntheticModules in the dependency graphs of SourceTextModules.
+ //
+ // However, SyntheticModules transition directly to 'Evaluated,' so we should
+ // never see an 'Evaluating' module at this point.
+ CHECK_EQ(module->status(), kInstantiated);
if (module->IsSourceTextModule()) {
- return SourceTextModule::Evaluate(
- isolate, Handle<SourceTextModule>::cast(module), stack, dfs_index);
+ return SourceTextModule::Evaluate(isolate,
+ Handle<SourceTextModule>::cast(module));
} else {
return SyntheticModule::Evaluate(isolate,
Handle<SyntheticModule>::cast(module));
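
Taken together, the module.cc changes split evaluation into a two-level dispatch: the public entry point routes source text modules through an async-aware path when --harmony-top-level-await is enabled, while InnerEvaluate keeps the synchronous behavior for synthetic modules and for the flag-off case. A schematic restatement of the control flow (handles, isolates, and MaybeHandle plumbing deliberately elided; these are not the real signatures):

    // Schematic control flow of Evaluate/InnerEvaluate after this patch.
    enum class Status { kInstantiated, kEvaluating, kEvaluated, kErrored };

    struct ModuleLike {
      Status status = Status::kInstantiated;
      bool is_source_text = true;
    };

    void EvaluateMaybeAsync(ModuleLike*) { /* async-aware TLA path, elided */ }
    void EvaluateSourceText(ModuleLike*) { /* recursive evaluation, elided */ }
    void EvaluateSynthetic(ModuleLike*) { /* completes synchronously, elided */ }

    void InnerEvaluate(ModuleLike* module) {
      if (module->status == Status::kErrored) return;    // rethrown in V8
      if (module->status == Status::kEvaluated) return;  // idempotent
      // Synthetic modules jump straight to kEvaluated, so seeing kEvaluating
      // here would be a bug; hence the CHECK_EQ(kInstantiated) in the patch.
      if (module->is_source_text) {
        EvaluateSourceText(module);
      } else {
        EvaluateSynthetic(module);
      }
    }

    void Evaluate(ModuleLike* module, bool harmony_top_level_await) {
      if (harmony_top_level_await && module->is_source_text) {
        EvaluateMaybeAsync(module);  // may leave the module pending on a promise
      } else {
        InnerEvaluate(module);
      }
    }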
diff --git a/deps/v8/src/objects/module.h b/deps/v8/src/objects/module.h
index 08badf0357..d0ea22e6e5 100644
--- a/deps/v8/src/objects/module.h
+++ b/deps/v8/src/objects/module.h
@@ -112,18 +112,19 @@ class Module : public HeapObject {
ZoneForwardList<Handle<SourceTextModule>>* stack, unsigned* dfs_index,
Zone* zone);
- static V8_WARN_UNUSED_RESULT MaybeHandle<Object> Evaluate(
- Isolate* isolate, Handle<Module> module,
- ZoneForwardList<Handle<SourceTextModule>>* stack, unsigned* dfs_index);
+ static V8_WARN_UNUSED_RESULT MaybeHandle<Object> InnerEvaluate(
+ Isolate* isolate, Handle<Module> module);
// Set module's status back to kUninstantiated and reset other internal state.
// This is used when instantiation fails.
static void Reset(Isolate* isolate, Handle<Module> module);
static void ResetGraph(Isolate* isolate, Handle<Module> module);
- // To set status to kErrored, RecordError should be used.
+ // To set status to kErrored, RecordError or RecordErrorUsingPendingException
+ // should be used.
void SetStatus(Status status);
- void RecordError(Isolate* isolate);
+ void RecordErrorUsingPendingException(Isolate* isolate);
+ void RecordError(Isolate* isolate, Handle<Object> error);
#ifdef DEBUG
// For --trace-module-status.
@@ -137,7 +138,8 @@ class Module : public HeapObject {
// JSModuleNamespace object (representing module "bar") is created and bound to
// the declared variable (foo). A module can have at most one namespace object.
class JSModuleNamespace
- : public TorqueGeneratedJSModuleNamespace<JSModuleNamespace, JSObject> {
+ : public TorqueGeneratedJSModuleNamespace<JSModuleNamespace,
+ JSSpecialObject> {
public:
DECL_PRINTER(JSModuleNamespace)
diff --git a/deps/v8/src/objects/name-inl.h b/deps/v8/src/objects/name-inl.h
index b76ae245a2..88ae2feea5 100644
--- a/deps/v8/src/objects/name-inl.h
+++ b/deps/v8/src/objects/name-inl.h
@@ -9,6 +9,7 @@
#include "src/heap/heap-write-barrier-inl.h"
#include "src/objects/map-inl.h"
+#include "src/objects/primitive-heap-object-inl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -100,6 +101,10 @@ bool Name::AsArrayIndex(uint32_t* index) {
return IsString() && String::cast(*this).AsArrayIndex(index);
}
+bool Name::AsIntegerIndex(size_t* index) {
+ return IsString() && String::cast(*this).AsIntegerIndex(index);
+}
+
// static
bool Name::ContainsCachedArrayIndex(uint32_t hash) {
return (hash & Name::kDoesNotContainCachedArrayIndexMask) == 0;
diff --git a/deps/v8/src/objects/name.h b/deps/v8/src/objects/name.h
index a02bb3d794..386b9ec055 100644
--- a/deps/v8/src/objects/name.h
+++ b/deps/v8/src/objects/name.h
@@ -5,9 +5,8 @@
#ifndef V8_OBJECTS_NAME_H_
#define V8_OBJECTS_NAME_H_
-#include "src/objects/heap-object.h"
#include "src/objects/objects.h"
-#include "torque-generated/class-definitions-tq.h"
+#include "src/objects/primitive-heap-object.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -17,7 +16,7 @@ namespace internal {
// The Name abstract class captures anything that can be used as a property
// name, i.e., strings and symbols. All names store a hash value.
-class Name : public TorqueGeneratedName<Name, HeapObject> {
+class Name : public TorqueGeneratedName<Name, PrimitiveHeapObject> {
public:
// Tells whether the hash code has been computed.
inline bool HasHashCode();
@@ -32,6 +31,7 @@ class Name : public TorqueGeneratedName<Name, HeapObject> {
// Conversion.
inline bool AsArrayIndex(uint32_t* index);
+ inline bool AsIntegerIndex(size_t* index);
// An "interesting symbol" is a well-known symbol, like @@toStringTag,
// that's often looked up on random objects but is usually not present.
@@ -73,7 +73,8 @@ class Name : public TorqueGeneratedName<Name, HeapObject> {
// array index.
static const int kHashNotComputedMask = 1;
static const int kIsNotArrayIndexMask = 1 << 1;
- static const int kNofHashBitFields = 2;
+ static const int kIsNotIntegerIndexMask = 1 << 2;
+ static const int kNofHashBitFields = 3;
// Shift constant retrieving hash code from hash field.
static const int kHashShift = kNofHashBitFields;
@@ -88,6 +89,14 @@ class Name : public TorqueGeneratedName<Name, HeapObject> {
// Maximum number of characters to consider when trying to convert a string
// value into an array index.
static const int kMaxArrayIndexSize = 10;
+ // Maximum number of characters that might be parsed into a size_t:
+ // 10 characters per 32 bits of size_t width.
+ // We choose this as large as possible (rather than MAX_SAFE_INTEGER range)
+ // because TypedArray accesses will treat all string keys that are
+ // canonical representations of numbers in the range [MAX_SAFE_INTEGER ..
+ // size_t::max] as out-of-bounds accesses, and we can handle those in the
+ // fast path if we tag them as such (see kIsNotIntegerIndexMask).
+ static const int kMaxIntegerIndexSize = 10 * (sizeof(size_t) / 4);
// For strings which are array indexes the hash value has the string length
// mixed into the hash, mainly to avoid a hash value of zero which would be
@@ -120,7 +129,7 @@ class Name : public TorqueGeneratedName<Name, HeapObject> {
// Value of empty hash field indicating that the hash is not computed.
static const int kEmptyHashField =
- kIsNotArrayIndexMask | kHashNotComputedMask;
+ kIsNotIntegerIndexMask | kIsNotArrayIndexMask | kHashNotComputedMask;
protected:
static inline bool IsHashFieldComputed(uint32_t field);
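
Two details above are worth making concrete. First, the new kIsNotIntegerIndexMask occupies bit 2, so the hash proper now begins at bit 3 and the "empty" field has all three flag bits set. Second, the kMaxIntegerIndexSize arithmetic: sizeof(size_t) / 4 is 1 on 32-bit and 2 on 64-bit targets, giving a bound of 10 or 20 characters respectively. Restated as standalone constants with checks (sketch):

    #include <cstddef>

    // Standalone restatement of the hash-field layout above (sketch).
    constexpr int kHashNotComputedMask = 1;         // bit 0
    constexpr int kIsNotArrayIndexMask = 1 << 1;    // bit 1
    constexpr int kIsNotIntegerIndexMask = 1 << 2;  // bit 2 (new in this patch)
    constexpr int kNofHashBitFields = 3;
    constexpr int kHashShift = kNofHashBitFields;   // hash starts at bit 3

    constexpr int kEmptyHashField =
        kIsNotIntegerIndexMask | kIsNotArrayIndexMask | kHashNotComputedMask;
    static_assert(kEmptyHashField == 7, "all three flag bits set");

    // Ten decimal digits cover 32 bits, so size_t gets 10 digits per 32 bits.
    constexpr int kMaxIntegerIndexSize = 10 * (sizeof(size_t) / 4);
    static_assert(sizeof(size_t) != 4 || kMaxIntegerIndexSize == 10, "32-bit");
    static_assert(sizeof(size_t) != 8 || kMaxIntegerIndexSize == 20, "64-bit");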
diff --git a/deps/v8/src/objects/object-list-macros.h b/deps/v8/src/objects/object-list-macros.h
index d5bce62d43..09b1bdc5f0 100644
--- a/deps/v8/src/objects/object-list-macros.h
+++ b/deps/v8/src/objects/object-list-macros.h
@@ -41,6 +41,7 @@ class HeapNumber;
class ObjectHashTable;
class ObjectTemplateInfo;
class ObjectVisitor;
+class OSROptimizedCodeCache;
class PreparseData;
class PropertyArray;
class PropertyCell;
@@ -138,12 +139,14 @@ class ZoneForwardList;
V(JSCollection) \
V(JSCollectionIterator) \
V(JSContextExtensionObject) \
+ V(JSCustomElementsObject) \
V(JSDataView) \
V(JSDate) \
V(JSError) \
V(JSFinalizationGroup) \
V(JSFinalizationGroupCleanupIterator) \
V(JSFunction) \
+ V(JSFunctionOrBoundFunction) \
V(JSGeneratorObject) \
V(JSGlobalObject) \
V(JSGlobalProxy) \
@@ -158,10 +161,12 @@ class ZoneForwardList;
V(JSReceiver) \
V(JSRegExp) \
V(JSRegExpResult) \
+ V(JSRegExpResultIndices) \
V(JSRegExpStringIterator) \
V(JSSet) \
V(JSSetIterator) \
V(JSSloppyArgumentsObject) \
+ V(JSSpecialObject) \
V(JSStringIterator) \
V(JSTypedArray) \
V(JSWeakCollection) \
@@ -185,7 +190,9 @@ class ZoneForwardList;
V(OrderedHashMap) \
V(OrderedHashSet) \
V(OrderedNameDictionary) \
+ V(OSROptimizedCodeCache) \
V(PreparseData) \
+ V(PrimitiveHeapObject) \
V(PromiseReactionJobTask) \
V(PropertyArray) \
V(PropertyCell) \
@@ -225,6 +232,7 @@ class ZoneForwardList;
V(Undetectable) \
V(UniqueName) \
V(WasmExceptionObject) \
+ V(WasmExceptionPackage) \
V(WasmGlobalObject) \
V(WasmInstanceObject) \
V(WasmMemoryObject) \
diff --git a/deps/v8/src/objects/objects-body-descriptors-inl.h b/deps/v8/src/objects/objects-body-descriptors-inl.h
index 4c980b2697..68164fdce6 100644
--- a/deps/v8/src/objects/objects-body-descriptors-inl.h
+++ b/deps/v8/src/objects/objects-body-descriptors-inl.h
@@ -913,7 +913,7 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3, T4 p4) {
return Op::template apply<FeedbackVector::BodyDescriptor>(p1, p2, p3, p4);
case JS_OBJECT_TYPE:
case JS_ERROR_TYPE:
- case JS_ARGUMENTS_TYPE:
+ case JS_ARGUMENTS_OBJECT_TYPE:
case JS_ASYNC_FROM_SYNC_ITERATOR_TYPE:
case JS_PROMISE_TYPE:
case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
@@ -933,8 +933,8 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3, T4 p4) {
case JS_MAP_KEY_VALUE_ITERATOR_TYPE:
case JS_MAP_VALUE_ITERATOR_TYPE:
case JS_STRING_ITERATOR_TYPE:
- case JS_REGEXP_STRING_ITERATOR_TYPE:
- case JS_REGEXP_TYPE:
+ case JS_REG_EXP_STRING_ITERATOR_TYPE:
+ case JS_REG_EXP_TYPE:
case JS_GLOBAL_PROXY_TYPE:
case JS_GLOBAL_OBJECT_TYPE:
case JS_API_OBJECT_TYPE:
@@ -944,24 +944,24 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3, T4 p4) {
case JS_FINALIZATION_GROUP_CLEANUP_ITERATOR_TYPE:
case JS_FINALIZATION_GROUP_TYPE:
#ifdef V8_INTL_SUPPORT
- case JS_INTL_V8_BREAK_ITERATOR_TYPE:
- case JS_INTL_COLLATOR_TYPE:
- case JS_INTL_DATE_TIME_FORMAT_TYPE:
- case JS_INTL_LIST_FORMAT_TYPE:
- case JS_INTL_LOCALE_TYPE:
- case JS_INTL_NUMBER_FORMAT_TYPE:
- case JS_INTL_PLURAL_RULES_TYPE:
- case JS_INTL_RELATIVE_TIME_FORMAT_TYPE:
- case JS_INTL_SEGMENT_ITERATOR_TYPE:
- case JS_INTL_SEGMENTER_TYPE:
+ case JS_V8_BREAK_ITERATOR_TYPE:
+ case JS_COLLATOR_TYPE:
+ case JS_DATE_TIME_FORMAT_TYPE:
+ case JS_LIST_FORMAT_TYPE:
+ case JS_LOCALE_TYPE:
+ case JS_NUMBER_FORMAT_TYPE:
+ case JS_PLURAL_RULES_TYPE:
+ case JS_RELATIVE_TIME_FORMAT_TYPE:
+ case JS_SEGMENT_ITERATOR_TYPE:
+ case JS_SEGMENTER_TYPE:
#endif // V8_INTL_SUPPORT
- case WASM_EXCEPTION_TYPE:
- case WASM_GLOBAL_TYPE:
- case WASM_MEMORY_TYPE:
- case WASM_MODULE_TYPE:
- case WASM_TABLE_TYPE:
+ case WASM_EXCEPTION_OBJECT_TYPE:
+ case WASM_GLOBAL_OBJECT_TYPE:
+ case WASM_MEMORY_OBJECT_TYPE:
+ case WASM_MODULE_OBJECT_TYPE:
+ case WASM_TABLE_OBJECT_TYPE:
return Op::template apply<JSObject::BodyDescriptor>(p1, p2, p3, p4);
- case WASM_INSTANCE_TYPE:
+ case WASM_INSTANCE_OBJECT_TYPE:
return Op::template apply<WasmInstanceObject::BodyDescriptor>(p1, p2, p3,
p4);
case JS_WEAK_MAP_TYPE:
diff --git a/deps/v8/src/objects/objects-definitions.h b/deps/v8/src/objects/objects-definitions.h
index b346b5b7d1..53354014e9 100644
--- a/deps/v8/src/objects/objects-definitions.h
+++ b/deps/v8/src/objects/objects-definitions.h
@@ -32,15 +32,7 @@ namespace internal {
// instance_types that are less than those of all other types:
// HeapObject::Size, HeapObject::IterateBody, the typeof operator, and
// Object::IsString.
-//
-// NOTE: Everything following JS_PRIMITIVE_WRAPPER_TYPE is considered a
-// JSObject for GC purposes. The first four entries here have typeof
-// 'object', whereas JS_FUNCTION_TYPE has typeof 'function'.
-//
-// NOTE: List had to be split into two, because of conditional item(s) from
-// INTL namespace. They can't just be appended to the end, because of the
-// checks we do in tests (expecting JS_FUNCTION_TYPE to be last).
-#define INSTANCE_TYPE_LIST_BEFORE_INTL(V) \
+#define INSTANCE_TYPE_LIST_BASE(V) \
V(INTERNALIZED_STRING_TYPE) \
V(EXTERNAL_INTERNALIZED_STRING_TYPE) \
V(ONE_BYTE_INTERNALIZED_STRING_TYPE) \
@@ -58,191 +50,11 @@ namespace internal {
V(SLICED_ONE_BYTE_STRING_TYPE) \
V(THIN_ONE_BYTE_STRING_TYPE) \
V(UNCACHED_EXTERNAL_STRING_TYPE) \
- V(UNCACHED_EXTERNAL_ONE_BYTE_STRING_TYPE) \
- \
- V(SYMBOL_TYPE) \
- V(HEAP_NUMBER_TYPE) \
- V(BIGINT_TYPE) \
- V(ODDBALL_TYPE) \
- \
- V(MAP_TYPE) \
- V(CODE_TYPE) \
- V(FOREIGN_TYPE) \
- V(BYTE_ARRAY_TYPE) \
- V(BYTECODE_ARRAY_TYPE) \
- V(FREE_SPACE_TYPE) \
- \
- V(FIXED_DOUBLE_ARRAY_TYPE) \
- V(FEEDBACK_METADATA_TYPE) \
- V(FILLER_TYPE) \
- \
- V(ACCESS_CHECK_INFO_TYPE) \
- V(ACCESSOR_INFO_TYPE) \
- V(ACCESSOR_PAIR_TYPE) \
- V(ALIASED_ARGUMENTS_ENTRY_TYPE) \
- V(ALLOCATION_MEMENTO_TYPE) \
- V(ARRAY_BOILERPLATE_DESCRIPTION_TYPE) \
- V(ASM_WASM_DATA_TYPE) \
- V(ASYNC_GENERATOR_REQUEST_TYPE) \
- V(CLASS_POSITIONS_TYPE) \
- V(DEBUG_INFO_TYPE) \
- V(ENUM_CACHE_TYPE) \
- V(FUNCTION_TEMPLATE_INFO_TYPE) \
- V(FUNCTION_TEMPLATE_RARE_DATA_TYPE) \
- V(INTERCEPTOR_INFO_TYPE) \
- V(INTERPRETER_DATA_TYPE) \
- V(OBJECT_TEMPLATE_INFO_TYPE) \
- V(PROMISE_CAPABILITY_TYPE) \
- V(PROMISE_REACTION_TYPE) \
- V(PROTOTYPE_INFO_TYPE) \
- V(SCRIPT_TYPE) \
- V(SOURCE_POSITION_TABLE_WITH_FRAME_CACHE_TYPE) \
- V(SOURCE_TEXT_MODULE_INFO_ENTRY_TYPE) \
- V(STACK_FRAME_INFO_TYPE) \
- V(STACK_TRACE_FRAME_TYPE) \
- V(TEMPLATE_OBJECT_DESCRIPTION_TYPE) \
- V(TUPLE2_TYPE) \
- V(TUPLE3_TYPE) \
- V(WASM_CAPI_FUNCTION_DATA_TYPE) \
- V(WASM_DEBUG_INFO_TYPE) \
- V(WASM_EXCEPTION_TAG_TYPE) \
- V(WASM_EXPORTED_FUNCTION_DATA_TYPE) \
- V(WASM_INDIRECT_FUNCTION_TABLE_TYPE) \
- V(WASM_JS_FUNCTION_DATA_TYPE) \
- \
- V(CALLABLE_TASK_TYPE) \
- V(CALLBACK_TASK_TYPE) \
- V(PROMISE_FULFILL_REACTION_JOB_TASK_TYPE) \
- V(PROMISE_REJECT_REACTION_JOB_TASK_TYPE) \
- V(PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE) \
- \
- TORQUE_DEFINED_INSTANCE_TYPES(V) \
- \
- V(SOURCE_TEXT_MODULE_TYPE) \
- V(SYNTHETIC_MODULE_TYPE) \
- \
- V(ALLOCATION_SITE_TYPE) \
- V(EMBEDDER_DATA_ARRAY_TYPE) \
- \
- V(FIXED_ARRAY_TYPE) \
- V(OBJECT_BOILERPLATE_DESCRIPTION_TYPE) \
- V(CLOSURE_FEEDBACK_CELL_ARRAY_TYPE) \
- V(HASH_TABLE_TYPE) \
- V(ORDERED_HASH_MAP_TYPE) \
- V(ORDERED_HASH_SET_TYPE) \
- V(ORDERED_NAME_DICTIONARY_TYPE) \
- V(NAME_DICTIONARY_TYPE) \
- V(GLOBAL_DICTIONARY_TYPE) \
- V(NUMBER_DICTIONARY_TYPE) \
- V(SIMPLE_NUMBER_DICTIONARY_TYPE) \
- V(STRING_TABLE_TYPE) \
- V(EPHEMERON_HASH_TABLE_TYPE) \
- V(SCOPE_INFO_TYPE) \
- V(SCRIPT_CONTEXT_TABLE_TYPE) \
- \
- V(AWAIT_CONTEXT_TYPE) \
- V(BLOCK_CONTEXT_TYPE) \
- V(CATCH_CONTEXT_TYPE) \
- V(DEBUG_EVALUATE_CONTEXT_TYPE) \
- V(EVAL_CONTEXT_TYPE) \
- V(FUNCTION_CONTEXT_TYPE) \
- V(MODULE_CONTEXT_TYPE) \
- V(NATIVE_CONTEXT_TYPE) \
- V(SCRIPT_CONTEXT_TYPE) \
- V(WITH_CONTEXT_TYPE) \
- \
- V(WEAK_FIXED_ARRAY_TYPE) \
- V(TRANSITION_ARRAY_TYPE) \
- \
- V(CALL_HANDLER_INFO_TYPE) \
- V(CELL_TYPE) \
- V(CODE_DATA_CONTAINER_TYPE) \
- V(DESCRIPTOR_ARRAY_TYPE) \
- V(FEEDBACK_CELL_TYPE) \
- V(FEEDBACK_VECTOR_TYPE) \
- V(LOAD_HANDLER_TYPE) \
- V(PREPARSE_DATA_TYPE) \
- V(PROPERTY_ARRAY_TYPE) \
- V(PROPERTY_CELL_TYPE) \
- V(SHARED_FUNCTION_INFO_TYPE) \
- V(SMALL_ORDERED_HASH_MAP_TYPE) \
- V(SMALL_ORDERED_HASH_SET_TYPE) \
- V(SMALL_ORDERED_NAME_DICTIONARY_TYPE) \
- V(STORE_HANDLER_TYPE) \
- V(UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_TYPE) \
- V(UNCOMPILED_DATA_WITH_PREPARSE_DATA_TYPE) \
- V(WEAK_ARRAY_LIST_TYPE) \
- V(WEAK_CELL_TYPE) \
- \
- V(JS_PROXY_TYPE) \
- V(JS_GLOBAL_OBJECT_TYPE) \
- V(JS_GLOBAL_PROXY_TYPE) \
- V(JS_MODULE_NAMESPACE_TYPE) \
- V(JS_SPECIAL_API_OBJECT_TYPE) \
- V(JS_PRIMITIVE_WRAPPER_TYPE) \
- V(JS_API_OBJECT_TYPE) \
- V(JS_OBJECT_TYPE) \
- \
- V(JS_ARGUMENTS_TYPE) \
- V(JS_ARRAY_BUFFER_TYPE) \
- V(JS_ARRAY_ITERATOR_TYPE) \
- V(JS_ARRAY_TYPE) \
- V(JS_ASYNC_FROM_SYNC_ITERATOR_TYPE) \
- V(JS_ASYNC_FUNCTION_OBJECT_TYPE) \
- V(JS_ASYNC_GENERATOR_OBJECT_TYPE) \
- V(JS_CONTEXT_EXTENSION_OBJECT_TYPE) \
- V(JS_DATE_TYPE) \
- V(JS_ERROR_TYPE) \
- V(JS_GENERATOR_OBJECT_TYPE) \
- V(JS_MAP_TYPE) \
- V(JS_MAP_KEY_ITERATOR_TYPE) \
- V(JS_MAP_KEY_VALUE_ITERATOR_TYPE) \
- V(JS_MAP_VALUE_ITERATOR_TYPE) \
- V(JS_MESSAGE_OBJECT_TYPE) \
- V(JS_PROMISE_TYPE) \
- V(JS_REGEXP_TYPE) \
- V(JS_REGEXP_STRING_ITERATOR_TYPE) \
- V(JS_SET_TYPE) \
- V(JS_SET_KEY_VALUE_ITERATOR_TYPE) \
- V(JS_SET_VALUE_ITERATOR_TYPE) \
- V(JS_STRING_ITERATOR_TYPE) \
- V(JS_WEAK_REF_TYPE) \
- V(JS_FINALIZATION_GROUP_CLEANUP_ITERATOR_TYPE) \
- V(JS_FINALIZATION_GROUP_TYPE) \
- V(JS_WEAK_MAP_TYPE) \
- V(JS_WEAK_SET_TYPE) \
- V(JS_TYPED_ARRAY_TYPE) \
- V(JS_DATA_VIEW_TYPE)
+ V(UNCACHED_EXTERNAL_ONE_BYTE_STRING_TYPE)
-#define INSTANCE_TYPE_LIST_AFTER_INTL(V) \
- V(WASM_EXCEPTION_TYPE) \
- V(WASM_GLOBAL_TYPE) \
- V(WASM_INSTANCE_TYPE) \
- V(WASM_MEMORY_TYPE) \
- V(WASM_MODULE_TYPE) \
- V(WASM_TABLE_TYPE) \
- V(JS_BOUND_FUNCTION_TYPE) \
- V(JS_FUNCTION_TYPE)
-
-#ifdef V8_INTL_SUPPORT
-#define INSTANCE_TYPE_LIST(V) \
- INSTANCE_TYPE_LIST_BEFORE_INTL(V) \
- V(JS_INTL_V8_BREAK_ITERATOR_TYPE) \
- V(JS_INTL_COLLATOR_TYPE) \
- V(JS_INTL_DATE_TIME_FORMAT_TYPE) \
- V(JS_INTL_LIST_FORMAT_TYPE) \
- V(JS_INTL_LOCALE_TYPE) \
- V(JS_INTL_NUMBER_FORMAT_TYPE) \
- V(JS_INTL_PLURAL_RULES_TYPE) \
- V(JS_INTL_RELATIVE_TIME_FORMAT_TYPE) \
- V(JS_INTL_SEGMENT_ITERATOR_TYPE) \
- V(JS_INTL_SEGMENTER_TYPE) \
- INSTANCE_TYPE_LIST_AFTER_INTL(V)
-#else
-#define INSTANCE_TYPE_LIST(V) \
- INSTANCE_TYPE_LIST_BEFORE_INTL(V) \
- INSTANCE_TYPE_LIST_AFTER_INTL(V)
-#endif // V8_INTL_SUPPORT
+#define INSTANCE_TYPE_LIST(V) \
+ INSTANCE_TYPE_LIST_BASE(V) \
+ TORQUE_ASSIGNED_INSTANCE_TYPE_LIST(V)
// Since string types are not consecutive, this macro is used to
// iterate over them.
@@ -290,11 +102,20 @@ namespace internal {
// code for the class including allocation and garbage collection routines,
// casts and predicates. All you need to define is the class, methods and
// object verification routines. Easy, no?
-//
-// Note that for subtle reasons related to the ordering or numerical values of
-// type tags, elements in this list have to be added to the INSTANCE_TYPE_LIST
-// manually.
-#define STRUCT_LIST_GENERATOR(V, _) \
+#define STRUCT_LIST_GENERATOR_BASE(V, _) \
+ V(_, PROMISE_FULFILL_REACTION_JOB_TASK_TYPE, PromiseFulfillReactionJobTask, \
+ promise_fulfill_reaction_job_task) \
+ V(_, PROMISE_REJECT_REACTION_JOB_TASK_TYPE, PromiseRejectReactionJobTask, \
+ promise_reject_reaction_job_task) \
+ V(_, CALLABLE_TASK_TYPE, CallableTask, callable_task) \
+ V(_, CALLBACK_TASK_TYPE, CallbackTask, callback_task) \
+ V(_, PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE, PromiseResolveThenableJobTask, \
+ promise_resolve_thenable_job_task) \
+ V(_, FUNCTION_TEMPLATE_INFO_TYPE, FunctionTemplateInfo, \
+ function_template_info) \
+ V(_, OBJECT_TEMPLATE_INFO_TYPE, ObjectTemplateInfo, object_template_info) \
+ V(_, TUPLE2_TYPE, Tuple2, tuple2) \
+ V(_, TUPLE3_TYPE, Tuple3, tuple3) \
V(_, ACCESS_CHECK_INFO_TYPE, AccessCheckInfo, access_check_info) \
V(_, ACCESSOR_INFO_TYPE, AccessorInfo, accessor_info) \
V(_, ACCESSOR_PAIR_TYPE, AccessorPair, accessor_pair) \
@@ -309,13 +130,10 @@ namespace internal {
V(_, CLASS_POSITIONS_TYPE, ClassPositions, class_positions) \
V(_, DEBUG_INFO_TYPE, DebugInfo, debug_info) \
V(_, ENUM_CACHE_TYPE, EnumCache, enum_cache) \
- V(_, FUNCTION_TEMPLATE_INFO_TYPE, FunctionTemplateInfo, \
- function_template_info) \
V(_, FUNCTION_TEMPLATE_RARE_DATA_TYPE, FunctionTemplateRareData, \
function_template_rare_data) \
V(_, INTERCEPTOR_INFO_TYPE, InterceptorInfo, interceptor_info) \
V(_, INTERPRETER_DATA_TYPE, InterpreterData, interpreter_data) \
- V(_, OBJECT_TEMPLATE_INFO_TYPE, ObjectTemplateInfo, object_template_info) \
V(_, PROMISE_CAPABILITY_TYPE, PromiseCapability, promise_capability) \
V(_, PROMISE_REACTION_TYPE, PromiseReaction, promise_reaction) \
V(_, PROTOTYPE_INFO_TYPE, PrototypeInfo, prototype_info) \
@@ -328,8 +146,6 @@ namespace internal {
V(_, STACK_TRACE_FRAME_TYPE, StackTraceFrame, stack_trace_frame) \
V(_, TEMPLATE_OBJECT_DESCRIPTION_TYPE, TemplateObjectDescription, \
template_object_description) \
- V(_, TUPLE2_TYPE, Tuple2, tuple2) \
- V(_, TUPLE3_TYPE, Tuple3, tuple3) \
V(_, WASM_CAPI_FUNCTION_DATA_TYPE, WasmCapiFunctionData, \
wasm_capi_function_data) \
V(_, WASM_DEBUG_INFO_TYPE, WasmDebugInfo, wasm_debug_info) \
@@ -338,32 +154,24 @@ namespace internal {
wasm_exported_function_data) \
V(_, WASM_INDIRECT_FUNCTION_TABLE_TYPE, WasmIndirectFunctionTable, \
wasm_indirect_function_table) \
- V(_, WASM_JS_FUNCTION_DATA_TYPE, WasmJSFunctionData, wasm_js_function_data) \
- V(_, CALLABLE_TASK_TYPE, CallableTask, callable_task) \
- V(_, CALLBACK_TASK_TYPE, CallbackTask, callback_task) \
- V(_, PROMISE_FULFILL_REACTION_JOB_TASK_TYPE, PromiseFulfillReactionJobTask, \
- promise_fulfill_reaction_job_task) \
- V(_, PROMISE_REJECT_REACTION_JOB_TASK_TYPE, PromiseRejectReactionJobTask, \
- promise_reject_reaction_job_task) \
- V(_, PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE, PromiseResolveThenableJobTask, \
- promise_resolve_thenable_job_task)
+ V(_, WASM_JS_FUNCTION_DATA_TYPE, WasmJSFunctionData, wasm_js_function_data)
+
+#define STRUCT_LIST_GENERATOR(V, _) \
+ STRUCT_LIST_GENERATOR_BASE(V, _) \
+ TORQUE_STRUCT_LIST_GENERATOR(V, _)
// Adapts one STRUCT_LIST_GENERATOR entry to the STRUCT_LIST entry
#define STRUCT_LIST_ADAPTER(V, NAME, Name, name) V(NAME, Name, name)
// Produces (NAME, Name, name) entries.
-#define STRUCT_LIST(V) \
- STRUCT_LIST_GENERATOR(STRUCT_LIST_ADAPTER, V) \
- TORQUE_STRUCT_LIST_GENERATOR(STRUCT_LIST_ADAPTER, V)
+#define STRUCT_LIST(V) STRUCT_LIST_GENERATOR(STRUCT_LIST_ADAPTER, V)
// Adapts one STRUCT_LIST_GENERATOR entry to the STRUCT_MAPS_LIST entry
#define STRUCT_MAPS_LIST_ADAPTER(V, NAME, Name, name) \
V(Map, name##_map, Name##Map)
// Produces (Map, struct_name_map, StructNameMap) entries
-#define STRUCT_MAPS_LIST(V) \
- STRUCT_LIST_GENERATOR(STRUCT_MAPS_LIST_ADAPTER, V) \
- TORQUE_STRUCT_LIST_GENERATOR(STRUCT_MAPS_LIST_ADAPTER, V)
+#define STRUCT_MAPS_LIST(V) STRUCT_LIST_GENERATOR(STRUCT_MAPS_LIST_ADAPTER, V)
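
The consolidation above is the standard X-macro pattern: a single generator enumerates (NAME, Name, name) triples, and tiny adapter macros reshape each triple for a given consumer, so STRUCT_LIST and STRUCT_MAPS_LIST stay mechanically in sync. A toy version with made-up entries shows the mechanics:

    #include <cstdio>

    // Toy X-macro mirroring the generator/adapter split above (entries are
    // illustrative only).
    #define TOY_STRUCT_LIST_GENERATOR(V, _) \
      V(_, TUPLE2_TYPE, Tuple2, tuple2)     \
      V(_, TUPLE3_TYPE, Tuple3, tuple3)

    // Adapter producing (NAME, Name, name) entries for a caller-supplied V.
    #define TOY_STRUCT_LIST_ADAPTER(V, NAME, Name, name) V(NAME, Name, name)
    #define TOY_STRUCT_LIST(V) \
      TOY_STRUCT_LIST_GENERATOR(TOY_STRUCT_LIST_ADAPTER, V)

    // One consumer: print every instance-type name in the list.
    #define PRINT_ENTRY(NAME, Name, name) std::printf("%s\n", #NAME);

    int main() {
      TOY_STRUCT_LIST(PRINT_ENTRY)  // prints TUPLE2_TYPE, then TUPLE3_TYPE
      return 0;
    }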
//
// The following macros define list of allocation size objects and list of
diff --git a/deps/v8/src/objects/objects-inl.h b/deps/v8/src/objects/objects-inl.h
index cf8c3ffad2..08f4a2b6f0 100644
--- a/deps/v8/src/objects/objects-inl.h
+++ b/deps/v8/src/objects/objects-inl.h
@@ -350,6 +350,13 @@ DEF_GETTER(HeapObject, IsDependentCode, bool) {
return true;
}
+DEF_GETTER(HeapObject, IsOSROptimizedCodeCache, bool) {
+ if (!IsWeakFixedArray(isolate)) return false;
+  // There is actually no way to tell the difference between a weak fixed
+  // array and an OSR optimized code cache.
+ return true;
+}
+
DEF_GETTER(HeapObject, IsAbstractCode, bool) {
return IsBytecodeArray(isolate) || IsCode(isolate);
}
@@ -411,6 +418,12 @@ DEF_GETTER(HeapObject, IsSmallOrderedHashTable, bool) {
IsSmallOrderedNameDictionary(isolate);
}
+DEF_GETTER(HeapObject, IsWasmExceptionPackage, bool) {
+ // It is not possible to check for the existence of certain properties on the
+ // underlying {JSReceiver} here because that requires calling handlified code.
+ return IsJSReceiver(isolate);
+}
+
bool Object::IsPrimitive() const {
if (IsSmi()) return true;
HeapObject this_heap_object = HeapObject::cast(*this);
@@ -506,7 +519,7 @@ bool Object::IsMinusZero() const {
OBJECT_CONSTRUCTORS_IMPL(RegExpMatchInfo, FixedArray)
OBJECT_CONSTRUCTORS_IMPL(ScopeInfo, FixedArray)
-OBJECT_CONSTRUCTORS_IMPL(BigIntBase, HeapObject)
+OBJECT_CONSTRUCTORS_IMPL(BigIntBase, PrimitiveHeapObject)
OBJECT_CONSTRUCTORS_IMPL(BigInt, BigIntBase)
OBJECT_CONSTRUCTORS_IMPL(FreshlyAllocatedBigInt, BigIntBase)
@@ -756,11 +769,13 @@ void HeapObject::set_map(Map value) {
#endif
}
set_map_word(MapWord::FromMap(value));
+#ifndef V8_DISABLE_WRITE_BARRIERS
if (!value.is_null()) {
// TODO(1600) We are passing kNullAddress as a slot because maps can never
// be on an evacuation candidate.
MarkingBarrier(*this, ObjectSlot(kNullAddress), value);
}
+#endif
}
DEF_GETTER(HeapObject, synchronized_map, Map) {
@@ -774,11 +789,13 @@ void HeapObject::synchronized_set_map(Map value) {
#endif
}
synchronized_set_map_word(MapWord::FromMap(value));
+#ifndef V8_DISABLE_WRITE_BARRIERS
if (!value.is_null()) {
// TODO(1600) We are passing kNullAddress as a slot because maps can never
// be on an evacuation candidate.
MarkingBarrier(*this, ObjectSlot(kNullAddress), value);
}
+#endif
}
// Unsafe accessor omitting write barrier.
@@ -793,12 +810,14 @@ void HeapObject::set_map_no_write_barrier(Map value) {
void HeapObject::set_map_after_allocation(Map value, WriteBarrierMode mode) {
set_map_word(MapWord::FromMap(value));
+#ifndef V8_DISABLE_WRITE_BARRIERS
if (mode != SKIP_WRITE_BARRIER) {
DCHECK(!value.is_null());
// TODO(1600) We are passing kNullAddress as a slot because maps can never
// be on an evacuation candidate.
MarkingBarrier(*this, ObjectSlot(kNullAddress), value);
}
+#endif
}
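
All three map setters now share the same shape: the raw store always happens, while the marking barrier is compiled out entirely when V8_DISABLE_WRITE_BARRIERS is defined. Reduced to its essentials (the barrier body here is a stand-in, not V8's implementation):

    // Sketch of compile-time write-barrier elision.
    struct MapLike {
      bool is_null() const { return false; }
    };

    inline void MarkingBarrier(const MapLike& /*value*/) {
      // Stand-in: the real barrier informs the concurrent marker about the
      // new reference so it is not missed during incremental marking.
    }

    struct HeapObjectLike {
      MapLike map_word;

      void set_map(MapLike value) {
        map_word = value;  // the store itself is unconditional
    #ifndef V8_DISABLE_WRITE_BARRIERS
        if (!value.is_null()) {
          MarkingBarrier(value);  // removed entirely in barrier-free builds
        }
    #endif
      }
    };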
ObjectSlot HeapObject::map_slot() const {
diff --git a/deps/v8/src/objects/objects.cc b/deps/v8/src/objects/objects.cc
index 134cb3998a..ec4a8594f6 100644
--- a/deps/v8/src/objects/objects.cc
+++ b/deps/v8/src/objects/objects.cc
@@ -65,6 +65,7 @@
#include "src/objects/lookup-inl.h"
#include "src/objects/map-updater.h"
#include "src/objects/objects-body-descriptors-inl.h"
+#include "src/objects/property-details.h"
#include "src/utils/identity-map.h"
#ifdef V8_INTL_SUPPORT
#include "src/objects/js-break-iterator.h"
@@ -1770,7 +1771,7 @@ bool Object::IterationHasObservableEffects() {
// Check that the ArrayPrototype hasn't been modified in a way that would
// affect iteration.
- if (!isolate->IsArrayIteratorLookupChainIntact()) return true;
+ if (!Protectors::IsArrayIteratorLookupChainIntact(isolate)) return true;
// For FastPacked kinds, iteration will have the same effect as simply
// accessing each property in order.
@@ -1781,7 +1782,7 @@ bool Object::IterationHasObservableEffects() {
// the prototype. This could have different results if the prototype has been
// changed.
if (IsHoleyElementsKind(array_kind) &&
- isolate->IsNoElementsProtectorIntact()) {
+ Protectors::IsNoElementsIntact(isolate)) {
return false;
}
return true;
@@ -2188,7 +2189,8 @@ int HeapObject::SizeFromMap(Map map) const {
}
if (IsInRange(instance_type, FIRST_CONTEXT_TYPE, LAST_CONTEXT_TYPE)) {
if (instance_type == NATIVE_CONTEXT_TYPE) return NativeContext::kSize;
- return Context::SizeFor(Context::unchecked_cast(*this).length());
+ return Context::SizeFor(
+ Context::unchecked_cast(*this).synchronized_length());
}
if (instance_type == ONE_BYTE_STRING_TYPE ||
instance_type == ONE_BYTE_INTERNALIZED_STRING_TYPE) {
@@ -2378,7 +2380,7 @@ bool HeapObject::IsExternal(Isolate* isolate) const {
void DescriptorArray::GeneralizeAllFields() {
int length = number_of_descriptors();
- for (int i = 0; i < length; i++) {
+ for (InternalIndex i : InternalIndex::Range(length)) {
PropertyDetails details = GetDetails(i);
details = details.CopyWithRepresentation(Representation::Tagged());
if (details.location() == kField) {
@@ -3717,7 +3719,7 @@ Handle<DescriptorArray> DescriptorArray::CopyUpToAddAttributes(
DescriptorArray::Allocate(isolate, size, slack);
if (attributes != NONE) {
- for (int i = 0; i < size; ++i) {
+ for (InternalIndex i : InternalIndex::Range(size)) {
MaybeObject value_or_field_type = desc->GetValue(i);
Name key = desc->GetKey(i);
PropertyDetails details = desc->GetDetails(i);
@@ -3737,7 +3739,7 @@ Handle<DescriptorArray> DescriptorArray::CopyUpToAddAttributes(
descriptors->Set(i, key, value_or_field_type, details);
}
} else {
- for (int i = 0; i < size; ++i) {
+ for (InternalIndex i : InternalIndex::Range(size)) {
descriptors->CopyFrom(i, *desc);
}
}
@@ -3760,21 +3762,17 @@ Handle<DescriptorArray> DescriptorArray::CopyForFastObjectClone(
Handle<DescriptorArray> descriptors =
DescriptorArray::Allocate(isolate, size, slack);
- for (int i = 0; i < size; ++i) {
+ for (InternalIndex i : InternalIndex::Range(size)) {
Name key = src->GetKey(i);
PropertyDetails details = src->GetDetails(i);
+ Representation new_representation = details.representation();
DCHECK(!key.IsPrivateName());
DCHECK(details.IsEnumerable());
DCHECK_EQ(details.kind(), kData);
-
- // Ensure the ObjectClone property details are NONE, and that all source
- // details did not contain DONT_ENUM.
- PropertyDetails new_details(kData, NONE, details.location(),
- details.constness(), details.representation(),
- details.field_index());
- // Do not propagate the field type of normal object fields from the
- // original descriptors since FieldType changes don't create new maps.
+ // If the new representation is an in-place changeable field, make it
+ // generic as possible (under in-place changes) to avoid type confusion if
+ // the source representation changes after this feedback has been collected.
MaybeObject type = src->GetValue(i);
if (details.location() == PropertyLocation::kField) {
type = MaybeObject::FromObject(FieldType::Any());
@@ -3783,13 +3781,15 @@ Handle<DescriptorArray> DescriptorArray::CopyForFastObjectClone(
// need to generalize the descriptors here. That will also enable
// us to skip the defensive copying of the target map whenever a
// CloneObjectIC misses.
- if (FLAG_modify_field_representation_inplace &&
- (new_details.representation().IsSmi() ||
- new_details.representation().IsHeapObject())) {
- new_details =
- new_details.CopyWithRepresentation(Representation::Tagged());
- }
+ new_representation = new_representation.MostGenericInPlaceChange();
}
+
+ // Ensure the ObjectClone property details are NONE, and that all source
+ // details did not contain DONT_ENUM.
+ PropertyDetails new_details(kData, NONE, details.location(),
+ details.constness(), new_representation,
+ details.field_index());
+
descriptors->Set(i, key, type, new_details);
}
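
The loop above replaces the old flag-guarded Smi/HeapObject special case with a single MostGenericInPlaceChange() call on the representation. The intent, as a sketch (assuming the usual V8 lattice where Smi and HeapObject fields can change representation in place but Double cannot):

    // Sketch: widen in-place-changeable representations for clone feedback so
    // later in-place changes in the source map cannot confuse the clone target.
    enum class Rep { kSmi, kDouble, kHeapObject, kTagged };

    Rep MostGenericInPlaceChange(Rep r) {
      switch (r) {
        case Rep::kSmi:
        case Rep::kHeapObject:
          return Rep::kTagged;  // both can be changed in place; widen now
        default:
          return r;  // Double and Tagged are already as generic as they get
      }
    }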
@@ -3799,7 +3799,7 @@ Handle<DescriptorArray> DescriptorArray::CopyForFastObjectClone(
}
bool DescriptorArray::IsEqualUpTo(DescriptorArray desc, int nof_descriptors) {
- for (int i = 0; i < nof_descriptors; i++) {
+ for (InternalIndex i : InternalIndex::Range(nof_descriptors)) {
if (GetKey(i) != desc.GetKey(i) || GetValue(i) != desc.GetValue(i)) {
return false;
}
@@ -3816,8 +3816,7 @@ bool DescriptorArray::IsEqualUpTo(DescriptorArray desc, int nof_descriptors) {
Handle<FixedArray> FixedArray::SetAndGrow(Isolate* isolate,
Handle<FixedArray> array, int index,
- Handle<Object> value,
- AllocationType allocation) {
+ Handle<Object> value) {
if (index < array->length()) {
array->set(index, *value);
return array;
@@ -3827,7 +3826,7 @@ Handle<FixedArray> FixedArray::SetAndGrow(Isolate* isolate,
capacity = JSObject::NewElementsCapacity(capacity);
} while (capacity <= index);
Handle<FixedArray> new_array =
- isolate->factory()->NewUninitializedFixedArray(capacity, allocation);
+ isolate->factory()->NewUninitializedFixedArray(capacity);
array->CopyTo(0, *new_array, 0, array->length());
new_array->FillWithHoles(array->length(), new_array->length());
new_array->set(index, *value);
@@ -4147,12 +4146,10 @@ Handle<FrameArray> FrameArray::EnsureSpace(Isolate* isolate,
Handle<DescriptorArray> DescriptorArray::Allocate(Isolate* isolate,
int nof_descriptors,
- int slack,
- AllocationType allocation) {
+ int slack) {
return nof_descriptors + slack == 0
? isolate->factory()->empty_descriptor_array()
- : isolate->factory()->NewDescriptorArray(nof_descriptors, slack,
- allocation);
+ : isolate->factory()->NewDescriptorArray(nof_descriptors, slack);
}
void DescriptorArray::Initialize(EnumCache enum_cache,
@@ -4174,8 +4171,8 @@ void DescriptorArray::ClearEnumCache() {
set_enum_cache(GetReadOnlyRoots().empty_enum_cache());
}
-void DescriptorArray::Replace(int index, Descriptor* descriptor) {
- descriptor->SetSortedKeyIndex(GetSortedKeyIndex(index));
+void DescriptorArray::Replace(InternalIndex index, Descriptor* descriptor) {
+ descriptor->SetSortedKeyIndex(GetSortedKeyIndex(index.as_int()));
Set(index, descriptor);
}
@@ -4193,7 +4190,7 @@ void DescriptorArray::InitializeOrChangeEnumCache(
}
}
-void DescriptorArray::CopyFrom(int index, DescriptorArray src) {
+void DescriptorArray::CopyFrom(InternalIndex index, DescriptorArray src) {
PropertyDetails details = src.GetDetails(index);
Set(index, src.GetKey(index), src.GetValue(index), details);
}
@@ -4304,7 +4301,7 @@ bool DescriptorArray::IsEqualTo(DescriptorArray other) {
if (number_of_all_descriptors() != other.number_of_all_descriptors()) {
return false;
}
- for (int i = 0; i < number_of_descriptors(); ++i) {
+ for (InternalIndex i : InternalIndex::Range(number_of_descriptors())) {
if (GetKey(i) != other.GetKey(i)) return false;
if (GetDetails(i).AsSmi() != other.GetDetails(i).AsSmi()) return false;
if (GetValue(i) != other.GetValue(i)) return false;
@@ -4507,6 +4504,7 @@ uint32_t StringHasher::MakeArrayIndexHash(uint32_t value, int length) {
value |= length << String::ArrayIndexLengthBits::kShift;
DCHECK_EQ(value & String::kIsNotArrayIndexMask, 0);
+ DCHECK_EQ(value & String::kIsNotIntegerIndexMask, 0);
DCHECK_EQ(length <= String::kMaxCachedArrayIndexLength,
Name::ContainsCachedArrayIndex(value));
return value;
@@ -4659,8 +4657,26 @@ bool Script::GetPositionInfo(int position, PositionInfo* info,
// directly.
if (type() == Script::TYPE_WASM) {
DCHECK_LE(0, position);
- return WasmModuleObject::cast(wasm_module_object())
- .GetPositionInfo(static_cast<uint32_t>(position), info);
+ wasm::NativeModule* native_module = wasm_native_module();
+ const wasm::WasmModule* module = native_module->module();
+ if (source_mapping_url().IsString()) {
+ if (module->functions.size() == 0) return false;
+ info->line = 0;
+ info->column = position;
+ info->line_start = module->functions[0].code.offset();
+ info->line_end = module->functions.back().code.end_offset();
+ return true;
+ }
+ int func_index = GetContainingWasmFunction(module, position);
+ if (func_index < 0) return false;
+
+ const wasm::WasmFunction& function = module->functions[func_index];
+
+ info->line = func_index;
+ info->column = position - function.code.offset();
+ info->line_start = function.code.offset();
+ info->line_end = function.code.end_offset();
+ return true;
}
if (line_ends().IsUndefined()) {
@@ -4972,26 +4988,8 @@ void SharedFunctionInfo::ScriptIterator::Reset(Isolate* isolate,
index_ = 0;
}
-SharedFunctionInfo::GlobalIterator::GlobalIterator(Isolate* isolate)
- : isolate_(isolate),
- script_iterator_(isolate),
- noscript_sfi_iterator_(isolate->heap()->noscript_shared_function_infos()),
- sfi_iterator_(isolate, script_iterator_.Next()) {}
-
-SharedFunctionInfo SharedFunctionInfo::GlobalIterator::Next() {
- HeapObject next = noscript_sfi_iterator_.Next();
- if (!next.is_null()) return SharedFunctionInfo::cast(next);
- for (;;) {
- next = sfi_iterator_.Next();
- if (!next.is_null()) return SharedFunctionInfo::cast(next);
- Script next_script = script_iterator_.Next();
- if (next_script.is_null()) return SharedFunctionInfo();
- sfi_iterator_.Reset(isolate_, next_script);
- }
-}
-
void SharedFunctionInfo::SetScript(Handle<SharedFunctionInfo> shared,
- Handle<Object> script_object,
+ Handle<HeapObject> script_object,
int function_literal_id,
bool reset_preparsed_scope_data) {
if (shared->script() == *script_object) return;
@@ -5020,30 +5018,8 @@ void SharedFunctionInfo::SetScript(Handle<SharedFunctionInfo> shared,
}
#endif
list->Set(function_literal_id, HeapObjectReference::Weak(*shared));
-
- // Remove shared function info from root array.
- WeakArrayList noscript_list =
- isolate->heap()->noscript_shared_function_infos();
- CHECK(noscript_list.RemoveOne(MaybeObjectHandle::Weak(shared)));
} else {
DCHECK(shared->script().IsScript());
- Handle<WeakArrayList> list =
- isolate->factory()->noscript_shared_function_infos();
-
-#ifdef DEBUG
- if (FLAG_enable_slow_asserts) {
- WeakArrayList::Iterator iterator(*list);
- for (HeapObject next = iterator.Next(); !next.is_null();
- next = iterator.Next()) {
- DCHECK_NE(next, *shared);
- }
- }
-#endif // DEBUG
-
- list =
- WeakArrayList::AddToEnd(isolate, list, MaybeObjectHandle::Weak(shared));
-
- isolate->heap()->SetRootNoScriptSharedFunctionInfos(*list);
// Remove shared function info from old script's list.
Script old_script = Script::cast(shared->script());
@@ -5354,6 +5330,8 @@ void SharedFunctionInfo::InitFromFunctionLiteral(
Scope* outer_scope = lit->scope()->GetOuterScopeWithContext();
if (outer_scope) {
shared_info->set_outer_scope_info(*outer_scope->scope_info());
+ shared_info->set_private_name_lookup_skips_outer_class(
+ lit->scope()->private_name_lookup_skips_outer_class());
}
}
@@ -5669,9 +5647,10 @@ bool JSArray::HasReadOnlyLength(Handle<JSArray> array) {
// Fast path: "length" is the first fast property of arrays. Since it's not
// configurable, it's guaranteed to be the first in the descriptor array.
if (!map.is_dictionary_map()) {
- DCHECK(map.instance_descriptors().GetKey(0) ==
+ InternalIndex first(0);
+ DCHECK(map.instance_descriptors().GetKey(first) ==
array->GetReadOnlyRoots().length_string());
- return map.instance_descriptors().GetDetails(0).IsReadOnly();
+ return map.instance_descriptors().GetDetails(first).IsReadOnly();
}
Isolate* isolate = array->GetIsolate();
@@ -5927,17 +5906,25 @@ MaybeHandle<Object> JSPromise::Resolve(Handle<JSPromise> promise,
// 8. Let then be Get(resolution, "then").
MaybeHandle<Object> then;
- if (isolate->IsPromiseThenLookupChainIntact(
- Handle<JSReceiver>::cast(resolution))) {
+ Handle<JSReceiver> receiver(Handle<JSReceiver>::cast(resolution));
+
+ // Make sure a lookup of "then" on any JSPromise whose [[Prototype]] is the
+ // initial %PromisePrototype% yields the initial method. In addition this
+ // protector also guards the negative lookup of "then" on the intrinsic
+ // %ObjectPrototype%, meaning that such lookups are guaranteed to yield
+ // undefined without triggering any side-effects.
+ if (receiver->IsJSPromise() &&
+ isolate->IsInAnyContext(receiver->map().prototype(),
+ Context::PROMISE_PROTOTYPE_INDEX) &&
+ Protectors::IsPromiseThenLookupChainIntact(isolate)) {
// We can skip the "then" lookup on {resolution} if its [[Prototype]]
// is the (initial) Promise.prototype and the Promise#then protector
// is intact, as that guards the lookup path for the "then" property
// on JSPromise instances which have the (initial) %PromisePrototype%.
then = isolate->promise_then();
} else {
- then =
- JSReceiver::GetProperty(isolate, Handle<JSReceiver>::cast(resolution),
- isolate->factory()->then_string());
+ then = JSReceiver::GetProperty(isolate, receiver,
+ isolate->factory()->then_string());
}
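
The rewritten condition spells out when the "then" lookup may be skipped: the resolution must be a real JSPromise, its prototype must be the initial %PromisePrototype%, and the Promise#then protector must still be intact. The general shape of such a protector-guarded fast path, with stand-in types (not V8's API):

    // Sketch of a protector-guarded fast path. The protector is invalidated
    // as soon as user code could have redefined "then"; after that, every
    // resolution must perform the observable property lookup.
    struct PromiseLikeObject {
      bool has_initial_prototype;  // [[Prototype]] is %PromisePrototype%
    };

    enum class ThenSource { kCachedBuiltin, kGenericLookup };

    ThenSource LookupThen(const PromiseLikeObject& p, bool protector_intact) {
      if (p.has_initial_prototype && protector_intact) {
        return ThenSource::kCachedBuiltin;  // no lookup, no side effects
      }
      return ThenSource::kGenericLookup;  // may run getters or proxy traps
    }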
// 9. If then is an abrupt completion, then
@@ -6151,27 +6138,40 @@ bool JSRegExp::ShouldProduceBytecode() {
}
// An irregexp is considered to be marked for tier up if the tier-up ticks value
-// is not zero. An atom is not subject to tier-up implementation, so the tier-up
-// ticks value is not set.
+// reaches zero. An atom is not subject to tier-up implementation, so the
+// tier-up ticks value is not set.
bool JSRegExp::MarkedForTierUp() {
DCHECK(data().IsFixedArray());
- if (TypeTag() == JSRegExp::ATOM) {
+ if (TypeTag() == JSRegExp::ATOM || !FLAG_regexp_tier_up) {
return false;
}
- return Smi::ToInt(DataAt(kIrregexpTierUpTicksIndex)) != 0;
+ return Smi::ToInt(DataAt(kIrregexpTicksUntilTierUpIndex)) == 0;
}
-void JSRegExp::ResetTierUp() {
+void JSRegExp::ResetLastTierUpTick() {
DCHECK(FLAG_regexp_tier_up);
DCHECK_EQ(TypeTag(), JSRegExp::IRREGEXP);
- FixedArray::cast(data()).set(JSRegExp::kIrregexpTierUpTicksIndex, Smi::kZero);
+ int tier_up_ticks = Smi::ToInt(DataAt(kIrregexpTicksUntilTierUpIndex)) + 1;
+ FixedArray::cast(data()).set(JSRegExp::kIrregexpTicksUntilTierUpIndex,
+ Smi::FromInt(tier_up_ticks));
+}
+
+void JSRegExp::TierUpTick() {
+ DCHECK(FLAG_regexp_tier_up);
+ DCHECK_EQ(TypeTag(), JSRegExp::IRREGEXP);
+ int tier_up_ticks = Smi::ToInt(DataAt(kIrregexpTicksUntilTierUpIndex));
+ if (tier_up_ticks == 0) {
+ return;
+ }
+ FixedArray::cast(data()).set(JSRegExp::kIrregexpTicksUntilTierUpIndex,
+ Smi::FromInt(tier_up_ticks - 1));
}
void JSRegExp::MarkTierUpForNextExec() {
DCHECK(FLAG_regexp_tier_up);
DCHECK_EQ(TypeTag(), JSRegExp::IRREGEXP);
- FixedArray::cast(data()).set(JSRegExp::kIrregexpTierUpTicksIndex,
- Smi::FromInt(1));
+ FixedArray::cast(data()).set(JSRegExp::kIrregexpTicksUntilTierUpIndex,
+ Smi::kZero);
}
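
After this change the tier-up state is a countdown rather than a boolean: TierUpTick decrements the remaining ticks, MarkTierUpForNextExec forces them to zero, and MarkedForTierUp fires once zero is reached (never for ATOM regexps). A compact model:

    // Sketch of the ticks-until-tier-up countdown kept in the regexp data.
    struct IrregexpData {
      int ticks_until_tier_up;  // a Smi slot in the real JSRegExp data array
    };

    bool MarkedForTierUp(const IrregexpData& d, bool flag_regexp_tier_up) {
      if (!flag_regexp_tier_up) return false;
      return d.ticks_until_tier_up == 0;  // countdown has run out
    }

    void TierUpTick(IrregexpData* d) {
      if (d->ticks_until_tier_up == 0) return;  // already due for tier-up
      d->ticks_until_tier_up--;
    }

    void MarkTierUpForNextExec(IrregexpData* d) {
      d->ticks_until_tier_up = 0;  // compile optimized code on next execution
    }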
namespace {
@@ -6938,7 +6938,7 @@ void AddToFeedbackCellsMap(Handle<CompilationCacheTable> cache, int cache_entry,
if (entry < 0) {
// Copy old optimized code map and append one new entry.
new_literals_map = isolate->factory()->CopyWeakFixedArrayAndGrow(
- old_literals_map, kLiteralEntryLength, AllocationType::kOld);
+ old_literals_map, kLiteralEntryLength);
entry = old_literals_map->length();
}
}
@@ -7312,8 +7312,13 @@ Handle<NumberDictionary> NumberDictionary::Set(
Isolate* isolate, Handle<NumberDictionary> dictionary, uint32_t key,
Handle<Object> value, Handle<JSObject> dictionary_holder,
PropertyDetails details) {
- dictionary->UpdateMaxNumberKey(key, dictionary_holder);
- return AtPut(isolate, dictionary, key, value, details);
+  // Set can be called with an empty dictionary. UpdateMaxNumberKey doesn't
+  // expect empty dictionaries, so call AtPut first: it handles them correctly
+  // by creating a new dictionary when required.
+ Handle<NumberDictionary> new_dictionary =
+ AtPut(isolate, dictionary, key, value, details);
+ new_dictionary->UpdateMaxNumberKey(key, dictionary_holder);
+ return new_dictionary;
}
void NumberDictionary::CopyValuesTo(FixedArray elements) {
@@ -7898,9 +7903,6 @@ void PropertyCell::SetValueWithInvalidation(Isolate* isolate,
Handle<PropertyCell> cell,
Handle<Object> new_value) {
if (cell->value() != *new_value) {
- if (FLAG_trace_protector_invalidation) {
- isolate->TraceProtectorInvalidation(cell_name);
- }
cell->set_value(*new_value);
cell->dependent_code().DeoptimizeDependentCodeGroup(
isolate, DependentCode::kPropertyCellChangedGroup);
diff --git a/deps/v8/src/objects/objects.h b/deps/v8/src/objects/objects.h
index b4e78a1937..f66023456c 100644
--- a/deps/v8/src/objects/objects.h
+++ b/deps/v8/src/objects/objects.h
@@ -46,19 +46,22 @@
// - JSArrayBufferView
// - JSTypedArray
// - JSDataView
-// - JSBoundFunction
// - JSCollection
// - JSSet
// - JSMap
+// - JSCustomElementsObject (may have elements despite empty FixedArray)
+// - JSSpecialObject (requires custom property lookup handling)
+// - JSGlobalObject
+// - JSGlobalProxy
+// - JSModuleNamespace
+// - JSPrimitiveWrapper
// - JSDate
-// - JSFunction
+// - JSFunctionOrBoundFunction
+// - JSBoundFunction
+// - JSFunction
// - JSGeneratorObject
-// - JSGlobalObject
-// - JSGlobalProxy
// - JSMapIterator
// - JSMessageObject
-// - JSModuleNamespace
-// - JSPrimitiveWrapper
// - JSRegExp
// - JSSetIterator
// - JSStringIterator
@@ -104,30 +107,32 @@
// - ScriptContextTable
// - ClosureFeedbackCellArray
// - FixedDoubleArray
-// - Name
-// - String
-// - SeqString
-// - SeqOneByteString
-// - SeqTwoByteString
-// - SlicedString
-// - ConsString
-// - ThinString
-// - ExternalString
-// - ExternalOneByteString
-// - ExternalTwoByteString
-// - InternalizedString
-// - SeqInternalizedString
-// - SeqOneByteInternalizedString
-// - SeqTwoByteInternalizedString
-// - ConsInternalizedString
-// - ExternalInternalizedString
-// - ExternalOneByteInternalizedString
-// - ExternalTwoByteInternalizedString
-// - Symbol
+// - PrimitiveHeapObject
+// - BigInt
+// - HeapNumber
+// - Name
+// - String
+// - SeqString
+// - SeqOneByteString
+// - SeqTwoByteString
+// - SlicedString
+// - ConsString
+// - ThinString
+// - ExternalString
+// - ExternalOneByteString
+// - ExternalTwoByteString
+// - InternalizedString
+// - SeqInternalizedString
+// - SeqOneByteInternalizedString
+// - SeqTwoByteInternalizedString
+// - ConsInternalizedString
+// - ExternalInternalizedString
+// - ExternalOneByteInternalizedString
+// - ExternalTwoByteInternalizedString
+// - Symbol
+// - Oddball
// - Context
// - NativeContext
-// - HeapNumber
-// - BigInt
// - Cell
// - DescriptorArray
// - PropertyCell
@@ -135,7 +140,6 @@
// - Code
// - AbstractCode, a wrapper around Code or BytecodeArray
// - Map
-// - Oddball
// - Foreign
// - SmallOrderedHashTable
// - SmallOrderedHashMap
@@ -607,15 +611,13 @@ class Object : public TaggedImpl<HeapObjectReferenceType::STRONG, Address> {
// For use with std::unordered_set.
struct Hasher {
size_t operator()(const Object o) const {
- return std::hash<v8::internal::Address>{}(o.ptr());
+ return std::hash<v8::internal::Address>{}(static_cast<Tagged_t>(o.ptr()));
}
};
// For use with std::map.
struct Comparer {
- bool operator()(const Object a, const Object b) const {
- return a.ptr() < b.ptr();
- }
+ bool operator()(const Object a, const Object b) const { return a < b; }
};
template <class T, typename std::enable_if<std::is_arithmetic<T>::value,
@@ -784,7 +786,8 @@ enum AccessorComponent { ACCESSOR_GETTER, ACCESSOR_SETTER };
enum class GetKeysConversion {
kKeepNumbers = static_cast<int>(v8::KeyConversionMode::kKeepNumbers),
- kConvertToString = static_cast<int>(v8::KeyConversionMode::kConvertToString)
+ kConvertToString = static_cast<int>(v8::KeyConversionMode::kConvertToString),
+ kNoNumbers = static_cast<int>(v8::KeyConversionMode::kNoNumbers)
};
enum class KeyCollectionMode {
diff --git a/deps/v8/src/objects/oddball.h b/deps/v8/src/objects/oddball.h
index 025f9379ba..e88d96624e 100644
--- a/deps/v8/src/objects/oddball.h
+++ b/deps/v8/src/objects/oddball.h
@@ -5,8 +5,7 @@
#ifndef V8_OBJECTS_ODDBALL_H_
#define V8_OBJECTS_ODDBALL_H_
-#include "src/objects/heap-object.h"
-#include "torque-generated/class-definitions-tq.h"
+#include "src/objects/primitive-heap-object.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -15,7 +14,7 @@ namespace v8 {
namespace internal {
// The Oddball describes objects null, undefined, true, and false.
-class Oddball : public TorqueGeneratedOddball<Oddball, HeapObject> {
+class Oddball : public TorqueGeneratedOddball<Oddball, PrimitiveHeapObject> {
public:
// [to_number_raw]: Cached raw to_number computed at startup.
inline void set_to_number_raw_as_bits(uint64_t bits);
diff --git a/deps/v8/src/objects/ordered-hash-table-inl.h b/deps/v8/src/objects/ordered-hash-table-inl.h
index a2270b0a4a..f6b8f972a9 100644
--- a/deps/v8/src/objects/ordered-hash-table-inl.h
+++ b/deps/v8/src/objects/ordered-hash-table-inl.h
@@ -164,10 +164,18 @@ inline bool OrderedHashMap::Is(Handle<HeapObject> table) {
return table->IsOrderedHashMap();
}
+inline bool OrderedNameDictionary::Is(Handle<HeapObject> table) {
+ return table->IsOrderedNameDictionary();
+}
+
inline bool SmallOrderedHashSet::Is(Handle<HeapObject> table) {
return table->IsSmallOrderedHashSet();
}
+inline bool SmallOrderedNameDictionary::Is(Handle<HeapObject> table) {
+ return table->IsSmallOrderedNameDictionary();
+}
+
inline bool SmallOrderedHashMap::Is(Handle<HeapObject> table) {
return table->IsSmallOrderedHashMap();
}
diff --git a/deps/v8/src/objects/ordered-hash-table.cc b/deps/v8/src/objects/ordered-hash-table.cc
index f0cc7b9df7..962224024e 100644
--- a/deps/v8/src/objects/ordered-hash-table.cc
+++ b/deps/v8/src/objects/ordered-hash-table.cc
@@ -168,8 +168,8 @@ Handle<FixedArray> OrderedHashSet::ConvertToKeysArray(
for (int i = 0; i < length; i++) {
int index = HashTableStartIndex() + nof_buckets + (i * kEntrySize);
Object key = table->get(index);
+ uint32_t index_value;
if (convert == GetKeysConversion::kConvertToString) {
- uint32_t index_value;
if (key.ToArrayIndex(&index_value)) {
// Avoid trashing the Number2String cache if indices get very large.
bool use_cache = i < kMaxStringTableEntries;
@@ -177,6 +177,8 @@ Handle<FixedArray> OrderedHashSet::ConvertToKeysArray(
} else {
CHECK(key.IsName());
}
+ } else if (convert == GetKeysConversion::kNoNumbers) {
+ DCHECK(!key.ToArrayIndex(&index_value));
}
result->set(i, key);
}
@@ -957,22 +959,19 @@ OrderedHashTableHandler<SmallOrderedNameDictionary,
OrderedNameDictionary>::Allocate(Isolate* isolate,
int capacity);
-#if !defined(V8_OS_WIN)
template <class SmallTable, class LargeTable>
bool OrderedHashTableHandler<SmallTable, LargeTable>::Delete(
- Handle<HeapObject> table, Handle<Object> key) {
+ Isolate* isolate, Handle<HeapObject> table, Handle<Object> key) {
if (SmallTable::Is(table)) {
- return SmallTable::Delete(Handle<SmallTable>::cast(table), key);
+ return SmallTable::Delete(isolate, *Handle<SmallTable>::cast(table), *key);
}
DCHECK(LargeTable::Is(table));
// Note: Once we migrate to a big hash table, we never migrate
// down to a smaller hash table.
- return LargeTable::Delete(Handle<LargeTable>::cast(table), key);
+ return LargeTable::Delete(isolate, *Handle<LargeTable>::cast(table), *key);
}
-#endif
-#if !defined(V8_OS_WIN)
template <class SmallTable, class LargeTable>
bool OrderedHashTableHandler<SmallTable, LargeTable>::HasKey(
Isolate* isolate, Handle<HeapObject> table, Handle<Object> key) {
@@ -983,7 +982,6 @@ bool OrderedHashTableHandler<SmallTable, LargeTable>::HasKey(
DCHECK(LargeTable::Is(table));
return LargeTable::HasKey(isolate, LargeTable::cast(*table), *key);
}
-#endif
template bool
OrderedHashTableHandler<SmallOrderedHashSet, OrderedHashSet>::HasKey(
@@ -992,6 +990,18 @@ template bool
OrderedHashTableHandler<SmallOrderedHashMap, OrderedHashMap>::HasKey(
Isolate* isolate, Handle<HeapObject> table, Handle<Object> key);
+template bool
+OrderedHashTableHandler<SmallOrderedHashSet, OrderedHashSet>::Delete(
+ Isolate* isolate, Handle<HeapObject> table, Handle<Object> key);
+template bool
+OrderedHashTableHandler<SmallOrderedHashMap, OrderedHashMap>::Delete(
+ Isolate* isolate, Handle<HeapObject> table, Handle<Object> key);
+template bool
+OrderedHashTableHandler<SmallOrderedNameDictionary,
+ OrderedNameDictionary>::Delete(Isolate* isolate,
+ Handle<HeapObject> table,
+ Handle<Object> key);
+
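
OrderedHashTableHandler is the small/large dispatcher: each operation checks which representation the table currently has and forwards to it, relying on the invariant that tables only migrate from the small to the large form. The dispatch shape, sketched with std::variant standing in for the heap-object type check:

    #include <unordered_set>
    #include <variant>

    // Stand-ins for SmallOrderedHashSet / OrderedHashSet (sketch only).
    struct SmallSet { std::unordered_set<int> s; };
    struct LargeSet { std::unordered_set<int> s; };

    // Small tables never reappear once a table has grown, so anything that
    // is not small must be large.
    bool HandlerDelete(std::variant<SmallSet, LargeSet>* table, int key) {
      if (auto* small = std::get_if<SmallSet>(table)) {
        return small->s.erase(key) > 0;
      }
      return std::get<LargeSet>(*table).s.erase(key) > 0;
    }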
MaybeHandle<OrderedHashMap> OrderedHashMapHandler::AdjustRepresentation(
Isolate* isolate, Handle<SmallOrderedHashMap> table) {
MaybeHandle<OrderedHashMap> new_table_candidate =
diff --git a/deps/v8/src/objects/ordered-hash-table.h b/deps/v8/src/objects/ordered-hash-table.h
index 21decaeba7..590846f130 100644
--- a/deps/v8/src/objects/ordered-hash-table.h
+++ b/deps/v8/src/objects/ordered-hash-table.h
@@ -658,7 +658,8 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) OrderedHashTableHandler {
using Entry = int;
static MaybeHandle<HeapObject> Allocate(Isolate* isolate, int capacity);
- static bool Delete(Handle<HeapObject> table, Handle<Object> key);
+ static bool Delete(Isolate* isolate, Handle<HeapObject> table,
+ Handle<Object> key);
static bool HasKey(Isolate* isolate, Handle<HeapObject> table,
Handle<Object> key);
@@ -730,6 +731,7 @@ class OrderedNameDictionary
static HeapObject GetEmpty(ReadOnlyRoots ro_roots);
static inline RootIndex GetMapRootIndex();
+ static inline bool Is(Handle<HeapObject> table);
static const int kValueOffset = 1;
static const int kPropertyDetailsOffset = 2;
@@ -831,6 +833,7 @@ class SmallOrderedNameDictionary
Object value, PropertyDetails details);
static inline RootIndex GetMapRootIndex();
+ static inline bool Is(Handle<HeapObject> table);
OBJECT_CONSTRUCTORS(SmallOrderedNameDictionary,
SmallOrderedHashTable<SmallOrderedNameDictionary>);
diff --git a/deps/v8/src/objects/osr-optimized-code-cache-inl.h b/deps/v8/src/objects/osr-optimized-code-cache-inl.h
new file mode 100644
index 0000000000..ab7a97b6aa
--- /dev/null
+++ b/deps/v8/src/objects/osr-optimized-code-cache-inl.h
@@ -0,0 +1,25 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_OSR_OPTIMIZED_CODE_CACHE_INL_H_
+#define V8_OBJECTS_OSR_OPTIMIZED_CODE_CACHE_INL_H_
+
+#include "src/objects/osr-optimized-code-cache.h"
+
+#include "src/objects/fixed-array-inl.h"
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+OBJECT_CONSTRUCTORS_IMPL(OSROptimizedCodeCache, WeakFixedArray)
+CAST_ACCESSOR(OSROptimizedCodeCache)
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_OSR_OPTIMIZED_CODE_CACHE_INL_H_
diff --git a/deps/v8/src/objects/osr-optimized-code-cache.cc b/deps/v8/src/objects/osr-optimized-code-cache.cc
new file mode 100644
index 0000000000..62190529f1
--- /dev/null
+++ b/deps/v8/src/objects/osr-optimized-code-cache.cc
@@ -0,0 +1,223 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/execution/isolate-inl.h"
+#include "src/objects/code.h"
+#include "src/objects/maybe-object.h"
+#include "src/objects/shared-function-info.h"
+
+#include "src/objects/osr-optimized-code-cache.h"
+
+namespace v8 {
+namespace internal {
+
+const int OSROptimizedCodeCache::kInitialLength;
+const int OSROptimizedCodeCache::kMaxLength;
+
+void OSROptimizedCodeCache::AddOptimizedCode(
+ Handle<NativeContext> native_context, Handle<SharedFunctionInfo> shared,
+ Handle<Code> code, BailoutId osr_offset) {
+ DCHECK(!osr_offset.IsNone());
+ DCHECK_EQ(code->kind(), Code::OPTIMIZED_FUNCTION);
+ STATIC_ASSERT(kEntryLength == 3);
+ Isolate* isolate = native_context->GetIsolate();
+ DCHECK(!isolate->serializer_enabled());
+
+ Handle<OSROptimizedCodeCache> osr_cache(
+ native_context->GetOSROptimizedCodeCache(), isolate);
+
+ DCHECK_EQ(osr_cache->FindEntry(shared, osr_offset), -1);
+ int entry = -1;
+ for (int index = 0; index < osr_cache->length(); index += kEntryLength) {
+ if (osr_cache->Get(index + kSharedOffset)->IsCleared() ||
+ osr_cache->Get(index + kCachedCodeOffset)->IsCleared()) {
+ entry = index;
+ break;
+ }
+ }
+
+ if (entry == -1 && osr_cache->length() + kEntryLength <= kMaxLength) {
+ entry = GrowOSRCache(native_context, &osr_cache);
+ } else if (entry == -1) {
+ // We reached max capacity and cannot grow further. Reuse an existing entry.
+    // TODO(mythria): We could use better mechanisms (like LRU) to choose
+    // which entries to replace. We don't expect this to be a common case,
+    // so for now we replace the first entry.
+ entry = 0;
+ }
+
+ osr_cache->InitializeEntry(entry, *shared, *code, osr_offset);
+}
+
+void OSROptimizedCodeCache::Clear(NativeContext native_context) {
+ native_context.set_osr_code_cache(
+ *native_context.GetIsolate()->factory()->empty_weak_fixed_array());
+}
+
+void OSROptimizedCodeCache::Compact(Handle<NativeContext> native_context) {
+ Handle<OSROptimizedCodeCache> osr_cache(
+ native_context->GetOSROptimizedCodeCache(), native_context->GetIsolate());
+ Isolate* isolate = native_context->GetIsolate();
+
+ // Re-adjust the cache so all the valid entries are on one side. This will
+ // enable us to compress the cache if needed.
+ int curr_valid_index = 0;
+ for (int curr_index = 0; curr_index < osr_cache->length();
+ curr_index += kEntryLength) {
+ if (osr_cache->Get(curr_index + kSharedOffset)->IsCleared() ||
+ osr_cache->Get(curr_index + kCachedCodeOffset)->IsCleared()) {
+ continue;
+ }
+ if (curr_valid_index != curr_index) {
+ osr_cache->MoveEntry(curr_index, curr_valid_index, isolate);
+ }
+ curr_valid_index += kEntryLength;
+ }
+
+ if (!NeedsTrimming(curr_valid_index, osr_cache->length())) return;
+
+ Handle<OSROptimizedCodeCache> new_osr_cache =
+ Handle<OSROptimizedCodeCache>::cast(isolate->factory()->NewWeakFixedArray(
+ CapacityForLength(curr_valid_index), AllocationType::kOld));
+ DCHECK_LT(new_osr_cache->length(), osr_cache->length());
+ {
+ DisallowHeapAllocation no_gc;
+ new_osr_cache->CopyElements(native_context->GetIsolate(), 0, *osr_cache, 0,
+ new_osr_cache->length(),
+ new_osr_cache->GetWriteBarrierMode(no_gc));
+ }
+ native_context->set_osr_code_cache(*new_osr_cache);
+}
+
+Code OSROptimizedCodeCache::GetOptimizedCode(Handle<SharedFunctionInfo> shared,
+ BailoutId osr_offset,
+ Isolate* isolate) {
+ DisallowHeapAllocation no_gc;
+ int index = FindEntry(shared, osr_offset);
+ if (index == -1) return Code();
+ Code code = GetCodeFromEntry(index);
+ if (code.is_null()) {
+ ClearEntry(index, isolate);
+ return code;
+ }
+ DCHECK(code.is_optimized_code() && !code.marked_for_deoptimization());
+ return code;
+}
+
+void OSROptimizedCodeCache::EvictMarkedCode(Isolate* isolate) {
+  // This is called from DeoptimizeMarkedCodeForContext, which uses raw
+  // pointers, hence the DisallowHeapAllocation scope here.
+ DisallowHeapAllocation no_gc;
+ for (int index = 0; index < length(); index += kEntryLength) {
+ MaybeObject code_entry = Get(index + kCachedCodeOffset);
+ HeapObject heap_object;
+ if (!code_entry->GetHeapObject(&heap_object)) continue;
+
+ DCHECK(heap_object.IsCode());
+ DCHECK(Code::cast(heap_object).is_optimized_code());
+ if (!Code::cast(heap_object).marked_for_deoptimization()) continue;
+
+ ClearEntry(index, isolate);
+ }
+}
+
+int OSROptimizedCodeCache::GrowOSRCache(
+ Handle<NativeContext> native_context,
+ Handle<OSROptimizedCodeCache>* osr_cache) {
+ Isolate* isolate = native_context->GetIsolate();
+ int old_length = (*osr_cache)->length();
+ int grow_by = CapacityForLength(old_length) - old_length;
+ DCHECK_GT(grow_by, kEntryLength);
+ *osr_cache = Handle<OSROptimizedCodeCache>::cast(
+ isolate->factory()->CopyWeakFixedArrayAndGrow(*osr_cache, grow_by));
+ for (int i = old_length; i < (*osr_cache)->length(); i++) {
+ (*osr_cache)->Set(i, HeapObjectReference::ClearedValue(isolate));
+ }
+ native_context->set_osr_code_cache(**osr_cache);
+
+ return old_length;
+}
+
+Code OSROptimizedCodeCache::GetCodeFromEntry(int index) {
+ DCHECK_LE(index + OSRCodeCacheConstants::kEntryLength, length());
+ DCHECK_EQ(index % kEntryLength, 0);
+ HeapObject code_entry;
+ Get(index + OSRCodeCacheConstants::kCachedCodeOffset)
+ ->GetHeapObject(&code_entry);
+ return code_entry.is_null() ? Code() : Code::cast(code_entry);
+}
+
+SharedFunctionInfo OSROptimizedCodeCache::GetSFIFromEntry(int index) {
+ DCHECK_LE(index + OSRCodeCacheConstants::kEntryLength, length());
+ DCHECK_EQ(index % kEntryLength, 0);
+ HeapObject sfi_entry;
+ Get(index + OSRCodeCacheConstants::kSharedOffset)->GetHeapObject(&sfi_entry);
+ return sfi_entry.is_null() ? SharedFunctionInfo()
+ : SharedFunctionInfo::cast(sfi_entry);
+}
+
+BailoutId OSROptimizedCodeCache::GetBailoutIdFromEntry(int index) {
+ DCHECK_LE(index + OSRCodeCacheConstants::kEntryLength, length());
+ DCHECK_EQ(index % kEntryLength, 0);
+ Smi osr_offset_entry;
+ Get(index + kOsrIdOffset)->ToSmi(&osr_offset_entry);
+ return BailoutId(osr_offset_entry.value());
+}
+
+int OSROptimizedCodeCache::FindEntry(Handle<SharedFunctionInfo> shared,
+ BailoutId osr_offset) {
+ DisallowHeapAllocation no_gc;
+ DCHECK(!osr_offset.IsNone());
+ for (int index = 0; index < length(); index += kEntryLength) {
+ if (GetSFIFromEntry(index) != *shared) continue;
+ if (GetBailoutIdFromEntry(index) != osr_offset) continue;
+ return index;
+ }
+ return -1;
+}
+
+void OSROptimizedCodeCache::ClearEntry(int index, Isolate* isolate) {
+ Set(index + OSRCodeCacheConstants::kSharedOffset,
+ HeapObjectReference::ClearedValue(isolate));
+ Set(index + OSRCodeCacheConstants::kCachedCodeOffset,
+ HeapObjectReference::ClearedValue(isolate));
+ Set(index + OSRCodeCacheConstants::kOsrIdOffset,
+ HeapObjectReference::ClearedValue(isolate));
+}
+
+void OSROptimizedCodeCache::InitializeEntry(int entry,
+ SharedFunctionInfo shared,
+ Code code, BailoutId osr_offset) {
+ Set(entry + OSRCodeCacheConstants::kSharedOffset,
+ HeapObjectReference::Weak(shared));
+ Set(entry + OSRCodeCacheConstants::kCachedCodeOffset,
+ HeapObjectReference::Weak(code));
+ Set(entry + OSRCodeCacheConstants::kOsrIdOffset,
+ MaybeObject::FromSmi(Smi::FromInt(osr_offset.ToInt())));
+}
+
+void OSROptimizedCodeCache::MoveEntry(int src, int dst, Isolate* isolate) {
+ Set(dst + OSRCodeCacheConstants::kSharedOffset,
+ Get(src + OSRCodeCacheConstants::kSharedOffset));
+ Set(dst + OSRCodeCacheConstants::kCachedCodeOffset,
+ Get(src + OSRCodeCacheConstants::kCachedCodeOffset));
+ Set(dst + OSRCodeCacheConstants::kOsrIdOffset, Get(src + kOsrIdOffset));
+ ClearEntry(src, isolate);
+}
+
+int OSROptimizedCodeCache::CapacityForLength(int curr_length) {
+ // TODO(mythria): This is a randomly chosen heuristic and is not based on any
+ // data. We may have to tune this later.
+ if (curr_length == 0) return kInitialLength;
+ if (curr_length * 2 > kMaxLength) return kMaxLength;
+ return curr_length * 2;
+}
+
+bool OSROptimizedCodeCache::NeedsTrimming(int num_valid_entries,
+ int curr_length) {
+ return curr_length > kInitialLength && curr_length > num_valid_entries * 3;
+}
+
+} // namespace internal
+} // namespace v8
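The growth and trimming policy above is self-contained, so it can be lifted out and exercised directly. A sketch with the same constants and logic as CapacityForLength/NeedsTrimming (free functions here; in V8 they are private statics on OSROptimizedCodeCache):

#include <cassert>

constexpr int kEntryLength = 3;
constexpr int kInitialLength = kEntryLength * 4;
constexpr int kMaxLength = kEntryLength * 1024;

int CapacityForLength(int curr_length) {
  if (curr_length == 0) return kInitialLength;
  if (curr_length * 2 > kMaxLength) return kMaxLength;
  return curr_length * 2;  // double until the cap is reached
}

bool NeedsTrimming(int num_valid_slots, int curr_length) {
  // Shrink only past the initial size and only when under a third of the
  // slots hold live entries.
  return curr_length > kInitialLength && curr_length > num_valid_slots * 3;
}

int main() {
  assert(CapacityForLength(0) == kInitialLength);
  assert(CapacityForLength(kInitialLength) == 2 * kInitialLength);
  assert(!NeedsTrimming(12, kInitialLength));  // at initial size, never trim
  assert(NeedsTrimming(6, 24));                // 24 slots, only 6 live
}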
diff --git a/deps/v8/src/objects/osr-optimized-code-cache.h b/deps/v8/src/objects/osr-optimized-code-cache.h
new file mode 100644
index 0000000000..99c148a7e1
--- /dev/null
+++ b/deps/v8/src/objects/osr-optimized-code-cache.h
@@ -0,0 +1,77 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_OSR_OPTIMIZED_CODE_CACHE_H_
+#define V8_OBJECTS_OSR_OPTIMIZED_CODE_CACHE_H_
+
+#include "src/objects/fixed-array.h"
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+class V8_EXPORT OSROptimizedCodeCache : public WeakFixedArray {
+ public:
+ DECL_CAST(OSROptimizedCodeCache)
+
+ enum OSRCodeCacheConstants {
+ kSharedOffset,
+ kCachedCodeOffset,
+ kOsrIdOffset,
+ kEntryLength
+ };
+
+ static const int kInitialLength = OSRCodeCacheConstants::kEntryLength * 4;
+ static const int kMaxLength = OSRCodeCacheConstants::kEntryLength * 1024;
+
+  // Caches the optimized code |code| corresponding to the shared function
+  // |shared| and bailout id |osr_offset| in the OSR optimized code cache.
+  // If the OSR code cache does not exist yet, this creates a code cache
+  // with kInitialLength entries.
+ static void AddOptimizedCode(Handle<NativeContext> context,
+ Handle<SharedFunctionInfo> shared,
+ Handle<Code> code, BailoutId osr_offset);
+  // Reduces the size of the OSR code cache if the number of valid entries is
+  // less than the current capacity of the cache.
+ static void Compact(Handle<NativeContext> context);
+ // Sets the OSR optimized code cache to an empty array.
+ static void Clear(NativeContext context);
+
+ // Returns the code corresponding to the shared function |shared| and
+ // BailoutId |offset| if an entry exists in the cache. Returns an empty
+ // object otherwise.
+ Code GetOptimizedCode(Handle<SharedFunctionInfo> shared, BailoutId osr_offset,
+ Isolate* isolate);
+
+ // Remove all code objects marked for deoptimization from OSR code cache.
+ void EvictMarkedCode(Isolate* isolate);
+
+ private:
+ // Functions that implement heuristics on when to grow / shrink the cache.
+ static int CapacityForLength(int curr_capacity);
+ static bool NeedsTrimming(int num_valid_entries, int curr_capacity);
+ static int GrowOSRCache(Handle<NativeContext> native_context,
+ Handle<OSROptimizedCodeCache>* osr_cache);
+
+ // Helper functions to get individual items from an entry in the cache.
+ Code GetCodeFromEntry(int index);
+ SharedFunctionInfo GetSFIFromEntry(int index);
+ BailoutId GetBailoutIdFromEntry(int index);
+
+ inline int FindEntry(Handle<SharedFunctionInfo> shared, BailoutId osr_offset);
+ inline void ClearEntry(int src, Isolate* isolate);
+ inline void InitializeEntry(int entry, SharedFunctionInfo shared, Code code,
+ BailoutId osr_offset);
+ inline void MoveEntry(int src, int dst, Isolate* isolate);
+
+ OBJECT_CONSTRUCTORS(OSROptimizedCodeCache, WeakFixedArray);
+};
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_OSR_OPTIMIZED_CODE_CACHE_H_
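Each cache entry occupies kEntryLength consecutive slots of the backing WeakFixedArray, in the order given by OSRCodeCacheConstants. A tiny sketch of the index arithmetic the Get/Set calls above rely on:

#include <cassert>

// Mirrors OSRCodeCacheConstants: one logical entry spans three slots.
enum Field { kSharedOffset = 0, kCachedCodeOffset = 1, kOsrIdOffset = 2 };
constexpr int kEntryLength = 3;

int SlotFor(int entry_start, Field field) {
  assert(entry_start % kEntryLength == 0);  // entries are 3-slot aligned
  return entry_start + field;
}

int main() {
  // The second entry starts at slot 3; its OSR id lives in slot 5.
  assert(SlotFor(1 * kEntryLength, kOsrIdOffset) == 5);
}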
diff --git a/deps/v8/src/objects/primitive-heap-object-inl.h b/deps/v8/src/objects/primitive-heap-object-inl.h
new file mode 100644
index 0000000000..2c694bd1d6
--- /dev/null
+++ b/deps/v8/src/objects/primitive-heap-object-inl.h
@@ -0,0 +1,26 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_PRIMITIVE_HEAP_OBJECT_INL_H_
+#define V8_OBJECTS_PRIMITIVE_HEAP_OBJECT_INL_H_
+
+#include "src/objects/primitive-heap-object.h"
+
+#include "src/objects/heap-object-inl.h"
+#include "torque-generated/class-definitions-tq-inl.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+TQ_OBJECT_CONSTRUCTORS_IMPL(PrimitiveHeapObject)
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_PRIMITIVE_HEAP_OBJECT_INL_H_
diff --git a/deps/v8/src/objects/primitive-heap-object.h b/deps/v8/src/objects/primitive-heap-object.h
new file mode 100644
index 0000000000..9bd13cafc9
--- /dev/null
+++ b/deps/v8/src/objects/primitive-heap-object.h
@@ -0,0 +1,33 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_PRIMITIVE_HEAP_OBJECT_H_
+#define V8_OBJECTS_PRIMITIVE_HEAP_OBJECT_H_
+
+#include "src/objects/heap-object.h"
+#include "torque-generated/class-definitions-tq.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+// An abstract superclass for classes representing JavaScript primitive values
+// other than Smi. It doesn't carry any functionality but allows primitive
+// classes to be identified in the type system.
+class PrimitiveHeapObject
+ : public TorqueGeneratedPrimitiveHeapObject<PrimitiveHeapObject,
+ HeapObject> {
+ public:
+ STATIC_ASSERT(kHeaderSize == HeapObject::kHeaderSize);
+ TQ_OBJECT_CONSTRUCTORS(PrimitiveHeapObject)
+};
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_PRIMITIVE_HEAP_OBJECT_H_
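PrimitiveHeapObject is a pure marker type: it adds no fields, only a compile-time classification. A stand-alone sketch of the same idiom with hypothetical stand-in types (not V8's real hierarchy):

#include <type_traits>

// Hypothetical stand-ins for the hierarchy; V8's real classes differ.
struct HeapObject { void* map; };
struct PrimitiveHeapObject : HeapObject {};  // marker: no new fields
struct String : PrimitiveHeapObject {};
struct JSObject : HeapObject {};

// Mirrors the STATIC_ASSERT(kHeaderSize == HeapObject::kHeaderSize) above:
// the marker must not change the object layout.
static_assert(sizeof(PrimitiveHeapObject) == sizeof(HeapObject), "");

template <typename T>
constexpr bool IsPrimitive = std::is_base_of<PrimitiveHeapObject, T>::value;

static_assert(IsPrimitive<String>, "");
static_assert(!IsPrimitive<JSObject>, "");

int main() {}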
diff --git a/deps/v8/src/objects/property-descriptor.cc b/deps/v8/src/objects/property-descriptor.cc
index b3b05deceb..c5cfe8c9a9 100644
--- a/deps/v8/src/objects/property-descriptor.cc
+++ b/deps/v8/src/objects/property-descriptor.cc
@@ -58,7 +58,7 @@ bool ToPropertyDescriptorFastPath(Isolate* isolate, Handle<JSReceiver> obj,
if (map.is_dictionary_map()) return false;
Handle<DescriptorArray> descs =
Handle<DescriptorArray>(map.instance_descriptors(), isolate);
- for (int i = 0; i < map.NumberOfOwnDescriptors(); i++) {
+ for (InternalIndex i : map.IterateOwnDescriptors()) {
PropertyDetails details = descs->GetDetails(i);
Name key = descs->GetKey(i);
Handle<Object> value;
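The loop change above swaps a raw int counter for V8's InternalIndex, a strongly typed descriptor index produced by IterateOwnDescriptors(). A simplified, compilable sketch of such a typed index with range-for support (the names are illustrative, not V8's actual InternalIndex API):

#include <cstdio>

// A strongly typed descriptor index: produced by iteration rather than raw
// integer arithmetic, so loops cannot silently mix index spaces.
class InternalIndex {
 public:
  explicit InternalIndex(int value) : value_(value) {}
  int as_int() const { return value_; }
  InternalIndex operator*() const { return *this; }
  bool operator!=(const InternalIndex& other) const {
    return value_ != other.value_;
  }
  InternalIndex& operator++() {
    ++value_;
    return *this;
  }

 private:
  int value_;
};

// Range object playing the role of map.IterateOwnDescriptors().
class DescriptorRange {
 public:
  explicit DescriptorRange(int count) : count_(count) {}
  InternalIndex begin() const { return InternalIndex(0); }
  InternalIndex end() const { return InternalIndex(count_); }

 private:
  int count_;
};

int main() {
  for (InternalIndex i : DescriptorRange(3)) {
    std::printf("descriptor %d\n", i.as_int());
  }
}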
diff --git a/deps/v8/src/objects/property-details.h b/deps/v8/src/objects/property-details.h
index e350fe2c27..51318f475a 100644
--- a/deps/v8/src/objects/property-details.h
+++ b/deps/v8/src/objects/property-details.h
@@ -112,7 +112,19 @@ class Representation {
// smi and tagged values. Doubles, however, would require a box allocation.
if (IsNone()) return !other.IsDouble();
if (!FLAG_modify_field_representation_inplace) return false;
- return (IsSmi() || IsHeapObject()) && other.IsTagged();
+ return (IsSmi() || (!FLAG_unbox_double_fields && IsDouble()) ||
+ IsHeapObject()) &&
+ other.IsTagged();
+ }
+
+ // Return the most generic representation that this representation can be
+ // changed to in-place. If in-place representation changes are disabled, then
+ // this will return the current representation.
+ Representation MostGenericInPlaceChange() const {
+ if (!FLAG_modify_field_representation_inplace) return *this;
+ // Everything but unboxed doubles can be in-place changed to Tagged.
+ if (FLAG_unbox_double_fields && IsDouble()) return Representation::Double();
+ return Representation::Tagged();
}
bool is_more_general_than(const Representation& other) const {
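The new in-place representation rules form a small decision table: with in-place changes enabled, everything generalizes to Tagged except doubles when double fields are unboxed, since raw double storage cannot be reinterpreted as a tagged slot. A runnable sketch of that table, using plain bools in place of the V8 flags:

#include <cassert>

enum class Repr { kNone, kSmi, kDouble, kHeapObject, kTagged };

bool modify_in_place = true;      // stands in for FLAG_modify_field_representation_inplace
bool unbox_double_fields = true;  // stands in for FLAG_unbox_double_fields

Repr MostGenericInPlaceChange(Repr r) {
  if (!modify_in_place) return r;
  // Unboxed doubles live in raw storage and cannot be reinterpreted as a
  // tagged slot, so they stay Double.
  if (unbox_double_fields && r == Repr::kDouble) return Repr::kDouble;
  return Repr::kTagged;
}

int main() {
  assert(MostGenericInPlaceChange(Repr::kSmi) == Repr::kTagged);
  assert(MostGenericInPlaceChange(Repr::kDouble) == Repr::kDouble);
  unbox_double_fields = false;  // boxed doubles are ordinary tagged values
  assert(MostGenericInPlaceChange(Repr::kDouble) == Repr::kTagged);
}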
diff --git a/deps/v8/src/objects/scope-info.cc b/deps/v8/src/objects/scope-info.cc
index c390298b5d..65a26e5d98 100644
--- a/deps/v8/src/objects/scope-info.cc
+++ b/deps/v8/src/objects/scope-info.cc
@@ -138,6 +138,10 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone, Scope* scope,
const bool has_brand = scope->is_class_scope()
? scope->AsClassScope()->brand() != nullptr
: false;
+ const bool should_save_class_variable_index =
+ scope->is_class_scope()
+ ? scope->AsClassScope()->should_save_class_variable_index()
+ : false;
const bool has_function_name = function_name_info != NONE;
const bool has_position_info = NeedsPositionInfo(scope->scope_type());
const bool has_receiver = receiver_info == STACK || receiver_info == CONTEXT;
@@ -146,7 +150,9 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone, Scope* scope,
? scope->AsDeclarationScope()->num_parameters()
: 0;
const bool has_outer_scope_info = !outer_scope.is_null();
+
const int length = kVariablePartIndex + 2 * context_local_count +
+ (should_save_class_variable_index ? 1 : 0) +
(has_receiver ? 1 : 0) +
(has_function_name ? kFunctionNameEntries : 0) +
(has_inferred_function_name ? 1 : 0) +
@@ -187,6 +193,8 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone, Scope* scope,
DeclarationScopeField::encode(scope->is_declaration_scope()) |
ReceiverVariableField::encode(receiver_info) |
HasClassBrandField::encode(has_brand) |
+ HasSavedClassVariableIndexField::encode(
+ should_save_class_variable_index) |
HasNewTargetField::encode(has_new_target) |
FunctionVariableField::encode(function_name_info) |
HasInferredFunctionNameField::encode(has_inferred_function_name) |
@@ -196,7 +204,9 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone, Scope* scope,
HasOuterScopeInfoField::encode(has_outer_scope_info) |
IsDebugEvaluateScopeField::encode(scope->is_debug_evaluate_scope()) |
ForceContextAllocationField::encode(
- scope->ForceContextForLanguageMode());
+ scope->ForceContextForLanguageMode()) |
+ PrivateNameLookupSkipsOuterClassField::encode(
+ scope->private_name_lookup_skips_outer_class());
scope_info.SetFlags(flags);
scope_info.SetParameterCount(parameter_count);
@@ -220,7 +230,8 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone, Scope* scope,
VariableModeField::encode(var->mode()) |
InitFlagField::encode(var->initialization_flag()) |
MaybeAssignedFlagField::encode(var->maybe_assigned()) |
- ParameterNumberField::encode(ParameterNumberField::kMax);
+ ParameterNumberField::encode(ParameterNumberField::kMax) |
+ IsStaticFlagField::encode(var->is_static_flag());
scope_info.set(context_local_base + local_index, *var->name(), mode);
scope_info.set(context_local_info_base + local_index,
Smi::FromInt(info));
@@ -235,7 +246,8 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone, Scope* scope,
VariableModeField::encode(var->mode()) |
InitFlagField::encode(var->initialization_flag()) |
MaybeAssignedFlagField::encode(var->maybe_assigned()) |
- ParameterNumberField::encode(ParameterNumberField::kMax);
+ ParameterNumberField::encode(ParameterNumberField::kMax) |
+ IsStaticFlagField::encode(var->is_static_flag());
scope_info.set(module_var_entry + kModuleVariablePropertiesOffset,
Smi::FromInt(properties));
module_var_entry += kModuleVariableEntryLength;
@@ -273,7 +285,8 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone, Scope* scope,
VariableModeField::encode(var->mode()) |
InitFlagField::encode(var->initialization_flag()) |
MaybeAssignedFlagField::encode(var->maybe_assigned()) |
- ParameterNumberField::encode(ParameterNumberField::kMax);
+ ParameterNumberField::encode(ParameterNumberField::kMax) |
+ IsStaticFlagField::encode(var->is_static_flag());
scope_info.set(context_local_base + local_index, *var->name(), mode);
scope_info.set(context_local_info_base + local_index,
Smi::FromInt(info));
@@ -283,6 +296,16 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone, Scope* scope,
index += 2 * context_local_count;
+ DCHECK_EQ(index, scope_info.SavedClassVariableInfoIndex());
+  // If the scope is a class scope and has used static private methods, save
+  // the context slot index of the class variable so the receiver of a
+  // static private method can be checked against it.
+ if (should_save_class_variable_index) {
+ Variable* class_variable = scope->AsClassScope()->class_variable();
+ DCHECK_EQ(class_variable->location(), VariableLocation::CONTEXT);
+ scope_info.set(index++, Smi::FromInt(class_variable->index()));
+ }
+
// If the receiver is allocated, add its index.
DCHECK_EQ(index, scope_info.ReceiverInfoIndex());
if (has_receiver) {
@@ -362,11 +385,14 @@ Handle<ScopeInfo> ScopeInfo::CreateForWithScope(
LanguageModeField::encode(LanguageMode::kSloppy) |
DeclarationScopeField::encode(false) |
ReceiverVariableField::encode(NONE) | HasClassBrandField::encode(false) |
+ HasSavedClassVariableIndexField::encode(false) |
HasNewTargetField::encode(false) | FunctionVariableField::encode(NONE) |
IsAsmModuleField::encode(false) | HasSimpleParametersField::encode(true) |
FunctionKindField::encode(kNormalFunction) |
HasOuterScopeInfoField::encode(has_outer_scope_info) |
- IsDebugEvaluateScopeField::encode(false);
+ IsDebugEvaluateScopeField::encode(false) |
+ ForceContextAllocationField::encode(false) |
+ PrivateNameLookupSkipsOuterClassField::encode(false);
scope_info->SetFlags(flags);
scope_info->SetParameterCount(0);
@@ -425,13 +451,17 @@ Handle<ScopeInfo> ScopeInfo::CreateForBootstrapping(Isolate* isolate,
LanguageModeField::encode(LanguageMode::kSloppy) |
DeclarationScopeField::encode(true) |
ReceiverVariableField::encode(is_empty_function ? UNUSED : CONTEXT) |
- HasClassBrandField::encode(false) | HasNewTargetField::encode(false) |
+ HasClassBrandField::encode(false) |
+ HasSavedClassVariableIndexField::encode(false) |
+ HasNewTargetField::encode(false) |
FunctionVariableField::encode(is_empty_function ? UNUSED : NONE) |
HasInferredFunctionNameField::encode(has_inferred_function_name) |
IsAsmModuleField::encode(false) | HasSimpleParametersField::encode(true) |
FunctionKindField::encode(FunctionKind::kNormalFunction) |
HasOuterScopeInfoField::encode(false) |
- IsDebugEvaluateScopeField::encode(false);
+ IsDebugEvaluateScopeField::encode(false) |
+ ForceContextAllocationField::encode(false) |
+ PrivateNameLookupSkipsOuterClassField::encode(false);
scope_info->SetFlags(flags);
scope_info->SetParameterCount(parameter_count);
scope_info->SetContextLocalCount(context_local_count);
@@ -449,7 +479,8 @@ Handle<ScopeInfo> ScopeInfo::CreateForBootstrapping(Isolate* isolate,
VariableModeField::encode(VariableMode::kConst) |
InitFlagField::encode(kCreatedInitialized) |
MaybeAssignedFlagField::encode(kNotAssigned) |
- ParameterNumberField::encode(ParameterNumberField::kMax);
+ ParameterNumberField::encode(ParameterNumberField::kMax) |
+ IsStaticFlagField::encode(IsStaticFlag::kNotStatic);
scope_info->set(index++, Smi::FromInt(value));
}
@@ -549,6 +580,10 @@ bool ScopeInfo::HasClassBrand() const {
return HasClassBrandField::decode(Flags());
}
+bool ScopeInfo::HasSavedClassVariableIndex() const {
+ return HasSavedClassVariableIndexField::decode(Flags());
+}
+
bool ScopeInfo::HasNewTarget() const {
return HasNewTargetField::decode(Flags());
}
@@ -608,6 +643,11 @@ void ScopeInfo::SetIsDebugEvaluateScope() {
}
}
+bool ScopeInfo::PrivateNameLookupSkipsOuterClass() const {
+ if (length() == 0) return false;
+ return PrivateNameLookupSkipsOuterClassField::decode(Flags());
+}
+
bool ScopeInfo::HasContext() const { return ContextLength() > 0; }
Object ScopeInfo::FunctionName() const {
@@ -674,6 +714,14 @@ VariableMode ScopeInfo::ContextLocalMode(int var) const {
return VariableModeField::decode(value);
}
+IsStaticFlag ScopeInfo::ContextLocalIsStaticFlag(int var) const {
+ DCHECK_LE(0, var);
+ DCHECK_LT(var, ContextLocalCount());
+ int info_index = ContextLocalInfosIndex() + var;
+ int value = Smi::ToInt(get(info_index));
+ return IsStaticFlagField::decode(value);
+}
+
InitializationFlag ScopeInfo::ContextLocalInitFlag(int var) const {
DCHECK_LE(0, var);
DCHECK_LT(var, ContextLocalCount());
@@ -744,7 +792,8 @@ int ScopeInfo::ModuleIndex(String name, VariableMode* mode,
int ScopeInfo::ContextSlotIndex(ScopeInfo scope_info, String name,
VariableMode* mode,
InitializationFlag* init_flag,
- MaybeAssignedFlag* maybe_assigned_flag) {
+ MaybeAssignedFlag* maybe_assigned_flag,
+ IsStaticFlag* is_static_flag) {
DisallowHeapAllocation no_gc;
DCHECK(name.IsInternalizedString());
DCHECK_NOT_NULL(mode);
@@ -759,6 +808,7 @@ int ScopeInfo::ContextSlotIndex(ScopeInfo scope_info, String name,
if (name != scope_info.get(i)) continue;
int var = i - start;
*mode = scope_info.ContextLocalMode(var);
+ *is_static_flag = scope_info.ContextLocalIsStaticFlag(var);
*init_flag = scope_info.ContextLocalInitFlag(var);
*maybe_assigned_flag = scope_info.ContextLocalMaybeAssignedFlag(var);
int result = Context::MIN_CONTEXT_SLOTS + var;
@@ -770,6 +820,14 @@ int ScopeInfo::ContextSlotIndex(ScopeInfo scope_info, String name,
return -1;
}
+int ScopeInfo::SavedClassVariableContextLocalIndex() const {
+ if (length() > 0 && HasSavedClassVariableIndexField::decode(Flags())) {
+ int index = Smi::ToInt(get(SavedClassVariableInfoIndex()));
+ return index - Context::MIN_CONTEXT_SLOTS;
+ }
+ return -1;
+}
+
int ScopeInfo::ReceiverContextSlotIndex() const {
if (length() > 0 && ReceiverVariableField::decode(Flags()) == CONTEXT) {
return Smi::ToInt(get(ReceiverInfoIndex()));
@@ -801,10 +859,14 @@ int ScopeInfo::ContextLocalInfosIndex() const {
return ContextLocalNamesIndex() + ContextLocalCount();
}
-int ScopeInfo::ReceiverInfoIndex() const {
+int ScopeInfo::SavedClassVariableInfoIndex() const {
return ContextLocalInfosIndex() + ContextLocalCount();
}
+int ScopeInfo::ReceiverInfoIndex() const {
+ return SavedClassVariableInfoIndex() + (HasSavedClassVariableIndex() ? 1 : 0);
+}
+
int ScopeInfo::FunctionNameInfoIndex() const {
return ReceiverInfoIndex() + (HasAllocatedReceiver() ? 1 : 0);
}
@@ -879,9 +941,10 @@ std::ostream& operator<<(std::ostream& os,
}
Handle<SourceTextModuleInfoEntry> SourceTextModuleInfoEntry::New(
- Isolate* isolate, Handle<HeapObject> export_name,
- Handle<HeapObject> local_name, Handle<HeapObject> import_name,
- int module_request, int cell_index, int beg_pos, int end_pos) {
+ Isolate* isolate, Handle<PrimitiveHeapObject> export_name,
+ Handle<PrimitiveHeapObject> local_name,
+ Handle<PrimitiveHeapObject> import_name, int module_request, int cell_index,
+ int beg_pos, int end_pos) {
Handle<SourceTextModuleInfoEntry> result =
Handle<SourceTextModuleInfoEntry>::cast(isolate->factory()->NewStruct(
SOURCE_TEXT_MODULE_INFO_ENTRY_TYPE, AllocationType::kOld));
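The repeated IsStaticFlagField::encode additions above extend a chain of bit fields packed into one per-variable Smi. A hand-rolled sketch of that packing with illustrative widths (V8 derives the shifts via BitField::Next<>; the exact widths here are assumptions):

#include <cassert>
#include <cstdint>

// Illustrative widths for mode/init/maybe-assigned/parameter/is-static.
constexpr uint32_t kModeBits = 4, kInitBits = 1, kMaybeAssignedBits = 1,
                   kParamBits = 16, kStaticBits = 1;
constexpr uint32_t kModeShift = 0;
constexpr uint32_t kInitShift = kModeShift + kModeBits;
constexpr uint32_t kMaybeAssignedShift = kInitShift + kInitBits;
constexpr uint32_t kParamShift = kMaybeAssignedShift + kMaybeAssignedBits;
constexpr uint32_t kStaticShift = kParamShift + kParamBits;

constexpr uint32_t Encode(uint32_t mode, uint32_t init, uint32_t maybe,
                          uint32_t param, uint32_t is_static) {
  return (mode << kModeShift) | (init << kInitShift) |
         (maybe << kMaybeAssignedShift) | (param << kParamShift) |
         (is_static << kStaticShift);
}

constexpr uint32_t DecodeStatic(uint32_t info) {
  return (info >> kStaticShift) & ((1u << kStaticBits) - 1);
}

int main() {
  uint32_t info = Encode(/*mode=*/3, /*init=*/1, /*maybe=*/0,
                         /*param=*/0xFFFF, /*is_static=*/1);
  assert(DecodeStatic(info) == 1);
}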
diff --git a/deps/v8/src/objects/scope-info.h b/deps/v8/src/objects/scope-info.h
index 123b9b1797..aa63cf2998 100644
--- a/deps/v8/src/objects/scope-info.h
+++ b/deps/v8/src/objects/scope-info.h
@@ -72,6 +72,10 @@ class ScopeInfo : public FixedArray {
  // Does this scope have a class brand (for private methods)?
bool HasClassBrand() const;
+  // Does this scope contain a saved class variable context local slot index
+  // for checking receivers of static private methods?
+ bool HasSavedClassVariableIndex() const;
+
// Does this scope declare a "new.target" binding?
bool HasNewTarget() const;
@@ -121,6 +125,9 @@ class ScopeInfo : public FixedArray {
// Return the mode of the given context local.
VariableMode ContextLocalMode(int var) const;
+ // Return whether the given context local variable is static.
+ IsStaticFlag ContextLocalIsStaticFlag(int var) const;
+
// Return the initialization flag of the given context local.
InitializationFlag ContextLocalInitFlag(int var) const;
@@ -141,7 +148,8 @@ class ScopeInfo : public FixedArray {
// mode for that variable.
static int ContextSlotIndex(ScopeInfo scope_info, String name,
VariableMode* mode, InitializationFlag* init_flag,
- MaybeAssignedFlag* maybe_assigned_flag);
+ MaybeAssignedFlag* maybe_assigned_flag,
+ IsStaticFlag* is_static_flag);
// Lookup metadata of a MODULE-allocated variable. Return 0 if there is no
// module variable with the given name (the index value of a MODULE variable
@@ -161,6 +169,12 @@ class ScopeInfo : public FixedArray {
// context-allocated. Otherwise returns a value < 0.
int ReceiverContextSlotIndex() const;
+ // Lookup support for serialized scope info. Returns the index of the
+  // saved class variable in context local slots if the scope is a class
+  // scope and it contains static private methods that may be accessed.
+ // Otherwise returns a value < 0.
+ int SavedClassVariableContextLocalIndex() const;
+
FunctionKind function_kind() const;
  // Returns true if this ScopeInfo is linked to an outer ScopeInfo.
@@ -176,6 +190,10 @@ class ScopeInfo : public FixedArray {
// Return the outer ScopeInfo if present.
ScopeInfo OuterScopeInfo() const;
+ // Returns true if this ScopeInfo was created for a scope that skips the
+ // closest outer class when resolving private names.
+ bool PrivateNameLookupSkipsOuterClass() const;
+
#ifdef DEBUG
bool Equals(ScopeInfo other) const;
#endif
@@ -228,7 +246,8 @@ class ScopeInfo : public FixedArray {
using ReceiverVariableField =
DeclarationScopeField::Next<VariableAllocationInfo, 2>;
using HasClassBrandField = ReceiverVariableField::Next<bool, 1>;
- using HasNewTargetField = HasClassBrandField::Next<bool, 1>;
+ using HasSavedClassVariableIndexField = HasClassBrandField::Next<bool, 1>;
+ using HasNewTargetField = HasSavedClassVariableIndexField::Next<bool, 1>;
using FunctionVariableField =
HasNewTargetField::Next<VariableAllocationInfo, 2>;
// TODO(cbruni): Combine with function variable field when only storing the
@@ -240,6 +259,8 @@ class ScopeInfo : public FixedArray {
using HasOuterScopeInfoField = FunctionKindField::Next<bool, 1>;
using IsDebugEvaluateScopeField = HasOuterScopeInfoField::Next<bool, 1>;
using ForceContextAllocationField = IsDebugEvaluateScopeField::Next<bool, 1>;
+ using PrivateNameLookupSkipsOuterClassField =
+ ForceContextAllocationField::Next<bool, 1>;
STATIC_ASSERT(kLastFunctionKind <= FunctionKindField::kMax);
@@ -256,27 +277,32 @@ class ScopeInfo : public FixedArray {
// the context locals in ContextLocalNames. One slot is used per
// context local, so in total this part occupies ContextLocalCount()
// slots in the array.
- // 3. ReceiverInfo:
+ // 3. SavedClassVariableInfo:
+ // If the scope is a class scope and it has static private methods that
+ // may be accessed directly or through eval, one slot is reserved to hold
+ // the context slot index for the class variable.
+ // 4. ReceiverInfo:
// If the scope binds a "this" value, one slot is reserved to hold the
// context or stack slot index for the variable.
- // 4. FunctionNameInfo:
+ // 5. FunctionNameInfo:
// If the scope belongs to a named function expression this part contains
// information about the function variable. It always occupies two array
// slots: a. The name of the function variable.
// b. The context or stack slot index for the variable.
- // 5. InferredFunctionName:
+ // 6. InferredFunctionName:
// Contains the function's inferred name.
- // 6. SourcePosition:
+ // 7. SourcePosition:
// Contains two slots with a) the startPosition and b) the endPosition if
// the scope belongs to a function or script.
- // 7. OuterScopeInfoIndex:
+ // 8. OuterScopeInfoIndex:
// The outer scope's ScopeInfo or the hole if there's none.
- // 8. SourceTextModuleInfo, ModuleVariableCount, and ModuleVariables:
+ // 9. SourceTextModuleInfo, ModuleVariableCount, and ModuleVariables:
// For a module scope, this part contains the SourceTextModuleInfo, the
// number of MODULE-allocated variables, and the metadata of those
// variables. For non-module scopes it is empty.
int ContextLocalNamesIndex() const;
int ContextLocalInfosIndex() const;
+ int SavedClassVariableInfoIndex() const;
int ReceiverInfoIndex() const;
int FunctionNameInfoIndex() const;
int InferredFunctionNameIndex() const;
@@ -310,6 +336,7 @@ class ScopeInfo : public FixedArray {
using InitFlagField = VariableModeField::Next<InitializationFlag, 1>;
using MaybeAssignedFlagField = InitFlagField::Next<MaybeAssignedFlag, 1>;
using ParameterNumberField = MaybeAssignedFlagField::Next<uint32_t, 16>;
+ using IsStaticFlagField = ParameterNumberField::Next<IsStaticFlag, 1>;
friend class ScopeIterator;
friend std::ostream& operator<<(std::ostream& os,
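The index accessors form a cumulative layout: each section of the variable part starts where the previous one ends, so the new optional SavedClassVariableInfo slot shifts everything after it by one. A simplified model of that arithmetic (free functions over a plain struct, not V8's actual layout code):

#include <cassert>

struct ScopeInfoShape {
  int context_local_count;
  bool has_saved_class_variable_index;
};

constexpr int kVariablePartIndex = 0;  // simplified; V8 starts after a header

int ContextLocalNamesIndex(const ScopeInfoShape&) { return kVariablePartIndex; }
int ContextLocalInfosIndex(const ScopeInfoShape& s) {
  return ContextLocalNamesIndex(s) + s.context_local_count;
}
int SavedClassVariableInfoIndex(const ScopeInfoShape& s) {
  return ContextLocalInfosIndex(s) + s.context_local_count;
}
int ReceiverInfoIndex(const ScopeInfoShape& s) {
  // The optional saved-class-variable slot shifts later sections by one.
  return SavedClassVariableInfoIndex(s) +
         (s.has_saved_class_variable_index ? 1 : 0);
}

int main() {
  ScopeInfoShape s{/*context_local_count=*/2,
                   /*has_saved_class_variable_index=*/true};
  // names: slots 0-1, infos: slots 2-3, saved class variable: slot 4.
  assert(SavedClassVariableInfoIndex(s) == 4);
  assert(ReceiverInfoIndex(s) == 5);
}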
diff --git a/deps/v8/src/objects/script-inl.h b/deps/v8/src/objects/script-inl.h
index 07450c73ec..c306c2c092 100644
--- a/deps/v8/src/objects/script-inl.h
+++ b/deps/v8/src/objects/script-inl.h
@@ -36,15 +36,17 @@ ACCESSORS_CHECKED(Script, eval_from_shared_or_wrapped_arguments, Object,
this->type() != TYPE_WASM)
SMI_ACCESSORS_CHECKED(Script, eval_from_position, kEvalFromPositionOffset,
this->type() != TYPE_WASM)
-ACCESSORS(Script, shared_function_infos, WeakFixedArray,
- kSharedFunctionInfosOffset)
SMI_ACCESSORS(Script, flags, kFlagsOffset)
ACCESSORS(Script, source_url, Object, kSourceUrlOffset)
ACCESSORS(Script, source_mapping_url, Object, kSourceMappingUrlOffset)
ACCESSORS(Script, host_defined_options, FixedArray, kHostDefinedOptionsOffset)
-ACCESSORS_CHECKED(Script, wasm_module_object, Object,
+ACCESSORS_CHECKED(Script, wasm_breakpoint_infos, FixedArray,
kEvalFromSharedOrWrappedArgumentsOffset,
this->type() == TYPE_WASM)
+ACCESSORS_CHECKED(Script, wasm_managed_native_module, Object,
+ kEvalFromPositionOffset, this->type() == TYPE_WASM)
+ACCESSORS_CHECKED(Script, wasm_weak_instance_list, WeakArrayList,
+ kSharedFunctionInfosOffset, this->type() == TYPE_WASM)
bool Script::is_wrapped() const {
return eval_from_shared_or_wrapped_arguments().IsFixedArray();
@@ -75,6 +77,28 @@ FixedArray Script::wrapped_arguments() const {
return FixedArray::cast(eval_from_shared_or_wrapped_arguments());
}
+DEF_GETTER(Script, shared_function_infos, WeakFixedArray) {
+ return type() == TYPE_WASM
+ ? ReadOnlyRoots(GetHeap()).empty_weak_fixed_array()
+ : TaggedField<WeakFixedArray, kSharedFunctionInfosOffset>::load(
+ *this);
+}
+
+void Script::set_shared_function_infos(WeakFixedArray value,
+ WriteBarrierMode mode) {
+ DCHECK_NE(TYPE_WASM, type());
+ TaggedField<WeakFixedArray, kSharedFunctionInfosOffset>::store(*this, value);
+ CONDITIONAL_WRITE_BARRIER(*this, kSharedFunctionInfosOffset, value, mode);
+}
+
+bool Script::has_wasm_breakpoint_infos() const {
+ return type() == TYPE_WASM && wasm_breakpoint_infos().length() > 0;
+}
+
+wasm::NativeModule* Script::wasm_native_module() const {
+ return Managed<wasm::NativeModule>::cast(wasm_managed_native_module()).raw();
+}
+
Script::CompilationType Script::compilation_type() {
return BooleanBit::get(flags(), kCompilationTypeBit) ? COMPILATION_TYPE_EVAL
: COMPILATION_TYPE_HOST;
diff --git a/deps/v8/src/objects/script.h b/deps/v8/src/objects/script.h
index 2d9e4bca78..935241a149 100644
--- a/deps/v8/src/objects/script.h
+++ b/deps/v8/src/objects/script.h
@@ -5,6 +5,8 @@
#ifndef V8_OBJECTS_SCRIPT_H_
#define V8_OBJECTS_SCRIPT_H_
+#include <memory>
+
#include "src/objects/fixed-array.h"
#include "src/objects/objects.h"
#include "src/objects/struct.h"
@@ -101,9 +103,21 @@ class Script : public Struct {
// [source_mapping_url]: sourceMappingURL magic comment
DECL_ACCESSORS(source_mapping_url, Object)
- // [wasm_module_object]: the wasm module object this script belongs to.
+ // [wasm_breakpoint_infos]: the list of {BreakPointInfo} objects describing
+ // all WebAssembly breakpoints for modules/instances managed via this script.
+ // This must only be called if the type of this script is TYPE_WASM.
+ DECL_ACCESSORS(wasm_breakpoint_infos, FixedArray)
+ inline bool has_wasm_breakpoint_infos() const;
+
+ // [wasm_native_module]: the wasm {NativeModule} this script belongs to.
+ // This must only be called if the type of this script is TYPE_WASM.
+ DECL_ACCESSORS(wasm_managed_native_module, Object)
+ inline wasm::NativeModule* wasm_native_module() const;
+
+  // [wasm_weak_instance_list]: the list of all {WasmInstanceObject}s that are
+  // affected by breakpoints managed via this script.
// This must only be called if the type of this script is TYPE_WASM.
- DECL_ACCESSORS(wasm_module_object, Object)
+ DECL_ACCESSORS(wasm_weak_instance_list, WeakArrayList)
// [host_defined_options]: Options defined by the embedder.
DECL_ACCESSORS(host_defined_options, FixedArray)
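The wasm accessors above reuse the physical slots of JS-only fields, guarded by the script type (ACCESSORS_CHECKED). A portable sketch of the same idea using std::variant instead of raw offset reuse; the field names here are stand-ins:

#include <cassert>
#include <variant>

enum class ScriptType { kJS, kWasm };

struct JSFields { int eval_from_position; };
struct WasmFields { int breakpoint_info_count; };

struct Script {
  ScriptType type;
  std::variant<JSFields, WasmFields> payload;

  int eval_from_position() const {
    assert(type == ScriptType::kJS);  // plays the role of ACCESSORS_CHECKED
    return std::get<JSFields>(payload).eval_from_position;
  }
  int wasm_breakpoint_info_count() const {
    assert(type == ScriptType::kWasm);
    return std::get<WasmFields>(payload).breakpoint_info_count;
  }
};

int main() {
  Script wasm_script{ScriptType::kWasm, WasmFields{3}};
  assert(wasm_script.wasm_breakpoint_info_count() == 3);
}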
diff --git a/deps/v8/src/objects/shared-function-info-inl.h b/deps/v8/src/objects/shared-function-info-inl.h
index 6023c3b828..4f12bc4324 100644
--- a/deps/v8/src/objects/shared-function-info-inl.h
+++ b/deps/v8/src/objects/shared-function-info-inl.h
@@ -21,11 +21,7 @@
namespace v8 {
namespace internal {
-OBJECT_CONSTRUCTORS_IMPL(PreparseData, HeapObject)
-
-CAST_ACCESSOR(PreparseData)
-INT_ACCESSORS(PreparseData, data_length, kDataLengthOffset)
-INT_ACCESSORS(PreparseData, children_length, kInnerLengthOffset)
+TQ_OBJECT_CONSTRUCTORS_IMPL(PreparseData)
int PreparseData::inner_start_offset() const {
return InnerOffset(data_length());
@@ -84,26 +80,9 @@ void PreparseData::set_child(int index, PreparseData value,
CONDITIONAL_WRITE_BARRIER(*this, offset, value, mode);
}
-OBJECT_CONSTRUCTORS_IMPL(UncompiledData, HeapObject)
-OBJECT_CONSTRUCTORS_IMPL(UncompiledDataWithoutPreparseData, UncompiledData)
-OBJECT_CONSTRUCTORS_IMPL(UncompiledDataWithPreparseData, UncompiledData)
-CAST_ACCESSOR(UncompiledData)
-ACCESSORS(UncompiledData, inferred_name, String, kInferredNameOffset)
-INT32_ACCESSORS(UncompiledData, start_position, kStartPositionOffset)
-INT32_ACCESSORS(UncompiledData, end_position, kEndPositionOffset)
-
-void UncompiledData::clear_padding() {
- if (FIELD_SIZE(kOptionalPaddingOffset) == 0) return;
- DCHECK_EQ(4, FIELD_SIZE(kOptionalPaddingOffset));
- memset(reinterpret_cast<void*>(address() + kOptionalPaddingOffset), 0,
- FIELD_SIZE(kOptionalPaddingOffset));
-}
-
-CAST_ACCESSOR(UncompiledDataWithoutPreparseData)
-
-CAST_ACCESSOR(UncompiledDataWithPreparseData)
-ACCESSORS(UncompiledDataWithPreparseData, preparse_data, PreparseData,
- kPreparseDataOffset)
+TQ_OBJECT_CONSTRUCTORS_IMPL(UncompiledData)
+TQ_OBJECT_CONSTRUCTORS_IMPL(UncompiledDataWithoutPreparseData)
+TQ_OBJECT_CONSTRUCTORS_IMPL(UncompiledDataWithPreparseData)
DEF_GETTER(HeapObject, IsUncompiledData, bool) {
return IsUncompiledDataWithoutPreparseData(isolate) ||
@@ -124,7 +103,7 @@ DEFINE_DEOPT_ELEMENT_ACCESSORS(SharedFunctionInfo, Object)
ACCESSORS(SharedFunctionInfo, name_or_scope_info, Object,
kNameOrScopeInfoOffset)
-ACCESSORS(SharedFunctionInfo, script_or_debug_info, Object,
+ACCESSORS(SharedFunctionInfo, script_or_debug_info, HeapObject,
kScriptOrDebugInfoOffset)
INT32_ACCESSORS(SharedFunctionInfo, function_literal_id,
@@ -229,6 +208,9 @@ BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags,
BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags,
is_safe_to_skip_arguments_adaptor,
SharedFunctionInfo::IsSafeToSkipArgumentsAdaptorBit)
+BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags,
+ private_name_lookup_skips_outer_class,
+ SharedFunctionInfo::PrivateNameLookupSkipsOuterClassBit)
bool SharedFunctionInfo::optimization_disabled() const {
return disable_optimization_reason() != BailoutReason::kNoReason;
@@ -613,12 +595,11 @@ void SharedFunctionInfo::ClearPreparseData() {
Heap* heap = GetHeapFromWritableObject(data);
// Swap the map.
- heap->NotifyObjectLayoutChange(data, UncompiledDataWithPreparseData::kSize,
- no_gc);
+ heap->NotifyObjectLayoutChange(data, no_gc);
STATIC_ASSERT(UncompiledDataWithoutPreparseData::kSize <
UncompiledDataWithPreparseData::kSize);
STATIC_ASSERT(UncompiledDataWithoutPreparseData::kSize ==
- UncompiledData::kSize);
+ UncompiledData::kHeaderSize);
data.synchronized_set_map(
GetReadOnlyRoots().uncompiled_data_without_preparse_data_map());
@@ -644,7 +625,6 @@ void UncompiledData::Initialize(
data, data.RawField(UncompiledData::kInferredNameOffset), inferred_name);
data.set_start_position(start_position);
data.set_end_position(end_position);
- data.clear_padding();
}
void UncompiledDataWithPreparseData::Initialize(
@@ -672,16 +652,16 @@ bool SharedFunctionInfo::HasWasmCapiFunctionData() const {
return function_data().IsWasmCapiFunctionData();
}
-Object SharedFunctionInfo::script() const {
- Object maybe_script = script_or_debug_info();
+HeapObject SharedFunctionInfo::script() const {
+ HeapObject maybe_script = script_or_debug_info();
if (maybe_script.IsDebugInfo()) {
return DebugInfo::cast(maybe_script).script();
}
return maybe_script;
}
-void SharedFunctionInfo::set_script(Object script) {
- Object maybe_debug_info = script_or_debug_info();
+void SharedFunctionInfo::set_script(HeapObject script) {
+ HeapObject maybe_debug_info = script_or_debug_info();
if (maybe_debug_info.IsDebugInfo()) {
DebugInfo::cast(maybe_debug_info).set_script(script);
} else {
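script_or_debug_info is a single field that holds either the Script itself or a DebugInfo wrapping it, and script()/set_script() unwrap transparently. A pointer-based model of that pattern (types are simplified stand-ins, not V8's):

#include <cassert>

struct Script {};
struct DebugInfo { Script* script; };

struct SharedFunctionInfo {
  // Models the single script_or_debug_info field: at most one is set.
  Script* raw_script = nullptr;
  DebugInfo* debug_info = nullptr;

  Script* script() const {
    // If debugging attached a DebugInfo, unwrap it to reach the Script.
    return debug_info != nullptr ? debug_info->script : raw_script;
  }
  void set_script(Script* s) {
    if (debug_info != nullptr) {
      debug_info->script = s;  // keep the wrapper pointing at the new script
    } else {
      raw_script = s;
    }
  }
};

int main() {
  Script a, b;
  DebugInfo di{&a};
  SharedFunctionInfo sfi;
  sfi.debug_info = &di;
  assert(sfi.script() == &a);
  sfi.set_script(&b);
  assert(sfi.script() == &b);
}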
diff --git a/deps/v8/src/objects/shared-function-info.h b/deps/v8/src/objects/shared-function-info.h
index dc84653ede..9c57d36697 100644
--- a/deps/v8/src/objects/shared-function-info.h
+++ b/deps/v8/src/objects/shared-function-info.h
@@ -5,6 +5,8 @@
#ifndef V8_OBJECTS_SHARED_FUNCTION_INFO_H_
#define V8_OBJECTS_SHARED_FUNCTION_INFO_H_
+#include <memory>
+
#include "src/codegen/bailout-reason.h"
#include "src/objects/compressed-slots.h"
#include "src/objects/function-kind.h"
@@ -55,11 +57,9 @@ class WasmJSFunctionData;
// +-------------------------------+
// | Inner PreparseData N |
// +-------------------------------+
-class PreparseData : public HeapObject {
+class PreparseData
+ : public TorqueGeneratedPreparseData<PreparseData, HeapObject> {
public:
- DECL_INT_ACCESSORS(data_length)
- DECL_INT_ACCESSORS(children_length)
-
inline int inner_start_offset() const;
inline ObjectSlot inner_data_start() const;
@@ -74,12 +74,9 @@ class PreparseData : public HeapObject {
// Clear uninitialized padding space.
inline void clear_padding();
- DECL_CAST(PreparseData)
DECL_PRINTER(PreparseData)
DECL_VERIFIER(PreparseData)
- DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
- TORQUE_GENERATED_PREPARSE_DATA_FIELDS)
static const int kDataStartOffset = kSize;
class BodyDescriptor;
@@ -92,7 +89,7 @@ class PreparseData : public HeapObject {
return InnerOffset(data_length) + children_length * kTaggedSize;
}
- OBJECT_CONSTRUCTORS(PreparseData, HeapObject);
+ TQ_OBJECT_CONSTRUCTORS(PreparseData)
private:
inline Object get_child_raw(int index) const;
@@ -100,14 +97,9 @@ class PreparseData : public HeapObject {
// Abstract class representing extra data for an uncompiled function, which is
// not stored in the SharedFunctionInfo.
-class UncompiledData : public HeapObject {
+class UncompiledData
+ : public TorqueGeneratedUncompiledData<UncompiledData, HeapObject> {
public:
- DECL_ACCESSORS(inferred_name, String)
- DECL_INT32_ACCESSORS(start_position)
- DECL_INT32_ACCESSORS(end_position)
-
- DECL_CAST(UncompiledData)
-
inline static void Initialize(
UncompiledData data, String inferred_name, int start_position,
int end_position,
@@ -115,56 +107,35 @@ class UncompiledData : public HeapObject {
gc_notify_updated_slot =
[](HeapObject object, ObjectSlot slot, HeapObject target) {});
- // Layout description.
-#define UNCOMPILED_DATA_FIELDS(V) \
- V(kStartOfStrongFieldsOffset, 0) \
- V(kInferredNameOffset, kTaggedSize) \
- V(kEndOfStrongFieldsOffset, 0) \
- /* Raw data fields. */ \
- V(kStartPositionOffset, kInt32Size) \
- V(kEndPositionOffset, kInt32Size) \
- V(kOptionalPaddingOffset, POINTER_SIZE_PADDING(kOptionalPaddingOffset)) \
- /* Header size. */ \
- V(kSize, 0)
-
- DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, UNCOMPILED_DATA_FIELDS)
-#undef UNCOMPILED_DATA_FIELDS
-
- using BodyDescriptor = FixedBodyDescriptor<kStartOfStrongFieldsOffset,
- kEndOfStrongFieldsOffset, kSize>;
-
- // Clear uninitialized padding space.
- inline void clear_padding();
+ using BodyDescriptor =
+ FixedBodyDescriptor<kStartOfStrongFieldsOffset, kEndOfStrongFieldsOffset,
+ kHeaderSize>;
- OBJECT_CONSTRUCTORS(UncompiledData, HeapObject);
+ TQ_OBJECT_CONSTRUCTORS(UncompiledData)
};
// Class representing data for an uncompiled function that does not have any
// data from the pre-parser, either because it's a leaf function or because the
// pre-parser bailed out.
-class UncompiledDataWithoutPreparseData : public UncompiledData {
+class UncompiledDataWithoutPreparseData
+ : public TorqueGeneratedUncompiledDataWithoutPreparseData<
+ UncompiledDataWithoutPreparseData, UncompiledData> {
public:
- DECL_CAST(UncompiledDataWithoutPreparseData)
DECL_PRINTER(UncompiledDataWithoutPreparseData)
- DECL_VERIFIER(UncompiledDataWithoutPreparseData)
-
- static const int kSize = UncompiledData::kSize;
// No extra fields compared to UncompiledData.
using BodyDescriptor = UncompiledData::BodyDescriptor;
- OBJECT_CONSTRUCTORS(UncompiledDataWithoutPreparseData, UncompiledData);
+ TQ_OBJECT_CONSTRUCTORS(UncompiledDataWithoutPreparseData)
};
// Class representing data for an uncompiled function that has pre-parsed scope
// data.
-class UncompiledDataWithPreparseData : public UncompiledData {
+class UncompiledDataWithPreparseData
+ : public TorqueGeneratedUncompiledDataWithPreparseData<
+ UncompiledDataWithPreparseData, UncompiledData> {
public:
- DECL_ACCESSORS(preparse_data, PreparseData)
-
- DECL_CAST(UncompiledDataWithPreparseData)
DECL_PRINTER(UncompiledDataWithPreparseData)
- DECL_VERIFIER(UncompiledDataWithPreparseData)
inline static void Initialize(
UncompiledDataWithPreparseData data, String inferred_name,
@@ -173,28 +144,12 @@ class UncompiledDataWithPreparseData : public UncompiledData {
gc_notify_updated_slot =
[](HeapObject object, ObjectSlot slot, HeapObject target) {});
- // Layout description.
-
-#define UNCOMPILED_DATA_WITH_PREPARSE_DATA_FIELDS(V) \
- V(kStartOfStrongFieldsOffset, 0) \
- V(kPreparseDataOffset, kTaggedSize) \
- V(kEndOfStrongFieldsOffset, 0) \
- /* Total size. */ \
- V(kSize, 0)
-
- DEFINE_FIELD_OFFSET_CONSTANTS(UncompiledData::kSize,
- UNCOMPILED_DATA_WITH_PREPARSE_DATA_FIELDS)
-#undef UNCOMPILED_DATA_WITH_PREPARSE_DATA_FIELDS
-
- // Make sure the size is aligned
- STATIC_ASSERT(IsAligned(kSize, kTaggedSize));
-
using BodyDescriptor = SubclassBodyDescriptor<
UncompiledData::BodyDescriptor,
FixedBodyDescriptor<kStartOfStrongFieldsOffset, kEndOfStrongFieldsOffset,
kSize>>;
- OBJECT_CONSTRUCTORS(UncompiledDataWithPreparseData, UncompiledData);
+ TQ_OBJECT_CONSTRUCTORS(UncompiledDataWithPreparseData)
};
class InterpreterData : public Struct {
@@ -242,7 +197,7 @@ class SharedFunctionInfo : public HeapObject {
// Set up the link between shared function info and the script. The shared
// function info is added to the list on the script.
V8_EXPORT_PRIVATE static void SetScript(
- Handle<SharedFunctionInfo> shared, Handle<Object> script_object,
+ Handle<SharedFunctionInfo> shared, Handle<HeapObject> script_object,
int function_literal_id, bool reset_preparsed_scope_data = true);
// Layout description of the optimized code map.
@@ -408,10 +363,10 @@ class SharedFunctionInfo : public HeapObject {
// [script_or_debug_info]: One of:
// - Script from which the function originates.
// - a DebugInfo which holds the actual script [HasDebugInfo()].
- DECL_ACCESSORS(script_or_debug_info, Object)
+ DECL_ACCESSORS(script_or_debug_info, HeapObject)
- inline Object script() const;
- inline void set_script(Object script);
+ inline HeapObject script() const;
+ inline void set_script(HeapObject script);
// The function is subject to debugging if a debug info is attached.
inline bool HasDebugInfo() const;
@@ -490,6 +445,10 @@ class SharedFunctionInfo : public HeapObject {
// Indicates that the function has been reported for binary code coverage.
DECL_BOOLEAN_ACCESSORS(has_reported_binary_coverage)
+  // Indicates that private name lookups inside the function skip the
+  // closest outer class scope.
+ DECL_BOOLEAN_ACCESSORS(private_name_lookup_skips_outer_class)
+
inline FunctionKind kind() const;
// Defines the index in a native context of closure's map instantiated using
@@ -640,21 +599,6 @@ class SharedFunctionInfo : public HeapObject {
DISALLOW_COPY_AND_ASSIGN(ScriptIterator);
};
- // Iterate over all shared function infos on the heap.
- class GlobalIterator {
- public:
- V8_EXPORT_PRIVATE explicit GlobalIterator(Isolate* isolate);
- V8_EXPORT_PRIVATE SharedFunctionInfo Next();
-
- private:
- Isolate* isolate_;
- Script::Iterator script_iterator_;
- WeakArrayList::Iterator noscript_sfi_iterator_;
- SharedFunctionInfo::ScriptIterator sfi_iterator_;
- DISALLOW_HEAP_ALLOCATION(no_gc_)
- DISALLOW_COPY_AND_ASSIGN(GlobalIterator);
- };
-
DECL_CAST(SharedFunctionInfo)
// Constants.
@@ -691,7 +635,8 @@ class SharedFunctionInfo : public HeapObject {
V(HasReportedBinaryCoverageBit, bool, 1, _) \
V(IsTopLevelBit, bool, 1, _) \
V(IsOneshotIIFEOrPropertiesAreFinalBit, bool, 1, _) \
- V(IsSafeToSkipArgumentsAdaptorBit, bool, 1, _)
+ V(IsSafeToSkipArgumentsAdaptorBit, bool, 1, _) \
+ V(PrivateNameLookupSkipsOuterClassBit, bool, 1, _)
DEFINE_BIT_FIELDS(FLAGS_BIT_FIELDS)
#undef FLAGS_BIT_FIELDS
diff --git a/deps/v8/src/objects/slots-inl.h b/deps/v8/src/objects/slots-inl.h
index b240729114..7e692b7948 100644
--- a/deps/v8/src/objects/slots-inl.h
+++ b/deps/v8/src/objects/slots-inl.h
@@ -119,7 +119,7 @@ inline void MemsetTagged(ObjectSlot start, Object value, size_t counter) {
#ifdef V8_COMPRESS_POINTERS
Tagged_t raw_value = CompressTagged(value.ptr());
STATIC_ASSERT(kTaggedSize == kInt32Size);
- MemsetInt32(start.location(), raw_value, counter);
+ MemsetInt32(reinterpret_cast<int32_t*>(start.location()), raw_value, counter);
#else
Address raw_value = value.ptr();
MemsetPointer(start.location(), raw_value, counter);
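Under pointer compression a tagged slot is 32 bits wide, so MemsetTagged can compress the value once and fill the slot array with the raw 32-bit pattern. A minimal sketch of that fast path, assuming compression simply keeps the low 32 bits:

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

using Address = uint64_t;
using Tagged_t = uint32_t;

// Assumption: compression keeps the low 32 bits (the offset inside the
// pointer-compression cage).
Tagged_t CompressTagged(Address value) { return static_cast<Tagged_t>(value); }

void MemsetInt32(int32_t* dest, int32_t value, size_t count) {
  for (size_t i = 0; i < count; ++i) dest[i] = value;
}

int main() {
  std::vector<Tagged_t> slots(8);
  Address tagged_value = 0x0000123400005678ull;  // made-up tagged pointer
  MemsetInt32(reinterpret_cast<int32_t*>(slots.data()),
              static_cast<int32_t>(CompressTagged(tagged_value)),
              slots.size());
  assert(slots[0] == 0x00005678u);
}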
diff --git a/deps/v8/src/objects/source-text-module.cc b/deps/v8/src/objects/source-text-module.cc
index f17c59de1a..2959e1b854 100644
--- a/deps/v8/src/objects/source-text-module.cc
+++ b/deps/v8/src/objects/source-text-module.cc
@@ -78,8 +78,6 @@ class Module::ResolveSet
SharedFunctionInfo SourceTextModule::GetSharedFunctionInfo() const {
DisallowHeapAllocation no_alloc;
- DCHECK_NE(status(), Module::kEvaluating);
- DCHECK_NE(status(), Module::kEvaluated);
switch (status()) {
case kUninstantiated:
case kPreInstantiating:
@@ -89,10 +87,10 @@ SharedFunctionInfo SourceTextModule::GetSharedFunctionInfo() const {
DCHECK(code().IsJSFunction());
return JSFunction::cast(code()).shared();
case kInstantiated:
- DCHECK(code().IsJSGeneratorObject());
- return JSGeneratorObject::cast(code()).function().shared();
case kEvaluating:
case kEvaluated:
+ DCHECK(code().IsJSGeneratorObject());
+ return JSGeneratorObject::cast(code()).function().shared();
case kErrored:
UNREACHABLE();
}
@@ -580,58 +578,518 @@ Handle<JSModuleNamespace> SourceTextModule::GetModuleNamespace(
return Module::GetModuleNamespace(isolate, requested_module);
}
+MaybeHandle<Object> SourceTextModule::EvaluateMaybeAsync(
+ Isolate* isolate, Handle<SourceTextModule> module) {
+ // In the event of errored evaluation, return a rejected promise.
+ if (module->status() == kErrored) {
+ // If we have a top level capability we assume it has already been
+ // rejected, and return it here. Otherwise create a new promise and
+ // reject it with the module's exception.
+ if (module->top_level_capability().IsJSPromise()) {
+ Handle<JSPromise> top_level_capability(
+ JSPromise::cast(module->top_level_capability()), isolate);
+ DCHECK(top_level_capability->status() == Promise::kRejected &&
+ top_level_capability->result() == module->exception());
+ return top_level_capability;
+ }
+ Handle<JSPromise> capability = isolate->factory()->NewJSPromise();
+ JSPromise::Reject(capability, handle(module->exception(), isolate));
+ return capability;
+ }
+
+ // Start of Evaluate () Concrete Method
+ // 2. Assert: module.[[Status]] is "linked" or "evaluated".
+ CHECK(module->status() == kInstantiated || module->status() == kEvaluated);
+
+ // 3. If module.[[Status]] is "evaluated", set module to
+ // GetAsyncCycleRoot(module).
+ if (module->status() == kEvaluated) {
+ module = GetAsyncCycleRoot(isolate, module);
+ }
+
+ // 4. If module.[[TopLevelCapability]] is not undefined, then
+ // a. Return module.[[TopLevelCapability]].[[Promise]].
+ if (module->top_level_capability().IsJSPromise()) {
+ return handle(JSPromise::cast(module->top_level_capability()), isolate);
+ }
+ DCHECK(module->top_level_capability().IsUndefined());
+
+ // 6. Let capability be ! NewPromiseCapability(%Promise%).
+ Handle<JSPromise> capability = isolate->factory()->NewJSPromise();
+
+ // 7. Set module.[[TopLevelCapability]] to capability.
+ module->set_top_level_capability(*capability);
+ DCHECK(module->top_level_capability().IsJSPromise());
+
+ // 9. If result is an abrupt completion, then
+ Handle<Object> unused_result;
+ if (!Evaluate(isolate, module).ToHandle(&unused_result)) {
+ // d. Perform ! Call(capability.[[Reject]], undefined,
+ // «result.[[Value]]»).
+ isolate->clear_pending_exception();
+ JSPromise::Reject(capability, handle(module->exception(), isolate));
+ } else {
+ // 10. Otherwise,
+ // a. Assert: module.[[Status]] is "evaluated"...
+ CHECK_EQ(module->status(), kEvaluated);
+
+ // b. If module.[[AsyncEvaluating]] is false, then
+ if (!module->async_evaluating()) {
+ // i. Perform ! Call(capability.[[Resolve]], undefined,
+ // «undefined»).
+ JSPromise::Resolve(capability, isolate->factory()->undefined_value())
+ .ToHandleChecked();
+ }
+ }
+
+ // 11. Return capability.[[Promise]].
+ return capability;
+}
+
MaybeHandle<Object> SourceTextModule::Evaluate(
+ Isolate* isolate, Handle<SourceTextModule> module) {
+ // Evaluate () Concrete Method continued from EvaluateMaybeAsync.
+ CHECK(module->status() == kInstantiated || module->status() == kEvaluated);
+
+ // 5. Let stack be a new empty List.
+ Zone zone(isolate->allocator(), ZONE_NAME);
+ ZoneForwardList<Handle<SourceTextModule>> stack(&zone);
+ unsigned dfs_index = 0;
+
+ // 8. Let result be InnerModuleEvaluation(module, stack, 0).
+ // 9. If result is an abrupt completion, then
+ Handle<Object> result;
+ if (!InnerModuleEvaluation(isolate, module, &stack, &dfs_index)
+ .ToHandle(&result)) {
+ // a. For each Cyclic Module Record m in stack, do
+ for (auto& descendant : stack) {
+ // i. Assert: m.[[Status]] is "evaluating".
+ CHECK_EQ(descendant->status(), kEvaluating);
+ // ii. Set m.[[Status]] to "evaluated".
+ // iii. Set m.[[EvaluationError]] to result.
+ descendant->RecordErrorUsingPendingException(isolate);
+ }
+ DCHECK_EQ(module->exception(), isolate->pending_exception());
+ } else {
+ // 10. Otherwise,
+ // c. Assert: stack is empty.
+ DCHECK(stack.empty());
+ }
+ return result;
+}
+
+void SourceTextModule::AsyncModuleExecutionFulfilled(
+ Isolate* isolate, Handle<SourceTextModule> module) {
+ // 1. Assert: module.[[Status]] is "evaluated".
+ CHECK(module->status() == kEvaluated || module->status() == kErrored);
+
+ // 2. If module.[[AsyncEvaluating]] is false,
+ if (!module->async_evaluating()) {
+ // a. Assert: module.[[EvaluationError]] is not undefined.
+ CHECK_EQ(module->status(), kErrored);
+
+ // b. Return undefined.
+ return;
+ }
+
+ // 3. Assert: module.[[EvaluationError]] is undefined.
+ CHECK_EQ(module->status(), kEvaluated);
+
+ // 4. Set module.[[AsyncEvaluating]] to false.
+ module->set_async_evaluating(false);
+
+ // 5. For each Module m of module.[[AsyncParentModules]], do
+ for (int i = 0; i < module->AsyncParentModuleCount(); i++) {
+ Handle<SourceTextModule> m = module->GetAsyncParentModule(isolate, i);
+
+ // a. If module.[[DFSIndex]] is not equal to module.[[DFSAncestorIndex]],
+ // then
+ if (module->dfs_index() != module->dfs_ancestor_index()) {
+ // i. Assert: m.[[DFSAncestorIndex]] is equal to
+ // module.[[DFSAncestorIndex]].
+ DCHECK_LE(m->dfs_ancestor_index(), module->dfs_ancestor_index());
+ }
+ // b. Decrement m.[[PendingAsyncDependencies]] by 1.
+ m->DecrementPendingAsyncDependencies();
+
+ // c. If m.[[PendingAsyncDependencies]] is 0 and m.[[EvaluationError]] is
+ // undefined, then
+ if (!m->HasPendingAsyncDependencies() && m->status() == kEvaluated) {
+ // i. Assert: m.[[AsyncEvaluating]] is true.
+ DCHECK(m->async_evaluating());
+
+ // ii. Let cycleRoot be ! GetAsyncCycleRoot(m).
+ auto cycle_root = GetAsyncCycleRoot(isolate, m);
+
+ // iii. If cycleRoot.[[EvaluationError]] is not undefined,
+ // return undefined.
+ if (cycle_root->status() == kErrored) {
+ return;
+ }
+
+ // iv. If m.[[Async]] is true, then
+ if (m->async()) {
+ // 1. Perform ! ExecuteAsyncModule(m).
+ ExecuteAsyncModule(isolate, m);
+ } else {
+ // v. Otherwise,
+ // 1. Let result be m.ExecuteModule().
+ // 2. If result is a normal completion,
+ Handle<Object> unused_result;
+ if (ExecuteModule(isolate, m).ToHandle(&unused_result)) {
+ // a. Perform ! AsyncModuleExecutionFulfilled(m).
+ AsyncModuleExecutionFulfilled(isolate, m);
+ } else {
+ // 3. Otherwise,
+ // a. Perform ! AsyncModuleExecutionRejected(m,
+ // result.[[Value]]).
+ Handle<Object> exception(isolate->pending_exception(), isolate);
+ isolate->clear_pending_exception();
+ AsyncModuleExecutionRejected(isolate, m, exception);
+ }
+ }
+ }
+ }
+
+ // 6. If module.[[TopLevelCapability]] is not undefined, then
+ if (!module->top_level_capability().IsUndefined(isolate)) {
+ // a. Assert: module.[[DFSIndex]] is equal to module.[[DFSAncestorIndex]].
+ DCHECK_EQ(module->dfs_index(), module->dfs_ancestor_index());
+
+ // b. Perform ! Call(module.[[TopLevelCapability]].[[Resolve]],
+ // undefined, «undefined»).
+ Handle<JSPromise> capability(
+ JSPromise::cast(module->top_level_capability()), isolate);
+ JSPromise::Resolve(capability, isolate->factory()->undefined_value())
+ .ToHandleChecked();
+ }
+
+ // 7. Return undefined.
+}
+
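The loop above is the parent-notification half of top-level await: when an async module's body finishes, every recorded parent drops one pending async dependency and may itself become runnable. Below is a minimal standalone sketch of that bookkeeping, under hypothetical names (Module, OnFulfilled) that are not part of V8:

#include <cstdio>
#include <vector>

struct Module {
  const char* name;
  bool is_async = false;               // module.[[Async]]
  bool async_evaluating = false;       // module.[[AsyncEvaluating]]
  int pending_async_deps = 0;          // module.[[PendingAsyncDependencies]]
  std::vector<Module*> async_parents;  // module.[[AsyncParentModules]]
};

// Mirrors the shape of AsyncModuleExecutionFulfilled: mark this module done,
// then tell each async parent that one fewer dependency is outstanding.
void OnFulfilled(Module* module) {
  module->async_evaluating = false;
  for (Module* parent : module->async_parents) {
    if (--parent->pending_async_deps == 0) {
      std::printf("%s can now execute its body\n", parent->name);
      // A synchronous parent runs its body and then notifies its own
      // parents in turn; an async parent would start executing instead.
      if (!parent->is_async) OnFulfilled(parent);
    }
  }
}

int main() {
  Module leaf{"leaf", /*is_async=*/true, /*async_evaluating=*/true};
  Module root{"root"};
  root.pending_async_deps = 1;
  leaf.async_parents.push_back(&root);
  OnFulfilled(&leaf);  // prints: root can now execute its body
}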
+void SourceTextModule::AsyncModuleExecutionRejected(
Isolate* isolate, Handle<SourceTextModule> module,
- ZoneForwardList<Handle<SourceTextModule>>* stack, unsigned* dfs_index) {
+ Handle<Object> exception) {
+ // 1. Assert: module.[[Status]] is "evaluated".
+ CHECK(module->status() == kEvaluated || module->status() == kErrored);
+
+ // 2. If module.[[AsyncEvaluating]] is false,
+ if (!module->async_evaluating()) {
+ // a. Assert: module.[[EvaluationError]] is not undefined.
+ CHECK_EQ(module->status(), kErrored);
+
+ // b. Return undefined.
+ return;
+ }
+
+ // 4. Set module.[[EvaluationError]] to ThrowCompletion(error).
+ module->RecordError(isolate, exception);
+
+ // 5. Set module.[[AsyncEvaluating]] to false.
+ module->set_async_evaluating(false);
+
+ // 6. For each Module m of module.[[AsyncParentModules]], do
+ for (int i = 0; i < module->AsyncParentModuleCount(); i++) {
+ Handle<SourceTextModule> m = module->GetAsyncParentModule(isolate, i);
+
+ // a. If module.[[DFSIndex]] is not equal to module.[[DFSAncestorIndex]],
+ // then
+ if (module->dfs_index() != module->dfs_ancestor_index()) {
+ // i. Assert: m.[[DFSAncestorIndex]] is equal to
+ // module.[[DFSAncestorIndex]].
+ DCHECK_EQ(m->dfs_ancestor_index(), module->dfs_ancestor_index());
+ }
+ // b. Perform ! AsyncModuleExecutionRejected(m, error).
+ AsyncModuleExecutionRejected(isolate, m, exception);
+ }
+
+ // 7. If module.[[TopLevelCapability]] is not undefined, then
+ if (!module->top_level_capability().IsUndefined(isolate)) {
+ // a. Assert: module.[[DFSIndex]] is equal to module.[[DFSAncestorIndex]].
+ DCHECK_EQ(module->dfs_index(), module->dfs_ancestor_index());
+
+ // b. Perform ! Call(module.[[TopLevelCapability]].[[Reject]],
+ // undefined, «error»).
+ Handle<JSPromise> capability(
+ JSPromise::cast(module->top_level_capability()), isolate);
+ JSPromise::Reject(capability, exception);
+ }
+
+ // 8. Return undefined.
+}
+
+void SourceTextModule::ExecuteAsyncModule(Isolate* isolate,
+ Handle<SourceTextModule> module) {
+ // 1. Assert: module.[[Status]] is "evaluating" or "evaluated".
+ CHECK(module->status() == kEvaluating || module->status() == kEvaluated);
+
+ // 2. Assert: module.[[Async]] is true.
+ DCHECK(module->async());
+
+ // 3. Set module.[[AsyncEvaluating]] to true.
+ module->set_async_evaluating(true);
+
+ // 4. Let capability be ! NewPromiseCapability(%Promise%).
+ Handle<JSPromise> capability = isolate->factory()->NewJSPromise();
+
+ // 5. Let stepsFulfilled be the steps of a CallAsyncModuleFulfilled.
+ Handle<JSFunction> steps_fulfilled(
+ isolate->native_context()->call_async_module_fulfilled(), isolate);
+
+ ScopedVector<Handle<Object>> empty_argv(0);
+
+ // 6. Let onFulfilled be CreateBuiltinFunction(stepsFulfilled,
+ // «[[Module]]»).
+ // 7. Set onFulfilled.[[Module]] to module.
+ Handle<JSBoundFunction> on_fulfilled =
+ isolate->factory()
+ ->NewJSBoundFunction(steps_fulfilled, module, empty_argv)
+ .ToHandleChecked();
+
+ // 8. Let stepsRejected be the steps of a CallAsyncModuleRejected.
+ Handle<JSFunction> steps_rejected(
+ isolate->native_context()->call_async_module_rejected(), isolate);
+
+ // 9. Let onRejected be CreateBuiltinFunction(stepsRejected, «[[Module]]»).
+ // 10. Set onRejected.[[Module]] to module.
+ Handle<JSBoundFunction> on_rejected =
+ isolate->factory()
+ ->NewJSBoundFunction(steps_rejected, module, empty_argv)
+ .ToHandleChecked();
+
+ // 11. Perform ! PerformPromiseThen(capability.[[Promise]],
+ // onFulfilled, onRejected).
+ Handle<Object> argv[] = {on_fulfilled, on_rejected};
+ Execution::CallBuiltin(isolate, isolate->promise_then(), capability,
+ arraysize(argv), argv)
+ .ToHandleChecked();
+
+ // 12. Perform ! module.ExecuteModule(capability).
+ // Note: In V8 we have broken module.ExecuteModule into
+ // ExecuteModule for synchronous module execution and
+ // InnerExecuteAsyncModule for asynchronous execution.
+ InnerExecuteAsyncModule(isolate, module, capability).ToHandleChecked();
+
+ // 13. Return.
+}
+
+MaybeHandle<Object> SourceTextModule::InnerExecuteAsyncModule(
+ Isolate* isolate, Handle<SourceTextModule> module,
+ Handle<JSPromise> capability) {
+ // If we have an async module, then it has an associated
+ // JSAsyncFunctionObject, which we then evaluate with the passed-in promise
+ // capability.
+ Handle<JSAsyncFunctionObject> async_function_object(
+ JSAsyncFunctionObject::cast(module->code()), isolate);
+ async_function_object->set_promise(*capability);
+ Handle<JSFunction> resume(
+ isolate->native_context()->async_module_evaluate_internal(), isolate);
+ Handle<Object> result;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, result,
+ Execution::Call(isolate, resume, async_function_object, 0, nullptr),
+ Object);
+ return result;
+}
+
+MaybeHandle<Object> SourceTextModule::ExecuteModule(
+ Isolate* isolate, Handle<SourceTextModule> module) {
+ // Synchronous modules have an associated JSGeneratorObject.
Handle<JSGeneratorObject> generator(JSGeneratorObject::cast(module->code()),
isolate);
- module->set_code(
- generator->function().shared().scope_info().ModuleDescriptorInfo());
+ Handle<JSFunction> resume(
+ isolate->native_context()->generator_next_internal(), isolate);
+ Handle<Object> result;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, result, Execution::Call(isolate, resume, generator, 0, nullptr),
+ Object);
+ DCHECK(JSIteratorResult::cast(*result).done().BooleanValue(isolate));
+ return handle(JSIteratorResult::cast(*result).value(), isolate);
+}
+
+MaybeHandle<Object> SourceTextModule::InnerModuleEvaluation(
+ Isolate* isolate, Handle<SourceTextModule> module,
+ ZoneForwardList<Handle<SourceTextModule>>* stack, unsigned* dfs_index) {
+ STACK_CHECK(isolate, MaybeHandle<Object>());
+
+ // InnerModuleEvaluation(module, stack, index)
+ // 2. If module.[[Status]] is "evaluated", then
+ // a. If module.[[EvaluationError]] is undefined, return index.
+ // (We return undefined instead)
+ if (module->status() == kEvaluated || module->status() == kEvaluating) {
+ return isolate->factory()->undefined_value();
+ }
+
+ // b. Otherwise return module.[[EvaluationError]].
+ // (We throw on isolate and return a MaybeHandle<Object>
+ // instead)
+ if (module->status() == kErrored) {
+ isolate->Throw(module->exception());
+ return MaybeHandle<Object>();
+ }
+
+ // 4. Assert: module.[[Status]] is "linked".
+ CHECK_EQ(module->status(), kInstantiated);
+
+ // 5. Set module.[[Status]] to "evaluating".
module->SetStatus(kEvaluating);
+
+ // 6. Set module.[[DFSIndex]] to index.
module->set_dfs_index(*dfs_index);
+
+ // 7. Set module.[[DFSAncestorIndex]] to index.
module->set_dfs_ancestor_index(*dfs_index);
- stack->push_front(module);
+
+ // 8. Set module.[[PendingAsyncDependencies]] to 0.
+ DCHECK(!module->HasPendingAsyncDependencies());
+
+ // 9. Set module.[[AsyncParentModules]] to a new empty List.
+ Handle<ArrayList> async_parent_modules = ArrayList::New(isolate, 0);
+ module->set_async_parent_modules(*async_parent_modules);
+
+ // 10. Set index to index + 1.
(*dfs_index)++;
+ // 11. Append module to stack.
+ stack->push_front(module);
+
// Recursion.
Handle<FixedArray> requested_modules(module->requested_modules(), isolate);
+
+ // 12. For each String required that is an element of
+ // module.[[RequestedModules]], do
for (int i = 0, length = requested_modules->length(); i < length; ++i) {
Handle<Module> requested_module(Module::cast(requested_modules->get(i)),
isolate);
- RETURN_ON_EXCEPTION(
- isolate, Module::Evaluate(isolate, requested_module, stack, dfs_index),
- Object);
-
- DCHECK_GE(requested_module->status(), kEvaluating);
- DCHECK_NE(requested_module->status(), kErrored);
- SLOW_DCHECK(
- // {requested_module} is evaluating iff it's on the {stack}.
- (requested_module->status() == kEvaluating) ==
- std::count_if(stack->begin(), stack->end(), [&](Handle<Module> m) {
- return *m == *requested_module;
- }));
-
- if (requested_module->status() == kEvaluating) {
- // SyntheticModules go straight to kEvaluated so this must be a
- // SourceTextModule
- module->set_dfs_ancestor_index(
- std::min(module->dfs_ancestor_index(),
- Handle<SourceTextModule>::cast(requested_module)
- ->dfs_ancestor_index()));
+ // d. If requiredModule is a Cyclic Module Record, then
+ if (requested_module->IsSourceTextModule()) {
+ Handle<SourceTextModule> required_module(
+ SourceTextModule::cast(*requested_module), isolate);
+ RETURN_ON_EXCEPTION(
+ isolate,
+ InnerModuleEvaluation(isolate, required_module, stack, dfs_index),
+ Object);
+
+ // i. Assert: requiredModule.[[Status]] is either "evaluating" or
+ // "evaluated".
+ // (We also assert the module cannot be errored, because if it was
+ // we would have already returned from InnerModuleEvaluation)
+ CHECK_GE(required_module->status(), kEvaluating);
+ CHECK_NE(required_module->status(), kErrored);
+
+ // ii. Assert: requiredModule.[[Status]] is "evaluating" if and
+ // only if requiredModule is in stack.
+ SLOW_DCHECK(
+ (requested_module->status() == kEvaluating) ==
+ std::count_if(stack->begin(), stack->end(), [&](Handle<Module> m) {
+ return *m == *requested_module;
+ }));
+
+ // iii. If requiredModule.[[Status]] is "evaluating", then
+ if (required_module->status() == kEvaluating) {
+ // 1. Set module.[[DFSAncestorIndex]] to
+ // min(
+ // module.[[DFSAncestorIndex]],
+ // requiredModule.[[DFSAncestorIndex]]).
+ module->set_dfs_ancestor_index(
+ std::min(module->dfs_ancestor_index(),
+ required_module->dfs_ancestor_index()));
+ } else {
+ // iv. Otherwise,
+ // 1. Set requiredModule to GetAsyncCycleRoot(requiredModule).
+ required_module = GetAsyncCycleRoot(isolate, required_module);
+
+ // 2. Assert: requiredModule.[[Status]] is "evaluated".
+ CHECK_GE(required_module->status(), kEvaluated);
+
+ // 3. If requiredModule.[[EvaluationError]] is not undefined,
+ // return module.[[EvaluationError]].
+ // (If there was an exception on the original required module
+ // we would have already returned. This check handles the case
+ // where the AsyncCycleRoot has an error. Instead of returning
+ // the exception, we throw on isolate and return a
+ // MaybeHandle<Object>)
+ if (required_module->status() == kErrored) {
+ isolate->Throw(required_module->exception());
+ return MaybeHandle<Object>();
+ }
+ }
+ // v. If requiredModule.[[AsyncEvaluating]] is true, then
+ if (required_module->async_evaluating()) {
+ // 1. Set module.[[PendingAsyncDependencies]] to
+ // module.[[PendingAsyncDependencies]] + 1.
+ module->IncrementPendingAsyncDependencies();
+
+ // 2. Append module to requiredModule.[[AsyncParentModules]].
+ required_module->AddAsyncParentModule(isolate, module);
+ }
+ } else {
+ RETURN_ON_EXCEPTION(isolate, Module::Evaluate(isolate, requested_module),
+ Object);
}
}
- // Evaluation of module body.
- Handle<JSFunction> resume(
- isolate->native_context()->generator_next_internal(), isolate);
- Handle<Object> result;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, result, Execution::Call(isolate, resume, generator, 0, nullptr),
- Object);
- DCHECK(JSIteratorResult::cast(*result).done().BooleanValue(isolate));
+ // The spec returns the module index for proper numbering of dependencies.
+ // However, we pass the module index by pointer instead.
+ //
+ // Before async modules were introduced, V8 returned the result of calling
+ // next on the module's implicit iterator. We preserve this behavior for
+ // synchronous modules, but return undefined for async modules.
+ Handle<Object> result = isolate->factory()->undefined_value();
+
+ // 14. If module.[[PendingAsyncDependencies]] is > 0, set
+ // module.[[AsyncEvaluating]] to true.
+ if (module->HasPendingAsyncDependencies()) {
+ module->set_async_evaluating(true);
+ } else if (module->async()) {
+ // 15. Otherwise, if module.[[Async]] is true,
+ // perform ! ExecuteAsyncModule(module).
+ SourceTextModule::ExecuteAsyncModule(isolate, module);
+ } else {
+ // 16. Otherwise, perform ? module.ExecuteModule().
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, result, ExecuteModule(isolate, module),
+ Object);
+ }
CHECK(MaybeTransitionComponent(isolate, module, stack, kEvaluated));
- return handle(JSIteratorResult::cast(*result).value(), isolate);
+ return result;
+}
+
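The dfs_index/dfs_ancestor_index bookkeeping in InnerModuleEvaluation above is a variant of Tarjan's strongly-connected-components algorithm: every member of a dependency cycle ends up sharing the ancestor index of the module that entered the cycle first, and MaybeTransitionComponent pops the whole component once the depth-first search returns to that module. A self-contained sketch of the idea, under hypothetical names (Node, Visit) rather than V8's types:

#include <algorithm>
#include <cstdio>
#include <vector>

struct Node {
  const char* name;
  std::vector<Node*> deps;
  unsigned dfs_index = 0, dfs_ancestor_index = 0;
  bool on_stack = false;  // the analogue of status() == kEvaluating
  bool visited = false;
};

void Visit(Node* n, unsigned* index, std::vector<Node*>* stack) {
  n->visited = n->on_stack = true;
  n->dfs_index = n->dfs_ancestor_index = (*index)++;
  stack->push_back(n);
  for (Node* dep : n->deps) {
    if (!dep->visited) Visit(dep, index, stack);
    if (dep->on_stack) {
      // dep is still "evaluating", so n and dep share a cycle and n inherits
      // the smaller ancestor index (step 12.d.iii above).
      n->dfs_ancestor_index =
          std::min(n->dfs_ancestor_index, dep->dfs_ancestor_index);
    }
  }
  if (n->dfs_index == n->dfs_ancestor_index) {
    // n entered its cycle first: pop the whole component, which is what
    // MaybeTransitionComponent does when moving modules to kEvaluated.
    Node* m;
    do {
      m = stack->back();
      stack->pop_back();
      m->on_stack = false;
      std::printf("%s -> evaluated\n", m->name);
    } while (m != n);
  }
}

int main() {
  Node a{"a"}, b{"b"};
  a.deps = {&b};
  b.deps = {&a};  // a <-> b form a cycle, so both pop together
  unsigned index = 0;
  std::vector<Node*> stack;
  Visit(&a, &index, &stack);  // prints: b -> evaluated, then a -> evaluated
}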
+Handle<SourceTextModule> SourceTextModule::GetAsyncCycleRoot(
+ Isolate* isolate, Handle<SourceTextModule> module) {
+ // 1. Assert: module.[[Status]] is "evaluated".
+ CHECK_GE(module->status(), kEvaluated);
+
+ // 2. If module.[[AsyncParentModules]] is an empty List, return module.
+ if (module->AsyncParentModuleCount() == 0) {
+ return module;
+ }
+
+ // 3. Repeat, while module.[[DFSIndex]] is greater than
+ // module.[[DFSAncestorIndex]],
+ while (module->dfs_index() > module->dfs_ancestor_index()) {
+ // a. Assert: module.[[AsyncParentModules]] is a non-empty List.
+ DCHECK_GT(module->AsyncParentModuleCount(), 0);
+
+ // b. Let nextCycleModule be the first element of
+ // module.[[AsyncParentModules]].
+ Handle<SourceTextModule> next_cycle_module =
+ module->GetAsyncParentModule(isolate, 0);
+
+ // c. Assert: nextCycleModule.[[DFSAncestorIndex]] is less than or equal
+ // to module.[[DFSAncestorIndex]].
+ DCHECK_LE(next_cycle_module->dfs_ancestor_index(),
+ module->dfs_ancestor_index());
+
+ // d. Set module to nextCycleModule
+ module = next_cycle_module;
+ }
+
+ // 4. Assert: module.[[DFSIndex]] is equal to module.[[DFSAncestorIndex]].
+ DCHECK_EQ(module->dfs_index(), module->dfs_ancestor_index());
+
+ // 5. Return module.
+ return module;
}
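GetAsyncCycleRoot depends on an invariant the DFS above establishes: a module strictly inside a cycle has dfs_index greater than dfs_ancestor_index, and its first recorded async parent is always at least as close to the module that entered the cycle first. A compile-clean sketch of the walk, with a hypothetical CycleNode type standing in for SourceTextModule:

#include <vector>

struct CycleNode {
  unsigned dfs_index = 0, dfs_ancestor_index = 0;
  std::vector<CycleNode*> async_parents;  // [[AsyncParentModules]]
};

// Follow first parents until reaching the member whose dfs_index equals its
// dfs_ancestor_index; that member entered the cycle first and is the one
// holding the top-level promise capability for the whole component.
CycleNode* CycleRoot(CycleNode* n) {
  while (n->dfs_index > n->dfs_ancestor_index) {
    n = n->async_parents.front();
  }
  return n;
}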
void SourceTextModule::Reset(Isolate* isolate,
diff --git a/deps/v8/src/objects/source-text-module.h b/deps/v8/src/objects/source-text-module.h
index e6cf260e10..f1387635d0 100644
--- a/deps/v8/src/objects/source-text-module.h
+++ b/deps/v8/src/objects/source-text-module.h
@@ -6,6 +6,7 @@
#define V8_OBJECTS_SOURCE_TEXT_MODULE_H_
#include "src/objects/module.h"
+#include "src/objects/promise.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -28,6 +29,10 @@ class SourceTextModule
// kErrored.
SharedFunctionInfo GetSharedFunctionInfo() const;
+ // Whether or not this module is an async module. Set during module creation
+ // and does not change afterwards.
+ DECL_BOOLEAN_ACCESSORS(async)
+
// Get the SourceTextModuleInfo associated with the code.
inline SourceTextModuleInfo info() const;
@@ -41,6 +46,14 @@ class SourceTextModule
static int ImportIndex(int cell_index);
static int ExportIndex(int cell_index);
+ // Used by builtins to fulfill or reject the promise associated
+ // with async SourceTextModules.
+ static void AsyncModuleExecutionFulfilled(Isolate* isolate,
+ Handle<SourceTextModule> module);
+ static void AsyncModuleExecutionRejected(Isolate* isolate,
+ Handle<SourceTextModule> module,
+ Handle<Object> exception);
+
// Get the namespace object for [module_request] of [module]. If it doesn't
// exist yet, it is created.
static Handle<JSModuleNamespace> GetModuleNamespace(
@@ -54,12 +67,54 @@ class SourceTextModule
friend class Factory;
friend class Module;
+ // Appends module to the async parent modules ArrayList.
+ inline void AddAsyncParentModule(Isolate* isolate,
+ Handle<SourceTextModule> module);
+
+ // Returns the ith parent, in depth-first traversal order, of a given
+ // async child.
+ inline Handle<SourceTextModule> GetAsyncParentModule(Isolate* isolate,
+ int index);
+
+ // Returns the number of async parent modules for a given async child.
+ inline int AsyncParentModuleCount();
+
+ inline bool HasPendingAsyncDependencies();
+ inline void IncrementPendingAsyncDependencies();
+ inline void DecrementPendingAsyncDependencies();
+
// TODO(neis): Don't store those in the module object?
DECL_INT_ACCESSORS(dfs_index)
DECL_INT_ACCESSORS(dfs_ancestor_index)
- // Helpers for Instantiate and Evaluate.
+ // Storage for boolean flags.
+ DECL_INT_ACCESSORS(flags)
+
+ // Bits for flags.
+ static const int kAsyncBit = 0;
+ static const int kAsyncEvaluatingBit = 1;
+
+ // async_evaluating, top_level_capability, pending_async_dependencies, and
+ // async_parent_modules are used exclusively during evaluation of async
+ // modules and the modules which depend on them.
+ //
+ // Whether or not this module is async and evaluating or currently evaluating
+ // an async child.
+ DECL_BOOLEAN_ACCESSORS(async_evaluating)
+
+ // The top level promise capability of this module. Will only be defined
+ // for cycle roots.
+ DECL_ACCESSORS(top_level_capability, HeapObject)
+
+ // The number of currently evaluating async dependencies of this module.
+ DECL_INT_ACCESSORS(pending_async_dependencies)
+
+ // The parent modules of a given async dependency; use async_parent_modules()
+ // to retrieve the ArrayList representation.
+ DECL_ACCESSORS(async_parent_modules, ArrayList)
+ // Helpers for Instantiate and Evaluate.
static void CreateExport(Isolate* isolate, Handle<SourceTextModule> module,
int cell_index, Handle<FixedArray> names);
static void CreateIndirectExport(Isolate* isolate,
@@ -95,7 +150,16 @@ class SourceTextModule
Handle<SourceTextModule> module, Zone* zone,
UnorderedModuleSet* visited);
+ // Implementation of spec concrete method Evaluate.
+ static V8_WARN_UNUSED_RESULT MaybeHandle<Object> EvaluateMaybeAsync(
+ Isolate* isolate, Handle<SourceTextModule> module);
+
+ // Continued implementation of spec concrete method Evaluate.
static V8_WARN_UNUSED_RESULT MaybeHandle<Object> Evaluate(
+ Isolate* isolate, Handle<SourceTextModule> module);
+
+ // Implementation of spec abstract operation InnerModuleEvaluation.
+ static V8_WARN_UNUSED_RESULT MaybeHandle<Object> InnerModuleEvaluation(
Isolate* isolate, Handle<SourceTextModule> module,
ZoneForwardList<Handle<SourceTextModule>>* stack, unsigned* dfs_index);
@@ -103,6 +167,24 @@ class SourceTextModule
Isolate* isolate, Handle<SourceTextModule> module,
ZoneForwardList<Handle<SourceTextModule>>* stack, Status new_status);
+ // Implementation of spec GetAsyncCycleRoot.
+ static V8_WARN_UNUSED_RESULT Handle<SourceTextModule> GetAsyncCycleRoot(
+ Isolate* isolate, Handle<SourceTextModule> module);
+
+ // Implementation of spec ExecuteModule is broken up into
+ // InnerExecuteAsyncModule for asynchronous modules and ExecuteModule
+ // for synchronous modules.
+ static V8_WARN_UNUSED_RESULT MaybeHandle<Object> InnerExecuteAsyncModule(
+ Isolate* isolate, Handle<SourceTextModule> module,
+ Handle<JSPromise> capability);
+
+ static V8_WARN_UNUSED_RESULT MaybeHandle<Object> ExecuteModule(
+ Isolate* isolate, Handle<SourceTextModule> module);
+
+ // Implementation of spec ExecuteAsyncModule.
+ static void ExecuteAsyncModule(Isolate* isolate,
+ Handle<SourceTextModule> module);
+
static void Reset(Isolate* isolate, Handle<SourceTextModule> module);
TQ_OBJECT_CONSTRUCTORS(SourceTextModule)
@@ -169,9 +251,10 @@ class SourceTextModuleInfoEntry
DECL_INT_ACCESSORS(end_pos)
static Handle<SourceTextModuleInfoEntry> New(
- Isolate* isolate, Handle<HeapObject> export_name,
- Handle<HeapObject> local_name, Handle<HeapObject> import_name,
- int module_request, int cell_index, int beg_pos, int end_pos);
+ Isolate* isolate, Handle<PrimitiveHeapObject> export_name,
+ Handle<PrimitiveHeapObject> local_name,
+ Handle<PrimitiveHeapObject> import_name, int module_request,
+ int cell_index, int beg_pos, int end_pos);
TQ_OBJECT_CONSTRUCTORS(SourceTextModuleInfoEntry)
};
diff --git a/deps/v8/src/objects/stack-frame-info.cc b/deps/v8/src/objects/stack-frame-info.cc
index 323c4b8fcb..040c6f7b32 100644
--- a/deps/v8/src/objects/stack-frame-info.cc
+++ b/deps/v8/src/objects/stack-frame-info.cc
@@ -299,10 +299,8 @@ void AppendMethodCall(Isolate* isolate, Handle<StackTraceFrame> frame,
}
}
-void SerializeJSStackFrame(
- Isolate* isolate, Handle<StackTraceFrame> frame,
- IncrementalStringBuilder& builder // NOLINT(runtime/references)
-) {
+void SerializeJSStackFrame(Isolate* isolate, Handle<StackTraceFrame> frame,
+ IncrementalStringBuilder* builder) {
Handle<Object> function_name = StackTraceFrame::GetFunctionName(frame);
const bool is_toplevel = StackTraceFrame::IsToplevel(frame);
@@ -316,96 +314,91 @@ void SerializeJSStackFrame(
const bool is_method_call = !(is_toplevel || is_constructor);
if (is_async) {
- builder.AppendCString("async ");
+ builder->AppendCString("async ");
}
if (is_promise_all) {
- builder.AppendCString("Promise.all (index ");
- builder.AppendInt(StackTraceFrame::GetPromiseAllIndex(frame));
- builder.AppendCString(")");
+ builder->AppendCString("Promise.all (index ");
+ builder->AppendInt(StackTraceFrame::GetPromiseAllIndex(frame));
+ builder->AppendCString(")");
return;
}
if (is_method_call) {
- AppendMethodCall(isolate, frame, &builder);
+ AppendMethodCall(isolate, frame, builder);
} else if (is_constructor) {
- builder.AppendCString("new ");
+ builder->AppendCString("new ");
if (IsNonEmptyString(function_name)) {
- builder.AppendString(Handle<String>::cast(function_name));
+ builder->AppendString(Handle<String>::cast(function_name));
} else {
- builder.AppendCString("<anonymous>");
+ builder->AppendCString("<anonymous>");
}
} else if (IsNonEmptyString(function_name)) {
- builder.AppendString(Handle<String>::cast(function_name));
+ builder->AppendString(Handle<String>::cast(function_name));
} else {
- AppendFileLocation(isolate, frame, &builder);
+ AppendFileLocation(isolate, frame, builder);
return;
}
- builder.AppendCString(" (");
- AppendFileLocation(isolate, frame, &builder);
- builder.AppendCString(")");
+ builder->AppendCString(" (");
+ AppendFileLocation(isolate, frame, builder);
+ builder->AppendCString(")");
}
-void SerializeAsmJsWasmStackFrame(
- Isolate* isolate, Handle<StackTraceFrame> frame,
- IncrementalStringBuilder& builder // NOLINT(runtime/references)
-) {
+void SerializeAsmJsWasmStackFrame(Isolate* isolate,
+ Handle<StackTraceFrame> frame,
+ IncrementalStringBuilder* builder) {
// The string should look exactly like the respective JavaScript frame string.
// Keep this method in line with
// JSStackFrame::ToString(IncrementalStringBuilder&).
Handle<Object> function_name = StackTraceFrame::GetFunctionName(frame);
if (IsNonEmptyString(function_name)) {
- builder.AppendString(Handle<String>::cast(function_name));
- builder.AppendCString(" (");
+ builder->AppendString(Handle<String>::cast(function_name));
+ builder->AppendCString(" (");
}
- AppendFileLocation(isolate, frame, &builder);
+ AppendFileLocation(isolate, frame, builder);
- if (IsNonEmptyString(function_name)) builder.AppendCString(")");
+ if (IsNonEmptyString(function_name)) builder->AppendCString(")");
return;
}
-void SerializeWasmStackFrame(
- Isolate* isolate, Handle<StackTraceFrame> frame,
- IncrementalStringBuilder& builder // NOLINT(runtime/references)
-) {
+void SerializeWasmStackFrame(Isolate* isolate, Handle<StackTraceFrame> frame,
+ IncrementalStringBuilder* builder) {
Handle<Object> module_name = StackTraceFrame::GetWasmModuleName(frame);
Handle<Object> function_name = StackTraceFrame::GetFunctionName(frame);
const bool has_name = !module_name->IsNull() || !function_name->IsNull();
if (has_name) {
if (module_name->IsNull()) {
- builder.AppendString(Handle<String>::cast(function_name));
+ builder->AppendString(Handle<String>::cast(function_name));
} else {
- builder.AppendString(Handle<String>::cast(module_name));
+ builder->AppendString(Handle<String>::cast(module_name));
if (!function_name->IsNull()) {
- builder.AppendCString(".");
- builder.AppendString(Handle<String>::cast(function_name));
+ builder->AppendCString(".");
+ builder->AppendString(Handle<String>::cast(function_name));
}
}
- builder.AppendCString(" (");
+ builder->AppendCString(" (");
}
const int wasm_func_index = StackTraceFrame::GetLineNumber(frame);
- builder.AppendCString("wasm-function[");
- builder.AppendInt(wasm_func_index);
- builder.AppendCString("]:");
+ builder->AppendCString("wasm-function[");
+ builder->AppendInt(wasm_func_index);
+ builder->AppendCString("]:");
char buffer[16];
SNPrintF(ArrayVector(buffer), "0x%x",
StackTraceFrame::GetColumnNumber(frame));
- builder.AppendCString(buffer);
+ builder->AppendCString(buffer);
- if (has_name) builder.AppendCString(")");
+ if (has_name) builder->AppendCString(")");
}
} // namespace
-void SerializeStackTraceFrame(
- Isolate* isolate, Handle<StackTraceFrame> frame,
- IncrementalStringBuilder& builder // NOLINT(runtime/references)
-) {
+void SerializeStackTraceFrame(Isolate* isolate, Handle<StackTraceFrame> frame,
+ IncrementalStringBuilder* builder) {
// Ordering here is important, as AsmJs frames are also marked as Wasm.
if (StackTraceFrame::IsAsmJsWasm(frame)) {
SerializeAsmJsWasmStackFrame(isolate, frame, builder);
@@ -419,7 +412,7 @@ void SerializeStackTraceFrame(
MaybeHandle<String> SerializeStackTraceFrame(Isolate* isolate,
Handle<StackTraceFrame> frame) {
IncrementalStringBuilder builder(isolate);
- SerializeStackTraceFrame(isolate, frame, builder);
+ SerializeStackTraceFrame(isolate, frame, &builder);
return builder.Finish();
}
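The change above is mechanical: the NOLINT'd non-const reference parameters become pointers, matching the Google C++ style rule that output parameters are passed by pointer so mutation is visible at the call site. A trivial illustration of the convention (not V8 code):

#include <string>

// Pointer out-parameter: the caller must write &s, making the in-place
// mutation explicit at the call site.
void AppendGreeting(std::string* out) { out->append("hello"); }

int main() {
  std::string s;
  AppendGreeting(&s);  // the & flags that s is modified
}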
diff --git a/deps/v8/src/objects/stack-frame-info.h b/deps/v8/src/objects/stack-frame-info.h
index 7c4918a3c6..54b64b6118 100644
--- a/deps/v8/src/objects/stack-frame-info.h
+++ b/deps/v8/src/objects/stack-frame-info.h
@@ -124,10 +124,8 @@ Handle<FrameArray> GetFrameArrayFromStackTrace(Isolate* isolate,
Handle<FixedArray> stack_trace);
class IncrementalStringBuilder;
-void SerializeStackTraceFrame(
- Isolate* isolate, Handle<StackTraceFrame> frame,
- IncrementalStringBuilder& builder // NOLINT(runtime/references)
-);
+void SerializeStackTraceFrame(Isolate* isolate, Handle<StackTraceFrame> frame,
+ IncrementalStringBuilder* builder);
MaybeHandle<String> SerializeStackTraceFrame(Isolate* isolate,
Handle<StackTraceFrame> frame);
diff --git a/deps/v8/src/objects/string-inl.h b/deps/v8/src/objects/string-inl.h
index 083928d211..b4aea68cb1 100644
--- a/deps/v8/src/objects/string-inl.h
+++ b/deps/v8/src/objects/string-inl.h
@@ -778,6 +778,14 @@ bool String::AsArrayIndex(uint32_t* index) {
return SlowAsArrayIndex(index);
}
+bool String::AsIntegerIndex(size_t* index) {
+ uint32_t field = hash_field();
+ if (IsHashFieldComputed(field) && (field & kIsNotIntegerIndexMask)) {
+ return false;
+ }
+ return SlowAsIntegerIndex(index);
+}
+
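AsIntegerIndex complements AsArrayIndex: an ES array index must fit in a uint32_t and be at most 2**32 - 2, while an integer index (used for TypedArray element access) only has to fit in a size_t, so a key like "4294967295" is an integer index but not an array index. Below is a plain-stdlib sketch of the distinction that ignores V8's cached-hash fast path; the function names are illustrative only:

#include <cctype>
#include <cstdint>
#include <string>

bool AsIntegerIndexSketch(const std::string& s, size_t* out) {
  if (s.empty() || (s.size() > 1 && s[0] == '0')) return false;  // no "007"
  size_t value = 0;
  for (char c : s) {
    if (!std::isdigit(static_cast<unsigned char>(c))) return false;
    if (value > (SIZE_MAX - (c - '0')) / 10) return false;  // would overflow
    value = value * 10 + (c - '0');
  }
  *out = value;
  return true;
}

bool AsArrayIndexSketch(const std::string& s, uint32_t* out) {
  size_t v;
  if (!AsIntegerIndexSketch(s, &v) || v > 0xFFFFFFFEu) return false;
  *out = static_cast<uint32_t>(v);
  return true;
}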
SubStringRange::SubStringRange(String string,
const DisallowHeapAllocation& no_gc, int first,
int length)
diff --git a/deps/v8/src/objects/string.cc b/deps/v8/src/objects/string.cc
index 41de3aef04..a1eb7f4310 100644
--- a/deps/v8/src/objects/string.cc
+++ b/deps/v8/src/objects/string.cc
@@ -113,7 +113,10 @@ void String::MakeThin(Isolate* isolate, String internalized) {
bool has_pointers = StringShape(*this).IsIndirect();
int old_size = this->Size();
- isolate->heap()->NotifyObjectLayoutChange(*this, old_size, no_gc);
+ // Slot invalidation is not necessary here: a ThinString only stores a tagged
+ // value, so it can't store an untagged value in a recorded slot.
+ isolate->heap()->NotifyObjectLayoutChange(*this, no_gc,
+ InvalidateRecordedSlots::kNo);
bool one_byte = internalized.IsOneByteRepresentation();
Handle<Map> map = one_byte ? isolate->factory()->thin_one_byte_string_map()
: isolate->factory()->thin_string_map();
@@ -158,7 +161,8 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
bool has_pointers = StringShape(*this).IsIndirect();
if (has_pointers) {
- isolate->heap()->NotifyObjectLayoutChange(*this, size, no_allocation);
+ isolate->heap()->NotifyObjectLayoutChange(*this, no_allocation,
+ InvalidateRecordedSlots::kYes);
}
// Morph the string to an external string by replacing the map and
// reinitializing the fields. This won't work if the space the existing
@@ -184,10 +188,6 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
isolate->heap()->CreateFillerObjectAt(
this->address() + new_size, size - new_size,
has_pointers ? ClearRecordedSlots::kYes : ClearRecordedSlots::kNo);
- if (has_pointers) {
- isolate->heap()->ClearRecordedSlotRange(this->address(),
- this->address() + new_size);
- }
// We are storing the new map using release store after creating a filler for
// the left-over space to avoid races with the sweeper thread.
@@ -232,7 +232,8 @@ bool String::MakeExternal(v8::String::ExternalOneByteStringResource* resource) {
bool has_pointers = StringShape(*this).IsIndirect();
if (has_pointers) {
- isolate->heap()->NotifyObjectLayoutChange(*this, size, no_allocation);
+ isolate->heap()->NotifyObjectLayoutChange(*this, no_allocation,
+ InvalidateRecordedSlots::kYes);
}
// Morph the string to an external string by replacing the map and
// reinitializing the fields. This won't work if the space the existing
@@ -257,10 +258,6 @@ bool String::MakeExternal(v8::String::ExternalOneByteStringResource* resource) {
isolate->heap()->CreateFillerObjectAt(
this->address() + new_size, size - new_size,
has_pointers ? ClearRecordedSlots::kYes : ClearRecordedSlots::kNo);
- if (has_pointers) {
- isolate->heap()->ClearRecordedSlotRange(this->address(),
- this->address() + new_size);
- }
// We are storing the new map using release store after creating a filler for
// the left-over space to avoid races with the sweeper thread.
@@ -598,9 +595,8 @@ void String::WriteToFlat(String src, sinkchar* sink, int f, int t) {
String source = src;
int from = f;
int to = t;
- while (true) {
+ while (from < to) {
DCHECK_LE(0, from);
- DCHECK_LE(from, to);
DCHECK_LE(to, source.length());
switch (StringShape(source).full_representation_tag()) {
case kOneByteStringTag | kExternalStringTag: {
@@ -678,6 +674,7 @@ void String::WriteToFlat(String src, sinkchar* sink, int f, int t) {
break;
}
}
+ DCHECK_EQ(from, to);
}
template <typename SourceChar>
@@ -1358,25 +1355,39 @@ uint32_t String::ComputeAndSetHash() {
return result;
}
-bool String::ComputeArrayIndex(uint32_t* index) {
+bool String::SlowAsArrayIndex(uint32_t* index) {
+ DisallowHeapAllocation no_gc;
int length = this->length();
+ if (length <= kMaxCachedArrayIndexLength) {
+ Hash(); // Force computation of hash code.
+ uint32_t field = hash_field();
+ if ((field & kIsNotArrayIndexMask) != 0) return false;
+ *index = ArrayIndexValueBits::decode(field);
+ return true;
+ }
if (length == 0 || length > kMaxArrayIndexSize) return false;
StringCharacterStream stream(*this);
return StringToArrayIndex(&stream, index);
}
-bool String::SlowAsArrayIndex(uint32_t* index) {
+bool String::SlowAsIntegerIndex(size_t* index) {
DisallowHeapAllocation no_gc;
- if (length() <= kMaxCachedArrayIndexLength) {
- Hash(); // force computation of hash code
+ int length = this->length();
+ if (length <= kMaxCachedArrayIndexLength) {
+ Hash(); // Force computation of hash code.
uint32_t field = hash_field();
- if ((field & kIsNotArrayIndexMask) != 0) return false;
- // Isolate the array index form the full hash field.
+ if ((field & kIsNotArrayIndexMask) != 0) {
+ // If it was short but it's not an array index, then it can't be an
+ // integer index either.
+ DCHECK_NE(0, field & kIsNotIntegerIndexMask);
+ return false;
+ }
*index = ArrayIndexValueBits::decode(field);
return true;
- } else {
- return ComputeArrayIndex(index);
}
+ if (length == 0 || length > kMaxIntegerIndexSize) return false;
+ StringCharacterStream stream(*this);
+ return StringToArrayIndex(&stream, index);
}
void String::PrintOn(FILE* file) {
diff --git a/deps/v8/src/objects/string.h b/deps/v8/src/objects/string.h
index 27bd7e8765..fcdf75a968 100644
--- a/deps/v8/src/objects/string.h
+++ b/deps/v8/src/objects/string.h
@@ -5,6 +5,8 @@
#ifndef V8_OBJECTS_STRING_H_
#define V8_OBJECTS_STRING_H_
+#include <memory>
+
#include "src/base/bits.h"
#include "src/base/export-template.h"
#include "src/objects/instance-type.h"
@@ -306,8 +308,6 @@ class String : public TorqueGeneratedString<String, Name> {
RobustnessFlag robustness_flag = FAST_STRING_TRAVERSAL,
int* length_output = nullptr);
- bool ComputeArrayIndex(uint32_t* index);
-
// Externalization.
V8_EXPORT_PRIVATE bool MakeExternal(
v8::String::ExternalStringResource* resource);
@@ -316,8 +316,12 @@ class String : public TorqueGeneratedString<String, Name> {
bool SupportsExternalization();
// Conversion.
+ // "array index": an index allowed by the ES spec for JSArrays.
inline bool AsArrayIndex(uint32_t* index);
uint32_t inline ToValidIndex(Object number);
+ // "integer index": the string is the decimal representation of an
+ // integer in the range of a size_t. Useful for TypedArray accesses.
+ inline bool AsIntegerIndex(size_t* index);
// Trimming.
enum TrimMode { kTrim, kTrimStart, kTrimEnd };
@@ -448,6 +452,7 @@ class String : public TorqueGeneratedString<String, Name> {
// Slow case of AsArrayIndex.
V8_EXPORT_PRIVATE bool SlowAsArrayIndex(uint32_t* index);
+ V8_EXPORT_PRIVATE bool SlowAsIntegerIndex(size_t* index);
// Compute and set the hash code.
V8_EXPORT_PRIVATE uint32_t ComputeAndSetHash();
diff --git a/deps/v8/src/objects/struct-inl.h b/deps/v8/src/objects/struct-inl.h
index af0fed126b..34de889786 100644
--- a/deps/v8/src/objects/struct-inl.h
+++ b/deps/v8/src/objects/struct-inl.h
@@ -22,12 +22,10 @@ namespace internal {
TQ_OBJECT_CONSTRUCTORS_IMPL(Struct)
TQ_OBJECT_CONSTRUCTORS_IMPL(Tuple2)
TQ_OBJECT_CONSTRUCTORS_IMPL(Tuple3)
-OBJECT_CONSTRUCTORS_IMPL(AccessorPair, Struct)
+TQ_OBJECT_CONSTRUCTORS_IMPL(AccessorPair)
TQ_OBJECT_CONSTRUCTORS_IMPL(ClassPositions)
-CAST_ACCESSOR(AccessorPair)
-
void Struct::InitializeBody(int object_size) {
Object value = GetReadOnlyRoots().undefined_value();
for (int offset = kHeaderSize; offset < object_size; offset += kTaggedSize) {
@@ -35,9 +33,6 @@ void Struct::InitializeBody(int object_size) {
}
}
-ACCESSORS(AccessorPair, getter, Object, kGetterOffset)
-ACCESSORS(AccessorPair, setter, Object, kSetterOffset)
-
TQ_SMI_ACCESSORS(ClassPositions, start)
TQ_SMI_ACCESSORS(ClassPositions, end)
diff --git a/deps/v8/src/objects/struct.h b/deps/v8/src/objects/struct.h
index c9372d9ada..f786c4711a 100644
--- a/deps/v8/src/objects/struct.h
+++ b/deps/v8/src/objects/struct.h
@@ -16,12 +16,13 @@ namespace v8 {
namespace internal {
// An abstract superclass, a marker class really, for simple structure classes.
-// It doesn't carry much functionality but allows struct classes to be
+// It doesn't carry any functionality but allows struct classes to be
// identified in the type system.
class Struct : public TorqueGeneratedStruct<Struct, HeapObject> {
public:
inline void InitializeBody(int object_size);
void BriefPrintDetails(std::ostream& os);
+ STATIC_ASSERT(kHeaderSize == HeapObject::kHeaderSize);
TQ_OBJECT_CONSTRUCTORS(Struct)
};
@@ -46,13 +47,8 @@ class Tuple3 : public TorqueGeneratedTuple3<Tuple3, Tuple2> {
// * a FunctionTemplateInfo: a real (lazy) accessor
// * undefined: considered an accessor by the spec, too, strangely enough
// * null: an accessor which has not been set
-class AccessorPair : public Struct {
+class AccessorPair : public TorqueGeneratedAccessorPair<AccessorPair, Struct> {
public:
- DECL_ACCESSORS(getter, Object)
- DECL_ACCESSORS(setter, Object)
-
- DECL_CAST(AccessorPair)
-
static Handle<AccessorPair> Copy(Isolate* isolate, Handle<AccessorPair> pair);
inline Object get(AccessorComponent component);
@@ -71,13 +67,8 @@ class AccessorPair : public Struct {
// Dispatched behavior.
DECL_PRINTER(AccessorPair)
- DECL_VERIFIER(AccessorPair)
-
- // Layout description.
- DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
- TORQUE_GENERATED_ACCESSOR_PAIR_FIELDS)
- OBJECT_CONSTRUCTORS(AccessorPair, Struct);
+ TQ_OBJECT_CONSTRUCTORS(AccessorPair)
};
class ClassPositions
diff --git a/deps/v8/src/objects/synthetic-module.cc b/deps/v8/src/objects/synthetic-module.cc
index 850721ac99..58e0c1b58c 100644
--- a/deps/v8/src/objects/synthetic-module.cc
+++ b/deps/v8/src/objects/synthetic-module.cc
@@ -17,36 +17,16 @@ namespace internal {
// Implements SetSyntheticModuleBinding:
// https://heycam.github.io/webidl/#setsyntheticmoduleexport
-Maybe<bool> SyntheticModule::SetExport(Isolate* isolate,
- Handle<SyntheticModule> module,
- Handle<String> export_name,
- Handle<Object> export_value) {
+void SyntheticModule::SetExport(Isolate* isolate,
+ Handle<SyntheticModule> module,
+ Handle<String> export_name,
+ Handle<Object> export_value) {
Handle<ObjectHashTable> exports(module->exports(), isolate);
Handle<Object> export_object(exports->Lookup(export_name), isolate);
-
- if (!export_object->IsCell()) {
- isolate->Throw(*isolate->factory()->NewReferenceError(
- MessageTemplate::kModuleExportUndefined, export_name));
- return Nothing<bool>();
- }
-
+ CHECK(export_object->IsCell());
Handle<Cell> export_cell(Handle<Cell>::cast(export_object));
// Spec step 2: Set the mutable binding of export_name to export_value
export_cell->set_value(*export_value);
-
- return Just(true);
-}
-
-void SyntheticModule::SetExportStrict(Isolate* isolate,
- Handle<SyntheticModule> module,
- Handle<String> export_name,
- Handle<Object> export_value) {
- Handle<ObjectHashTable> exports(module->exports(), isolate);
- Handle<Object> export_object(exports->Lookup(export_name), isolate);
- CHECK(export_object->IsCell());
- Maybe<bool> set_export_result =
- SetExport(isolate, module, export_name, export_value);
- CHECK(set_export_result.FromJust());
}
// Implements Synthetic Module Record's ResolveExport concrete method:
@@ -116,7 +96,7 @@ MaybeHandle<Object> SyntheticModule::Evaluate(Isolate* isolate,
Utils::ToLocal(Handle<Module>::cast(module)))
.ToLocal(&result)) {
isolate->PromoteScheduledException();
- module->RecordError(isolate);
+ module->RecordErrorUsingPendingException(isolate);
return MaybeHandle<Object>();
}
diff --git a/deps/v8/src/objects/synthetic-module.h b/deps/v8/src/objects/synthetic-module.h
index 77a6eed276..6f3bb0438e 100644
--- a/deps/v8/src/objects/synthetic-module.h
+++ b/deps/v8/src/objects/synthetic-module.h
@@ -24,21 +24,9 @@ class SyntheticModule
DECL_VERIFIER(SyntheticModule)
DECL_PRINTER(SyntheticModule)
- // Set module's exported value for the specified export_name to the specified
- // export_value. An error will be thrown if export_name is not one
- // of the export_names that were supplied during module construction.
- // Returns Just(true) on success, Nothing<bool>() if an error was thrown.
- static Maybe<bool> SetExport(Isolate* isolate, Handle<SyntheticModule> module,
- Handle<String> export_name,
- Handle<Object> export_value);
- // The following redundant method should be deleted when the deprecated
- // version of v8::SetSyntheticModuleExport is removed. It differs from
- // SetExport in that it crashes rather than throwing an error if the caller
- // attempts to set an export_name that was not present during construction of
- // the module.
- static void SetExportStrict(Isolate* isolate, Handle<SyntheticModule> module,
- Handle<String> export_name,
- Handle<Object> export_value);
+ static void SetExport(Isolate* isolate, Handle<SyntheticModule> module,
+ Handle<String> export_name,
+ Handle<Object> export_value);
using BodyDescriptor = SubclassBodyDescriptor<
Module::BodyDescriptor,
diff --git a/deps/v8/src/objects/transitions-inl.h b/deps/v8/src/objects/transitions-inl.h
index 048774f49b..5694d66d94 100644
--- a/deps/v8/src/objects/transitions-inl.h
+++ b/deps/v8/src/objects/transitions-inl.h
@@ -64,6 +64,10 @@ Name TransitionArray::GetKey(int transition_number) {
Get(ToKeyIndex(transition_number))->GetHeapObjectAssumeStrong());
}
+Name TransitionArray::GetKey(InternalIndex index) {
+ return GetKey(index.as_int());
+}
+
Name TransitionsAccessor::GetKey(int transition_number) {
switch (encoding()) {
case kPrototypeInfo:
@@ -95,7 +99,7 @@ HeapObjectSlot TransitionArray::GetTargetSlot(int transition_number) {
// static
PropertyDetails TransitionsAccessor::GetTargetDetails(Name name, Map target) {
DCHECK(!IsSpecialTransition(name.GetReadOnlyRoots(), name));
- int descriptor = target.LastAdded();
+ InternalIndex descriptor = target.LastAdded();
DescriptorArray descriptors = target.instance_descriptors();
// Transitions are allowed only for the last added property.
DCHECK(descriptors.GetKey(descriptor).Equals(name));
@@ -108,7 +112,7 @@ PropertyDetails TransitionsAccessor::GetSimpleTargetDetails(Map transition) {
// static
Name TransitionsAccessor::GetSimpleTransitionKey(Map transition) {
- int descriptor = transition.LastAdded();
+ InternalIndex descriptor = transition.LastAdded();
return transition.instance_descriptors().GetKey(descriptor);
}
diff --git a/deps/v8/src/objects/transitions.cc b/deps/v8/src/objects/transitions.cc
index 843b790b7d..e0ba40ce7d 100644
--- a/deps/v8/src/objects/transitions.cc
+++ b/deps/v8/src/objects/transitions.cc
@@ -247,7 +247,7 @@ bool TransitionsAccessor::CanHaveMoreTransitions() {
bool TransitionsAccessor::IsMatchingMap(Map target, Name name,
PropertyKind kind,
PropertyAttributes attributes) {
- int descriptor = target.LastAdded();
+ InternalIndex descriptor = target.LastAdded();
DescriptorArray descriptors = target.instance_descriptors();
Name key = descriptors.GetKey(descriptor);
if (key != name) return false;
@@ -296,8 +296,7 @@ Handle<WeakFixedArray> TransitionArray::GrowPrototypeTransitionArray(
new_capacity = Min(kMaxCachedPrototypeTransitions, new_capacity);
DCHECK_GT(new_capacity, capacity);
int grow_by = new_capacity - capacity;
- array = isolate->factory()->CopyWeakFixedArrayAndGrow(array, grow_by,
- AllocationType::kOld);
+ array = isolate->factory()->CopyWeakFixedArrayAndGrow(array, grow_by);
if (capacity < 0) {
// There was no prototype transitions array before, so the size
// couldn't be copied. Initialize it explicitly.
diff --git a/deps/v8/src/objects/transitions.h b/deps/v8/src/objects/transitions.h
index f21e8cd54e..5a7db13e51 100644
--- a/deps/v8/src/objects/transitions.h
+++ b/deps/v8/src/objects/transitions.h
@@ -221,6 +221,7 @@ class TransitionArray : public WeakFixedArray {
Map* target);
// Required for templatized Search interface.
+ inline Name GetKey(InternalIndex index);
static constexpr int kNotFound = -1;
inline Name GetSortedKey(int transition_number);
diff --git a/deps/v8/src/objects/value-serializer.cc b/deps/v8/src/objects/value-serializer.cc
index 3b3506fbb9..af5cdc57ea 100644
--- a/deps/v8/src/objects/value-serializer.cc
+++ b/deps/v8/src/objects/value-serializer.cc
@@ -52,8 +52,6 @@ static const uint32_t kLatestVersion = 13;
static_assert(kLatestVersion == v8::CurrentValueSerializerFormatVersion(),
"Exported format version must match latest version.");
-static const int kPretenureThreshold = 100 * KB;
-
template <typename T>
static size_t BytesNeededForVarint(T value) {
static_assert(std::is_integral<T>::value && std::is_unsigned<T>::value,
@@ -554,7 +552,7 @@ Maybe<bool> ValueSerializer::WriteJSReceiver(Handle<JSReceiver> receiver) {
case JS_PRIMITIVE_WRAPPER_TYPE:
return WriteJSPrimitiveWrapper(
Handle<JSPrimitiveWrapper>::cast(receiver));
- case JS_REGEXP_TYPE:
+ case JS_REG_EXP_TYPE:
WriteJSRegExp(JSRegExp::cast(*receiver));
return ThrowIfOutOfMemory();
case JS_MAP_TYPE:
@@ -568,7 +566,7 @@ Maybe<bool> ValueSerializer::WriteJSReceiver(Handle<JSReceiver> receiver) {
return WriteJSArrayBufferView(JSArrayBufferView::cast(*receiver));
case JS_ERROR_TYPE:
return WriteJSError(Handle<JSObject>::cast(receiver));
- case WASM_MODULE_TYPE: {
+ case WASM_MODULE_OBJECT_TYPE: {
auto enabled_features = wasm::WasmFeaturesFromIsolate(isolate_);
if (!FLAG_wasm_disable_structured_cloning || enabled_features.threads) {
// Only write WebAssembly modules if not disabled by a flag.
@@ -576,7 +574,7 @@ Maybe<bool> ValueSerializer::WriteJSReceiver(Handle<JSReceiver> receiver) {
}
break;
}
- case WASM_MEMORY_TYPE: {
+ case WASM_MEMORY_OBJECT_TYPE: {
auto enabled_features = wasm::WasmFeaturesFromIsolate(isolate_);
if (enabled_features.threads) {
return WriteWasmMemory(Handle<WasmMemoryObject>::cast(receiver));
@@ -604,7 +602,7 @@ Maybe<bool> ValueSerializer::WriteJSObject(Handle<JSObject> object) {
// map doesn't change.
uint32_t properties_written = 0;
bool map_changed = false;
- for (int i = 0; i < map->NumberOfOwnDescriptors(); i++) {
+ for (InternalIndex i : map->IterateOwnDescriptors()) {
Handle<Name> key(map->instance_descriptors().GetKey(i), isolate_);
if (!key->IsString()) continue;
PropertyDetails details = map->instance_descriptors().GetDetails(i);
@@ -1025,8 +1023,8 @@ Maybe<bool> ValueSerializer::WriteWasmMemory(Handle<WasmMemoryObject> object) {
return Nothing<bool>();
}
- isolate_->wasm_engine()->memory_tracker()->RegisterWasmMemoryAsShared(
- object, isolate_);
+ GlobalBackingStoreRegistry::Register(
+ object->array_buffer().GetBackingStore());
WriteTag(SerializationTag::kWasmMemoryTransfer);
WriteZigZag<int32_t>(object->maximum_pages());
@@ -1112,8 +1110,6 @@ ValueDeserializer::ValueDeserializer(Isolate* isolate,
delegate_(delegate),
position_(data.begin()),
end_(data.begin() + data.length()),
- allocation_(data.length() > kPretenureThreshold ? AllocationType::kOld
- : AllocationType::kYoung),
id_map_(isolate->global_handles()->Create(
ReadOnlyRoots(isolate_).empty_fixed_array())) {}
@@ -1302,19 +1298,17 @@ MaybeHandle<Object> ValueDeserializer::ReadObjectInternal() {
case SerializationTag::kInt32: {
Maybe<int32_t> number = ReadZigZag<int32_t>();
if (number.IsNothing()) return MaybeHandle<Object>();
- return isolate_->factory()->NewNumberFromInt(number.FromJust(),
- allocation_);
+ return isolate_->factory()->NewNumberFromInt(number.FromJust());
}
case SerializationTag::kUint32: {
Maybe<uint32_t> number = ReadVarint<uint32_t>();
if (number.IsNothing()) return MaybeHandle<Object>();
- return isolate_->factory()->NewNumberFromUint(number.FromJust(),
- allocation_);
+ return isolate_->factory()->NewNumberFromUint(number.FromJust());
}
case SerializationTag::kDouble: {
Maybe<double> number = ReadDouble();
if (number.IsNothing()) return MaybeHandle<Object>();
- return isolate_->factory()->NewNumber(number.FromJust(), allocation_);
+ return isolate_->factory()->NewNumber(number.FromJust());
}
case SerializationTag::kBigInt:
return ReadBigInt();
@@ -1398,8 +1392,7 @@ MaybeHandle<BigInt> ValueDeserializer::ReadBigInt() {
if (!ReadRawBytes(bytelength).To(&digits_storage)) {
return MaybeHandle<BigInt>();
}
- return BigInt::FromSerializedDigits(isolate_, bitfield, digits_storage,
- allocation_);
+ return BigInt::FromSerializedDigits(isolate_, bitfield, digits_storage);
}
MaybeHandle<String> ValueDeserializer::ReadUtf8String() {
@@ -1412,7 +1405,7 @@ MaybeHandle<String> ValueDeserializer::ReadUtf8String() {
return MaybeHandle<String>();
}
return isolate_->factory()->NewStringFromUtf8(
- Vector<const char>::cast(utf8_bytes), allocation_);
+ Vector<const char>::cast(utf8_bytes));
}
MaybeHandle<String> ValueDeserializer::ReadOneByteString() {
@@ -1424,7 +1417,7 @@ MaybeHandle<String> ValueDeserializer::ReadOneByteString() {
!ReadRawBytes(byte_length).To(&bytes)) {
return MaybeHandle<String>();
}
- return isolate_->factory()->NewStringFromOneByte(bytes, allocation_);
+ return isolate_->factory()->NewStringFromOneByte(bytes);
}
MaybeHandle<String> ValueDeserializer::ReadTwoByteString() {
@@ -1443,7 +1436,7 @@ MaybeHandle<String> ValueDeserializer::ReadTwoByteString() {
if (byte_length == 0) return isolate_->factory()->empty_string();
Handle<SeqTwoByteString> string;
if (!isolate_->factory()
- ->NewRawTwoByteString(byte_length / sizeof(uc16), allocation_)
+ ->NewRawTwoByteString(byte_length / sizeof(uc16))
.ToHandle(&string)) {
return MaybeHandle<String>();
}
@@ -1506,8 +1499,8 @@ MaybeHandle<JSObject> ValueDeserializer::ReadJSObject() {
uint32_t id = next_id_++;
HandleScope scope(isolate_);
- Handle<JSObject> object = isolate_->factory()->NewJSObject(
- isolate_->object_function(), allocation_);
+ Handle<JSObject> object =
+ isolate_->factory()->NewJSObject(isolate_->object_function());
AddObjectWithID(id, object);
uint32_t num_properties;
@@ -1532,8 +1525,8 @@ MaybeHandle<JSArray> ValueDeserializer::ReadSparseJSArray() {
uint32_t id = next_id_++;
HandleScope scope(isolate_);
- Handle<JSArray> array = isolate_->factory()->NewJSArray(
- 0, TERMINAL_FAST_ELEMENTS_KIND, allocation_);
+ Handle<JSArray> array =
+ isolate_->factory()->NewJSArray(0, TERMINAL_FAST_ELEMENTS_KIND);
JSArray::SetLength(array, length);
AddObjectWithID(id, array);
@@ -1569,8 +1562,7 @@ MaybeHandle<JSArray> ValueDeserializer::ReadDenseJSArray() {
uint32_t id = next_id_++;
HandleScope scope(isolate_);
Handle<JSArray> array = isolate_->factory()->NewJSArray(
- HOLEY_ELEMENTS, length, length, INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE,
- allocation_);
+ HOLEY_ELEMENTS, length, length, INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
AddObjectWithID(id, array);
Handle<FixedArray> elements(FixedArray::cast(array->elements()), isolate_);
@@ -1631,22 +1623,21 @@ MaybeHandle<JSPrimitiveWrapper> ValueDeserializer::ReadJSPrimitiveWrapper(
Handle<JSPrimitiveWrapper> value;
switch (tag) {
case SerializationTag::kTrueObject:
- value = Handle<JSPrimitiveWrapper>::cast(isolate_->factory()->NewJSObject(
- isolate_->boolean_function(), allocation_));
+ value = Handle<JSPrimitiveWrapper>::cast(
+ isolate_->factory()->NewJSObject(isolate_->boolean_function()));
value->set_value(ReadOnlyRoots(isolate_).true_value());
break;
case SerializationTag::kFalseObject:
- value = Handle<JSPrimitiveWrapper>::cast(isolate_->factory()->NewJSObject(
- isolate_->boolean_function(), allocation_));
+ value = Handle<JSPrimitiveWrapper>::cast(
+ isolate_->factory()->NewJSObject(isolate_->boolean_function()));
value->set_value(ReadOnlyRoots(isolate_).false_value());
break;
case SerializationTag::kNumberObject: {
double number;
if (!ReadDouble().To(&number)) return MaybeHandle<JSPrimitiveWrapper>();
- value = Handle<JSPrimitiveWrapper>::cast(isolate_->factory()->NewJSObject(
- isolate_->number_function(), allocation_));
- Handle<Object> number_object =
- isolate_->factory()->NewNumber(number, allocation_);
+ value = Handle<JSPrimitiveWrapper>::cast(
+ isolate_->factory()->NewJSObject(isolate_->number_function()));
+ Handle<Object> number_object = isolate_->factory()->NewNumber(number);
value->set_value(*number_object);
break;
}
@@ -1654,8 +1645,8 @@ MaybeHandle<JSPrimitiveWrapper> ValueDeserializer::ReadJSPrimitiveWrapper(
Handle<BigInt> bigint;
if (!ReadBigInt().ToHandle(&bigint))
return MaybeHandle<JSPrimitiveWrapper>();
- value = Handle<JSPrimitiveWrapper>::cast(isolate_->factory()->NewJSObject(
- isolate_->bigint_function(), allocation_));
+ value = Handle<JSPrimitiveWrapper>::cast(
+ isolate_->factory()->NewJSObject(isolate_->bigint_function()));
value->set_value(*bigint);
break;
}
@@ -1663,8 +1654,8 @@ MaybeHandle<JSPrimitiveWrapper> ValueDeserializer::ReadJSPrimitiveWrapper(
Handle<String> string;
if (!ReadString().ToHandle(&string))
return MaybeHandle<JSPrimitiveWrapper>();
- value = Handle<JSPrimitiveWrapper>::cast(isolate_->factory()->NewJSObject(
- isolate_->string_function(), allocation_));
+ value = Handle<JSPrimitiveWrapper>::cast(
+ isolate_->factory()->NewJSObject(isolate_->string_function()));
value->set_value(*string);
break;
}
@@ -1801,13 +1792,12 @@ MaybeHandle<JSArrayBuffer> ValueDeserializer::ReadJSArrayBuffer(
byte_length > static_cast<size_t>(end_ - position_)) {
return MaybeHandle<JSArrayBuffer>();
}
- const bool should_initialize = false;
- Handle<JSArrayBuffer> array_buffer = isolate_->factory()->NewJSArrayBuffer(
- SharedFlag::kNotShared, allocation_);
- if (!JSArrayBuffer::SetupAllocatingData(array_buffer, isolate_, byte_length,
- should_initialize)) {
- return MaybeHandle<JSArrayBuffer>();
- }
+ MaybeHandle<JSArrayBuffer> result =
+ isolate_->factory()->NewJSArrayBufferAndBackingStore(
+ byte_length, InitializedFlag::kUninitialized);
+ Handle<JSArrayBuffer> array_buffer;
+ if (!result.ToHandle(&array_buffer)) return result;
+
if (byte_length > 0) {
memcpy(array_buffer->backing_store(), position_, byte_length);
}
@@ -1871,8 +1861,7 @@ MaybeHandle<JSArrayBufferView> ValueDeserializer::ReadJSArrayBufferView(
return MaybeHandle<JSArrayBufferView>();
}
Handle<JSTypedArray> typed_array = isolate_->factory()->NewJSTypedArray(
- external_array_type, buffer, byte_offset, byte_length / element_size,
- allocation_);
+ external_array_type, buffer, byte_offset, byte_length / element_size);
AddObjectWithID(id, typed_array);
return typed_array;
}
@@ -2049,9 +2038,6 @@ MaybeHandle<WasmMemoryObject> ValueDeserializer::ReadWasmMemory() {
Handle<WasmMemoryObject> result =
WasmMemoryObject::New(isolate_, buffer, maximum_pages);
- isolate_->wasm_engine()->memory_tracker()->RegisterWasmMemoryAsShared(
- result, isolate_);
-
AddObjectWithID(id, result);
return result;
}
@@ -2081,9 +2067,10 @@ static void CommitProperties(Handle<JSObject> object, Handle<Map> map,
DisallowHeapAllocation no_gc;
DescriptorArray descriptors = object->map().instance_descriptors();
- for (unsigned i = 0; i < properties.size(); i++) {
+ for (InternalIndex i : InternalIndex::Range(properties.size())) {
// Initializing store.
- object->WriteToField(i, descriptors.GetDetails(i), *properties[i]);
+ object->WriteToField(i, descriptors.GetDetails(i),
+ *properties[i.raw_value()]);
}
}
@@ -2150,7 +2137,7 @@ Maybe<uint32_t> ValueDeserializer::ReadJSObjectProperties(
// (though generalization may be required), store the property value so
// that we can copy them all at once. Otherwise, stop transitioning.
if (transitioning) {
- int descriptor = static_cast<int>(properties.size());
+ InternalIndex descriptor(properties.size());
PropertyDetails details =
target->instance_descriptors().GetDetails(descriptor);
Representation expected_representation = details.representation();
@@ -2316,8 +2303,8 @@ ValueDeserializer::ReadObjectUsingEntireBufferForLegacyFormat() {
size_t begin_properties =
stack.size() - 2 * static_cast<size_t>(num_properties);
- Handle<JSObject> js_object = isolate_->factory()->NewJSObject(
- isolate_->object_function(), allocation_);
+ Handle<JSObject> js_object =
+ isolate_->factory()->NewJSObject(isolate_->object_function());
if (num_properties &&
!SetPropertiesFromKeyValuePairs(
isolate_, js_object, &stack[begin_properties], num_properties)
@@ -2344,8 +2331,8 @@ ValueDeserializer::ReadObjectUsingEntireBufferForLegacyFormat() {
return MaybeHandle<Object>();
}
- Handle<JSArray> js_array = isolate_->factory()->NewJSArray(
- 0, TERMINAL_FAST_ELEMENTS_KIND, allocation_);
+ Handle<JSArray> js_array =
+ isolate_->factory()->NewJSArray(0, TERMINAL_FAST_ELEMENTS_KIND);
JSArray::SetLength(js_array, length);
size_t begin_properties =
stack.size() - 2 * static_cast<size_t>(num_properties);
diff --git a/deps/v8/src/objects/value-serializer.h b/deps/v8/src/objects/value-serializer.h
index cc9bc1caea..839636ceef 100644
--- a/deps/v8/src/objects/value-serializer.h
+++ b/deps/v8/src/objects/value-serializer.h
@@ -298,7 +298,6 @@ class ValueDeserializer {
v8::ValueDeserializer::Delegate* const delegate_;
const uint8_t* position_;
const uint8_t* const end_;
- AllocationType allocation_;
uint32_t version_ = 0;
uint32_t next_id_ = 0;
bool expect_inline_wasm_ = false;
diff --git a/deps/v8/src/parsing/expression-scope-reparenter.cc b/deps/v8/src/parsing/expression-scope-reparenter.cc
index 3f62616ebd..2f4914398f 100644
--- a/deps/v8/src/parsing/expression-scope-reparenter.cc
+++ b/deps/v8/src/parsing/expression-scope-reparenter.cc
@@ -54,7 +54,14 @@ void Reparenter::VisitClassLiteral(ClassLiteral* class_literal) {
#if DEBUG
// The same goes for the rest of the class, but we do some
// sanity checking in debug mode.
- for (ClassLiteralProperty* prop : *class_literal->properties()) {
+ for (ClassLiteralProperty* prop : *class_literal->private_members()) {
+ // No need to visit the values, since all values are functions with
+ // the class scope on their scope chain.
+ DCHECK(prop->value()->IsFunctionLiteral());
+ DCHECK_EQ(prop->value()->AsFunctionLiteral()->scope()->outer_scope(),
+ class_literal->scope());
+ }
+ for (ClassLiteralProperty* prop : *class_literal->public_members()) {
// No need to visit the values, since all values are functions with
// the class scope on their scope chain.
DCHECK(prop->value()->IsFunctionLiteral());
diff --git a/deps/v8/src/parsing/expression-scope.h b/deps/v8/src/parsing/expression-scope.h
index ba931d36da..709231ebb0 100644
--- a/deps/v8/src/parsing/expression-scope.h
+++ b/deps/v8/src/parsing/expression-scope.h
@@ -625,14 +625,8 @@ class AccumulationScope {
if (!scope->CanBeExpression()) return;
scope_ = scope->AsExpressionParsingScope();
for (int i = 0; i < kNumberOfErrors; i++) {
- // If the underlying scope is already invalid at the start, stop
- // accumulating. That means an error was found outside of an
- // accumulating path.
- if (!scope_->is_valid(i)) {
- scope_ = nullptr;
- break;
- }
copy(i);
+ scope_->clear(i);
}
}
diff --git a/deps/v8/src/parsing/parse-info.cc b/deps/v8/src/parsing/parse-info.cc
index e927c1a0d1..b0a455e88d 100644
--- a/deps/v8/src/parsing/parse-info.cc
+++ b/deps/v8/src/parsing/parse-info.cc
@@ -7,7 +7,6 @@
#include "src/ast/ast-source-ranges.h"
#include "src/ast/ast-value-factory.h"
#include "src/ast/ast.h"
-#include "src/base/template-utils.h"
#include "src/compiler-dispatcher/compiler-dispatcher.h"
#include "src/heap/heap-inl.h"
#include "src/logging/counters.h"
@@ -21,7 +20,7 @@ namespace v8 {
namespace internal {
ParseInfo::ParseInfo(AccountingAllocator* zone_allocator)
- : zone_(base::make_unique<Zone>(zone_allocator, ZONE_NAME)),
+ : zone_(std::make_unique<Zone>(zone_allocator, ZONE_NAME)),
flags_(0),
extension_(nullptr),
script_scope_(nullptr),
@@ -66,6 +65,7 @@ ParseInfo::ParseInfo(Isolate* isolate, AccountingAllocator* zone_allocator)
set_allow_harmony_optional_chaining(FLAG_harmony_optional_chaining);
set_allow_harmony_nullish(FLAG_harmony_nullish);
set_allow_harmony_private_methods(FLAG_harmony_private_methods);
+ set_allow_harmony_top_level_await(FLAG_harmony_top_level_await);
}
ParseInfo::ParseInfo(Isolate* isolate)
@@ -129,7 +129,7 @@ std::unique_ptr<ParseInfo> ParseInfo::FromParent(
const ParseInfo* outer_parse_info, AccountingAllocator* zone_allocator,
const FunctionLiteral* literal, const AstRawString* function_name) {
std::unique_ptr<ParseInfo> result =
- base::make_unique<ParseInfo>(zone_allocator);
+ std::make_unique<ParseInfo>(zone_allocator);
// Replicate shared state of the outer_parse_info.
result->flags_ = outer_parse_info->flags_;
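
Throughout this patch, V8's local polyfill base::make_unique is replaced by C++14's std::make_unique, which is also why several headers below gain #include <memory>. A minimal sketch of the call being migrated to:

    #include <memory>
    #include <string>

    struct Zone {
      explicit Zone(std::string name) : name(std::move(name)) {}
      std::string name;
    };

    int main() {
      // Arguments are forwarded to the constructor; allocation and construction
      // happen in one expression, so nothing leaks if construction throws.
      std::unique_ptr<Zone> zone = std::make_unique<Zone>("ZONE_NAME");
      return zone->name.empty() ? 1 : 0;
    }
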
diff --git a/deps/v8/src/parsing/parse-info.h b/deps/v8/src/parsing/parse-info.h
index 8afb124155..dde331b88a 100644
--- a/deps/v8/src/parsing/parse-info.h
+++ b/deps/v8/src/parsing/parse-info.h
@@ -110,6 +110,8 @@ class V8_EXPORT_PRIVATE ParseInfo {
set_collect_source_positions)
FLAG_ACCESSOR(kAllowHarmonyNullish, allow_harmony_nullish,
set_allow_harmony_nullish)
+ FLAG_ACCESSOR(kAllowHarmonyTopLevelAwait, allow_harmony_top_level_await,
+ set_allow_harmony_top_level_await)
#undef FLAG_ACCESSOR
@@ -319,6 +321,7 @@ class V8_EXPORT_PRIVATE ParseInfo {
kIsOneshotIIFE = 1 << 27,
kCollectSourcePositions = 1 << 28,
kAllowHarmonyNullish = 1 << 29,
+ kAllowHarmonyTopLevelAwait = 1 << 30,
};
//------------- Inputs to parsing and scope analysis -----------------------
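
The new kAllowHarmonyTopLevelAwait bit follows ParseInfo's FLAG_ACCESSOR idiom: one uint32 backing field plus an enum of single-bit masks, with a generated getter/setter pair per flag. A hand-expanded sketch of what the macro produces (the flag name is taken from the hunk above; the rest is assumed for illustration):

    #include <cstdint>

    enum ParseFlag : uint32_t { kAllowHarmonyTopLevelAwait = 1u << 30 };

    class ParseInfoSketch {
     public:
      bool allow_harmony_top_level_await() const {
        return (flags_ & kAllowHarmonyTopLevelAwait) != 0;
      }
      void set_allow_harmony_top_level_await(bool value) {
        flags_ = value ? (flags_ | kAllowHarmonyTopLevelAwait)
                       : (flags_ & ~kAllowHarmonyTopLevelAwait);
      }
     private:
      uint32_t flags_ = 0;
    };
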
diff --git a/deps/v8/src/parsing/parser-base.h b/deps/v8/src/parsing/parser-base.h
index 1b3bd64cdd..847774910a 100644
--- a/deps/v8/src/parsing/parser-base.h
+++ b/deps/v8/src/parsing/parser-base.h
@@ -267,6 +267,7 @@ class ParserBase {
allow_harmony_dynamic_import_(false),
allow_harmony_import_meta_(false),
allow_harmony_private_methods_(false),
+ allow_harmony_top_level_await_(false),
allow_eval_cache_(true) {
pointer_buffer_.reserve(32);
variable_buffer_.reserve(32);
@@ -280,6 +281,7 @@ class ParserBase {
ALLOW_ACCESSORS(harmony_dynamic_import)
ALLOW_ACCESSORS(harmony_import_meta)
ALLOW_ACCESSORS(harmony_private_methods)
+ ALLOW_ACCESSORS(harmony_top_level_await)
ALLOW_ACCESSORS(eval_cache)
#undef ALLOW_ACCESSORS
@@ -527,9 +529,9 @@ class ParserBase {
struct ClassInfo {
public:
explicit ClassInfo(ParserBase* parser)
- : variable(nullptr),
- extends(parser->impl()->NullExpression()),
- properties(parser->impl()->NewClassPropertyList(4)),
+ : extends(parser->impl()->NullExpression()),
+ public_members(parser->impl()->NewClassPropertyList(4)),
+ private_members(parser->impl()->NewClassPropertyList(4)),
static_fields(parser->impl()->NewClassPropertyList(4)),
instance_fields(parser->impl()->NewClassPropertyList(4)),
constructor(parser->impl()->NullExpression()),
@@ -540,12 +542,13 @@ class ParserBase {
has_instance_members(false),
requires_brand(false),
is_anonymous(false),
+ has_private_methods(false),
static_fields_scope(nullptr),
instance_members_scope(nullptr),
computed_field_count(0) {}
- Variable* variable;
ExpressionT extends;
- ClassPropertyListT properties;
+ ClassPropertyListT public_members;
+ ClassPropertyListT private_members;
ClassPropertyListT static_fields;
ClassPropertyListT instance_fields;
FunctionLiteralT constructor;
@@ -557,6 +560,7 @@ class ParserBase {
bool has_instance_members;
bool requires_brand;
bool is_anonymous;
+ bool has_private_methods;
DeclarationScope* static_fields_scope;
DeclarationScope* instance_members_scope;
int computed_field_count;
@@ -670,8 +674,8 @@ class ParserBase {
return new (zone()) DeclarationScope(zone(), parent, EVAL_SCOPE);
}
- ClassScope* NewClassScope(Scope* parent) const {
- return new (zone()) ClassScope(zone(), parent);
+ ClassScope* NewClassScope(Scope* parent, bool is_anonymous) const {
+ return new (zone()) ClassScope(zone(), parent, is_anonymous);
}
Scope* NewScope(ScopeType scope_type) const {
@@ -942,7 +946,10 @@ class ParserBase {
bool is_resumable() const {
return IsResumableFunction(function_state_->kind());
}
-
+ bool is_await_allowed() const {
+ return is_async_function() || (allow_harmony_top_level_await() &&
+ IsModule(function_state_->kind()));
+ }
const PendingCompilationErrorHandler* pending_error_handler() const {
return pending_error_handler_;
}
@@ -1456,6 +1463,7 @@ class ParserBase {
bool allow_harmony_dynamic_import_;
bool allow_harmony_import_meta_;
bool allow_harmony_private_methods_;
+ bool allow_harmony_top_level_await_;
bool allow_eval_cache_;
};
@@ -1582,16 +1590,17 @@ ParserBase<Impl>::ParsePropertyOrPrivatePropertyName() {
//
// Here, we check if this is a new private name reference in a top
// level function and throw an error if so.
- ClassScope* class_scope = scope()->GetClassScope();
+ PrivateNameScopeIterator private_name_scope_iter(scope());
// Parse the identifier so that we can display it in the error message
name = impl()->GetIdentifier();
- if (class_scope == nullptr) {
+ if (private_name_scope_iter.Done()) {
impl()->ReportMessageAt(Scanner::Location(pos, pos + 1),
MessageTemplate::kInvalidPrivateFieldResolution,
impl()->GetRawNameFromIdentifier(name));
return impl()->FailureExpression();
}
- key = impl()->ExpressionFromPrivateName(class_scope, name, pos);
+ key =
+ impl()->ExpressionFromPrivateName(&private_name_scope_iter, name, pos);
} else {
ReportUnexpectedToken(next);
return impl()->FailureExpression();
@@ -3062,7 +3071,7 @@ ParserBase<Impl>::ParseUnaryExpression() {
Token::Value op = peek();
if (Token::IsUnaryOrCountOp(op)) return ParseUnaryOrPrefixExpression();
- if (is_async_function() && op == Token::AWAIT) {
+ if (is_await_allowed() && op == Token::AWAIT) {
return ParseAwaitExpression();
}
return ParsePostfixExpression();
@@ -3577,7 +3586,19 @@ void ParserBase<Impl>::ParseFormalParameter(FormalParametersT* parameters) {
auto declaration_end = scope()->declarations()->end();
int initializer_end = end_position();
for (; declaration_it != declaration_end; ++declaration_it) {
- declaration_it->var()->set_initializer_position(initializer_end);
+ Variable* var = declaration_it->var();
+
+ // The first time a variable is initialized (i.e. when the initializer
+ // position is unset), clear its maybe_assigned flag as it is not a true
+ // assignment. Since this is done directly on the Variable objects, it has
+ // no effect on VariableProxy objects appearing on the left-hand side of
+ // true assignments, so x will still be marked as maybe_assigned for:
+ // (x = 1, y = (x = 2)) => {}
+ // and even:
+ // (x = (x = 2)) => {}.
+ if (var->initializer_position() == kNoSourcePosition)
+ var->clear_maybe_assigned();
+ var->set_initializer_position(initializer_end);
}
impl()->AddFormalParameter(parameters, pattern, initializer, end_position(),
@@ -4355,16 +4376,16 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseClassLiteral(
}
}
- ClassScope* class_scope = NewClassScope(scope());
+ ClassScope* class_scope = NewClassScope(scope(), is_anonymous);
BlockState block_state(&scope_, class_scope);
RaiseLanguageMode(LanguageMode::kStrict);
ClassInfo class_info(this);
class_info.is_anonymous = is_anonymous;
- impl()->DeclareClassVariable(name, &class_info, class_token_pos);
scope()->set_start_position(end_position());
if (Check(Token::EXTENDS)) {
+ ClassScope::HeritageParsingScope heritage(class_scope);
FuncNameInferrerState fni_state(&fni_);
ExpressionParsingScope scope(impl());
class_info.extends = ParseLeftHandSideExpression();
@@ -4399,7 +4420,9 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseClassLiteral(
if (V8_UNLIKELY(prop_info.is_private)) {
DCHECK(!is_constructor);
- class_info.requires_brand |= !is_field;
+ class_info.requires_brand |= (!is_field && !prop_info.is_static);
+ class_info.has_private_methods |=
+ property_kind == ClassLiteralProperty::METHOD;
impl()->DeclarePrivateClassMember(class_scope, prop_info.name, property,
property_kind, prop_info.is_static,
&class_info);
@@ -4438,7 +4461,20 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseClassLiteral(
}
if (class_info.requires_brand) {
- class_scope->DeclareBrandVariable(ast_value_factory(), kNoSourcePosition);
+ // TODO(joyee): implement static brand checking
+ class_scope->DeclareBrandVariable(
+ ast_value_factory(), IsStaticFlag::kNotStatic, kNoSourcePosition);
+ }
+
+ bool should_save_class_variable_index =
+ class_scope->should_save_class_variable_index();
+ if (!is_anonymous || should_save_class_variable_index) {
+ impl()->DeclareClassVariable(class_scope, name, &class_info,
+ class_token_pos);
+ if (should_save_class_variable_index) {
+ class_scope->class_variable()->set_is_used();
+ class_scope->class_variable()->ForceContextAllocation();
+ }
}
return impl()->RewriteClassLiteral(class_scope, name, &class_info,
@@ -4861,7 +4897,7 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseStatement(
case Token::WHILE:
return ParseWhileStatement(labels, own_labels);
case Token::FOR:
- if (V8_UNLIKELY(is_async_function() && PeekAhead() == Token::AWAIT)) {
+ if (V8_UNLIKELY(is_await_allowed() && PeekAhead() == Token::AWAIT)) {
return ParseForAwaitStatement(labels, own_labels);
}
return ParseForStatement(labels, own_labels);
@@ -5921,7 +5957,7 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseForAwaitStatement(
ZonePtrList<const AstRawString>* labels,
ZonePtrList<const AstRawString>* own_labels) {
// for await '(' ForDeclaration of AssignmentExpression ')'
- DCHECK(is_async_function());
+ DCHECK(is_await_allowed());
typename FunctionState::LoopScope loop_scope(function_state_);
int stmt_pos = peek_position();
diff --git a/deps/v8/src/parsing/parser.cc b/deps/v8/src/parsing/parser.cc
index 3a61253db5..edb9604bb5 100644
--- a/deps/v8/src/parsing/parser.cc
+++ b/deps/v8/src/parsing/parser.cc
@@ -427,6 +427,7 @@ Parser::Parser(ParseInfo* info)
set_allow_harmony_nullish(info->allow_harmony_nullish());
set_allow_harmony_optional_chaining(info->allow_harmony_optional_chaining());
set_allow_harmony_private_methods(info->allow_harmony_private_methods());
+ set_allow_harmony_top_level_await(info->allow_harmony_top_level_await());
for (int feature = 0; feature < v8::Isolate::kUseCounterFeatureCount;
++feature) {
use_counts_[feature] = 0;
@@ -576,8 +577,32 @@ FunctionLiteral* Parser::DoParseProgram(Isolate* isolate, ParseInfo* info) {
BuildInitialYield(kNoSourcePosition, kGeneratorFunction);
body.Add(
factory()->NewExpressionStatement(initial_yield, kNoSourcePosition));
-
- ParseModuleItemList(&body);
+ if (allow_harmony_top_level_await()) {
+ // First parse statements into a buffer. Then, if there was a
+ // top level await, create an inner block and rewrite the body of the
+ // module as an async function. Otherwise merge the statements back
+ // into the main body.
+ BlockT block = impl()->NullBlock();
+ {
+ StatementListT statements(pointer_buffer());
+ ParseModuleItemList(&statements);
+ // Modules will always have an initial yield. If there are any
+ // additional suspends, i.e. awaits, then we treat the module as an
+ // AsyncModule.
+ if (function_state.suspend_count() > 1) {
+ scope->set_is_async_module();
+ block = factory()->NewBlock(true, statements);
+ } else {
+ statements.MergeInto(&body);
+ }
+ }
+ if (IsAsyncModule(scope->function_kind())) {
+ impl()->RewriteAsyncFunctionBody(
+ &body, block, factory()->NewUndefinedLiteral(kNoSourcePosition));
+ }
+ } else {
+ ParseModuleItemList(&body);
+ }
if (!has_error() &&
!module()->Validate(this->scope()->AsModuleScope(),
pending_error_handler(), zone())) {
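
The module body is parsed into a scratch statement list first so the wrapping decision can be made afterwards: the initial yield always contributes one suspend, so suspend_count() > 1 means at least one genuine top-level await was encountered. A generic sketch of this buffer-then-place pattern, with std::vector standing in for V8's pointer-buffer-backed statement lists:

    #include <string>
    #include <utility>
    #include <vector>

    using Statement = std::string;

    // Parse into a scratch buffer; only afterwards decide whether the statements
    // become the body directly or get wrapped in an inner block for rewriting.
    void PlaceModuleBody(std::vector<Statement>* body, bool saw_top_level_await) {
      std::vector<Statement> scratch = {"stmt1", "stmt2"};  // stand-in for parsing
      if (saw_top_level_await) {
        Statement block = "Block{";
        for (const Statement& s : scratch) block += s + ";";
        block += "}";
        body->push_back(std::move(block));  // later rewritten as an async body
      } else {
        for (Statement& s : scratch) body->push_back(std::move(s));
      }
    }
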
@@ -705,8 +730,17 @@ FunctionLiteral* Parser::ParseFunction(Isolate* isolate, ParseInfo* info,
info->set_function_name(ast_value_factory()->GetString(name));
scanner_.Initialize();
- FunctionLiteral* result =
- DoParseFunction(isolate, info, info->function_name());
+ FunctionLiteral* result;
+ if (V8_UNLIKELY(shared_info->private_name_lookup_skips_outer_class() &&
+ original_scope_->is_class_scope())) {
+ // If the function skips the outer class and the outer scope is a class, the
+ // function is in heritage position. Otherwise the function scope's skip bit
+ // will be correctly inherited from the outer scope.
+ ClassScope::HeritageParsingScope heritage(original_scope_->AsClassScope());
+ result = DoParseFunction(isolate, info, info->function_name());
+ } else {
+ result = DoParseFunction(isolate, info, info->function_name());
+ }
MaybeResetCharacterStream(info, result);
MaybeProcessSourceRanges(info, result, stack_limit_);
if (result != nullptr) {
@@ -2484,10 +2518,10 @@ bool Parser::SkipFunction(const AstRawString* function_name, FunctionKind kind,
bookmark.Set(function_scope->start_position());
UnresolvedList::Iterator unresolved_private_tail;
- ClassScope* closest_class_scope = function_scope->GetClassScope();
- if (closest_class_scope != nullptr) {
+ PrivateNameScopeIterator private_name_scope_iter(function_scope);
+ if (!private_name_scope_iter.Done()) {
unresolved_private_tail =
- closest_class_scope->GetUnresolvedPrivateNameTail();
+ private_name_scope_iter.GetScope()->GetUnresolvedPrivateNameTail();
}
// With no cached data, we partially parse the function, without building an
@@ -2511,8 +2545,8 @@ bool Parser::SkipFunction(const AstRawString* function_name, FunctionKind kind,
// the state before preparsing. The caller may then fully parse the function
// to identify the actual error.
bookmark.Apply();
- if (closest_class_scope != nullptr) {
- closest_class_scope->ResetUnresolvedPrivateNameTail(
+ if (!private_name_scope_iter.Done()) {
+ private_name_scope_iter.GetScope()->ResetUnresolvedPrivateNameTail(
unresolved_private_tail);
}
function_scope->ResetAfterPreparsing(ast_value_factory_, true);
@@ -2533,8 +2567,8 @@ bool Parser::SkipFunction(const AstRawString* function_name, FunctionKind kind,
*num_parameters = logger->num_parameters();
*function_length = logger->function_length();
SkipFunctionLiterals(logger->num_inner_functions());
- if (closest_class_scope != nullptr) {
- closest_class_scope->MigrateUnresolvedPrivateNameTail(
+ if (!private_name_scope_iter.Done()) {
+ private_name_scope_iter.GetScope()->MigrateUnresolvedPrivateNameTail(
factory(), unresolved_private_tail);
}
function_scope->AnalyzePartially(this, factory(), MaybeParsingArrowhead());
@@ -2739,17 +2773,20 @@ void Parser::ParseFunction(
*suspend_count = function_state.suspend_count();
}
-void Parser::DeclareClassVariable(const AstRawString* name,
+void Parser::DeclareClassVariable(ClassScope* scope, const AstRawString* name,
ClassInfo* class_info, int class_token_pos) {
#ifdef DEBUG
- scope()->SetScopeName(name);
+ scope->SetScopeName(name);
#endif
- if (name != nullptr) {
- VariableProxy* proxy =
- DeclareBoundVariable(name, VariableMode::kConst, class_token_pos);
- class_info->variable = proxy->var();
- }
+ DCHECK_IMPLIES(name == nullptr, class_info->is_anonymous);
+ // Declare a special dot-named class variable for anonymous classes if we
+ // need to save it for static private method access.
+ Variable* class_variable =
+ scope->DeclareClassVariable(ast_value_factory(), name, class_token_pos);
+ Declaration* declaration = factory()->NewVariableDeclaration(class_token_pos);
+ scope->declarations()->Add(declaration);
+ declaration->set_var(class_variable);
}
// TODO(gsathya): Ideally, this should just bypass scope analysis and
@@ -2764,13 +2801,15 @@ Variable* Parser::CreateSyntheticContextVariable(const AstRawString* name) {
Variable* Parser::CreatePrivateNameVariable(ClassScope* scope,
VariableMode mode,
+ IsStaticFlag is_static_flag,
const AstRawString* name) {
DCHECK_NOT_NULL(name);
int begin = position();
int end = end_position();
bool was_added = false;
DCHECK(IsConstVariableMode(mode));
- Variable* var = scope->DeclarePrivateName(name, mode, &was_added);
+ Variable* var =
+ scope->DeclarePrivateName(name, mode, is_static_flag, &was_added);
if (!was_added) {
Scanner::Location loc(begin, end);
ReportMessageAt(loc, MessageTemplate::kVarRedeclaration, var->raw_name());
@@ -2796,7 +2835,7 @@ void Parser::DeclarePublicClassField(ClassScope* scope,
CreateSyntheticContextVariable(ClassFieldVariableName(
ast_value_factory(), class_info->computed_field_count));
property->set_computed_name_var(computed_name_var);
- class_info->properties->Add(property, zone());
+ class_info->public_members->Add(property, zone());
}
}
@@ -2816,15 +2855,17 @@ void Parser::DeclarePrivateClassMember(ClassScope* scope,
}
}
- Variable* private_name_var =
- CreatePrivateNameVariable(scope, GetVariableMode(kind), property_name);
+ Variable* private_name_var = CreatePrivateNameVariable(
+ scope, GetVariableMode(kind),
+ is_static ? IsStaticFlag::kStatic : IsStaticFlag::kNotStatic,
+ property_name);
int pos = property->value()->position();
if (pos == kNoSourcePosition) {
pos = property->key()->position();
}
private_name_var->set_initializer_position(pos);
property->set_private_name_var(private_name_var);
- class_info->properties->Add(property, zone());
+ class_info->private_members->Add(property, zone());
}
// This method declares a property of the given class. It updates the
@@ -2845,7 +2886,7 @@ void Parser::DeclarePublicClassMethod(const AstRawString* class_name,
return;
}
- class_info->properties->Add(property, zone());
+ class_info->public_members->Add(property, zone());
}
FunctionLiteral* Parser::CreateInitializerFunction(
@@ -2894,8 +2935,8 @@ Expression* Parser::RewriteClassLiteral(ClassScope* block_scope,
}
if (name != nullptr) {
- DCHECK_NOT_NULL(class_info->variable);
- class_info->variable->set_initializer_position(end_pos);
+ DCHECK_NOT_NULL(block_scope->class_variable());
+ block_scope->class_variable()->set_initializer_position(end_pos);
}
FunctionLiteral* static_fields_initializer = nullptr;
@@ -2916,11 +2957,12 @@ Expression* Parser::RewriteClassLiteral(ClassScope* block_scope,
}
ClassLiteral* class_literal = factory()->NewClassLiteral(
- block_scope, class_info->variable, class_info->extends,
- class_info->constructor, class_info->properties,
+ block_scope, class_info->extends, class_info->constructor,
+ class_info->public_members, class_info->private_members,
static_fields_initializer, instance_members_initializer_function, pos,
end_pos, class_info->has_name_static_property,
- class_info->has_static_computed_names, class_info->is_anonymous);
+ class_info->has_static_computed_names, class_info->is_anonymous,
+ class_info->has_private_methods);
AddFunctionForNameInference(class_info->constructor);
return class_literal;
@@ -3241,7 +3283,7 @@ void Parser::RewriteAsyncFunctionBody(ScopedPtrList<Statement>* body,
// })
// }
- block->statements()->Add(factory()->NewAsyncReturnStatement(
+ block->statements()->Add(factory()->NewSyntheticAsyncReturnStatement(
return_value, return_value->position()),
zone());
block = BuildRejectPromiseOnException(block);
diff --git a/deps/v8/src/parsing/parser.h b/deps/v8/src/parsing/parser.h
index 2bd555e881..6f570b8751 100644
--- a/deps/v8/src/parsing/parser.h
+++ b/deps/v8/src/parsing/parser.h
@@ -173,8 +173,9 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
parsing::ReportErrorsAndStatisticsMode stats_mode);
bool AllowsLazyParsingWithoutUnresolvedVariables() const {
- return scope()->AllowsLazyParsingWithoutUnresolvedVariables(
- original_scope_);
+ return !MaybeParsingArrowhead() &&
+ scope()->AllowsLazyParsingWithoutUnresolvedVariables(
+ original_scope_);
}
bool parse_lazily() const { return mode_ == PARSE_LAZILY; }
@@ -301,6 +302,7 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
ZonePtrList<const AstRawString>* names);
Variable* CreateSyntheticContextVariable(const AstRawString* synthetic_name);
Variable* CreatePrivateNameVariable(ClassScope* scope, VariableMode mode,
+ IsStaticFlag is_static_flag,
const AstRawString* name);
FunctionLiteral* CreateInitializerFunction(
const char* name, DeclarationScope* scope,
@@ -314,8 +316,8 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
Statement* DeclareClass(const AstRawString* variable_name, Expression* value,
ZonePtrList<const AstRawString>* names,
int class_token_pos, int end_pos);
- void DeclareClassVariable(const AstRawString* name, ClassInfo* class_info,
- int class_token_pos);
+ void DeclareClassVariable(ClassScope* scope, const AstRawString* name,
+ ClassInfo* class_info, int class_token_pos);
void DeclareClassBrandVariable(ClassScope* scope, ClassInfo* class_info,
int class_token_pos);
void DeclarePrivateClassMember(ClassScope* scope,
@@ -779,12 +781,12 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
Expression* ExpressionFromLiteral(Token::Value token, int pos);
- V8_INLINE VariableProxy* ExpressionFromPrivateName(ClassScope* class_scope,
- const AstRawString* name,
- int start_position) {
+ V8_INLINE VariableProxy* ExpressionFromPrivateName(
+ PrivateNameScopeIterator* private_name_scope, const AstRawString* name,
+ int start_position) {
VariableProxy* proxy = factory()->ast_node_factory()->NewVariableProxy(
name, NORMAL_VARIABLE, start_position);
- class_scope->AddUnresolvedPrivateName(proxy);
+ private_name_scope->AddUnresolvedPrivateName(proxy);
return proxy;
}
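
Call sites that previously fetched the single closest ClassScope now walk outward with PrivateNameScopeIterator, which can also hop over a class when the lookup starts in heritage position. A rough standalone sketch of the iterator shape, under assumed semantics (V8's real class lives in scopes.h):

    // Sketch only: the skip rule here is an assumption for illustration.
    struct Scope {
      Scope* outer = nullptr;
      bool is_class = false;
      bool skips_outer_class = false;  // set for scopes in heritage position
    };

    class PrivateNameScopeIter {
     public:
      explicit PrivateNameScopeIter(Scope* start) : current_(start) { Advance(); }
      bool Done() const { return current_ == nullptr; }
      Scope* GetScope() const { return current_; }
     private:
      // Walk outward to the next class scope that participates in
      // private-name resolution, honoring the "skip outer class" bit.
      void Advance() {
        while (current_ != nullptr && !current_->is_class) {
          Scope* next = current_->outer;
          if (current_->skips_outer_class && next != nullptr && next->is_class) {
            next = next->outer;  // heritage position: skip the enclosing class
          }
          current_ = next;
        }
      }
      Scope* current_;
    };
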
diff --git a/deps/v8/src/parsing/preparse-data-impl.h b/deps/v8/src/parsing/preparse-data-impl.h
index 0bc8027266..a993fdf93f 100644
--- a/deps/v8/src/parsing/preparse-data-impl.h
+++ b/deps/v8/src/parsing/preparse-data-impl.h
@@ -7,6 +7,8 @@
#include "src/parsing/preparse-data.h"
+#include <memory>
+
#include "src/common/assert-scope.h"
namespace v8 {
@@ -155,16 +157,18 @@ class BaseConsumedPreparseData : public ConsumedPreparseData {
int* function_length, int* num_inner_functions, bool* uses_super_property,
LanguageMode* language_mode) final;
- void RestoreScopeAllocationData(DeclarationScope* scope) final;
+ void RestoreScopeAllocationData(DeclarationScope* scope,
+ AstValueFactory* ast_value_factory) final;
#ifdef DEBUG
bool VerifyDataStart();
#endif
private:
- void RestoreDataForScope(Scope* scope);
+ void RestoreDataForScope(Scope* scope, AstValueFactory* ast_value_factory);
void RestoreDataForVariable(Variable* var);
- void RestoreDataForInnerScopes(Scope* scope);
+ void RestoreDataForInnerScopes(Scope* scope,
+ AstValueFactory* ast_value_factory);
std::unique_ptr<ByteData> scope_data_;
// When consuming the data, these indexes point to the data we're going to
diff --git a/deps/v8/src/parsing/preparse-data.cc b/deps/v8/src/parsing/preparse-data.cc
index 8743732ea2..460ae65a30 100644
--- a/deps/v8/src/parsing/preparse-data.cc
+++ b/deps/v8/src/parsing/preparse-data.cc
@@ -24,6 +24,10 @@ namespace {
using ScopeSloppyEvalCanExtendVarsField = BitField8<bool, 0, 1>;
using InnerScopeCallsEvalField =
ScopeSloppyEvalCanExtendVarsField::Next<bool, 1>;
+using NeedsPrivateNameContextChainRecalcField =
+ InnerScopeCallsEvalField::Next<bool, 1>;
+using ShouldSaveClassVariableIndexField =
+ NeedsPrivateNameContextChainRecalcField::Next<bool, 1>;
using VariableMaybeAssignedField = BitField8<bool, 0, 1>;
using VariableContextAllocatedField = VariableMaybeAssignedField::Next<bool, 1>;
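
Two more booleans are packed into the same byte via the BitField Next<> chaining idiom, which allocates each field immediately after the previous one. A self-contained sketch that mirrors (but does not reuse) V8's BitField8:

    #include <cstdint>

    template <typename T, int kShift, int kSize>
    struct BitField8 {
      static constexpr uint8_t kMask =
          static_cast<uint8_t>(((1u << kSize) - 1u) << kShift);
      static constexpr uint8_t encode(T value) {
        return static_cast<uint8_t>(static_cast<unsigned>(value) << kShift);
      }
      static constexpr T decode(uint8_t packed) {
        return static_cast<T>((packed & kMask) >> kShift);
      }
      // The next field starts where this one ends.
      template <typename T2, int kSize2>
      using Next = BitField8<T2, kShift + kSize, kSize2>;
    };

    using SloppyEvalField = BitField8<bool, 0, 1>;
    using InnerCallsEvalField = SloppyEvalField::Next<bool, 1>;
    using NeedsRecalcField = InnerCallsEvalField::Next<bool, 1>;
    using SavesClassVarField = NeedsRecalcField::Next<bool, 1>;

    static_assert(SavesClassVarField::kMask == 0x08, "fourth bit of the byte");

    int main() {
      uint8_t packed = InnerCallsEvalField::encode(true) |
                       SavesClassVarField::encode(true);
      return NeedsRecalcField::decode(packed) ? 1 : 0;  // false -> returns 0
    }
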
@@ -322,7 +326,7 @@ void PreparseDataBuilder::SaveScopeAllocationData(DeclarationScope* scope,
if (SaveDataForSkippableFunction(builder)) num_inner_with_data_++;
}
- // Don't save imcoplete scope information when bailed out.
+ // Don't save incomplete scope information when bailed out.
if (!bailed_out_) {
#ifdef DEBUG
// function data items, kSkippableMinFunctionDataSize each.
@@ -352,13 +356,20 @@ void PreparseDataBuilder::SaveDataForScope(Scope* scope) {
byte_data_.WriteUint8(scope->scope_type());
#endif
- uint8_t eval =
+ uint8_t eval_and_private_recalc =
ScopeSloppyEvalCanExtendVarsField::encode(
scope->is_declaration_scope() &&
scope->AsDeclarationScope()->sloppy_eval_can_extend_vars()) |
- InnerScopeCallsEvalField::encode(scope->inner_scope_calls_eval());
+ InnerScopeCallsEvalField::encode(scope->inner_scope_calls_eval()) |
+ NeedsPrivateNameContextChainRecalcField::encode(
+ scope->is_function_scope() &&
+ scope->AsDeclarationScope()
+ ->needs_private_name_context_chain_recalc()) |
+ ShouldSaveClassVariableIndexField::encode(
+ scope->is_class_scope() &&
+ scope->AsClassScope()->should_save_class_variable_index());
byte_data_.Reserve(kUint8Size);
- byte_data_.WriteUint8(eval);
+ byte_data_.WriteUint8(eval_and_private_recalc);
if (scope->is_function_scope()) {
Variable* function = scope->AsDeclarationScope()->function_var();
@@ -562,7 +573,7 @@ BaseConsumedPreparseData<Data>::GetDataForSkippableFunction(
template <class Data>
void BaseConsumedPreparseData<Data>::RestoreScopeAllocationData(
- DeclarationScope* scope) {
+ DeclarationScope* scope, AstValueFactory* ast_value_factory) {
DCHECK_EQ(scope->scope_type(), ScopeType::FUNCTION_SCOPE);
typename ByteData::ReadingScope reading_scope(this);
@@ -577,14 +588,15 @@ void BaseConsumedPreparseData<Data>::RestoreScopeAllocationData(
DCHECK_EQ(end_position_from_data, scope->end_position());
#endif
- RestoreDataForScope(scope);
+ RestoreDataForScope(scope, ast_value_factory);
// Check that we consumed all scope data.
DCHECK_EQ(scope_data_->RemainingBytes(), 0);
}
template <typename Data>
-void BaseConsumedPreparseData<Data>::RestoreDataForScope(Scope* scope) {
+void BaseConsumedPreparseData<Data>::RestoreDataForScope(
+ Scope* scope, AstValueFactory* ast_value_factory) {
if (scope->is_declaration_scope() &&
scope->AsDeclarationScope()->is_skipped_function()) {
return;
@@ -599,20 +611,48 @@ void BaseConsumedPreparseData<Data>::RestoreDataForScope(Scope* scope) {
DCHECK_EQ(scope_data_->ReadUint8(), scope->scope_type());
CHECK(scope_data_->HasRemainingBytes(ByteData::kUint8Size));
- uint32_t eval = scope_data_->ReadUint8();
- if (ScopeSloppyEvalCanExtendVarsField::decode(eval)) scope->RecordEvalCall();
- if (InnerScopeCallsEvalField::decode(eval)) scope->RecordInnerScopeEvalCall();
+ uint32_t scope_data_flags = scope_data_->ReadUint8();
+ if (ScopeSloppyEvalCanExtendVarsField::decode(scope_data_flags)) {
+ scope->RecordEvalCall();
+ }
+ if (InnerScopeCallsEvalField::decode(scope_data_flags)) {
+ scope->RecordInnerScopeEvalCall();
+ }
+ if (NeedsPrivateNameContextChainRecalcField::decode(scope_data_flags)) {
+ scope->AsDeclarationScope()->RecordNeedsPrivateNameContextChainRecalc();
+ }
+ if (ShouldSaveClassVariableIndexField::decode(scope_data_flags)) {
+ Variable* var;
+ // An anonymous class whose class variable needs to be saved does not
+ // have the class variable created during reparse, since we skip parsing
+ // the inner scopes that contain potential accesses to static private
+ // methods. So create it now.
+ if (scope->AsClassScope()->is_anonymous_class()) {
+ var = scope->AsClassScope()->DeclareClassVariable(
+ ast_value_factory, nullptr, kNoSourcePosition);
+ AstNodeFactory factory(ast_value_factory, ast_value_factory->zone());
+ Declaration* declaration =
+ factory.NewVariableDeclaration(kNoSourcePosition);
+ scope->declarations()->Add(declaration);
+ declaration->set_var(var);
+ } else {
+ var = scope->AsClassScope()->class_variable();
+ DCHECK_NOT_NULL(var);
+ }
+ var->set_is_used();
+ var->ForceContextAllocation();
+ scope->AsClassScope()->set_should_save_class_variable_index();
+ }
if (scope->is_function_scope()) {
Variable* function = scope->AsDeclarationScope()->function_var();
if (function != nullptr) RestoreDataForVariable(function);
}
-
for (Variable* var : *scope->locals()) {
if (IsSerializableVariableMode(var->mode())) RestoreDataForVariable(var);
}
- RestoreDataForInnerScopes(scope);
+ RestoreDataForInnerScopes(scope, ast_value_factory);
}
template <typename Data>
@@ -651,10 +691,11 @@ void BaseConsumedPreparseData<Data>::RestoreDataForVariable(Variable* var) {
}
template <typename Data>
-void BaseConsumedPreparseData<Data>::RestoreDataForInnerScopes(Scope* scope) {
+void BaseConsumedPreparseData<Data>::RestoreDataForInnerScopes(
+ Scope* scope, AstValueFactory* ast_value_factory) {
for (Scope* inner = scope->inner_scope(); inner != nullptr;
inner = inner->sibling()) {
- RestoreDataForScope(inner);
+ RestoreDataForScope(inner, ast_value_factory);
}
}
@@ -731,13 +772,13 @@ ProducedPreparseData* ZoneConsumedPreparseData::GetChildData(Zone* zone,
std::unique_ptr<ConsumedPreparseData> ConsumedPreparseData::For(
Isolate* isolate, Handle<PreparseData> data) {
DCHECK(!data.is_null());
- return base::make_unique<OnHeapConsumedPreparseData>(isolate, data);
+ return std::make_unique<OnHeapConsumedPreparseData>(isolate, data);
}
std::unique_ptr<ConsumedPreparseData> ConsumedPreparseData::For(
Zone* zone, ZonePreparseData* data) {
if (data == nullptr) return {};
- return base::make_unique<ZoneConsumedPreparseData>(zone, data);
+ return std::make_unique<ZoneConsumedPreparseData>(zone, data);
}
} // namespace internal
diff --git a/deps/v8/src/parsing/preparse-data.h b/deps/v8/src/parsing/preparse-data.h
index 613f13bc82..581adfa1d5 100644
--- a/deps/v8/src/parsing/preparse-data.h
+++ b/deps/v8/src/parsing/preparse-data.h
@@ -5,6 +5,8 @@
#ifndef V8_PARSING_PREPARSE_DATA_H_
#define V8_PARSING_PREPARSE_DATA_H_
+#include <memory>
+
#include "src/common/globals.h"
#include "src/handles/handles.h"
#include "src/handles/maybe-handles.h"
@@ -22,6 +24,7 @@ class Parser;
class PreParser;
class PreparseData;
class ZonePreparseData;
+class AstValueFactory;
/*
@@ -286,7 +289,8 @@ class ConsumedPreparseData {
// Restores the information needed for allocating the Scope's (and its
// subscopes') variables.
- virtual void RestoreScopeAllocationData(DeclarationScope* scope) = 0;
+ virtual void RestoreScopeAllocationData(
+ DeclarationScope* scope, AstValueFactory* ast_value_factory) = 0;
protected:
ConsumedPreparseData() = default;
diff --git a/deps/v8/src/parsing/preparser.h b/deps/v8/src/parsing/preparser.h
index b4d66d726f..adc3d09cac 100644
--- a/deps/v8/src/parsing/preparser.h
+++ b/deps/v8/src/parsing/preparser.h
@@ -1108,9 +1108,10 @@ class PreParser : public ParserBase<PreParser> {
Variable* DeclarePrivateVariableName(const AstRawString* name,
ClassScope* scope, VariableMode mode,
+ IsStaticFlag is_static_flag,
bool* was_added) {
DCHECK(IsConstVariableMode(mode));
- return scope->DeclarePrivateName(name, mode, was_added);
+ return scope->DeclarePrivateName(name, mode, is_static_flag, was_added);
}
Variable* DeclareVariableName(const AstRawString* name, VariableMode mode,
@@ -1226,14 +1227,15 @@ class PreParser : public ParserBase<PreParser> {
&was_added);
return PreParserStatement::Default();
}
- V8_INLINE void DeclareClassVariable(const PreParserIdentifier& name,
+ V8_INLINE void DeclareClassVariable(ClassScope* scope,
+ const PreParserIdentifier& name,
ClassInfo* class_info,
int class_token_pos) {
- if (!IsNull(name)) {
- bool was_added;
- DeclareVariableName(name.string_, VariableMode::kConst, scope(),
- &was_added);
- }
+ DCHECK_IMPLIES(IsNull(name), class_info->is_anonymous);
+ // Declare a special dot-named class variable for anonymous classes if we
+ // need to save it for static private method access.
+ scope->DeclareClassVariable(ast_value_factory(), name.string_,
+ class_token_pos);
}
V8_INLINE void DeclarePublicClassMethod(const PreParserIdentifier& class_name,
const PreParserExpression& property,
@@ -1258,8 +1260,10 @@ class PreParser : public ParserBase<PreParser> {
bool is_static, ClassInfo* class_info) {
bool was_added;
- DeclarePrivateVariableName(property_name.string_, scope,
- GetVariableMode(kind), &was_added);
+ DeclarePrivateVariableName(
+ property_name.string_, scope, GetVariableMode(kind),
+ is_static ? IsStaticFlag::kStatic : IsStaticFlag::kNotStatic,
+ &was_added);
if (!was_added) {
Scanner::Location loc(property.position(), property.position() + 1);
ReportMessageAt(loc, MessageTemplate::kVarRedeclaration,
@@ -1591,12 +1595,12 @@ class PreParser : public ParserBase<PreParser> {
return PreParserExpression::StringLiteral();
}
- PreParserExpression ExpressionFromPrivateName(ClassScope* class_scope,
- const PreParserIdentifier& name,
- int start_position) {
+ PreParserExpression ExpressionFromPrivateName(
+ PrivateNameScopeIterator* private_name_scope,
+ const PreParserIdentifier& name, int start_position) {
VariableProxy* proxy = factory()->ast_node_factory()->NewVariableProxy(
name.string_, NORMAL_VARIABLE, start_position);
- class_scope->AddUnresolvedPrivateName(proxy);
+ private_name_scope->AddUnresolvedPrivateName(proxy);
return PreParserExpression::FromIdentifier(name);
}
@@ -1636,11 +1640,11 @@ class PreParser : public ParserBase<PreParser> {
return PreParserStatement::Jump();
}
- V8_INLINE void AddFormalParameter(
- PreParserFormalParameters* parameters,
- PreParserExpression& pattern, // NOLINT(runtime/references)
- const PreParserExpression& initializer, int initializer_end_position,
- bool is_rest) {
+ V8_INLINE void AddFormalParameter(PreParserFormalParameters* parameters,
+ const PreParserExpression& pattern,
+ const PreParserExpression& initializer,
+ int initializer_end_position,
+ bool is_rest) {
DeclarationScope* scope = parameters->scope;
scope->RecordParameter(is_rest);
parameters->UpdateArityAndFunctionLength(!initializer.IsNull(), is_rest);
diff --git a/deps/v8/src/parsing/scanner-character-streams.cc b/deps/v8/src/parsing/scanner-character-streams.cc
index 0cd295fd29..49c7e1f793 100644
--- a/deps/v8/src/parsing/scanner-character-streams.cc
+++ b/deps/v8/src/parsing/scanner-character-streams.cc
@@ -265,7 +265,7 @@ class BufferedCharacterStream : public Utf16CharacterStream {
}
size_t length = Min(kBufferSize, range.length());
- i::CopyCharsUnsigned(buffer_, range.start, length);
+ i::CopyChars(buffer_, range.start, length);
buffer_end_ = &buffer_[length];
return true;
}
diff --git a/deps/v8/src/parsing/scanner-character-streams.h b/deps/v8/src/parsing/scanner-character-streams.h
index 4b85567480..c4c7064013 100644
--- a/deps/v8/src/parsing/scanner-character-streams.h
+++ b/deps/v8/src/parsing/scanner-character-streams.h
@@ -5,6 +5,8 @@
#ifndef V8_PARSING_SCANNER_CHARACTER_STREAMS_H_
#define V8_PARSING_SCANNER_CHARACTER_STREAMS_H_
+#include <memory>
+
#include "include/v8.h" // for v8::ScriptCompiler
#include "src/common/globals.h"
diff --git a/deps/v8/src/parsing/scanner.h b/deps/v8/src/parsing/scanner.h
index c40d8f4ba3..d9216f222a 100644
--- a/deps/v8/src/parsing/scanner.h
+++ b/deps/v8/src/parsing/scanner.h
@@ -8,6 +8,7 @@
#define V8_PARSING_SCANNER_H_
#include <algorithm>
+#include <memory>
#include "src/base/logging.h"
#include "src/common/globals.h"
@@ -443,7 +444,8 @@ class V8_EXPORT_PRIVATE Scanner {
#ifdef DEBUG
bool CanAccessLiteral() const {
return token == Token::PRIVATE_NAME || token == Token::ILLEGAL ||
- token == Token::UNINITIALIZED || token == Token::REGEXP_LITERAL ||
+ token == Token::ESCAPED_KEYWORD || token == Token::UNINITIALIZED ||
+ token == Token::REGEXP_LITERAL ||
IsInRange(token, Token::NUMBER, Token::STRING) ||
Token::IsAnyIdentifier(token) || Token::IsKeyword(token) ||
IsInRange(token, Token::TEMPLATE_SPAN, Token::TEMPLATE_TAIL);
@@ -585,15 +587,18 @@ class V8_EXPORT_PRIVATE Scanner {
// token as a one-byte literal. E.g. Token::FUNCTION pretends to have a
// literal "function".
Vector<const uint8_t> literal_one_byte_string() const {
- DCHECK(current().CanAccessLiteral() || Token::IsKeyword(current().token));
+ DCHECK(current().CanAccessLiteral() || Token::IsKeyword(current().token) ||
+ current().token == Token::ESCAPED_KEYWORD);
return current().literal_chars.one_byte_literal();
}
Vector<const uint16_t> literal_two_byte_string() const {
- DCHECK(current().CanAccessLiteral() || Token::IsKeyword(current().token));
+ DCHECK(current().CanAccessLiteral() || Token::IsKeyword(current().token) ||
+ current().token == Token::ESCAPED_KEYWORD);
return current().literal_chars.two_byte_literal();
}
bool is_literal_one_byte() const {
- DCHECK(current().CanAccessLiteral() || Token::IsKeyword(current().token));
+ DCHECK(current().CanAccessLiteral() || Token::IsKeyword(current().token) ||
+ current().token == Token::ESCAPED_KEYWORD);
return current().literal_chars.is_one_byte();
}
// Returns the literal string for the next token (the token that
diff --git a/deps/v8/src/parsing/token.cc b/deps/v8/src/parsing/token.cc
index 4dbae2d3f9..ec4b623775 100644
--- a/deps/v8/src/parsing/token.cc
+++ b/deps/v8/src/parsing/token.cc
@@ -34,7 +34,8 @@ const int8_t Token::precedence_[2][NUM_TOKENS] = {{TOKEN_LIST(T1, T1)},
#undef T2
#undef T1
-#define KT(a, b, c) IsPropertyNameBits::encode(Token::IsAnyIdentifier(a)),
+#define KT(a, b, c) \
+ IsPropertyNameBits::encode(Token::IsAnyIdentifier(a) || a == ESCAPED_KEYWORD),
#define KK(a, b, c) \
IsKeywordBits::encode(true) | IsPropertyNameBits::encode(true),
const uint8_t Token::token_flags[] = {TOKEN_LIST(KT, KK)};
diff --git a/deps/v8/src/profiler/heap-snapshot-generator.cc b/deps/v8/src/profiler/heap-snapshot-generator.cc
index 75b6aa7b77..42e7220702 100644
--- a/deps/v8/src/profiler/heap-snapshot-generator.cc
+++ b/deps/v8/src/profiler/heap-snapshot-generator.cc
@@ -1306,8 +1306,7 @@ void V8HeapExplorer::ExtractPropertyReferences(JSObject js_obj,
Isolate* isolate = js_obj.GetIsolate();
if (js_obj.HasFastProperties()) {
DescriptorArray descs = js_obj.map().instance_descriptors();
- int real_size = js_obj.map().NumberOfOwnDescriptors();
- for (int i = 0; i < real_size; i++) {
+ for (InternalIndex i : js_obj.map().IterateOwnDescriptors()) {
PropertyDetails details = descs.GetDetails(i);
switch (details.location()) {
case kField: {
diff --git a/deps/v8/src/profiler/heap-snapshot-generator.h b/deps/v8/src/profiler/heap-snapshot-generator.h
index 360ed1f009..e6c72ffcf9 100644
--- a/deps/v8/src/profiler/heap-snapshot-generator.h
+++ b/deps/v8/src/profiler/heap-snapshot-generator.h
@@ -6,6 +6,7 @@
#define V8_PROFILER_HEAP_SNAPSHOT_GENERATOR_H_
#include <deque>
+#include <memory>
#include <unordered_map>
#include <unordered_set>
#include <vector>
diff --git a/deps/v8/src/profiler/profile-generator-inl.h b/deps/v8/src/profiler/profile-generator-inl.h
index bb5ef0da5b..e3dc193db2 100644
--- a/deps/v8/src/profiler/profile-generator-inl.h
+++ b/deps/v8/src/profiler/profile-generator-inl.h
@@ -7,6 +7,8 @@
#include "src/profiler/profile-generator.h"
+#include <memory>
+
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/profiler/profile-generator.cc b/deps/v8/src/profiler/profile-generator.cc
index f5f7184613..c8fe890b58 100644
--- a/deps/v8/src/profiler/profile-generator.cc
+++ b/deps/v8/src/profiler/profile-generator.cc
@@ -517,7 +517,7 @@ CpuProfile::CpuProfile(CpuProfiler* profiler, const char* title,
DisallowHeapAllocation no_gc;
i::Address raw_filter_context =
reinterpret_cast<i::Address>(options_.raw_filter_context());
- context_filter_ = base::make_unique<ContextFilter>(raw_filter_context);
+ context_filter_ = std::make_unique<ContextFilter>(raw_filter_context);
}
}
diff --git a/deps/v8/src/profiler/profiler-listener.cc b/deps/v8/src/profiler/profiler-listener.cc
index b00c1f5cfd..13641bfd41 100644
--- a/deps/v8/src/profiler/profiler-listener.cc
+++ b/deps/v8/src/profiler/profiler-listener.cc
@@ -165,11 +165,10 @@ void ProfilerListener::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
SourcePosition(pos_info.shared->StartPosition()),
pos_info.shared);
- std::unique_ptr<CodeEntry> inline_entry =
- base::make_unique<CodeEntry>(
- tag, GetFunctionName(*pos_info.shared), resource_name,
- start_pos_info.line + 1, start_pos_info.column + 1, nullptr,
- code.InstructionStart(), inline_is_shared_cross_origin);
+ std::unique_ptr<CodeEntry> inline_entry = std::make_unique<CodeEntry>(
+ tag, GetFunctionName(*pos_info.shared), resource_name,
+ start_pos_info.line + 1, start_pos_info.column + 1, nullptr,
+ code.InstructionStart(), inline_is_shared_cross_origin);
inline_entry->FillFunctionInfo(*pos_info.shared);
// Create a canonical CodeEntry for each inlined frame and then re-use
diff --git a/deps/v8/src/profiler/sampling-heap-profiler.cc b/deps/v8/src/profiler/sampling-heap-profiler.cc
index de19d39eba..f5aa1dc3a0 100644
--- a/deps/v8/src/profiler/sampling-heap-profiler.cc
+++ b/deps/v8/src/profiler/sampling-heap-profiler.cc
@@ -9,7 +9,6 @@
#include "src/api/api-inl.h"
#include "src/base/ieee754.h"
-#include "src/base/template-utils.h"
#include "src/base/utils/random-number-generator.h"
#include "src/execution/frames-inl.h"
#include "src/execution/isolate.h"
@@ -89,7 +88,7 @@ void SamplingHeapProfiler::SampleObject(Address soon_object, size_t size) {
AllocationNode* node = AddStack();
node->allocations_[size]++;
auto sample =
- base::make_unique<Sample>(size, node, loc, this, next_sample_id());
+ std::make_unique<Sample>(size, node, loc, this, next_sample_id());
sample->global.SetWeak(sample.get(), OnWeakCallback,
WeakCallbackType::kParameter);
samples_.emplace(sample.get(), std::move(sample));
@@ -126,7 +125,7 @@ SamplingHeapProfiler::AllocationNode* SamplingHeapProfiler::FindOrAddChildNode(
DCHECK_EQ(strcmp(child->name_, name), 0);
return child;
}
- auto new_child = base::make_unique<AllocationNode>(
+ auto new_child = std::make_unique<AllocationNode>(
parent, name, script_id, start_position, next_node_id());
return parent->AddChildNode(id, std::move(new_child));
}
diff --git a/deps/v8/src/profiler/tracing-cpu-profiler.h b/deps/v8/src/profiler/tracing-cpu-profiler.h
index d5888f54a3..7a8fabe958 100644
--- a/deps/v8/src/profiler/tracing-cpu-profiler.h
+++ b/deps/v8/src/profiler/tracing-cpu-profiler.h
@@ -5,6 +5,8 @@
#ifndef V8_PROFILER_TRACING_CPU_PROFILER_H_
#define V8_PROFILER_TRACING_CPU_PROFILER_H_
+#include <memory>
+
#include "include/v8-platform.h"
#include "src/base/atomic-utils.h"
#include "src/base/macros.h"
diff --git a/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc b/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc
index 9e00063487..62a055e2a2 100644
--- a/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc
+++ b/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc
@@ -170,8 +170,11 @@ void RegExpMacroAssemblerARM64::AdvanceRegister(int reg, int by) {
}
case CACHED_MSW: {
Register to_advance = GetCachedRegister(reg);
- __ Add(to_advance, to_advance,
- static_cast<int64_t>(by) << kWRegSizeInBits);
+ // Sign-extend to int64, shift as uint64, cast back to int64.
+ __ Add(
+ to_advance, to_advance,
+ static_cast<int64_t>(static_cast<uint64_t>(static_cast<int64_t>(by))
+ << kWRegSizeInBits));
break;
}
default:
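
The rewritten Add avoids left-shifting a negative signed value, which is undefined behavior before C++20; the value is routed through uint64_t, where shifts are always well defined, and cast back afterwards. A standalone illustration of the pattern, assuming kWRegSizeInBits is 32 (the width of an ARM64 W register):

    #include <cstdint>

    // Pre-C++20, `negative_int64 << n` is UB; unsigned shifts never are, and
    // the final cast back reinterprets the (two's complement) bit pattern.
    int64_t ShiftIntoHighWord(int by) {
      constexpr unsigned kWRegSizeInBits = 32;  // assumed here for illustration
      return static_cast<int64_t>(
          static_cast<uint64_t>(static_cast<int64_t>(by)) << kWRegSizeInBits);
    }

    // ShiftIntoHighWord(-1) yields 0xFFFFFFFF00000000 as an int64_t.
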
diff --git a/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc b/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc
index 13b5c85605..8babb204dd 100644
--- a/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc
+++ b/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc
@@ -37,7 +37,10 @@ namespace internal {
* The remaining registers are free for computations.
* Each call to a public method should retain this convention.
*
- * The stack will have the following structure:
+ * The stack will have the following structure:
+ * - fp[44]  Address regexp  (address of the JSRegExp object; unused in
+ *                           native code, passed to match the signature of
+ *                           the interpreter)
* - fp[40] Isolate* isolate (address of the current isolate)
* - fp[36] lr save area (currently unused)
* - fp[32] backchain (currently unused)
@@ -83,7 +86,8 @@ namespace internal {
* int num_capture_registers,
* byte* stack_area_base,
* bool direct_call = false,
- * Isolate* isolate);
+ * Isolate* isolate,
+ * Address regexp);
* The call is performed by NativeRegExpMacroAssembler::Execute()
* (in regexp-macro-assembler.cc) via the GeneratedCode wrapper.
*/
diff --git a/deps/v8/src/regexp/regexp-bytecode-generator.cc b/deps/v8/src/regexp/regexp-bytecode-generator.cc
index 85b144438e..0dcc288d3c 100644
--- a/deps/v8/src/regexp/regexp-bytecode-generator.cc
+++ b/deps/v8/src/regexp/regexp-bytecode-generator.cc
@@ -7,6 +7,7 @@
#include "src/ast/ast.h"
#include "src/objects/objects-inl.h"
#include "src/regexp/regexp-bytecode-generator-inl.h"
+#include "src/regexp/regexp-bytecode-peephole.h"
#include "src/regexp/regexp-bytecodes.h"
#include "src/regexp/regexp-macro-assembler.h"
@@ -18,6 +19,7 @@ RegExpBytecodeGenerator::RegExpBytecodeGenerator(Isolate* isolate, Zone* zone)
buffer_(Vector<byte>::New(1024)),
pc_(0),
advance_current_end_(kInvalidPC),
+ jump_edges_(zone),
isolate_(isolate) {}
RegExpBytecodeGenerator::~RegExpBytecodeGenerator() {
@@ -39,6 +41,7 @@ void RegExpBytecodeGenerator::Bind(Label* l) {
int fixup = pos;
pos = *reinterpret_cast<int32_t*>(buffer_.begin() + fixup);
*reinterpret_cast<uint32_t*>(buffer_.begin() + fixup) = pc_;
+ jump_edges_.emplace(fixup, pc_);
}
}
l->bind_to(pc_);
@@ -46,16 +49,17 @@ void RegExpBytecodeGenerator::Bind(Label* l) {
void RegExpBytecodeGenerator::EmitOrLink(Label* l) {
if (l == nullptr) l = &backtrack_;
+ int pos = 0;
if (l->is_bound()) {
- Emit32(l->pos());
+ pos = l->pos();
+ jump_edges_.emplace(pc_, pos);
} else {
- int pos = 0;
if (l->is_linked()) {
pos = l->pos();
}
l->link_to(pc_);
- Emit32(pos);
}
+ Emit32(pos);
}
void RegExpBytecodeGenerator::PopRegister(int register_index) {
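
Bind and EmitOrLink implement the classic one-pass label scheme: while a label is unbound, its patch sites form a linked list threaded through the emitted stream itself, and Bind walks that list to write the real target. The change above additionally records every resolved source/destination pair in jump_edges_ for the peephole pass. A compact sketch of the threading idea (a simplification, not V8's exact layout):

    #include <cstdint>
    #include <map>
    #include <vector>

    struct Label {
      int head = -1;  // offset of the most recent patch site, -1 if none
      bool bound = false;
      int target = 0;
    };

    struct Emitter {
      std::vector<int32_t> code;
      std::map<int, int> jump_edges;  // patch-site offset -> destination

      void EmitOrLink(Label* l) {
        int slot = static_cast<int>(code.size());
        if (l->bound) {
          code.push_back(l->target);
          jump_edges.emplace(slot, l->target);
        } else {
          code.push_back(l->head);  // thread the previous patch site through
          l->head = slot;
        }
      }

      void Bind(Label* l) {
        l->target = static_cast<int>(code.size());
        for (int site = l->head; site != -1;) {
          int next = code[site];   // follow the threaded list...
          code[site] = l->target;  // ...and backpatch the real destination
          jump_edges.emplace(site, l->target);
          site = next;
        }
        l->bound = true;
        l->head = -1;
      }
    };
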
@@ -365,8 +369,16 @@ void RegExpBytecodeGenerator::IfRegisterEqPos(int register_index,
Handle<HeapObject> RegExpBytecodeGenerator::GetCode(Handle<String> source) {
Bind(&backtrack_);
Emit(BC_POP_BT, 0);
- Handle<ByteArray> array = isolate_->factory()->NewByteArray(length());
- Copy(array->GetDataStartAddress());
+
+ Handle<ByteArray> array;
+ if (FLAG_regexp_peephole_optimization) {
+ array = RegExpBytecodePeepholeOptimization::OptimizeBytecode(
+ isolate_, zone(), source, buffer_.begin(), length(), jump_edges_);
+ } else {
+ array = isolate_->factory()->NewByteArray(length());
+ Copy(array->GetDataStartAddress());
+ }
+
return array;
}
diff --git a/deps/v8/src/regexp/regexp-bytecode-generator.h b/deps/v8/src/regexp/regexp-bytecode-generator.h
index 84b7ce361c..dfcc2ca5f8 100644
--- a/deps/v8/src/regexp/regexp-bytecode-generator.h
+++ b/deps/v8/src/regexp/regexp-bytecode-generator.h
@@ -100,6 +100,12 @@ class V8_EXPORT_PRIVATE RegExpBytecodeGenerator : public RegExpMacroAssembler {
int advance_current_offset_;
int advance_current_end_;
+ // Stores jump edges emitted for the bytecode (used by
+ // RegExpBytecodePeepholeOptimization).
+ // Key: jump source (offset in buffer_ where jump destination is stored).
+ // Value: jump destination (offset in buffer_ to jump to).
+ ZoneUnorderedMap<int, int> jump_edges_;
+
Isolate* isolate_;
static const int kInvalidPC = -1;
diff --git a/deps/v8/src/regexp/regexp-bytecode-peephole.cc b/deps/v8/src/regexp/regexp-bytecode-peephole.cc
new file mode 100644
index 0000000000..8f1f1d95a9
--- /dev/null
+++ b/deps/v8/src/regexp/regexp-bytecode-peephole.cc
@@ -0,0 +1,1037 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/regexp/regexp-bytecode-peephole.h"
+
+#include "src/execution/isolate.h"
+#include "src/flags/flags.h"
+#include "src/objects/fixed-array.h"
+#include "src/objects/objects-inl.h"
+#include "src/regexp/regexp-bytecodes.h"
+#include "src/utils/memcopy.h"
+#include "src/utils/utils.h"
+#include "src/zone/zone-containers.h"
+#include "src/zone/zone.h"
+
+namespace v8 {
+namespace internal {
+
+namespace {
+
+struct BytecodeArgument {
+ int offset;
+ int length;
+
+ BytecodeArgument(int offset, int length) : offset(offset), length(length) {}
+};
+
+struct BytecodeArgumentMapping : BytecodeArgument {
+ int new_length;
+
+ BytecodeArgumentMapping(int offset, int length, int new_length)
+ : BytecodeArgument(offset, length), new_length(new_length) {}
+};
+
+struct BytecodeArgumentCheck : BytecodeArgument {
+ enum CheckType { kCheckAddress = 0, kCheckValue };
+ CheckType type;
+ int check_offset;
+ int check_length;
+
+ BytecodeArgumentCheck(int offset, int length, int check_offset)
+ : BytecodeArgument(offset, length),
+ type(kCheckAddress),
+ check_offset(check_offset) {}
+ BytecodeArgumentCheck(int offset, int length, int check_offset,
+ int check_length)
+ : BytecodeArgument(offset, length),
+ type(kCheckValue),
+ check_offset(check_offset),
+ check_length(check_length) {}
+};
+
+// Trie-Node for storing bytecode sequences we want to optimize.
+class BytecodeSequenceNode {
+ public:
+ // Dummy bytecode used when we need to store/return a bytecode but it's not a
+ // valid bytecode in the current context.
+ static constexpr int kDummyBytecode = -1;
+
+ BytecodeSequenceNode(int bytecode, Zone* zone);
+ // Adds a new node as child of the current node if it isn't a child already.
+ BytecodeSequenceNode& FollowedBy(int bytecode);
+ // Marks the end of a sequence and sets optimized bytecode to replace all
+ // bytecodes of the sequence with.
+ BytecodeSequenceNode& ReplaceWith(int bytecode);
+ // Maps arguments of bytecodes in the sequence to the optimized bytecode.
+ // Order of invocation determines order of arguments in the optimized
+ // bytecode.
+ // Invoking this method is only allowed on nodes that mark the end of a valid
+ // sequence (i.e. after ReplaceWith()).
+ // bytecode_index_in_sequence: Zero-based index of the referred bytecode
+ // within the sequence (e.g. the bytecode passed to CreateSequence() has
+ // index 0).
+ // argument_offset: Zero-based offset to the argument within the bytecode
+ // (e.g. the first argument that's not packed with the bytecode has offset 4).
+ // argument_byte_length: Length of the argument.
+ // new_argument_byte_length: Length of the argument in the new bytecode
+ // (= argument_byte_length if omitted).
+ BytecodeSequenceNode& MapArgument(int bytecode_index_in_sequence,
+ int argument_offset,
+ int argument_byte_length,
+ int new_argument_byte_length = 0);
+ // Adds a check to the sequence node making it only a valid sequence when the
+ // argument of the current bytecode at the specified offset matches the offset
+ // to check against.
+ // argument_offset: Zero-based offset to the argument within the bytecode
+ // (e.g. the first argument that's not packed with the bytecode has offset 4).
+ // argument_byte_length: Length of the argument.
+ // check_byte_offset: Zero-based offset relative to the beginning of the
+ // sequence that needs to match the value given by argument_offset. (e.g.
+ // check_byte_offset 0 matches the address of the first bytecode in the
+ // sequence).
+ BytecodeSequenceNode& IfArgumentEqualsOffset(int argument_offset,
+ int argument_byte_length,
+ int check_byte_offset);
+ // Adds a check to the sequence node making it only a valid sequence when the
+ // argument of the current bytecode at the specified offset matches the
+ // argument of another bytecode in the sequence.
+ // This is similar to IfArgumentEqualsOffset, except that this method matches
+ // the values of both arguments.
+ BytecodeSequenceNode& IfArgumentEqualsValueAtOffset(
+ int argument_offset, int argument_byte_length,
+ int other_bytecode_index_in_sequence, int other_argument_offset,
+ int other_argument_byte_length);
+ // Marks an argument as unused.
+ // All arguments that are not mapped explicitly have to be marked as unused.
+ // bytecode_index_in_sequence: Zero-based index of the referred bytecode
+ // within the sequence (e.g. the bytecode passed to CreateSequence() has
+ // index 0).
+ // argument_offset: Zero-based offset to the argument within the bytecode
+ // (e.g. the first argument that's not packed with the bytecode has offset 4).
+ // argument_byte_length: Length of the argument.
+ BytecodeSequenceNode& IgnoreArgument(int bytecode_index_in_sequence,
+ int argument_offset,
+ int argument_byte_length);
+ // Checks if the current node is valid for the sequence, i.e. all conditions
+ // set by IfArgumentEqualsOffset and IfArgumentEqualsValueAtOffset are
+ // fulfilled by this node for the actual bytecode sequence.
+ bool CheckArguments(const byte* bytecode, int pc);
+ // Returns whether this node marks the end of a valid sequence (i.e. can be
+ // replaced with an optimized bytecode).
+ bool IsSequence() const;
+ // Returns the length of the sequence in bytes.
+ int SequenceLength() const;
+ // Returns the optimized bytecode for the node or kDummyBytecode if it is not
+ // the end of a valid sequence.
+ int OptimizedBytecode() const;
+ // Returns the child of the current node matching the given bytecode or
+ // nullptr if no such child is found.
+ BytecodeSequenceNode* Find(int bytecode) const;
+ // Returns number of arguments mapped to the current node.
+ // Invoking this method is only allowed on nodes that mark the end of a valid
+ // sequence (i.e. if IsSequence())
+ size_t ArgumentSize() const;
+ // Returns the argument-mapping of the argument at index.
+ // Invoking this method is only allowed on nodes that mark the end of a valid
+ // sequence (i.e. if IsSequence())
+ BytecodeArgumentMapping ArgumentMapping(size_t index) const;
+ // Returns an iterator to begin of ignored arguments.
+ // Invoking this method is only allowed on nodes that mark the end of a valid
+ // sequence (i.e. if IsSequence())
+ ZoneLinkedList<BytecodeArgument>::iterator ArgumentIgnoredBegin() const;
+ // Returns an iterator to end of ignored arguments.
+ // Invoking this method is only allowed on nodes that mark the end of a valid
+ // sequence (i.e. if IsSequence())
+ ZoneLinkedList<BytecodeArgument>::iterator ArgumentIgnoredEnd() const;
+ // Returns whether the current node has ignored arguments.
+ bool HasIgnoredArguments() const;
+
+ private:
+ // Returns a node in the sequence specified by its index within the sequence.
+ BytecodeSequenceNode& GetNodeByIndexInSequence(int index_in_sequence);
+ Zone* zone() const;
+
+ int bytecode_;
+ int bytecode_replacement_;
+ int index_in_sequence_;
+ int start_offset_;
+ BytecodeSequenceNode* parent_;
+ ZoneUnorderedMap<int, BytecodeSequenceNode*> children_;
+ ZoneVector<BytecodeArgumentMapping>* argument_mapping_;
+ ZoneLinkedList<BytecodeArgumentCheck>* argument_check_;
+ ZoneLinkedList<BytecodeArgument>* argument_ignored_;
+
+ Zone* zone_;
+};
+
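+// Usage sketch (illustrative only; the bytecode names and argument offsets
+// below are assumptions, not taken from this file): fuse a two-bytecode
+// sequence into one replacement bytecode, forwarding the first bytecode's
+// argument and dropping the second's.
+//
+//   CreateSequence(BC_LOAD_CURRENT_CHAR)
+//       .FollowedBy(BC_CHECK_CHAR)
+//       .ReplaceWith(BC_FUSED_LOAD_AND_CHECK)
+//       .MapArgument(0, 4, 4)      // bytecode 0, argument at offset 4
+//       .IgnoreArgument(1, 4, 4);  // drop bytecode 1's argument at offset 4
+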
+class RegExpBytecodePeephole {
+ public:
+ RegExpBytecodePeephole(Zone* zone, size_t buffer_size,
+ const ZoneUnorderedMap<int, int>& jump_edges);
+
+ // Parses bytecode and fills the internal buffer with the potentially
+ // optimized bytecode. Returns true when optimizations were performed, false
+ // otherwise.
+ bool OptimizeBytecode(const byte* bytecode, int length);
+ // Copies the internal bytecode buffer to another buffer. The caller is
+ // responsible for allocating/freeing the memory.
+ void CopyOptimizedBytecode(byte* to_address) const;
+ int Length() const;
+
+ private:
+ // Sets up all sequences that are going to be used.
+ void DefineStandardSequences();
+ // Starts a new bytecode sequence.
+ BytecodeSequenceNode& CreateSequence(int bytecode);
+ // Checks for optimization candidates at pc and emits optimized bytecode to
+ // the internal buffer. Returns the length of replaced bytecodes in bytes.
+ int TryOptimizeSequence(const byte* bytecode, int start_pc);
+ // Emits optimized bytecode to the internal buffer. start_pc points to the
+ // start of the sequence in bytecode and last_node is the last
+ // BytecodeSequenceNode of the matching sequence found.
+ void EmitOptimization(int start_pc, const byte* bytecode,
+ const BytecodeSequenceNode& last_node);
+ // Adds a relative jump source fixup at pos.
+ // Jump source fixups are used to find offsets in the new bytecode that
+ // contain jump sources.
+ void AddJumpSourceFixup(int fixup, int pos);
+ // Adds a relative jump destination fixup at pos.
+ // Jump destination fixups are used to find offsets in the new bytecode that
+ // can be jumped to.
+ void AddJumpDestinationFixup(int fixup, int pos);
+ // Sets an absolute jump destination fixup at pos.
+ void SetJumpDestinationFixup(int fixup, int pos);
+ // Prepare internal structures used to fixup jumps.
+ void PrepareJumpStructures(const ZoneUnorderedMap<int, int>& jump_edges);
+ // Updates all jump targets in the new bytecode.
+ void FixJumps();
+ // Update a single jump.
+ void FixJump(int jump_source, int jump_destination);
+ void AddSentinelFixups(int pos);
+ template <typename T>
+ void EmitValue(T value);
+ template <typename T>
+ void OverwriteValue(int offset, T value);
+ void CopyRangeToOutput(const byte* orig_bytecode, int start, int length);
+ void SetRange(byte value, int count);
+ void EmitArgument(int start_pc, const byte* bytecode,
+ BytecodeArgumentMapping arg);
+ int pc() const;
+ Zone* zone() const;
+
+ ZoneVector<byte> optimized_bytecode_buffer_;
+ BytecodeSequenceNode* sequences_;
+ // Jumps used in old bytecode.
+ // Key: Jump source (offset where destination is stored in old bytecode)
+ // Value: Destination
+ ZoneMap<int, int> jump_edges_;
+ // Jumps used in new bytecode.
+ // Key: Jump source (offset where destination is stored in new bytecode)
+ // Value: Destination
+ ZoneMap<int, int> jump_edges_mapped_;
+ // Number of times a jump destination is used within the bytecode.
+ // Key: Jump destination (offset in old bytecode).
+ // Value: Number of times jump destination is used.
+ ZoneMap<int, int> jump_usage_counts_;
+ // Maps offsets in old bytecode to fixups of sources (delta to new bytecode).
+ // Key: Offset in old bytecode from where the fixup is valid.
+ // Value: Delta to map jump source from old bytecode to new bytecode in bytes.
+ ZoneMap<int, int> jump_source_fixups_;
+ // Maps offsets in old bytecode to fixups of destinations (delta to new
+ // bytecode).
+ // Key: Offset in old bytecode from where the fixup is valid.
+ // Value: Delta to map jump destinations from old bytecode to new bytecode in
+ // bytes.
+ ZoneMap<int, int> jump_destination_fixups_;
+
+ Zone* zone_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(RegExpBytecodePeephole);
+};
+
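+// Reinterprets sizeof(T) bytes at buffer + pos as a value of type T. The
+// position must be suitably aligned for T (DCHECKed below).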
+template <typename T>
+T GetValue(const byte* buffer, int pos) {
+ DCHECK(IsAligned(reinterpret_cast<Address>(buffer + pos), alignof(T)));
+ return *reinterpret_cast<const T*>(buffer + pos);
+}
+
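+// Reads a bytecode argument of 1, 2 or 4 bytes at offset and widens it to
+// int32_t.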
+int32_t GetArgumentValue(const byte* bytecode, int offset, int length) {
+ switch (length) {
+ case 1:
+ return GetValue<byte>(bytecode, offset);
+ case 2:
+ return GetValue<int16_t>(bytecode, offset);
+ case 4:
+ return GetValue<int32_t>(bytecode, offset);
+ default:
+ UNREACHABLE();
+ }
+}
+
+BytecodeSequenceNode::BytecodeSequenceNode(int bytecode, Zone* zone)
+ : bytecode_(bytecode),
+ bytecode_replacement_(kDummyBytecode),
+ index_in_sequence_(0),
+ start_offset_(0),
+ parent_(nullptr),
+ children_(ZoneUnorderedMap<int, BytecodeSequenceNode*>(zone)),
+ argument_mapping_(new (zone->New(sizeof(*argument_mapping_)))
+ ZoneVector<BytecodeArgumentMapping>(zone)),
+ argument_check_(new (zone->New(sizeof(*argument_check_)))
+ ZoneLinkedList<BytecodeArgumentCheck>(zone)),
+ argument_ignored_(new (zone->New(sizeof(*argument_ignored_)))
+ ZoneLinkedList<BytecodeArgument>(zone)),
+ zone_(zone) {}
+
+BytecodeSequenceNode& BytecodeSequenceNode::FollowedBy(int bytecode) {
+ DCHECK(0 <= bytecode && bytecode < kRegExpBytecodeCount);
+
+ if (children_.find(bytecode) == children_.end()) {
+ BytecodeSequenceNode* new_node =
+ new (zone()->New(sizeof(BytecodeSequenceNode)))
+ BytecodeSequenceNode(bytecode, zone());
+ // If node is not the first in the sequence, set offsets and parent.
+ if (bytecode_ != kDummyBytecode) {
+ new_node->start_offset_ = start_offset_ + RegExpBytecodeLength(bytecode_);
+ new_node->index_in_sequence_ = index_in_sequence_ + 1;
+ new_node->parent_ = this;
+ }
+ children_[bytecode] = new_node;
+ }
+
+ return *children_[bytecode];
+}
+
+BytecodeSequenceNode& BytecodeSequenceNode::ReplaceWith(int bytecode) {
+ DCHECK(0 <= bytecode && bytecode < kRegExpBytecodeCount);
+
+ bytecode_replacement_ = bytecode;
+
+ return *this;
+}
+
+BytecodeSequenceNode& BytecodeSequenceNode::MapArgument(
+ int bytecode_index_in_sequence, int argument_offset,
+ int argument_byte_length, int new_argument_byte_length) {
+ DCHECK(IsSequence());
+ DCHECK_LE(bytecode_index_in_sequence, index_in_sequence_);
+
+ BytecodeSequenceNode& ref_node =
+ GetNodeByIndexInSequence(bytecode_index_in_sequence);
+ DCHECK_LT(argument_offset, RegExpBytecodeLength(ref_node.bytecode_));
+
+ int absolute_offset = ref_node.start_offset_ + argument_offset;
+ if (new_argument_byte_length == 0) {
+ new_argument_byte_length = argument_byte_length;
+ }
+
+ argument_mapping_->push_back(BytecodeArgumentMapping{
+ absolute_offset, argument_byte_length, new_argument_byte_length});
+
+ return *this;
+}
+
+BytecodeSequenceNode& BytecodeSequenceNode::IfArgumentEqualsOffset(
+ int argument_offset, int argument_byte_length, int check_byte_offset) {
+ DCHECK_LT(argument_offset, RegExpBytecodeLength(bytecode_));
+ DCHECK(argument_byte_length == 1 || argument_byte_length == 2 ||
+ argument_byte_length == 4);
+
+ int absolute_offset = start_offset_ + argument_offset;
+
+ argument_check_->push_back(BytecodeArgumentCheck{
+ absolute_offset, argument_byte_length, check_byte_offset});
+
+ return *this;
+}
+
+BytecodeSequenceNode& BytecodeSequenceNode::IfArgumentEqualsValueAtOffset(
+ int argument_offset, int argument_byte_length,
+ int other_bytecode_index_in_sequence, int other_argument_offset,
+ int other_argument_byte_length) {
+ DCHECK_LT(argument_offset, RegExpBytecodeLength(bytecode_));
+ DCHECK_LE(other_bytecode_index_in_sequence, index_in_sequence_);
+ DCHECK_EQ(argument_byte_length, other_argument_byte_length);
+
+ BytecodeSequenceNode& ref_node =
+ GetNodeByIndexInSequence(other_bytecode_index_in_sequence);
+ DCHECK_LT(other_argument_offset, RegExpBytecodeLength(ref_node.bytecode_));
+
+ int absolute_offset = start_offset_ + argument_offset;
+ int other_absolute_offset = ref_node.start_offset_ + other_argument_offset;
+
+ argument_check_->push_back(
+ BytecodeArgumentCheck{absolute_offset, argument_byte_length,
+ other_absolute_offset, other_argument_byte_length});
+
+ return *this;
+}
+
+BytecodeSequenceNode& BytecodeSequenceNode::IgnoreArgument(
+ int bytecode_index_in_sequence, int argument_offset,
+ int argument_byte_length) {
+ DCHECK(IsSequence());
+ DCHECK_LE(bytecode_index_in_sequence, index_in_sequence_);
+
+ BytecodeSequenceNode& ref_node =
+ GetNodeByIndexInSequence(bytecode_index_in_sequence);
+ DCHECK_LT(argument_offset, RegExpBytecodeLength(ref_node.bytecode_));
+
+ int absolute_offset = ref_node.start_offset_ + argument_offset;
+
+ argument_ignored_->push_back(
+ BytecodeArgument{absolute_offset, argument_byte_length});
+
+ return *this;
+}
+
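+// Returns true if all argument checks recorded for this node hold for the
+// bytecode sequence starting at pc.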
+bool BytecodeSequenceNode::CheckArguments(const byte* bytecode, int pc) {
+ bool is_valid = true;
+ for (auto check_iter = argument_check_->begin();
+ check_iter != argument_check_->end() && is_valid; check_iter++) {
+ auto value =
+ GetArgumentValue(bytecode, pc + check_iter->offset, check_iter->length);
+ if (check_iter->type == BytecodeArgumentCheck::kCheckAddress) {
+ is_valid &= value == pc + check_iter->check_offset;
+ } else if (check_iter->type == BytecodeArgumentCheck::kCheckValue) {
+ auto other_value = GetArgumentValue(
+ bytecode, pc + check_iter->check_offset, check_iter->check_length);
+ is_valid &= value == other_value;
+ } else {
+ UNREACHABLE();
+ }
+ }
+ return is_valid;
+}
+
+bool BytecodeSequenceNode::IsSequence() const {
+ return bytecode_replacement_ != kDummyBytecode;
+}
+
+int BytecodeSequenceNode::SequenceLength() const {
+ return start_offset_ + RegExpBytecodeLength(bytecode_);
+}
+
+int BytecodeSequenceNode::OptimizedBytecode() const {
+ return bytecode_replacement_;
+}
+
+BytecodeSequenceNode* BytecodeSequenceNode::Find(int bytecode) const {
+ auto found = children_.find(bytecode);
+ if (found == children_.end()) return nullptr;
+ return found->second;
+}
+
+size_t BytecodeSequenceNode::ArgumentSize() const {
+ DCHECK(IsSequence());
+ return argument_mapping_->size();
+}
+
+BytecodeArgumentMapping BytecodeSequenceNode::ArgumentMapping(
+ size_t index) const {
+ DCHECK(IsSequence());
+ DCHECK(argument_mapping_ != nullptr);
+ DCHECK_GE(index, 0);
+ DCHECK_LT(index, argument_mapping_->size());
+
+ return argument_mapping_->at(index);
+}
+
+ZoneLinkedList<BytecodeArgument>::iterator
+BytecodeSequenceNode::ArgumentIgnoredBegin() const {
+ DCHECK(IsSequence());
+ DCHECK(argument_ignored_ != nullptr);
+ return argument_ignored_->begin();
+}
+
+ZoneLinkedList<BytecodeArgument>::iterator
+BytecodeSequenceNode::ArgumentIgnoredEnd() const {
+ DCHECK(IsSequence());
+ DCHECK(argument_ignored_ != nullptr);
+ return argument_ignored_->end();
+}
+
+bool BytecodeSequenceNode::HasIgnoredArguments() const {
+ return argument_ignored_ != nullptr;
+}
+
+BytecodeSequenceNode& BytecodeSequenceNode::GetNodeByIndexInSequence(
+ int index_in_sequence) {
+ DCHECK_LE(index_in_sequence, index_in_sequence_);
+
+ if (index_in_sequence < index_in_sequence_) {
+ DCHECK(parent_ != nullptr);
+ return parent_->GetNodeByIndexInSequence(index_in_sequence);
+ } else {
+ return *this;
+ }
+}
+
+Zone* BytecodeSequenceNode::zone() const { return zone_; }
+
+RegExpBytecodePeephole::RegExpBytecodePeephole(
+ Zone* zone, size_t buffer_size,
+ const ZoneUnorderedMap<int, int>& jump_edges)
+ : optimized_bytecode_buffer_(zone),
+ sequences_(new (zone->New(sizeof(*sequences_))) BytecodeSequenceNode(
+ BytecodeSequenceNode::kDummyBytecode, zone)),
+ jump_edges_(zone),
+ jump_edges_mapped_(zone),
+ jump_usage_counts_(zone),
+ jump_source_fixups_(zone),
+ jump_destination_fixups_(zone),
+ zone_(zone) {
+ optimized_bytecode_buffer_.reserve(buffer_size);
+ PrepareJumpStructures(jump_edges);
+ DefineStandardSequences();
+ // Sentinel fixups at beginning of bytecode (position -1) so we don't have to
+ // check for end of iterator inside the fixup loop.
+ // In general fixups are deltas of original offsets of jump
+ // sources/destinations (in the old bytecode) to find them in the new
+ // bytecode. All jump targets are fixed after the new bytecode is fully
+ // emitted in the internal buffer.
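+ // E.g. if a 16-byte sequence starting at offset 20 is replaced by a single
+ // 8-byte bytecode, all jump sources/destinations behind the sequence
+ // receive a fixup (delta) of -8.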
+ AddSentinelFixups(-1);
+ // Sentinel fixups at end of (old) bytecode so we don't have to check for
+ // end of iterator inside the fixup loop.
+ DCHECK_LE(buffer_size, std::numeric_limits<int>::max());
+ AddSentinelFixups(static_cast<int>(buffer_size));
+}
+
+void RegExpBytecodePeephole::DefineStandardSequences() {
+ // Commonly used sequences can be found by creating regexp bytecode traces
+ // (--trace-regexp-bytecodes) and using v8/tools/regexp-sequences.py.
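+ // The arguments to MapArgument below are: the index of the bytecode within
+ // the sequence, the offset of the argument inside that bytecode, its byte
+ // length and (optionally) its byte length in the replacement bytecode.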
+ CreateSequence(BC_LOAD_CURRENT_CHAR)
+ .FollowedBy(BC_CHECK_BIT_IN_TABLE)
+ .FollowedBy(BC_ADVANCE_CP_AND_GOTO)
+ // Sequence is only valid if the jump target of ADVANCE_CP_AND_GOTO is the
+ // first bytecode in this sequence.
+ .IfArgumentEqualsOffset(4, 4, 0)
+ .ReplaceWith(BC_SKIP_UNTIL_BIT_IN_TABLE)
+ .MapArgument(0, 1, 3) // load offset
+ .MapArgument(2, 1, 3, 4) // advance by
+ .MapArgument(1, 8, 16) // bit table
+ .MapArgument(1, 4, 4) // goto when match
+ .MapArgument(0, 4, 4) // goto on failure
+ .IgnoreArgument(2, 4, 4); // loop jump
+
+ CreateSequence(BC_CHECK_CURRENT_POSITION)
+ .FollowedBy(BC_LOAD_CURRENT_CHAR_UNCHECKED)
+ .FollowedBy(BC_CHECK_CHAR)
+ .FollowedBy(BC_ADVANCE_CP_AND_GOTO)
+ // Sequence is only valid if the jump target of ADVANCE_CP_AND_GOTO is the
+ // first bytecode in this sequence.
+ .IfArgumentEqualsOffset(4, 4, 0)
+ .ReplaceWith(BC_SKIP_UNTIL_CHAR_POS_CHECKED)
+ .MapArgument(1, 1, 3) // load offset
+ .MapArgument(3, 1, 3, 2) // advance_by
+ .MapArgument(2, 1, 3, 2) // c
+ .MapArgument(0, 1, 3, 4) // eats at least
+ .MapArgument(2, 4, 4) // goto when match
+ .MapArgument(0, 4, 4) // goto on failure
+ .IgnoreArgument(3, 4, 4); // loop jump
+
+ CreateSequence(BC_CHECK_CURRENT_POSITION)
+ .FollowedBy(BC_LOAD_CURRENT_CHAR_UNCHECKED)
+ .FollowedBy(BC_AND_CHECK_CHAR)
+ .FollowedBy(BC_ADVANCE_CP_AND_GOTO)
+ // Sequence is only valid if the jump target of ADVANCE_CP_AND_GOTO is the
+ // first bytecode in this sequence.
+ .IfArgumentEqualsOffset(4, 4, 0)
+ .ReplaceWith(BC_SKIP_UNTIL_CHAR_AND)
+ .MapArgument(1, 1, 3) // load offset
+ .MapArgument(3, 1, 3, 2) // advance_by
+ .MapArgument(2, 1, 3, 2) // c
+ .MapArgument(2, 4, 4) // mask
+ .MapArgument(0, 1, 3, 4) // eats at least
+ .MapArgument(2, 8, 4) // goto when match
+ .MapArgument(0, 4, 4) // goto on failure
+ .IgnoreArgument(3, 4, 4); // loop jump
+
+ // TODO(pthier): It might make sense to optimize short sequences like this
+ // one only if the resulting optimized bytecode is not longer than the
+ // original sequence. It could become longer if there are jumps into the
+ // sequence and we have to replicate parts of it. A method to mark such
+ // sequences might be useful.
+ CreateSequence(BC_LOAD_CURRENT_CHAR)
+ .FollowedBy(BC_CHECK_CHAR)
+ .FollowedBy(BC_ADVANCE_CP_AND_GOTO)
+ // Sequence is only valid if the jump target of ADVANCE_CP_AND_GOTO is the
+ // first bytecode in this sequence.
+ .IfArgumentEqualsOffset(4, 4, 0)
+ .ReplaceWith(BC_SKIP_UNTIL_CHAR)
+ .MapArgument(0, 1, 3) // load offset
+ .MapArgument(2, 1, 3, 2) // advance by
+ .MapArgument(1, 1, 3, 2) // character
+ .MapArgument(1, 4, 4) // goto when match
+ .MapArgument(0, 4, 4) // goto on failure
+ .IgnoreArgument(2, 4, 4); // loop jump
+
+ CreateSequence(BC_LOAD_CURRENT_CHAR)
+ .FollowedBy(BC_CHECK_CHAR)
+ .FollowedBy(BC_CHECK_CHAR)
+ // Sequence is only valid if the jump targets of both CHECK_CHAR bytecodes
+ // are equal.
+ .IfArgumentEqualsValueAtOffset(4, 4, 1, 4, 4)
+ .FollowedBy(BC_ADVANCE_CP_AND_GOTO)
+ // Sequence is only valid if the jump target of ADVANCE_CP_AND_GOTO is the
+ // first bytecode in this sequence.
+ .IfArgumentEqualsOffset(4, 4, 0)
+ .ReplaceWith(BC_SKIP_UNTIL_CHAR_OR_CHAR)
+ .MapArgument(0, 1, 3) // load offset
+ .MapArgument(3, 1, 3, 4) // advance by
+ .MapArgument(1, 1, 3, 2) // character 1
+ .MapArgument(2, 1, 3, 2) // character 2
+ .MapArgument(1, 4, 4) // goto when match
+ .MapArgument(0, 4, 4) // goto on failure
+ .IgnoreArgument(2, 4, 4) // goto when match 2
+ .IgnoreArgument(3, 4, 4); // loop jump
+
+ CreateSequence(BC_LOAD_CURRENT_CHAR)
+ .FollowedBy(BC_CHECK_GT)
+ // Sequence is only valid if the jump target of CHECK_GT is the first
+ // bytecode AFTER the whole sequence.
+ .IfArgumentEqualsOffset(4, 4, 56)
+ .FollowedBy(BC_CHECK_BIT_IN_TABLE)
+ // Sequence is only valid if the jump target of CHECK_BIT_IN_TABLE is
+ // the ADVANCE_CP_AND_GOTO bytecode at the end of the sequence.
+ .IfArgumentEqualsOffset(4, 4, 48)
+ .FollowedBy(BC_GOTO)
+ // Sequence is only valid if the jump target of GOTO is the same as the
+ // jump target of CHECK_GT (i.e. both jump to the first bytecode AFTER the
+ // whole sequence).
+ .IfArgumentEqualsValueAtOffset(4, 4, 1, 4, 4)
+ .FollowedBy(BC_ADVANCE_CP_AND_GOTO)
+ // Sequence is only valid if the jump target of ADVANCE_CP_AND_GOTO is the
+ // first bytecode in this sequence.
+ .IfArgumentEqualsOffset(4, 4, 0)
+ .ReplaceWith(BC_SKIP_UNTIL_GT_OR_NOT_BIT_IN_TABLE)
+ .MapArgument(0, 1, 3) // load offset
+ .MapArgument(4, 1, 3, 2) // advance by
+ .MapArgument(1, 1, 3, 2) // character
+ .MapArgument(2, 8, 16) // bit table
+ .MapArgument(1, 4, 4) // goto when match
+ .MapArgument(0, 4, 4) // goto on failure
+ .IgnoreArgument(2, 4, 4) // indirect loop jump
+ .IgnoreArgument(3, 4, 4) // jump out of loop
+ .IgnoreArgument(4, 4, 4); // loop jump
+}
+
+bool RegExpBytecodePeephole::OptimizeBytecode(const byte* bytecode,
+ int length) {
+ int old_pc = 0;
+ bool did_optimize = false;
+
+ while (old_pc < length) {
+ int replaced_len = TryOptimizeSequence(bytecode, old_pc);
+ if (replaced_len > 0) {
+ old_pc += replaced_len;
+ did_optimize = true;
+ } else {
+ int bc = bytecode[old_pc];
+ int bc_len = RegExpBytecodeLength(bc);
+ CopyRangeToOutput(bytecode, old_pc, bc_len);
+ old_pc += bc_len;
+ }
+ }
+
+ if (did_optimize) {
+ FixJumps();
+ }
+
+ return did_optimize;
+}
+
+void RegExpBytecodePeephole::CopyOptimizedBytecode(byte* to_address) const {
+ MemCopy(to_address, &(*optimized_bytecode_buffer_.begin()), Length());
+}
+
+int RegExpBytecodePeephole::Length() const { return pc(); }
+
+BytecodeSequenceNode& RegExpBytecodePeephole::CreateSequence(int bytecode) {
+ DCHECK(sequences_ != nullptr);
+ DCHECK(0 <= bytecode && bytecode < kRegExpBytecodeCount);
+
+ return sequences_->FollowedBy(bytecode);
+}
+
+int RegExpBytecodePeephole::TryOptimizeSequence(const byte* bytecode,
+ int start_pc) {
+ BytecodeSequenceNode* seq_node = sequences_;
+ BytecodeSequenceNode* valid_seq_end = nullptr;
+
+ int current_pc = start_pc;
+
+ // Check for the longest valid sequence matching any of the pre-defined
+ // sequences in the Trie data structure.
+ while ((seq_node = seq_node->Find(bytecode[current_pc]))) {
+ if (!seq_node->CheckArguments(bytecode, start_pc)) {
+ break;
+ }
+ if (seq_node->IsSequence()) {
+ valid_seq_end = seq_node;
+ }
+ current_pc += RegExpBytecodeLength(bytecode[current_pc]);
+ }
+
+ if (valid_seq_end) {
+ EmitOptimization(start_pc, bytecode, *valid_seq_end);
+ return valid_seq_end->SequenceLength();
+ }
+
+ return 0;
+}
+
+void RegExpBytecodePeephole::EmitOptimization(
+ int start_pc, const byte* bytecode, const BytecodeSequenceNode& last_node) {
+#ifdef DEBUG
+ int optimized_start_pc = pc();
+#endif
+ // Jump sources that are mapped or marked as unused will be deleted at the end
+ // of this method. We don't delete them immediately as we might need the
+ // information when we have to preserve bytecodes at the end.
+ // TODO(pthier): Replace with a stack-allocated data structure.
+ ZoneLinkedList<int> delete_jumps = ZoneLinkedList<int>(zone());
+
+ uint32_t bc = last_node.OptimizedBytecode();
+ EmitValue(bc);
+
+ for (size_t arg = 0; arg < last_node.ArgumentSize(); arg++) {
+ BytecodeArgumentMapping arg_map = last_node.ArgumentMapping(arg);
+ int arg_pos = start_pc + arg_map.offset;
+ // If we map any jump source we mark the old source for deletion and insert
+ // a new jump.
+ auto jump_edge_iter = jump_edges_.find(arg_pos);
+ if (jump_edge_iter != jump_edges_.end()) {
+ int jump_source = jump_edge_iter->first;
+ int jump_destination = jump_edge_iter->second;
+ // Add a new jump edge at the current position.
+ jump_edges_mapped_.emplace(Length(), jump_destination);
+ // Mark old jump edge for deletion.
+ delete_jumps.push_back(jump_source);
+ // Decrement usage count of jump destination.
+ auto jump_count_iter = jump_usage_counts_.find(jump_destination);
+ DCHECK(jump_count_iter != jump_usage_counts_.end());
+ int& usage_count = jump_count_iter->second;
+ --usage_count;
+ }
+ // TODO(pthier): DCHECK that mapped arguments are never sources of jumps
+ // to destinations inside the sequence.
+ EmitArgument(start_pc, bytecode, arg_map);
+ }
+ DCHECK_EQ(pc(), optimized_start_pc +
+ RegExpBytecodeLength(last_node.OptimizedBytecode()));
+
+ // Remove jumps from arguments we ignore.
+ if (last_node.HasIgnoredArguments()) {
+ for (auto ignored_arg = last_node.ArgumentIgnoredBegin();
+ ignored_arg != last_node.ArgumentIgnoredEnd(); ignored_arg++) {
+ auto jump_edge_iter = jump_edges_.find(start_pc + ignored_arg->offset);
+ if (jump_edge_iter != jump_edges_.end()) {
+ int jump_source = jump_edge_iter->first;
+ int jump_destination = jump_edge_iter->second;
+ // Mark old jump edge for deletion.
+ delete_jumps.push_back(jump_source);
+ // Decrement usage count of jump destination.
+ auto jump_count_iter = jump_usage_counts_.find(jump_destination);
+ DCHECK(jump_count_iter != jump_usage_counts_.end());
+ int& usage_count = jump_count_iter->second;
+ --usage_count;
+ }
+ }
+ }
+
+ int fixup_length = RegExpBytecodeLength(bc) - last_node.SequenceLength();
+
+ // Check if there are any jumps inside the old sequence.
+ // If so we have to keep the bytecodes that are jumped to around.
+ // Jump destinations only jumped to from inside the sequence (usage count of
+ // zero) are ignored. The iterator is dereferenced only after checking for
+ // end() to avoid reading past the last element.
+ auto jump_destination_candidate = jump_usage_counts_.upper_bound(start_pc);
+ while (jump_destination_candidate != jump_usage_counts_.end() &&
+ jump_destination_candidate->second == 0) {
+ ++jump_destination_candidate;
+ }
+
+ int preserve_from = start_pc + last_node.SequenceLength();
+ if (jump_destination_candidate != jump_usage_counts_.end() &&
+ jump_destination_candidate->first < start_pc + last_node.SequenceLength()) {
+ preserve_from = jump_destination_candidate->first;
+ // Check if any jump in the sequence we are preserving has a jump
+ // destination inside the optimized sequence before the current position we
+ // want to preserve. If so we have to preserve all bytecodes starting at
+ // this jump destination.
+ for (auto jump_iter = jump_edges_.lower_bound(preserve_from);
+ jump_iter != jump_edges_.end() &&
+ jump_iter->first /* jump source */ <
+ start_pc + last_node.SequenceLength();
+ ++jump_iter) {
+ int jump_destination = jump_iter->second;
+ if (jump_destination > start_pc && jump_destination < preserve_from) {
+ preserve_from = jump_destination;
+ }
+ }
+
+ // We preserve everything to the end of the sequence. This is conservative
+ // since it would be enough to preserve all bytecodes up to an unconditional
+ // jump.
+ int preserve_length = start_pc + last_node.SequenceLength() - preserve_from;
+ fixup_length += preserve_length;
+ // Jumps after the start of the preserved sequence need fixup.
+ AddJumpSourceFixup(fixup_length,
+ start_pc + last_node.SequenceLength() - preserve_length);
+ // All jump targets after the start of the optimized sequence need to be
+ // fixed relative to the length of the optimized sequence including
+ // bytecodes we preserved.
+ AddJumpDestinationFixup(fixup_length, start_pc + 1);
+ // Jumps to the sequence we preserved need absolute fixup as they could
+ // occur before or after the sequence.
+ SetJumpDestinationFixup(pc() - preserve_from, preserve_from);
+ CopyRangeToOutput(bytecode, preserve_from, preserve_length);
+ } else {
+ AddJumpDestinationFixup(fixup_length, start_pc + 1);
+ // Jumps after the end of the old sequence need fixup.
+ AddJumpSourceFixup(fixup_length, start_pc + last_node.SequenceLength());
+ }
+
+ // Delete the jumps we definitely don't need anymore.
+ for (int del : delete_jumps) {
+ if (del < preserve_from) {
+ jump_edges_.erase(del);
+ }
+ }
+}
+
+void RegExpBytecodePeephole::AddJumpSourceFixup(int fixup, int pos) {
+ auto previous_fixup = jump_source_fixups_.lower_bound(pos);
+ DCHECK(previous_fixup != jump_source_fixups_.end());
+ DCHECK(previous_fixup != jump_source_fixups_.begin());
+
+ int previous_fixup_value = (--previous_fixup)->second;
+ jump_source_fixups_[pos] = previous_fixup_value + fixup;
+}
+
+void RegExpBytecodePeephole::AddJumpDestinationFixup(int fixup, int pos) {
+ auto previous_fixup = jump_destination_fixups_.lower_bound(pos);
+ DCHECK(previous_fixup != jump_destination_fixups_.end());
+ DCHECK(previous_fixup != jump_destination_fixups_.begin());
+
+ int previous_fixup_value = (--previous_fixup)->second;
+ jump_destination_fixups_[pos] = previous_fixup_value + fixup;
+}
+
+void RegExpBytecodePeephole::SetJumpDestinationFixup(int fixup, int pos) {
+ auto previous_fixup = jump_destination_fixups_.lower_bound(pos);
+ DCHECK(previous_fixup != jump_destination_fixups_.end());
+ DCHECK(previous_fixup != jump_destination_fixups_.begin());
+
+ int previous_fixup_value = (--previous_fixup)->second;
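+ // The absolute fixup applies only to destinations equal to pos; the entry
+ // at pos + 1 restores the previous relative fixup for all offsets behind it.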
+ jump_destination_fixups_.emplace(pos, fixup);
+ jump_destination_fixups_.emplace(pos + 1, previous_fixup_value);
+}
+
+void RegExpBytecodePeephole::PrepareJumpStructures(
+ const ZoneUnorderedMap<int, int>& jump_edges) {
+ for (auto jump_edge : jump_edges) {
+ int jump_source = jump_edge.first;
+ int jump_destination = jump_edge.second;
+
+ jump_edges_.emplace(jump_source, jump_destination);
+ jump_usage_counts_[jump_destination]++;
+ }
+}
+
+void RegExpBytecodePeephole::FixJumps() {
+ int position_fixup = 0;
+ // Next position where fixup changes.
+ auto next_source_fixup = jump_source_fixups_.lower_bound(0);
+ int next_source_fixup_offset = next_source_fixup->first;
+ int next_source_fixup_value = next_source_fixup->second;
+
+ for (auto jump_edge : jump_edges_) {
+ int jump_source = jump_edge.first;
+ int jump_destination = jump_edge.second;
+ while (jump_source >= next_source_fixup_offset) {
+ position_fixup = next_source_fixup_value;
+ ++next_source_fixup;
+ next_source_fixup_offset = next_source_fixup->first;
+ next_source_fixup_value = next_source_fixup->second;
+ }
+ jump_source += position_fixup;
+
+ FixJump(jump_source, jump_destination);
+ }
+
+ // Mapped jump edges don't need source fixups, as the position already is an
+ // offset in the new bytecode.
+ for (auto jump_edge : jump_edges_mapped_) {
+ int jump_source = jump_edge.first;
+ int jump_destination = jump_edge.second;
+
+ FixJump(jump_source, jump_destination);
+ }
+}
+
+void RegExpBytecodePeephole::FixJump(int jump_source, int jump_destination) {
+ int fixed_jump_destination =
+ jump_destination +
+ (--jump_destination_fixups_.upper_bound(jump_destination))->second;
+ DCHECK_LT(fixed_jump_destination, Length());
+#ifdef DEBUG
+ // TODO(pthier): This check could be better if we track the bytecodes
+ // actually used and check if we jump to one of them.
+ byte jump_bc = optimized_bytecode_buffer_[fixed_jump_destination];
+ DCHECK_GT(jump_bc, 0);
+ DCHECK_LT(jump_bc, kRegExpBytecodeCount);
+#endif
+
+ if (jump_destination != fixed_jump_destination) {
+ OverwriteValue<uint32_t>(jump_source, fixed_jump_destination);
+ }
+}
+
+void RegExpBytecodePeephole::AddSentinelFixups(int pos) {
+ jump_source_fixups_.emplace(pos, 0);
+ jump_destination_fixups_.emplace(pos, 0);
+}
+
+template <typename T>
+void RegExpBytecodePeephole::EmitValue(T value) {
+ DCHECK(optimized_bytecode_buffer_.begin() + pc() ==
+ optimized_bytecode_buffer_.end());
+ byte* value_byte_iter = reinterpret_cast<byte*>(&value);
+ optimized_bytecode_buffer_.insert(optimized_bytecode_buffer_.end(),
+ value_byte_iter,
+ value_byte_iter + sizeof(T));
+}
+
+template <typename T>
+void RegExpBytecodePeephole::OverwriteValue(int offset, T value) {
+ byte* value_byte_iter = reinterpret_cast<byte*>(&value);
+ byte* value_byte_iter_end = value_byte_iter + sizeof(T);
+ while (value_byte_iter < value_byte_iter_end) {
+ optimized_bytecode_buffer_[offset++] = *value_byte_iter++;
+ }
+}
+
+void RegExpBytecodePeephole::CopyRangeToOutput(const byte* orig_bytecode,
+ int start, int length) {
+ DCHECK(optimized_bytecode_buffer_.begin() + pc() ==
+ optimized_bytecode_buffer_.end());
+ optimized_bytecode_buffer_.insert(optimized_bytecode_buffer_.end(),
+ orig_bytecode + start,
+ orig_bytecode + start + length);
+}
+
+void RegExpBytecodePeephole::SetRange(byte value, int count) {
+ DCHECK(optimized_bytecode_buffer_.begin() + pc() ==
+ optimized_bytecode_buffer_.end());
+ optimized_bytecode_buffer_.insert(optimized_bytecode_buffer_.end(), count,
+ value);
+}
+
+void RegExpBytecodePeephole::EmitArgument(int start_pc, const byte* bytecode,
+ BytecodeArgumentMapping arg) {
+ int arg_pos = start_pc + arg.offset;
+ switch (arg.length) {
+ case 1:
+ DCHECK_EQ(arg.new_length, arg.length);
+ EmitValue(GetValue<byte>(bytecode, arg_pos));
+ break;
+ case 2:
+ DCHECK_EQ(arg.new_length, arg.length);
+ EmitValue(GetValue<uint16_t>(bytecode, arg_pos));
+ break;
+ case 3: {
+ // Length 3 only occurs in 'packed' arguments where the lowermost byte is
+ // the current bytecode, and the remaining 3 bytes are the packed value.
+ //
+ // We load 4 bytes from position - 1 and shift out the bytecode.
+#ifdef V8_TARGET_BIG_ENDIAN
+ UNIMPLEMENTED();
+ int32_t val = 0;
+#else
+ int32_t val = GetValue<int32_t>(bytecode, arg_pos - 1) >> kBitsPerByte;
+#endif // V8_TARGET_BIG_ENDIAN
+
+ switch (arg.new_length) {
+ case 2:
+ EmitValue<uint16_t>(val);
+ break;
+ case 3: {
+ // Pack with previously emitted value.
+ auto prev_val =
+ GetValue<int32_t>(&(*optimized_bytecode_buffer_.begin()),
+ Length() - sizeof(uint32_t));
+#ifdef V8_TARGET_BIG_ENDIAN
+ UNIMPLEMENTED();
+ USE(prev_val);
+#else
+ DCHECK_EQ(prev_val & 0xFFFFFF00, 0);
+ OverwriteValue<uint32_t>(
+ pc() - sizeof(uint32_t),
+ (static_cast<uint32_t>(val) << 8) | (prev_val & 0xFF));
+#endif // V8_TARGET_BIG_ENDIAN
+ break;
+ }
+ case 4:
+ EmitValue<uint32_t>(val);
+ break;
+ }
+ break;
+ }
+ case 4:
+ DCHECK_EQ(arg.new_length, arg.length);
+ EmitValue(GetValue<uint32_t>(bytecode, arg_pos));
+ break;
+ case 8:
+ DCHECK_EQ(arg.new_length, arg.length);
+ EmitValue(GetValue<uint64_t>(bytecode, arg_pos));
+ break;
+ default:
+ CopyRangeToOutput(bytecode, arg_pos, Min(arg.length, arg.new_length));
+ if (arg.length < arg.new_length) {
+ SetRange(0x00, arg.new_length - arg.length);
+ }
+ break;
+ }
+}
+
+int RegExpBytecodePeephole::pc() const {
+ DCHECK_LE(optimized_bytecode_buffer_.size(), std::numeric_limits<int>::max());
+ return static_cast<int>(optimized_bytecode_buffer_.size());
+}
+
+Zone* RegExpBytecodePeephole::zone() const { return zone_; }
+
+} // namespace
+
+// static
+Handle<ByteArray> RegExpBytecodePeepholeOptimization::OptimizeBytecode(
+ Isolate* isolate, Zone* zone, Handle<String> source, const byte* bytecode,
+ int length, const ZoneUnorderedMap<int, int>& jump_edges) {
+ RegExpBytecodePeephole peephole(zone, length, jump_edges);
+ bool did_optimize = peephole.OptimizeBytecode(bytecode, length);
+ Handle<ByteArray> array = isolate->factory()->NewByteArray(peephole.Length());
+ peephole.CopyOptimizedBytecode(array->GetDataStartAddress());
+
+ if (did_optimize && FLAG_trace_regexp_peephole_optimization) {
+ PrintF("Original Bytecode:\n");
+ RegExpBytecodeDisassemble(bytecode, length, source->ToCString().get());
+ PrintF("Optimized Bytecode:\n");
+ RegExpBytecodeDisassemble(array->GetDataStartAddress(), peephole.Length(),
+ source->ToCString().get());
+ }
+
+ return array;
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/regexp/regexp-bytecode-peephole.h b/deps/v8/src/regexp/regexp-bytecode-peephole.h
new file mode 100644
index 0000000000..f116e1ac41
--- /dev/null
+++ b/deps/v8/src/regexp/regexp-bytecode-peephole.h
@@ -0,0 +1,31 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_REGEXP_REGEXP_BYTECODE_PEEPHOLE_H_
+#define V8_REGEXP_REGEXP_BYTECODE_PEEPHOLE_H_
+
+#include "src/common/globals.h"
+#include "src/zone/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+
+class ByteArray;
+
+// Peephole optimization for regexp interpreter bytecode.
+// Pre-defined bytecode sequences occurring in the bytecode generated by the
+// RegExpBytecodeGenerator can be optimized into a single bytecode.
+class RegExpBytecodePeepholeOptimization : public AllStatic {
+ public:
+ // Performs peephole optimization on the given bytecode and returns the
+ // optimized bytecode.
+ static Handle<ByteArray> OptimizeBytecode(
+ Isolate* isolate, Zone* zone, Handle<String> source, const byte* bytecode,
+ int length, const ZoneUnorderedMap<int, int>& jump_edges);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_REGEXP_REGEXP_BYTECODE_PEEPHOLE_H_
diff --git a/deps/v8/src/regexp/regexp-bytecodes.cc b/deps/v8/src/regexp/regexp-bytecodes.cc
new file mode 100644
index 0000000000..fbf8273ab4
--- /dev/null
+++ b/deps/v8/src/regexp/regexp-bytecodes.cc
@@ -0,0 +1,46 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/regexp/regexp-bytecodes.h"
+
+#include <cctype>
+
+#include "src/utils/utils.h"
+
+namespace v8 {
+namespace internal {
+
+void RegExpBytecodeDisassembleSingle(const byte* code_base, const byte* pc) {
+ PrintF("%s", RegExpBytecodeName(*pc));
+
+ // Args and the bytecode as hex.
+ for (int i = 0; i < RegExpBytecodeLength(*pc); i++) {
+ PrintF(", %02x", pc[i]);
+ }
+ PrintF(" ");
+
+ // Args as ascii.
+ for (int i = 1; i < RegExpBytecodeLength(*pc); i++) {
+ unsigned char b = pc[i];
+ PrintF("%c", std::isprint(b) ? b : '.');
+ }
+ PrintF("\n");
+}
+
+void RegExpBytecodeDisassemble(const byte* code_base, int length,
+ const char* pattern) {
+ PrintF("[generated bytecode for regexp pattern: '%s']\n", pattern);
+
+ ptrdiff_t offset = 0;
+
+ while (offset < length) {
+ const byte* const pc = code_base + offset;
+ PrintF("%p %4" V8PRIxPTRDIFF " ", pc, offset);
+ RegExpBytecodeDisassembleSingle(code_base, pc);
+ offset += RegExpBytecodeLength(*pc);
+ }
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/regexp/regexp-bytecodes.h b/deps/v8/src/regexp/regexp-bytecodes.h
index 3dd7637b88..e25945d0a0 100644
--- a/deps/v8/src/regexp/regexp-bytecodes.h
+++ b/deps/v8/src/regexp/regexp-bytecodes.h
@@ -6,17 +6,27 @@
#define V8_REGEXP_REGEXP_BYTECODES_H_
#include "src/base/macros.h"
+#include "src/common/globals.h"
namespace v8 {
namespace internal {
-const int BYTECODE_MASK = 0xff;
+// Maximum number of bytecodes that will be used (the next power of 2 greater
+// than or equal to the number of actually defined bytecodes).
+// All slots between the last actually defined bytecode and maximum id will be
+// filled with BREAKs, indicating an invalid operation. This way using
+// BYTECODE_MASK guarantees no OOB access to the dispatch table.
+constexpr int kRegExpPaddedBytecodeCount = 1 << 6;
+constexpr int BYTECODE_MASK = kRegExpPaddedBytecodeCount - 1;
// The first argument is packed in with the byte code in one word, but so it
// has 24 bits, but it can be positive and negative so only use 23 bits for
// positive values.
const unsigned int MAX_FIRST_ARG = 0x7fffffu;
const int BYTECODE_SHIFT = 8;
+STATIC_ASSERT(1 << BYTECODE_SHIFT > BYTECODE_MASK);
+// TODO(pthier): Argument offsets of bytecodes should be easily accessible by
+// name or at least by position.
#define BYTECODE_ITERATOR(V) \
V(BREAK, 0, 4) /* bc8 */ \
V(PUSH_CP, 1, 4) /* bc8 pad24 */ \
@@ -34,25 +44,61 @@ const int BYTECODE_SHIFT = 8;
V(FAIL, 13, 4) /* bc8 pad24 */ \
V(SUCCEED, 14, 4) /* bc8 pad24 */ \
V(ADVANCE_CP, 15, 4) /* bc8 offset24 */ \
- V(GOTO, 16, 8) /* bc8 pad24 addr32 */ \
+ /* Jump to another bytecode given its offset. */ \
+ /* Bit Layout: */ \
+ /* 0x00 - 0x07: 0x10 (fixed) Bytecode */ \
+ /* 0x08 - 0x1F: 0x00 (unused) Padding */ \
+ /* 0x20 - 0x3F: Address of bytecode to jump to */ \
+ V(GOTO, 16, 8) /* bc8 pad24 addr32 */ \
+ /* Check if offset is in range and load character at given offset. */ \
+ /* Bit Layout: */ \
+ /* 0x00 - 0x07: 0x11 (fixed) Bytecode */ \
+ /* 0x08 - 0x1F: Offset from current position */ \
+ /* 0x20 - 0x3F: Address of bytecode when load is out of range */ \
V(LOAD_CURRENT_CHAR, 17, 8) /* bc8 offset24 addr32 */ \
+ /* Load character at given offset without range checks. */ \
+ /* Bit Layout: */ \
+ /* 0x00 - 0x07: 0x12 (fixed) Bytecode */ \
+ /* 0x08 - 0x1F: Offset from current position */ \
V(LOAD_CURRENT_CHAR_UNCHECKED, 18, 4) /* bc8 offset24 */ \
V(LOAD_2_CURRENT_CHARS, 19, 8) /* bc8 offset24 addr32 */ \
V(LOAD_2_CURRENT_CHARS_UNCHECKED, 20, 4) /* bc8 offset24 */ \
V(LOAD_4_CURRENT_CHARS, 21, 8) /* bc8 offset24 addr32 */ \
V(LOAD_4_CURRENT_CHARS_UNCHECKED, 22, 4) /* bc8 offset24 */ \
V(CHECK_4_CHARS, 23, 12) /* bc8 pad24 uint32 addr32 */ \
- V(CHECK_CHAR, 24, 8) /* bc8 pad8 uint16 addr32 */ \
+ /* Check if current character is equal to a given character */ \
+ /* Bit Layout: */ \
+ /* 0x00 - 0x07: 0x19 (fixed) Bytecode */ \
+ /* 0x08 - 0x0F: 0x00 (unused) Padding */ \
+ /* 0x10 - 0x1F: Character to check */ \
+ /* 0x20 - 0x3F: Address of bytecode when matched */ \
+ V(CHECK_CHAR, 24, 8) /* bc8 pad8 uint16 addr32 */ \
V(CHECK_NOT_4_CHARS, 25, 12) /* bc8 pad24 uint32 addr32 */ \
V(CHECK_NOT_CHAR, 26, 8) /* bc8 pad8 uint16 addr32 */ \
V(AND_CHECK_4_CHARS, 27, 16) /* bc8 pad24 uint32 uint32 addr32 */ \
- V(AND_CHECK_CHAR, 28, 12) /* bc8 pad8 uint16 uint32 addr32 */ \
+ /* Checks if the current character combined with mask (bitwise and) */ \
+ /* matches a character (e.g. used when two characters in a disjunction */ \
+ /* differ by only a single bit) */ \
+ /* Bit Layout: */ \
+ /* 0x00 - 0x07: 0x1c (fixed) Bytecode */ \
+ /* 0x08 - 0x0F: 0x00 (unused) Padding */ \
+ /* 0x10 - 0x1F: Character to match against (after mask applied) */ \
+ /* 0x20 - 0x3F: Bitmask bitwise and combined with current character */ \
+ /* 0x40 - 0x5F: Address of bytecode when matched */ \
+ V(AND_CHECK_CHAR, 28, 12) /* bc8 pad8 uint16 uint32 addr32 */ \
V(AND_CHECK_NOT_4_CHARS, 29, 16) /* bc8 pad24 uint32 uint32 addr32 */ \
V(AND_CHECK_NOT_CHAR, 30, 12) /* bc8 pad8 uint16 uint32 addr32 */ \
V(MINUS_AND_CHECK_NOT_CHAR, 31, 12) /* bc8 pad8 uc16 uc16 uc16 addr32 */ \
V(CHECK_CHAR_IN_RANGE, 32, 12) /* bc8 pad24 uc16 uc16 addr32 */ \
V(CHECK_CHAR_NOT_IN_RANGE, 33, 12) /* bc8 pad24 uc16 uc16 addr32 */ \
- V(CHECK_BIT_IN_TABLE, 34, 24) /* bc8 pad24 addr32 bits128 */ \
+ /* Checks if the current character matches any of the characters encoded */ \
+ /* in a bit table. Similar to/inspired by Boyer-Moore string search. */ \
+ /* Bit Layout: */ \
+ /* 0x00 - 0x07: 0x22 (fixed) Bytecode */ \
+ /* 0x08 - 0x1F: 0x00 (unused) Padding */ \
+ /* 0x20 - 0x3F: Address of bytecode when bit is set */ \
+ /* 0x40 - 0xBF: Bit table */ \
+ V(CHECK_BIT_IN_TABLE, 34, 24) /* bc8 pad24 addr32 bits128 */ \
V(CHECK_LT, 35, 8) /* bc8 pad8 uc16 addr32 */ \
V(CHECK_GT, 36, 8) /* bc8 pad8 uc16 addr32 */ \
V(CHECK_NOT_BACK_REF, 37, 8) /* bc8 reg_idx24 addr32 */ \
@@ -67,10 +113,99 @@ const int BYTECODE_SHIFT = 8;
V(CHECK_REGISTER_EQ_POS, 46, 8) /* bc8 reg_idx24 addr32 */ \
V(CHECK_AT_START, 47, 8) /* bc8 pad24 addr32 */ \
V(CHECK_NOT_AT_START, 48, 8) /* bc8 offset24 addr32 */ \
+ /* Checks if the current position matches top of backtrack stack */ \
+ /* Bit Layout: */ \
+ /* 0x00 - 0x07: 0x31 (fixed) Bytecode */ \
+ /* 0x08 - 0x1F: 0x00 (unused) Padding */ \
+ /* 0x20 - 0x3F: Address of bytecode when current matches tos */ \
V(CHECK_GREEDY, 49, 8) /* bc8 pad24 addr32 */ \
- V(ADVANCE_CP_AND_GOTO, 50, 8) /* bc8 offset24 addr32 */ \
+ /* Advance character pointer by given offset and jump to another bytecode.*/ \
+ /* Bit Layout: */ \
+ /* 0x00 - 0x07: 0x32 (fixed) Bytecode */ \
+ /* 0x08 - 0x1F: Number of characters to advance */ \
+ /* 0x20 - 0x3F: Address of bytecode to jump to */ \
+ V(ADVANCE_CP_AND_GOTO, 50, 8) /* bc8 offset24 addr32 */ \
V(SET_CURRENT_POSITION_FROM_END, 51, 4) /* bc8 idx24 */ \
- V(CHECK_CURRENT_POSITION, 52, 8) /* bc8 idx24 addr32 */
+ /* Checks if current position + given offset is in range. */ \
+ /* Bit Layout: */ \
+ /* 0x00 - 0x07: 0x34 (fixed) Bytecode */ \
+ /* 0x08 - 0x1F: Offset from current position */ \
+ /* 0x20 - 0x3F: Address of bytecode when position is out of range */ \
+ V(CHECK_CURRENT_POSITION, 52, 8) /* bc8 idx24 addr32 */ \
+ /* Combination of: */ \
+ /* LOAD_CURRENT_CHAR, CHECK_BIT_IN_TABLE and ADVANCE_CP_AND_GOTO */ \
+ /* Emitted by RegExpBytecodePeepholeOptimization. */ \
+ /* Bit Layout: */ \
+ /* 0x00 - 0x07 0x35 (fixed) Bytecode */ \
+ /* 0x08 - 0x1F Load character offset from current position */ \
+ /* 0x20 - 0x3F Number of characters to advance */ \
+ /* 0x40 - 0xBF Bit Table */ \
+ /* 0xC0 - 0xDF Address of bytecode when character is matched */ \
+ /* 0xE0 - 0xFF Address of bytecode when no match */ \
+ V(SKIP_UNTIL_BIT_IN_TABLE, 53, 32) \
+ /* Combination of: */ \
+ /* CHECK_CURRENT_POSITION, LOAD_CURRENT_CHAR_UNCHECKED, AND_CHECK_CHAR */ \
+ /* and ADVANCE_CP_AND_GOTO */ \
+ /* Emitted by RegExpBytecodePeepholeOptimization. */ \
+ /* Bit Layout: */ \
+ /* 0x00 - 0x07 0x36 (fixed) Bytecode */ \
+ /* 0x08 - 0x1F Load character offset from current position */ \
+ /* 0x20 - 0x2F Number of characters to advance */ \
+ /* 0x30 - 0x3F Character to match against (after mask applied) */ \
+ /* 0x40 - 0x5F Bitmask bitwise and combined with current character */ \
+ /* 0x60 - 0x7F Minimum number of characters this pattern consumes */ \
+ /* 0x80 - 0x9F Address of bytecode when character is matched */ \
+ /* 0xA0 - 0xBF Address of bytecode when no match */ \
+ V(SKIP_UNTIL_CHAR_AND, 54, 24) \
+ /* Combination of: */ \
+ /* LOAD_CURRENT_CHAR, CHECK_CHAR and ADVANCE_CP_AND_GOTO */ \
+ /* Emitted by RegExpBytecodePeepholeOptimization. */ \
+ /* Bit Layout: */ \
+ /* 0x00 - 0x07 0x37 (fixed) Bytecode */ \
+ /* 0x08 - 0x1F Load character offset from current position */ \
+ /* 0x20 - 0x2F Number of characters to advance */ \
+ /* 0x30 - 0x3F Character to match */ \
+ /* 0x40 - 0x5F Address of bytecode when character is matched */ \
+ /* 0x60 - 0x7F Address of bytecode when no match */ \
+ V(SKIP_UNTIL_CHAR, 55, 16) \
+ /* Combination of: */ \
+ /* CHECK_CURRENT_POSITION, LOAD_CURRENT_CHAR_UNCHECKED, CHECK_CHAR */ \
+ /* and ADVANCE_CP_AND_GOTO */ \
+ /* Emitted by RegExpBytecodePeepholeOptimization. */ \
+ /* Bit Layout: */ \
+ /* 0x00 - 0x07 0x38 (fixed) Bytecode */ \
+ /* 0x08 - 0x1F Load character offset from current position */ \
+ /* 0x20 - 0x2F Number of characters to advance */ \
+ /* 0x30 - 0x3F Character to match */ \
+ /* 0x40 - 0x5F Minimum number of characters this pattern consumes */ \
+ /* 0x60 - 0x7F Address of bytecode when character is matched */ \
+ /* 0x80 - 0x9F Address of bytecode when no match */ \
+ V(SKIP_UNTIL_CHAR_POS_CHECKED, 56, 20) \
+ /* Combination of: */ \
+ /* LOAD_CURRENT_CHAR, CHECK_CHAR, CHECK_CHAR and ADVANCE_CP_AND_GOTO */ \
+ /* Emitted by RegExpBytecodePeepholeOptimization. */ \
+ /* Bit Layout: */ \
+ /* 0x00 - 0x07 0x39 (fixed) Bytecode */ \
+ /* 0x08 - 0x1F Load character offset from current position */ \
+ /* 0x20 - 0x3F Number of characters to advance */ \
+ /* 0x40 - 0x4F Character to match */ \
+ /* 0x50 - 0x5F Other Character to match */ \
+ /* 0x60 - 0x7F Address of bytecode when either character is matched */ \
+ /* 0x80 - 0x9F Address of bytecode when no match */ \
+ V(SKIP_UNTIL_CHAR_OR_CHAR, 57, 20) \
+ /* Combination of: */ \
+ /* LOAD_CURRENT_CHAR, CHECK_GT, CHECK_BIT_IN_TABLE, GOTO */ \
+ /* and ADVANCE_CP_AND_GOTO */ \
+ /* Emitted by RegExpBytecodePeepholeOptimization. */ \
+ /* Bit Layout: */ \
+ /* 0x00 - 0x07 0x3A (fixed) Bytecode */ \
+ /* 0x08 - 0x1F Load character offset from current position */ \
+ /* 0x20 - 0x2F Number of characters to advance */ \
+ /* 0x30 - 0x3F Character to check if it is less than current char */ \
+ /* 0x40 - 0xBF Bit Table */ \
+ /* 0xC0 - 0xDF Address of bytecode when character is matched */ \
+ /* 0xE0 - 0xFF Address of bytecode when no match */ \
+ V(SKIP_UNTIL_GT_OR_NOT_BIT_IN_TABLE, 58, 32)
#define COUNT(...) +1
static constexpr int kRegExpBytecodeCount = BYTECODE_ITERATOR(COUNT);
@@ -80,7 +215,7 @@ static constexpr int kRegExpBytecodeCount = BYTECODE_ITERATOR(COUNT);
// contiguous, strictly increasing, and start at 0.
// TODO(jgruber): Do not explicitly assign values, instead generate them
// implicitly from the list order.
-STATIC_ASSERT(kRegExpBytecodeCount == 53);
+STATIC_ASSERT(kRegExpBytecodeCount == 59);
#define DECLARE_BYTECODES(name, code, length) \
static constexpr int BC_##name = code;
@@ -107,6 +242,10 @@ inline const char* RegExpBytecodeName(int bytecode) {
return kRegExpBytecodeNames[bytecode];
}
+void RegExpBytecodeDisassembleSingle(const byte* code_base, const byte* pc);
+void RegExpBytecodeDisassemble(const byte* code_base, int length,
+ const char* pattern);
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/regexp/regexp-compiler.cc b/deps/v8/src/regexp/regexp-compiler.cc
index 85da69f308..d141f3c490 100644
--- a/deps/v8/src/regexp/regexp-compiler.cc
+++ b/deps/v8/src/regexp/regexp-compiler.cc
@@ -725,6 +725,11 @@ static int GetCaseIndependentLetters(Isolate* isolate, uc16 character,
unibrow::uchar* letters,
int letter_length) {
#ifdef V8_INTL_SUPPORT
+ // Special case for U+017F (LATIN SMALL LETTER LONG S), whose upper-case
+ // equivalent 'S' is in the ASCII range.
+ if (character == 0x017f) {
+ letters[0] = character;
+ return 1;
+ }
icu::UnicodeSet set;
set.add(character);
set = set.closeOver(USET_CASE_INSENSITIVE);
@@ -734,10 +739,18 @@ static int GetCaseIndependentLetters(Isolate* isolate, uc16 character,
UChar32 start = set.getRangeStart(i);
UChar32 end = set.getRangeEnd(i);
CHECK(end - start + items <= letter_length);
- while (start <= end) {
- if (one_byte_subject && start > String::kMaxOneByteCharCode) break;
- letters[items++] = (unibrow::uchar)(start);
- start++;
+ // Only add to the output if the character is not in the ASCII range
+ // or its case equivalent is in the ASCII range.
+ // #sec-runtime-semantics-canonicalize-ch
+ // 3.g If the numeric value of ch ≥ 128 and the numeric value of cu < 128,
+ // return ch.
+ if (!((start >= 128) && (character < 128))) {
+ // No range has its start and end spanning across code point 128.
+ DCHECK((start >= 128) == (end >= 128));
+ for (UChar32 cu = start; cu <= end; cu++) {
+ if (one_byte_subject && cu > String::kMaxOneByteCharCode) break;
+ letters[items++] = (unibrow::uchar)(cu);
+ }
}
}
return items;
diff --git a/deps/v8/src/regexp/regexp-interpreter.cc b/deps/v8/src/regexp/regexp-interpreter.cc
index cf2fb55e4a..df72951afb 100644
--- a/deps/v8/src/regexp/regexp-interpreter.cc
+++ b/deps/v8/src/regexp/regexp-interpreter.cc
@@ -12,6 +12,7 @@
#include "src/objects/objects-inl.h"
#include "src/regexp/regexp-bytecodes.h"
#include "src/regexp/regexp-macro-assembler.h"
+#include "src/regexp/regexp-stack.h" // For kMaximumStackSize.
#include "src/regexp/regexp.h"
#include "src/strings/unicode.h"
#include "src/utils/utils.h"
@@ -63,23 +64,6 @@ bool BackRefMatchesNoCase(Isolate* isolate, int from, int current, int len,
return true;
}
-void DisassembleSingleBytecode(const byte* code_base, const byte* pc) {
- PrintF("%s", RegExpBytecodeName(*pc));
-
- // Args and the bytecode as hex.
- for (int i = 0; i < RegExpBytecodeLength(*pc); i++) {
- PrintF(", %02x", pc[i]);
- }
- PrintF(" ");
-
- // Args as ascii.
- for (int i = 1; i < RegExpBytecodeLength(*pc); i++) {
- unsigned char b = pc[i];
- PrintF("%c", std::isprint(b) ? b : '.');
- }
- PrintF("\n");
-}
-
#ifdef DEBUG
void MaybeTraceInterpreter(const byte* code_base, const byte* pc,
int stack_depth, int current_position,
@@ -94,7 +78,7 @@ void MaybeTraceInterpreter(const byte* code_base, const byte* pc,
PrintF(format, pc - code_base, stack_depth, current_position, current_char,
printable ? current_char : '.');
- DisassembleSingleBytecode(code_base, pc);
+ RegExpBytecodeDisassembleSingle(code_base, pc);
}
}
#endif // DEBUG
@@ -118,7 +102,10 @@ class BacktrackStack {
public:
BacktrackStack() = default;
- void push(int v) { data_.emplace_back(v); }
+ V8_WARN_UNUSED_RESULT bool push(int v) {
+ data_.emplace_back(v);
+ return (static_cast<int>(data_.size()) <= kMaxSize);
+ }
int peek() const {
DCHECK(!data_.empty());
return data_.back();
@@ -141,13 +128,17 @@ class BacktrackStack {
// static stack-allocated backing store, but small enough not to waste space.
static constexpr int kStaticCapacity = 64;
- base::SmallVector<int, kStaticCapacity> data_;
+ using ValueT = int;
+ base::SmallVector<ValueT, kStaticCapacity> data_;
+
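+ // Limit the backtrack stack to the byte size the native irregexp
+ // backtrack stack (RegExpStack) may grow to, counted in int-sized entries.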
+ static constexpr int kMaxSize =
+ RegExpStack::kMaximumStackSize / sizeof(ValueT);
DISALLOW_COPY_AND_ASSIGN(BacktrackStack);
};
-IrregexpInterpreter::Result StackOverflow(Isolate* isolate,
- RegExp::CallOrigin call_origin) {
+IrregexpInterpreter::Result ThrowStackOverflow(Isolate* isolate,
+ RegExp::CallOrigin call_origin) {
CHECK(call_origin == RegExp::CallOrigin::kFromRuntime);
// We abort interpreter execution after the stack overflow is thrown, and thus
// allow allocation here despite the outer DisallowHeapAllocationScope.
@@ -156,6 +147,17 @@ IrregexpInterpreter::Result StackOverflow(Isolate* isolate,
return IrregexpInterpreter::EXCEPTION;
}
+// Only throws if called from the runtime, otherwise just returns the EXCEPTION
+// status code.
+IrregexpInterpreter::Result MaybeThrowStackOverflow(
+ Isolate* isolate, RegExp::CallOrigin call_origin) {
+ if (call_origin == RegExp::CallOrigin::kFromRuntime) {
+ return ThrowStackOverflow(isolate, call_origin);
+ } else {
+ return IrregexpInterpreter::EXCEPTION;
+ }
+}
+
template <typename Char>
void UpdateCodeAndSubjectReferences(
Isolate* isolate, Handle<ByteArray> code_array,
@@ -208,7 +210,7 @@ IrregexpInterpreter::Result HandleInterrupts(
Handle<String> subject_handle(*subject_string_out, isolate);
if (js_has_overflowed) {
- return StackOverflow(isolate, call_origin);
+ return ThrowStackOverflow(isolate, call_origin);
} else if (check.InterruptRequested()) {
const bool was_one_byte =
String::IsOneByteRepresentationUnderneath(*subject_string_out);
@@ -238,6 +240,13 @@ IrregexpInterpreter::Result HandleInterrupts(
return IrregexpInterpreter::SUCCESS;
}
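+// Returns whether the bit for current_char is set in the 128-bit table used
+// by the CHECK_BIT_IN_TABLE family of bytecodes (one bit per character value
+// modulo the table size).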
+bool CheckBitInTable(const uint32_t current_char, const byte* const table) {
+ int mask = RegExpMacroAssembler::kTableMask;
+ int b = table[(current_char & mask) >> kBitsPerByteLog2];
+ int bit = (current_char & (kBitsPerByte - 1));
+ return (b & (1 << bit)) != 0;
+}
+
// If computed gotos are supported by the compiler, we can get addresses to
// labels directly in C/C++. Every bytecode handler has its own label and we
// store the addresses in a dispatch table indexed by bytecode. To execute the
@@ -262,7 +271,7 @@ IrregexpInterpreter::Result HandleInterrupts(
#define DISPATCH() \
pc = next_pc; \
insn = next_insn; \
- break
+ goto switch_dispatch_continuation
#endif // V8_USE_COMPUTED_GOTO
// ADVANCE/SET_PC_FROM_OFFSET are separated from DISPATCH, because ideally some
@@ -297,11 +306,52 @@ IrregexpInterpreter::Result RawMatch(Isolate* isolate, ByteArray code_array,
DisallowHeapAllocation no_gc;
#if V8_USE_COMPUTED_GOTO
-#define DECLARE_DISPATCH_TABLE_ENTRY(name, code, length) &&BC_##name,
- static const void* const dispatch_table[] = {
- BYTECODE_ITERATOR(DECLARE_DISPATCH_TABLE_ENTRY)};
+
+// We have to make sure that no OOB access to the dispatch table is possible and
+// all values are valid label addresses.
+// Otherwise jumps to arbitrary addresses could potentially happen.
+// This is ensured as follows:
+// Every index into the dispatch table gets masked using BYTECODE_MASK in
+// DECODE(). This way we can only get values between 0 (only the least
+// significant byte of an integer is used) and kRegExpPaddedBytecodeCount - 1
+// (BYTECODE_MASK is defined to be exactly this value).
+// All entries from kRegExpBytecodeCount to kRegExpPaddedBytecodeCount have to
+// be filled with BREAKs (invalid operation).
+
+// Fill dispatch table from last defined bytecode up to the next power of two
+// with BREAK (invalid operation).
+// TODO(pthier): Find a way to fill up automatically (at compile time)
+// 59 real bytecodes -> 5 fillers
+#define BYTECODE_FILLER_ITERATOR(V) \
+ V(BREAK) /* 1 */ \
+ V(BREAK) /* 2 */ \
+ V(BREAK) /* 3 */ \
+ V(BREAK) /* 4 */ \
+ V(BREAK) /* 5 */
+
+#define COUNT(...) +1
+ static constexpr int kRegExpBytecodeFillerCount =
+ BYTECODE_FILLER_ITERATOR(COUNT);
+#undef COUNT
+
+ // Make sure kRegExpPaddedBytecodeCount is actually the closest possible power
+ // of two.
+ DCHECK_EQ(kRegExpPaddedBytecodeCount,
+ base::bits::RoundUpToPowerOfTwo32(kRegExpBytecodeCount));
+
+ // Make sure every bytecode we get by using BYTECODE_MASK is well defined.
+ STATIC_ASSERT(kRegExpBytecodeCount <= kRegExpPaddedBytecodeCount);
+ STATIC_ASSERT(kRegExpBytecodeCount + kRegExpBytecodeFillerCount ==
+ kRegExpPaddedBytecodeCount);
+
+#define DECLARE_DISPATCH_TABLE_ENTRY(name, ...) &&BC_##name,
+ static const void* const dispatch_table[kRegExpPaddedBytecodeCount] = {
+ BYTECODE_ITERATOR(DECLARE_DISPATCH_TABLE_ENTRY)
+ BYTECODE_FILLER_ITERATOR(DECLARE_DISPATCH_TABLE_ENTRY)};
#undef DECLARE_DISPATCH_TABLE_ENTRY
-#endif
+#undef BYTECODE_FILLER_ITERATOR
+
+#endif // V8_USE_COMPUTED_GOTO
const byte* pc = code_array.GetDataStartAddress();
const byte* code_base = pc;
@@ -329,17 +379,23 @@ IrregexpInterpreter::Result RawMatch(Isolate* isolate, ByteArray code_array,
BYTECODE(BREAK) { UNREACHABLE(); }
BYTECODE(PUSH_CP) {
ADVANCE(PUSH_CP);
- backtrack_stack.push(current);
+ if (!backtrack_stack.push(current)) {
+ return MaybeThrowStackOverflow(isolate, call_origin);
+ }
DISPATCH();
}
BYTECODE(PUSH_BT) {
ADVANCE(PUSH_BT);
- backtrack_stack.push(Load32Aligned(pc + 4));
+ if (!backtrack_stack.push(Load32Aligned(pc + 4))) {
+ return MaybeThrowStackOverflow(isolate, call_origin);
+ }
DISPATCH();
}
BYTECODE(PUSH_REGISTER) {
ADVANCE(PUSH_REGISTER);
- backtrack_stack.push(registers[insn >> BYTECODE_SHIFT]);
+ if (!backtrack_stack.push(registers[insn >> BYTECODE_SHIFT])) {
+ return MaybeThrowStackOverflow(isolate, call_origin);
+ }
DISPATCH();
}
BYTECODE(SET_REGISTER) {
@@ -580,10 +636,7 @@ IrregexpInterpreter::Result RawMatch(Isolate* isolate, ByteArray code_array,
DISPATCH();
}
BYTECODE(CHECK_BIT_IN_TABLE) {
- int mask = RegExpMacroAssembler::kTableMask;
- byte b = pc[8 + ((current_char & mask) >> kBitsPerByteLog2)];
- int bit = (current_char & (kBitsPerByte - 1));
- if ((b & (1 << bit)) != 0) {
+ if (CheckBitInTable(current_char, pc + 8)) {
SET_PC_FROM_OFFSET(Load32Aligned(pc + 4));
} else {
ADVANCE(CHECK_BIT_IN_TABLE);
@@ -762,6 +815,118 @@ IrregexpInterpreter::Result RawMatch(Isolate* isolate, ByteArray code_array,
}
DISPATCH();
}
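+ // The SKIP_UNTIL_* handlers below implement the fused sequences emitted
+ // by the peephole optimizer: they scan the subject in a tight loop
+ // instead of dispatching one bytecode per character.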
+ BYTECODE(SKIP_UNTIL_CHAR) {
+ int load_offset = (insn >> BYTECODE_SHIFT);
+ uint32_t advance = Load16Aligned(pc + 4);
+ uint32_t c = Load16Aligned(pc + 6);
+ while (static_cast<uintptr_t>(current + load_offset) <
+ static_cast<uintptr_t>(subject.length())) {
+ current_char = subject[current + load_offset];
+ if (c == current_char) {
+ SET_PC_FROM_OFFSET(Load32Aligned(pc + 8));
+ DISPATCH();
+ }
+ current += advance;
+ }
+ SET_PC_FROM_OFFSET(Load32Aligned(pc + 12));
+ DISPATCH();
+ }
+ BYTECODE(SKIP_UNTIL_CHAR_AND) {
+ int load_offset = (insn >> BYTECODE_SHIFT);
+ uint16_t advance = Load16Aligned(pc + 4);
+ uint16_t c = Load16Aligned(pc + 6);
+ uint32_t mask = Load32Aligned(pc + 8);
+ int32_t maximum_offset = Load32Aligned(pc + 12);
+ while (static_cast<uintptr_t>(current + maximum_offset) <=
+ static_cast<uintptr_t>(subject.length())) {
+ current_char = subject[current + load_offset];
+ if (c == (current_char & mask)) {
+ SET_PC_FROM_OFFSET(Load32Aligned(pc + 16));
+ DISPATCH();
+ }
+ current += advance;
+ }
+ SET_PC_FROM_OFFSET(Load32Aligned(pc + 20));
+ DISPATCH();
+ }
+ BYTECODE(SKIP_UNTIL_CHAR_POS_CHECKED) {
+ int load_offset = (insn >> BYTECODE_SHIFT);
+ uint16_t advance = Load16Aligned(pc + 4);
+ uint16_t c = Load16Aligned(pc + 6);
+ int32_t maximum_offset = Load32Aligned(pc + 8);
+ while (static_cast<uintptr_t>(current + maximum_offset) <=
+ static_cast<uintptr_t>(subject.length())) {
+ current_char = subject[current + load_offset];
+ if (c == current_char) {
+ SET_PC_FROM_OFFSET(Load32Aligned(pc + 12));
+ DISPATCH();
+ }
+ current += advance;
+ }
+ SET_PC_FROM_OFFSET(Load32Aligned(pc + 16));
+ DISPATCH();
+ }
+ BYTECODE(SKIP_UNTIL_BIT_IN_TABLE) {
+ int load_offset = (insn >> BYTECODE_SHIFT);
+ uint32_t advance = Load16Aligned(pc + 4);
+ const byte* table = pc + 8;
+ while (static_cast<uintptr_t>(current + load_offset) <
+ static_cast<uintptr_t>(subject.length())) {
+ current_char = subject[current + load_offset];
+ if (CheckBitInTable(current_char, table)) {
+ SET_PC_FROM_OFFSET(Load32Aligned(pc + 24));
+ DISPATCH();
+ }
+ current += advance;
+ }
+ SET_PC_FROM_OFFSET(Load32Aligned(pc + 28));
+ DISPATCH();
+ }
+ BYTECODE(SKIP_UNTIL_GT_OR_NOT_BIT_IN_TABLE) {
+ int load_offset = (insn >> BYTECODE_SHIFT);
+ uint16_t advance = Load16Aligned(pc + 4);
+ uint16_t limit = Load16Aligned(pc + 6);
+ const byte* table = pc + 8;
+ while (static_cast<uintptr_t>(current + load_offset) <
+ static_cast<uintptr_t>(subject.length())) {
+ current_char = subject[current + load_offset];
+ if (current_char > limit) {
+ SET_PC_FROM_OFFSET(Load32Aligned(pc + 24));
+ DISPATCH();
+ }
+ if (!CheckBitInTable(current_char, table)) {
+ SET_PC_FROM_OFFSET(Load32Aligned(pc + 24));
+ DISPATCH();
+ }
+ current += advance;
+ }
+ SET_PC_FROM_OFFSET(Load32Aligned(pc + 28));
+ DISPATCH();
+ }
+ BYTECODE(SKIP_UNTIL_CHAR_OR_CHAR) {
+ int load_offset = (insn >> BYTECODE_SHIFT);
+ uint32_t advance = Load32Aligned(pc + 4);
+ uint16_t c = Load16Aligned(pc + 8);
+ uint16_t c2 = Load16Aligned(pc + 10);
+ while (static_cast<uintptr_t>(current + load_offset) <
+ static_cast<uintptr_t>(subject.length())) {
+ current_char = subject[current + load_offset];
+ // The two if-statements below are split up intentionally, as combining
+ // them seems to result in register allocation behaving quite
+ // differently and slowing down the resulting code.
+ if (c == current_char) {
+ SET_PC_FROM_OFFSET(Load32Aligned(pc + 12));
+ DISPATCH();
+ }
+ if (c2 == current_char) {
+ SET_PC_FROM_OFFSET(Load32Aligned(pc + 12));
+ DISPATCH();
+ }
+ current += advance;
+ }
+ SET_PC_FROM_OFFSET(Load32Aligned(pc + 16));
+ DISPATCH();
+ }
#if V8_USE_COMPUTED_GOTO
// Lint gets confused a lot if we just use !V8_USE_COMPUTED_GOTO or ifndef
// V8_USE_COMPUTED_GOTO here.
@@ -769,6 +934,9 @@ IrregexpInterpreter::Result RawMatch(Isolate* isolate, ByteArray code_array,
default:
UNREACHABLE();
}
+ // Label we jump to in DISPATCH(). There must be no instructions between the
+ // end of the switch, this label, and the end of the loop.
+ switch_dispatch_continuation : {}
#endif // V8_USE_COMPUTED_GOTO
}
}
@@ -784,30 +952,11 @@ IrregexpInterpreter::Result RawMatch(Isolate* isolate, ByteArray code_array,
} // namespace
// static
-void IrregexpInterpreter::Disassemble(ByteArray byte_array,
- const std::string& pattern) {
- DisallowHeapAllocation no_gc;
-
- PrintF("[generated bytecode for regexp pattern: '%s']\n", pattern.c_str());
-
- const byte* const code_base = byte_array.GetDataStartAddress();
- const int byte_array_length = byte_array.length();
- ptrdiff_t offset = 0;
-
- while (offset < byte_array_length) {
- const byte* const pc = code_base + offset;
- PrintF("%p %4" V8PRIxPTRDIFF " ", pc, offset);
- DisassembleSingleBytecode(code_base, pc);
- offset += RegExpBytecodeLength(*pc);
- }
-}
-
-// static
IrregexpInterpreter::Result IrregexpInterpreter::Match(
Isolate* isolate, JSRegExp regexp, String subject_string, int* registers,
int registers_length, int start_position, RegExp::CallOrigin call_origin) {
if (FLAG_regexp_tier_up) {
- regexp.MarkTierUpForNextExec();
+ regexp.TierUpTick();
}
bool is_one_byte = String::IsOneByteRepresentationUnderneath(subject_string);
@@ -869,6 +1018,12 @@ IrregexpInterpreter::Result IrregexpInterpreter::MatchForCallFromJs(
String subject_string = String::cast(Object(subject));
JSRegExp regexp_obj = JSRegExp::cast(Object(regexp));
+ if (regexp_obj.MarkedForTierUp()) {
+ // Returning RETRY will re-enter through the runtime, where the actual
+ // recompilation for tier-up takes place.
+ return IrregexpInterpreter::RETRY;
+ }
+
return Match(isolate, regexp_obj, subject_string, registers, registers_length,
start_position, call_origin);
}
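
All of the new SKIP_UNTIL_* bytecodes above share one loop shape: load the character at current + load_offset, test a predicate, dispatch to the on-match target when it holds, otherwise advance and retry; once the subject is exhausted, fall through to the no-match target. A minimal standalone sketch of that shape, with illustrative names rather than the interpreter's real DISPATCH machinery:

#include <cstddef>
#include <string>

// Returns the offset of the bytecode to dispatch to next. Sketch only;
// SkipUntilChar and its parameters are illustrative stand-ins.
int SkipUntilChar(const std::u16string& subject, int* current, int load_offset,
                  int advance, char16_t wanted, int on_match_pc,
                  int on_no_match_pc) {
  while (static_cast<std::size_t>(*current + load_offset) < subject.length()) {
    char16_t current_char = subject[*current + load_offset];
    if (current_char == wanted) return on_match_pc;  // jump to match handler
    *current += advance;  // keep skipping forward
  }
  return on_no_match_pc;  // subject exhausted without a match
}

The AND, POS_CHECKED, BIT_IN_TABLE and OR_CHAR variants swap only the predicate (masked compare, table lookup, two-way compare) and, for the position-checked forms, bound the loop by maximum_offset instead of load_offset.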
diff --git a/deps/v8/src/regexp/regexp-interpreter.h b/deps/v8/src/regexp/regexp-interpreter.h
index fbc5a3b290..2d0b74f136 100644
--- a/deps/v8/src/regexp/regexp-interpreter.h
+++ b/deps/v8/src/regexp/regexp-interpreter.h
@@ -31,6 +31,8 @@ class V8_EXPORT_PRIVATE IrregexpInterpreter : public AllStatic {
// In case a StackOverflow occurs, EXCEPTION is returned. The caller is
// responsible for creating the exception.
+ // RETRY is returned if a retry through the runtime is needed (e.g. when
+ // interrupts have been scheduled or the regexp is marked for tier-up).
// Arguments input_start, input_end and backtrack_stack are
// unused. They are only passed to match the signature of the native irregex
// code.
@@ -46,8 +48,6 @@ class V8_EXPORT_PRIVATE IrregexpInterpreter : public AllStatic {
int registers_length, int start_position,
RegExp::CallOrigin call_origin);
- static void Disassemble(ByteArray byte_array, const std::string& pattern);
-
private:
static Result Match(Isolate* isolate, JSRegExp regexp, String subject_string,
int* registers, int registers_length, int start_position,
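
The header change above documents the RETRY result next to the existing ones. A hedged sketch of the contract a caller would honor, with stand-in helpers for the interpreter, the runtime recompilation, and native execution:

// The enum values mirror the header comment; the functions are stand-ins.
enum Result { FAILURE, SUCCESS, EXCEPTION, RETRY };

Result InterpretOnce() { return RETRY; }    // stand-in for the interpreter
void RecompileForTierUp() {}                // stand-in for runtime tier-up
Result ExecuteNative() { return SUCCESS; }  // stand-in for jitted execution

Result ExecuteWithRetry() {
  Result result = InterpretOnce();
  if (result == RETRY) {
    RecompileForTierUp();      // tier-up happens through the runtime
    result = ExecuteNative();  // the match is then retried natively
  }
  return result;
}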
diff --git a/deps/v8/src/regexp/regexp-parser.cc b/deps/v8/src/regexp/regexp-parser.cc
index ec1beca84b..951f815374 100644
--- a/deps/v8/src/regexp/regexp-parser.cc
+++ b/deps/v8/src/regexp/regexp-parser.cc
@@ -84,6 +84,9 @@ void RegExpParser::Advance() {
ReportError(CStrVector(
MessageFormatter::TemplateString(MessageTemplate::kStackOverflow)));
} else if (zone()->excess_allocation()) {
+ if (FLAG_correctness_fuzzer_suppressions) {
+ FATAL("Aborting on excess zone allocation");
+ }
ReportError(CStrVector("Regular expression too large"));
} else {
current_ = ReadNext<true>();
diff --git a/deps/v8/src/regexp/regexp-stack.h b/deps/v8/src/regexp/regexp-stack.h
index 7ecaa40b81..d3c5415f1f 100644
--- a/deps/v8/src/regexp/regexp-stack.h
+++ b/deps/v8/src/regexp/regexp-stack.h
@@ -73,6 +73,9 @@ class RegExpStack {
char* RestoreStack(char* from);
void FreeThreadResources() { thread_local_.Free(); }
+ // Maximal size of allocated stack area.
+ static constexpr size_t kMaximumStackSize = 64 * MB;
+
private:
RegExpStack();
~RegExpStack();
@@ -84,9 +87,6 @@ class RegExpStack {
// Minimal size of allocated stack area.
static const size_t kMinimumStackSize = 1 * KB;
- // Maximal size of allocated stack area.
- static const size_t kMaximumStackSize = 64 * MB;
-
// Structure holding the allocated memory, size and limit.
struct ThreadLocal {
ThreadLocal() { Clear(); }
diff --git a/deps/v8/src/regexp/regexp-utils.cc b/deps/v8/src/regexp/regexp-utils.cc
index c9194d5170..73c2015dd9 100644
--- a/deps/v8/src/regexp/regexp-utils.cc
+++ b/deps/v8/src/regexp/regexp-utils.cc
@@ -171,12 +171,11 @@ bool RegExpUtils::IsUnmodifiedRegExp(Isolate* isolate, Handle<Object> obj) {
// Check that the "exec" method is unmodified.
// Check that the index refers to "exec" method (this has to be consistent
// with the init order in the bootstrapper).
+ InternalIndex kExecIndex(JSRegExp::kExecFunctionDescriptorIndex);
DCHECK_EQ(*(isolate->factory()->exec_string()),
- proto_map.instance_descriptors().GetKey(
- JSRegExp::kExecFunctionDescriptorIndex));
- if (proto_map.instance_descriptors()
- .GetDetails(JSRegExp::kExecFunctionDescriptorIndex)
- .constness() != PropertyConstness::kConst) {
+ proto_map.instance_descriptors().GetKey(kExecIndex));
+ if (proto_map.instance_descriptors().GetDetails(kExecIndex).constness() !=
+ PropertyConstness::kConst) {
return false;
}
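
The hunk above replaces a raw int with InternalIndex when addressing instance descriptors. A short sketch of why such a typed wrapper helps; the class is illustrative, not V8's actual InternalIndex:

// The explicit constructor keeps raw ints from silently flowing into
// descriptor-array accessors.
class TypedIndex {
 public:
  explicit constexpr TypedIndex(int value) : value_(value) {}
  constexpr int as_int() const { return value_; }

 private:
  int value_;
};

int GetKey(TypedIndex index) { return index.as_int(); }  // placeholder body

// GetKey(3);              // would not compile: no implicit conversion
// GetKey(TypedIndex(3));  // callers must state the index kind explicitly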
diff --git a/deps/v8/src/regexp/regexp.cc b/deps/v8/src/regexp/regexp.cc
index e0bc4b8e32..a4ab48ed0e 100644
--- a/deps/v8/src/regexp/regexp.cc
+++ b/deps/v8/src/regexp/regexp.cc
@@ -9,6 +9,7 @@
#include "src/heap/heap-inl.h"
#include "src/objects/js-regexp-inl.h"
#include "src/regexp/regexp-bytecode-generator.h"
+#include "src/regexp/regexp-bytecodes.h"
#include "src/regexp/regexp-compiler.h"
#include "src/regexp/regexp-dotprinter.h"
#include "src/regexp/regexp-interpreter.h"
@@ -574,7 +575,7 @@ int RegExpImpl::IrregexpExecRaw(Isolate* isolate, Handle<JSRegExp> regexp,
// match.
// We need to reset the tier up to start over with compilation.
if (FLAG_regexp_tier_up) {
- regexp->ResetTierUp();
+ regexp->ResetLastTierUpTick();
}
is_one_byte = String::IsOneByteRepresentationUnderneath(*subject);
EnsureCompiledIrregexp(isolate, regexp, subject, is_one_byte);
@@ -600,6 +601,20 @@ MaybeHandle<Object> RegExpImpl::IrregexpExec(
}
#endif
+ // For very long subject strings, the regexp interpreter is currently much
+ // slower than jitted code execution. If the tier-up strategy is turned on,
+ // we want to avoid this performance penalty, so we eagerly tier up if the
+ // subject string length is equal to or greater than the given heuristic
+ // value.
+ if (FLAG_regexp_tier_up &&
+ subject->length() >= JSRegExp::kTierUpForSubjectLengthValue) {
+ regexp->MarkTierUpForNextExec();
+ if (FLAG_trace_regexp_tier_up) {
+ PrintF(
+ "Forcing tier-up for very long strings in "
+ "RegExpImpl::IrregexpExec\n");
+ }
+ }
+
// Prepare space for the return values.
int required_registers = RegExp::IrregexpPrepare(isolate, regexp, subject);
if (required_registers < 0) {
@@ -860,14 +875,15 @@ bool RegExpImpl::Compile(Isolate* isolate, Zone* zone, RegExpCompileData* data,
OFStream os(trace_scope.file());
Handle<Code> c(Code::cast(result.code), isolate);
auto pattern_cstring = pattern->ToCString();
- c->Disassemble(pattern_cstring.get(), os);
+ c->Disassemble(pattern_cstring.get(), os, isolate);
}
#endif
if (FLAG_print_regexp_bytecode &&
data->compilation_target == RegExpCompilationTarget::kBytecode) {
Handle<ByteArray> bytecode(ByteArray::cast(result.code), isolate);
auto pattern_cstring = pattern->ToCString();
- IrregexpInterpreter::Disassemble(*bytecode, pattern_cstring.get());
+ RegExpBytecodeDisassemble(bytecode->GetDataStartAddress(),
+ bytecode->length(), pattern_cstring.get());
}
}
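
The IrregexpExec hunk above keys eager tier-up on the subject length. A sketch of that heuristic under an assumed threshold; V8 takes the real value from JSRegExp::kTierUpForSubjectLengthValue:

#include <cstddef>

constexpr std::size_t kAssumedTierUpSubjectLength = 1000;  // assumed value

bool ShouldEagerlyTierUp(bool tier_up_enabled, std::size_t subject_length) {
  // Long subjects pay the interpreter penalty on every character, so one
  // eager compile is cheaper than staying in bytecode.
  return tier_up_enabled && subject_length >= kAssumedTierUpSubjectLength;
}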
diff --git a/deps/v8/src/regexp/regexp.h b/deps/v8/src/regexp/regexp.h
index 8ccc9789a3..6625b063bc 100644
--- a/deps/v8/src/regexp/regexp.h
+++ b/deps/v8/src/regexp/regexp.h
@@ -55,10 +55,7 @@ struct RegExpCompileData {
class RegExp final : public AllStatic {
public:
- // Whether the irregexp engine generates native code or interpreter bytecode.
- static bool CanGenerateNativeCode() {
- return !FLAG_regexp_interpret_all || FLAG_regexp_tier_up;
- }
+ // Whether the irregexp engine generates interpreter bytecode.
static bool CanGenerateBytecode() {
return FLAG_regexp_interpret_all || FLAG_regexp_tier_up;
}
diff --git a/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc b/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc
index d4144e7e64..853d8b2815 100644
--- a/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc
+++ b/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc
@@ -38,7 +38,10 @@ namespace internal {
* The remaining registers are free for computations.
* Each call to a public method should retain this convention.
*
- * The stack will have the following structure:
+ * The stack will have the following structure:
+ * - fp[112] Address regexp (address of the JSRegExp object; unused in
+ * native code, passed to match the signature of
+ * the interpreter)
* - fp[108] Isolate* isolate (address of the current isolate)
* - fp[104] direct_call (if 1, direct call from JavaScript code,
* if 0, call through the runtime system).
@@ -85,7 +88,8 @@ namespace internal {
* int num_capture_registers,
* byte* stack_area_base,
* bool direct_call = false,
- * Isolate* isolate);
+ * Isolate* isolate,
+ * Address regexp);
* The call is performed by NativeRegExpMacroAssembler::Execute()
* (in regexp-macro-assembler.cc) via the GeneratedCode wrapper.
*/
@@ -204,7 +208,7 @@ void RegExpMacroAssemblerS390::CheckGreedyLoop(Label* on_equal) {
Label backtrack_non_equal;
__ CmpP(current_input_offset(), MemOperand(backtrack_stackpointer(), 0));
__ bne(&backtrack_non_equal);
- __ AddP(backtrack_stackpointer(), Operand(kPointerSize));
+ __ AddP(backtrack_stackpointer(), Operand(kSystemPointerSize));
BranchOrBacktrack(al, on_equal);
__ bind(&backtrack_non_equal);
@@ -635,11 +639,11 @@ Handle<HeapObject> RegExpMacroAssemblerS390::GetCode(Handle<String> source) {
// Requires us to save the callee-preserved registers r6-r13
// General convention is to also save r14 (return addr) and
// sp/r15 as well in a single STM/STMG
- __ StoreMultipleP(r6, sp, MemOperand(sp, 6 * kPointerSize));
+ __ StoreMultipleP(r6, sp, MemOperand(sp, 6 * kSystemPointerSize));
// Load stack parameters from caller stack frame
- __ LoadMultipleP(r7, r9,
- MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize));
+ __ LoadMultipleP(
+ r7, r9, MemOperand(sp, kStackFrameExtraParamSlot * kSystemPointerSize));
// r7 = capture array size
// r8 = stack area base
// r9 = direct call
@@ -654,7 +658,7 @@ Handle<HeapObject> RegExpMacroAssemblerS390::GetCode(Handle<String> source) {
// Set frame pointer in space for it if this is not a direct call
// from generated code.
__ LoadRR(frame_pointer(), sp);
- __ lay(sp, MemOperand(sp, -10 * kPointerSize));
+ __ lay(sp, MemOperand(sp, -10 * kSystemPointerSize));
__ mov(r1, Operand::Zero()); // success counter
__ LoadRR(r0, r1); // offset of location
__ StoreMultipleP(r0, r9, MemOperand(sp, 0));
@@ -672,7 +676,7 @@ Handle<HeapObject> RegExpMacroAssemblerS390::GetCode(Handle<String> source) {
__ ble(&stack_limit_hit);
// Check if there is room for the variable number of registers above
// the stack limit.
- __ CmpLogicalP(r2, Operand(num_registers_ * kPointerSize));
+ __ CmpLogicalP(r2, Operand(num_registers_ * kSystemPointerSize));
__ bge(&stack_ok);
// Exit with OutOfMemory exception. There is not enough space on the stack
// for our working registers.
@@ -688,7 +692,7 @@ Handle<HeapObject> RegExpMacroAssemblerS390::GetCode(Handle<String> source) {
__ bind(&stack_ok);
// Allocate space on stack for registers.
- __ lay(sp, MemOperand(sp, (-num_registers_ * kPointerSize)));
+ __ lay(sp, MemOperand(sp, (-num_registers_ * kSystemPointerSize)));
// Load string end.
__ LoadP(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
// Load input start.
@@ -731,12 +735,13 @@ Handle<HeapObject> RegExpMacroAssemblerS390::GetCode(Handle<String> source) {
// Fill saved registers with initial value = start offset - 1
if (num_saved_registers_ > 8) {
// One slot beyond address of register 0.
- __ lay(r3, MemOperand(frame_pointer(), kRegisterZero + kPointerSize));
+ __ lay(r3,
+ MemOperand(frame_pointer(), kRegisterZero + kSystemPointerSize));
__ Load(r4, Operand(num_saved_registers_));
Label init_loop;
__ bind(&init_loop);
- __ StoreP(r1, MemOperand(r3, -kPointerSize));
- __ lay(r3, MemOperand(r3, -kPointerSize));
+ __ StoreP(r1, MemOperand(r3, -kSystemPointerSize));
+ __ lay(r3, MemOperand(r3, -kSystemPointerSize));
__ BranchOnCount(r4, &init_loop);
} else {
for (int i = 0; i < num_saved_registers_; i++) {
@@ -871,7 +876,7 @@ Handle<HeapObject> RegExpMacroAssemblerS390::GetCode(Handle<String> source) {
// Skip sp past regexp registers and local variables.
__ LoadRR(sp, frame_pointer());
// Restore registers r6..r15.
- __ LoadMultipleP(r6, sp, MemOperand(sp, 6 * kPointerSize));
+ __ LoadMultipleP(r6, sp, MemOperand(sp, 6 * kSystemPointerSize));
__ b(r14);
@@ -1087,17 +1092,19 @@ void RegExpMacroAssemblerS390::CallCheckStackGuardState(Register scratch) {
// Code of self.
__ mov(r3, Operand(masm_->CodeObject()));
// r2 becomes return address pointer.
- __ lay(r2, MemOperand(sp, kStackFrameRASlot * kPointerSize));
+ __ lay(r2, MemOperand(sp, kStackFrameRASlot * kSystemPointerSize));
ExternalReference stack_guard_check =
ExternalReference::re_check_stack_guard_state(isolate());
__ mov(ip, Operand(stack_guard_check));
__ StoreReturnAddressAndCall(ip);
- if (base::OS::ActivationFrameAlignment() > kPointerSize) {
- __ LoadP(sp, MemOperand(sp, (kNumRequiredStackFrameSlots * kPointerSize)));
+ if (base::OS::ActivationFrameAlignment() > kSystemPointerSize) {
+ __ LoadP(
+ sp, MemOperand(sp, (kNumRequiredStackFrameSlots * kSystemPointerSize)));
} else {
- __ la(sp, MemOperand(sp, (kNumRequiredStackFrameSlots * kPointerSize)));
+ __ la(sp,
+ MemOperand(sp, (kNumRequiredStackFrameSlots * kSystemPointerSize)));
}
__ mov(code_pointer(), Operand(masm_->CodeObject()));
@@ -1106,7 +1113,7 @@ void RegExpMacroAssemblerS390::CallCheckStackGuardState(Register scratch) {
// Helper function for reading a value out of a stack frame.
template <typename T>
static T& frame_entry(Address re_frame, int frame_offset) {
- DCHECK_EQ(kPointerSize, sizeof(T));
+ DCHECK_EQ(kSystemPointerSize, sizeof(T));
#ifdef V8_TARGET_ARCH_S390X
return reinterpret_cast<T&>(Memory<uint64_t>(re_frame + frame_offset));
#else
@@ -1140,7 +1147,7 @@ MemOperand RegExpMacroAssemblerS390::register_location(int register_index) {
num_registers_ = register_index + 1;
}
return MemOperand(frame_pointer(),
- kRegisterZero - register_index * kPointerSize);
+ kRegisterZero - register_index * kSystemPointerSize);
}
void RegExpMacroAssemblerS390::CheckPosition(int cp_offset,
@@ -1200,7 +1207,7 @@ void RegExpMacroAssemblerS390::SafeCallTarget(Label* name) {
void RegExpMacroAssemblerS390::Push(Register source) {
DCHECK(source != backtrack_stackpointer());
__ lay(backtrack_stackpointer(),
- MemOperand(backtrack_stackpointer(), -kPointerSize));
+ MemOperand(backtrack_stackpointer(), -kSystemPointerSize));
__ StoreP(source, MemOperand(backtrack_stackpointer()));
}
@@ -1208,7 +1215,7 @@ void RegExpMacroAssemblerS390::Pop(Register target) {
DCHECK(target != backtrack_stackpointer());
__ LoadP(target, MemOperand(backtrack_stackpointer()));
__ la(backtrack_stackpointer(),
- MemOperand(backtrack_stackpointer(), kPointerSize));
+ MemOperand(backtrack_stackpointer(), kSystemPointerSize));
}
void RegExpMacroAssemblerS390::CheckPreemption() {
@@ -1235,13 +1242,15 @@ void RegExpMacroAssemblerS390::CallCFunctionUsingStub(
__ mov(code_pointer(), Operand(function));
Label ret;
__ larl(r14, &ret);
- __ StoreP(r14, MemOperand(sp, kStackFrameRASlot * kPointerSize));
+ __ StoreP(r14, MemOperand(sp, kStackFrameRASlot * kSystemPointerSize));
__ b(code_pointer());
__ bind(&ret);
- if (base::OS::ActivationFrameAlignment() > kPointerSize) {
- __ LoadP(sp, MemOperand(sp, (kNumRequiredStackFrameSlots * kPointerSize)));
+ if (base::OS::ActivationFrameAlignment() > kSystemPointerSize) {
+ __ LoadP(
+ sp, MemOperand(sp, (kNumRequiredStackFrameSlots * kSystemPointerSize)));
} else {
- __ la(sp, MemOperand(sp, (kNumRequiredStackFrameSlots * kPointerSize)));
+ __ la(sp,
+ MemOperand(sp, (kNumRequiredStackFrameSlots * kSystemPointerSize)));
}
__ mov(code_pointer(), Operand(masm_->CodeObject()));
}
diff --git a/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.h b/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.h
index 3a6a915263..4811ac7382 100644
--- a/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.h
+++ b/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.h
@@ -95,26 +95,27 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerS390
kStoredRegisters + kCalleeRegisterSaveAreaSize;
// Stack parameters placed by caller.
static const int kCaptureArraySize = kCallerFrame;
- static const int kStackAreaBase = kCallerFrame + kPointerSize;
+ static const int kStackAreaBase = kCallerFrame + kSystemPointerSize;
// kDirectCall again
- static const int kIsolate = kStackAreaBase + 2 * kPointerSize;
+ static const int kIsolate = kStackAreaBase + 2 * kSystemPointerSize;
// Below the frame pointer.
// Register parameters stored by setup code.
- static const int kDirectCall = kFramePointer - kPointerSize;
- static const int kStackHighEnd = kDirectCall - kPointerSize;
- static const int kNumOutputRegisters = kStackHighEnd - kPointerSize;
- static const int kRegisterOutput = kNumOutputRegisters - kPointerSize;
- static const int kInputEnd = kRegisterOutput - kPointerSize;
- static const int kInputStart = kInputEnd - kPointerSize;
- static const int kStartIndex = kInputStart - kPointerSize;
- static const int kInputString = kStartIndex - kPointerSize;
+ static const int kDirectCall = kFramePointer - kSystemPointerSize;
+ static const int kStackHighEnd = kDirectCall - kSystemPointerSize;
+ static const int kNumOutputRegisters = kStackHighEnd - kSystemPointerSize;
+ static const int kRegisterOutput = kNumOutputRegisters - kSystemPointerSize;
+ static const int kInputEnd = kRegisterOutput - kSystemPointerSize;
+ static const int kInputStart = kInputEnd - kSystemPointerSize;
+ static const int kStartIndex = kInputStart - kSystemPointerSize;
+ static const int kInputString = kStartIndex - kSystemPointerSize;
// When adding local variables remember to push space for them in
// the frame in GetCode.
- static const int kSuccessfulCaptures = kInputString - kPointerSize;
- static const int kStringStartMinusOne = kSuccessfulCaptures - kPointerSize;
+ static const int kSuccessfulCaptures = kInputString - kSystemPointerSize;
+ static const int kStringStartMinusOne =
+ kSuccessfulCaptures - kSystemPointerSize;
// First register address. Following registers are below it on the stack.
- static const int kRegisterZero = kStringStartMinusOne - kPointerSize;
+ static const int kRegisterZero = kStringStartMinusOne - kSystemPointerSize;
// Initial size of code buffer.
static const int kRegExpCodeSize = 1024;
diff --git a/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc b/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc
index 42ba13c4ee..4352c3f67f 100644
--- a/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc
+++ b/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc
@@ -296,7 +296,7 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
} else {
DCHECK(mode_ == UC16);
// Save important/volatile registers before calling C function.
-#ifndef _WIN64
+#ifndef V8_TARGET_OS_WIN
// Caller save on Linux and callee save in Windows.
__ pushq(rsi);
__ pushq(rdi);
@@ -311,7 +311,7 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
// Address byte_offset2 - Address of current character position.
// size_t byte_length - length of capture in bytes(!)
// Isolate* isolate or 0 if unicode flag.
-#ifdef _WIN64
+#ifdef V8_TARGET_OS_WIN
DCHECK(rcx == arg_reg_1);
DCHECK(rdx == arg_reg_2);
// Compute and set byte_offset1 (start of capture).
@@ -333,7 +333,7 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
if (read_backward) {
__ subq(rsi, rbx);
}
-#endif // _WIN64
+#endif // V8_TARGET_OS_WIN
// Set byte_length.
__ movq(arg_reg_3, rbx);
@@ -358,7 +358,7 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
// Restore original values before reacting on result value.
__ Move(code_object_pointer(), masm_.CodeObject());
__ popq(backtrack_stackpointer());
-#ifndef _WIN64
+#ifndef V8_TARGET_OS_WIN
__ popq(rdi);
__ popq(rsi);
#endif
@@ -683,7 +683,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ movq(rbp, rsp);
// Save parameters and callee-save registers. Order here should correspond
// to order of kBackup_ebx etc.
-#ifdef _WIN64
+#ifdef V8_TARGET_OS_WIN
// MSVC passes arguments in rcx, rdx, r8, r9, with backing stack slots.
// Store register parameters in pre-allocated stack slots,
__ movq(Operand(rbp, kInputString), rcx);
@@ -890,7 +890,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
}
__ bind(&return_rax);
-#ifdef _WIN64
+#ifdef V8_TARGET_OS_WIN
// Restore callee save registers.
__ leaq(rsp, Operand(rbp, kLastCalleeSaveRegister));
__ popq(rbx);
@@ -943,7 +943,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
// Reached if the backtrack-stack limit has been hit.
// Save registers before calling C function
-#ifndef _WIN64
+#ifndef V8_TARGET_OS_WIN
// Callee-save in Microsoft 64-bit ABI, but not in AMD64 ABI.
__ pushq(rsi);
__ pushq(rdi);
@@ -952,7 +952,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
// Call GrowStack(backtrack_stackpointer())
static const int num_arguments = 3;
__ PrepareCallCFunction(num_arguments);
-#ifdef _WIN64
+#ifdef V8_TARGET_OS_WIN
// Microsoft passes parameters in rcx, rdx, r8.
// First argument, backtrack stackpointer, is already in rcx.
__ leaq(rdx, Operand(rbp, kStackHighEnd)); // Second argument
@@ -974,7 +974,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ movq(backtrack_stackpointer(), rax);
// Restore saved registers and continue.
__ Move(code_object_pointer(), masm_.CodeObject());
-#ifndef _WIN64
+#ifndef V8_TARGET_OS_WIN
__ popq(rdi);
__ popq(rsi);
#endif
@@ -1159,7 +1159,7 @@ void RegExpMacroAssemblerX64::CallCheckStackGuardState() {
// store anything volatile in a C call or overwritten by this function.
static const int num_arguments = 3;
__ PrepareCallCFunction(num_arguments);
-#ifdef _WIN64
+#ifdef V8_TARGET_OS_WIN
// Second argument: Code of self. (Do this before overwriting r8).
__ movq(rdx, code_object_pointer());
// Third argument: RegExp code frame pointer.
diff --git a/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.h b/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.h
index 9d011dcd46..1d95a2718f 100644
--- a/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.h
+++ b/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.h
@@ -92,7 +92,7 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerX64
static const int kReturn_eip = kFramePointer + kSystemPointerSize;
static const int kFrameAlign = kReturn_eip + kSystemPointerSize;
-#ifdef _WIN64
+#ifdef V8_TARGET_OS_WIN
// Parameters (first four passed as registers, but with room on stack).
// In Microsoft 64-bit Calling Convention, there is room on the caller's
// stack (before the return address) to spill parameter registers. We
@@ -131,7 +131,7 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerX64
static const int kIsolate = kDirectCall + kSystemPointerSize;
#endif
-#ifdef _WIN64
+#ifdef V8_TARGET_OS_WIN
// Microsoft calling convention has three callee-saved registers
// (that we are using). We push these after the frame pointer.
static const int kBackup_rsi = kFramePointer - kSystemPointerSize;
diff --git a/deps/v8/src/roots/roots.h b/deps/v8/src/roots/roots.h
index c82ec6d04f..c262f63928 100644
--- a/deps/v8/src/roots/roots.h
+++ b/deps/v8/src/roots/roots.h
@@ -206,37 +206,37 @@ class Symbol;
// Mutable roots that are known to be immortal immovable, for which we can
// safely skip write barriers.
-#define STRONG_MUTABLE_IMMOVABLE_ROOT_LIST(V) \
- ACCESSOR_INFO_ROOT_LIST(V) \
- /* Maps */ \
- V(Map, external_map, ExternalMap) \
- V(Map, message_object_map, JSMessageObjectMap) \
- /* Canonical empty values */ \
- V(Script, empty_script, EmptyScript) \
- V(FeedbackCell, many_closures_cell, ManyClosuresCell) \
- V(Cell, invalid_prototype_validity_cell, InvalidPrototypeValidityCell) \
- /* Protectors */ \
- V(Cell, array_constructor_protector, ArrayConstructorProtector) \
- V(PropertyCell, no_elements_protector, NoElementsProtector) \
- V(Cell, is_concat_spreadable_protector, IsConcatSpreadableProtector) \
- V(PropertyCell, array_species_protector, ArraySpeciesProtector) \
- V(PropertyCell, typed_array_species_protector, TypedArraySpeciesProtector) \
- V(PropertyCell, promise_species_protector, PromiseSpeciesProtector) \
- V(Cell, string_length_protector, StringLengthProtector) \
- V(PropertyCell, array_iterator_protector, ArrayIteratorProtector) \
- V(PropertyCell, array_buffer_detaching_protector, \
- ArrayBufferDetachingProtector) \
- V(PropertyCell, promise_hook_protector, PromiseHookProtector) \
- V(Cell, promise_resolve_protector, PromiseResolveProtector) \
- V(PropertyCell, map_iterator_protector, MapIteratorProtector) \
- V(PropertyCell, promise_then_protector, PromiseThenProtector) \
- V(PropertyCell, set_iterator_protector, SetIteratorProtector) \
- V(PropertyCell, string_iterator_protector, StringIteratorProtector) \
- /* Caches */ \
- V(FixedArray, single_character_string_cache, SingleCharacterStringCache) \
- V(FixedArray, string_split_cache, StringSplitCache) \
- V(FixedArray, regexp_multiple_cache, RegExpMultipleCache) \
- /* Indirection lists for isolate-independent builtins */ \
+#define STRONG_MUTABLE_IMMOVABLE_ROOT_LIST(V) \
+ ACCESSOR_INFO_ROOT_LIST(V) \
+ /* Maps */ \
+ V(Map, external_map, ExternalMap) \
+ V(Map, message_object_map, JSMessageObjectMap) \
+ /* Canonical empty values */ \
+ V(Script, empty_script, EmptyScript) \
+ V(FeedbackCell, many_closures_cell, ManyClosuresCell) \
+ V(Cell, invalid_prototype_validity_cell, InvalidPrototypeValidityCell) \
+ /* Protectors */ \
+ V(PropertyCell, array_constructor_protector, ArrayConstructorProtector) \
+ V(PropertyCell, no_elements_protector, NoElementsProtector) \
+ V(PropertyCell, is_concat_spreadable_protector, IsConcatSpreadableProtector) \
+ V(PropertyCell, array_species_protector, ArraySpeciesProtector) \
+ V(PropertyCell, typed_array_species_protector, TypedArraySpeciesProtector) \
+ V(PropertyCell, promise_species_protector, PromiseSpeciesProtector) \
+ V(PropertyCell, string_length_protector, StringLengthProtector) \
+ V(PropertyCell, array_iterator_protector, ArrayIteratorProtector) \
+ V(PropertyCell, array_buffer_detaching_protector, \
+ ArrayBufferDetachingProtector) \
+ V(PropertyCell, promise_hook_protector, PromiseHookProtector) \
+ V(PropertyCell, promise_resolve_protector, PromiseResolveProtector) \
+ V(PropertyCell, map_iterator_protector, MapIteratorProtector) \
+ V(PropertyCell, promise_then_protector, PromiseThenProtector) \
+ V(PropertyCell, set_iterator_protector, SetIteratorProtector) \
+ V(PropertyCell, string_iterator_protector, StringIteratorProtector) \
+ /* Caches */ \
+ V(FixedArray, single_character_string_cache, SingleCharacterStringCache) \
+ V(FixedArray, string_split_cache, StringSplitCache) \
+ V(FixedArray, regexp_multiple_cache, RegExpMultipleCache) \
+ /* Indirection lists for isolate-independent builtins */ \
V(FixedArray, builtins_constants_table, BuiltinsConstantsTable)
// These root references can be updated by the mutator.
@@ -255,8 +255,6 @@ class Symbol;
/* Feedback vectors that we need for code coverage or type profile */ \
V(Object, feedback_vectors_for_profiling_tools, \
FeedbackVectorsForProfilingTools) \
- V(WeakArrayList, noscript_shared_function_infos, \
- NoScriptSharedFunctionInfos) \
V(FixedArray, serialized_objects, SerializedObjects) \
V(FixedArray, serialized_global_proxy_sizes, SerializedGlobalProxySizes) \
V(TemplateList, message_listeners, MessageListeners) \
@@ -268,7 +266,9 @@ class Symbol;
V(HeapObject, weak_refs_keep_during_job, WeakRefsKeepDuringJob) \
V(HeapObject, interpreter_entry_trampoline_for_profiling, \
InterpreterEntryTrampolineForProfiling) \
- V(Object, pending_optimize_for_test_bytecode, PendingOptimizeForTestBytecode)
+ V(Object, pending_optimize_for_test_bytecode, \
+ PendingOptimizeForTestBytecode) \
+ V(WeakArrayList, shared_wasm_memories, SharedWasmMemories)
// Entries in this list are limited to Smis and are not visited during GC.
#define SMI_ROOT_LIST(V) \
diff --git a/deps/v8/src/runtime/runtime-array.cc b/deps/v8/src/runtime/runtime-array.cc
index 6190b16cff..34a8b2b937 100644
--- a/deps/v8/src/runtime/runtime-array.cc
+++ b/deps/v8/src/runtime/runtime-array.cc
@@ -5,6 +5,7 @@
#include "src/debug/debug.h"
#include "src/execution/arguments-inl.h"
#include "src/execution/isolate-inl.h"
+#include "src/execution/protectors-inl.h"
#include "src/heap/factory.h"
#include "src/heap/heap-inl.h" // For ToBoolean. TODO(jkummerow): Drop.
#include "src/heap/heap-write-barrier-inl.h"
@@ -136,8 +137,8 @@ RUNTIME_FUNCTION(Runtime_NewArray) {
// just flip the bit on the global protector cell instead.
// TODO(bmeurer): Find a better way to mark this. Global protectors
// tend to back-fire over time...
- if (isolate->IsArrayConstructorIntact()) {
- isolate->InvalidateArrayConstructorProtector();
+ if (Protectors::IsArrayConstructorIntact(isolate)) {
+ Protectors::InvalidateArrayConstructor(isolate);
}
}
}
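
The Runtime_NewArray hunk above moves the protector check behind a static Protectors interface. A minimal sketch of the underlying pattern, a one-way boolean that starts intact and can only be invalidated; the class is illustrative, not V8's cell-backed implementation:

class Protector {
 public:
  bool IsIntact() const { return intact_; }
  void Invalidate() { intact_ = false; }  // one-way transition

 private:
  bool intact_ = true;
};

void OnSlowPathHit(Protector& array_constructor_protector) {
  // Flip the global bit once instead of tracking state per object.
  if (array_constructor_protector.IsIntact()) {
    array_constructor_protector.Invalidate();
  }
}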
diff --git a/deps/v8/src/runtime/runtime-classes.cc b/deps/v8/src/runtime/runtime-classes.cc
index 522e93da3f..a4e9680a1f 100644
--- a/deps/v8/src/runtime/runtime-classes.cc
+++ b/deps/v8/src/runtime/runtime-classes.cc
@@ -130,7 +130,8 @@ Handle<Name> KeyToName<NumberDictionary>(Isolate* isolate, Handle<Object> key) {
inline void SetHomeObject(Isolate* isolate, JSFunction method,
JSObject home_object) {
if (method.shared().needs_home_object()) {
- const int kPropertyIndex = JSFunction::kMaybeHomeObjectDescriptorIndex;
+ const InternalIndex kPropertyIndex(
+ JSFunction::kMaybeHomeObjectDescriptorIndex);
CHECK_EQ(method.map().instance_descriptors().GetKey(kPropertyIndex),
ReadOnlyRoots(isolate).home_object_symbol());
@@ -303,7 +304,7 @@ bool AddDescriptorsByTemplate(
// Count the number of properties that must be in the instance and
// create the property array to hold the constants.
int count = 0;
- for (int i = 0; i < nof_descriptors; i++) {
+ for (InternalIndex i : InternalIndex::Range(nof_descriptors)) {
PropertyDetails details = descriptors_template->GetDetails(i);
if (details.location() == kDescriptor && details.kind() == kData) {
count++;
@@ -315,7 +316,7 @@ bool AddDescriptorsByTemplate(
// Read values from |descriptors_template| and store possibly post-processed
// values into "instantiated" |descriptors| array.
int field_index = 0;
- for (int i = 0; i < nof_descriptors; i++) {
+ for (InternalIndex i : InternalIndex::Range(nof_descriptors)) {
Object value = descriptors_template->GetStrongValue(i);
if (value.IsAccessorPair()) {
Handle<AccessorPair> pair = AccessorPair::Copy(
diff --git a/deps/v8/src/runtime/runtime-compiler.cc b/deps/v8/src/runtime/runtime-compiler.cc
index 4364c55775..c7f3201eac 100644
--- a/deps/v8/src/runtime/runtime-compiler.cc
+++ b/deps/v8/src/runtime/runtime-compiler.cc
@@ -157,6 +157,9 @@ RUNTIME_FUNCTION(Runtime_NotifyDeoptimized) {
TimerEventScope<TimerEventDeoptimizeCode> timer(isolate);
TRACE_EVENT0("v8", "V8.DeoptimizeCode");
Handle<JSFunction> function = deoptimizer->function();
+ // For OSR the optimized code isn't installed on the function, so get the
+ // code object from the deoptimizer.
+ Handle<Code> optimized_code = deoptimizer->compiled_code();
DeoptimizeKind type = deoptimizer->deopt_kind();
// TODO(turbofan): We currently need the native context to materialize
@@ -174,7 +177,7 @@ RUNTIME_FUNCTION(Runtime_NotifyDeoptimized) {
// Invalidate the underlying optimized code on non-lazy deopts.
if (type != DeoptimizeKind::kLazy) {
- Deoptimizer::DeoptimizeFunction(*function);
+ Deoptimizer::DeoptimizeFunction(*function, *optimized_code);
}
return ReadOnlyRoots(isolate).undefined_value();
@@ -224,8 +227,7 @@ BailoutId DetermineEntryAndDisarmOSRForInterpreter(JavaScriptFrame* frame) {
RUNTIME_FUNCTION(Runtime_CompileForOnStackReplacement) {
HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+ DCHECK_EQ(0, args.length());
// Only reachable when OSR is enabled.
CHECK(FLAG_use_osr);
@@ -233,7 +235,6 @@ RUNTIME_FUNCTION(Runtime_CompileForOnStackReplacement) {
// Determine frame triggering OSR request.
JavaScriptFrameIterator it(isolate);
JavaScriptFrame* frame = it.frame();
- DCHECK_EQ(frame->function(), *function);
DCHECK(frame->is_interpreted());
// Determine the entry point for which this OSR request has been fired and
@@ -242,6 +243,7 @@ RUNTIME_FUNCTION(Runtime_CompileForOnStackReplacement) {
DCHECK(!ast_id.IsNone());
MaybeHandle<Code> maybe_result;
+ Handle<JSFunction> function(frame->function(), isolate);
if (IsSuitableForOnStackReplacement(isolate, function)) {
if (FLAG_trace_osr) {
PrintF("[OSR - Compiling: ");
diff --git a/deps/v8/src/runtime/runtime-debug.cc b/deps/v8/src/runtime/runtime-debug.cc
index 0fbea6a193..09dd4f8132 100644
--- a/deps/v8/src/runtime/runtime-debug.cc
+++ b/deps/v8/src/runtime/runtime-debug.cc
@@ -491,8 +491,7 @@ int ScriptLinePosition(Handle<Script> script, int line) {
if (line < 0) return -1;
if (script->type() == Script::TYPE_WASM) {
- return WasmModuleObject::cast(script->wasm_module_object())
- .GetFunctionOffset(line);
+ return GetWasmFunctionOffset(script->wasm_native_module()->module(), line);
}
Script::InitLineEnds(script);
@@ -827,19 +826,6 @@ RUNTIME_FUNCTION(Runtime_LiveEditPatchScript) {
return ReadOnlyRoots(isolate).undefined_value();
}
-RUNTIME_FUNCTION(Runtime_PerformSideEffectCheckForObject) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, object, 0);
-
- DCHECK_EQ(isolate->debug_execution_mode(), DebugInfo::kSideEffects);
- if (!isolate->debug()->PerformSideEffectCheckForObject(object)) {
- DCHECK(isolate->has_pending_exception());
- return ReadOnlyRoots(isolate).exception();
- }
- return ReadOnlyRoots(isolate).undefined_value();
-}
-
RUNTIME_FUNCTION(Runtime_ProfileCreateSnapshotDataBlob) {
HandleScope scope(isolate);
DCHECK_EQ(0, args.length());
diff --git a/deps/v8/src/runtime/runtime-forin.cc b/deps/v8/src/runtime/runtime-forin.cc
index 6042a867c9..0d7e1dc30b 100644
--- a/deps/v8/src/runtime/runtime-forin.cc
+++ b/deps/v8/src/runtime/runtime-forin.cc
@@ -33,7 +33,10 @@ MaybeHandle<HeapObject> Enumerate(Isolate* isolate,
if (!accumulator.is_receiver_simple_enum()) {
Handle<FixedArray> keys;
ASSIGN_RETURN_ON_EXCEPTION(
- isolate, keys, accumulator.GetKeys(GetKeysConversion::kConvertToString),
+ isolate, keys,
+ accumulator.GetKeys(accumulator.may_have_elements()
+ ? GetKeysConversion::kConvertToString
+ : GetKeysConversion::kNoNumbers),
HeapObject);
// Test again, since cache may have been built by GetKeys() calls above.
if (!accumulator.is_receiver_simple_enum()) return keys;
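
The Enumerate change above only converts keys to strings when the receiver may actually have indexed elements. A small sketch of that choice; the enum mirrors the GetKeysConversion values named in the hunk:

enum class KeysConversion { kConvertToString, kNoNumbers };

KeysConversion ChooseConversion(bool may_have_elements) {
  // Converting integer indices to strings is only needed when the receiver
  // can actually have indexed elements; otherwise skip that work entirely.
  return may_have_elements ? KeysConversion::kConvertToString
                           : KeysConversion::kNoNumbers;
}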
diff --git a/deps/v8/src/runtime/runtime-internal.cc b/deps/v8/src/runtime/runtime-internal.cc
index 80f9baa48d..03c9e582d8 100644
--- a/deps/v8/src/runtime/runtime-internal.cc
+++ b/deps/v8/src/runtime/runtime-internal.cc
@@ -14,6 +14,7 @@
#include "src/execution/frames-inl.h"
#include "src/execution/isolate-inl.h"
#include "src/execution/messages.h"
+#include "src/execution/runtime-profiler.h"
#include "src/handles/maybe-handles.h"
#include "src/init/bootstrapper.h"
#include "src/logging/counters.h"
@@ -296,10 +297,11 @@ RUNTIME_FUNCTION(Runtime_BytecodeBudgetInterrupt) {
function->feedback_vector().set_invocation_count(1);
return ReadOnlyRoots(isolate).undefined_value();
}
- // Handle interrupts.
{
SealHandleScope shs(isolate);
- return isolate->stack_guard()->HandleInterrupts();
+ isolate->counters()->runtime_profiler_ticks()->Increment();
+ isolate->runtime_profiler()->MarkCandidatesForOptimization();
+ return ReadOnlyRoots(isolate).undefined_value();
}
}
diff --git a/deps/v8/src/runtime/runtime-literals.cc b/deps/v8/src/runtime/runtime-literals.cc
index 0ffc6e932e..497a27dbb9 100644
--- a/deps/v8/src/runtime/runtime-literals.cc
+++ b/deps/v8/src/runtime/runtime-literals.cc
@@ -111,8 +111,7 @@ MaybeHandle<JSObject> JSObjectWalkVisitor<ContextObject>::StructureWalk(
if (copy->HasFastProperties(isolate)) {
Handle<DescriptorArray> descriptors(
copy->map(isolate).instance_descriptors(isolate), isolate);
- int limit = copy->map(isolate).NumberOfOwnDescriptors();
- for (int i = 0; i < limit; i++) {
+ for (InternalIndex i : copy->map(isolate).IterateOwnDescriptors()) {
PropertyDetails details = descriptors->GetDetails(i);
DCHECK_EQ(kField, details.location());
DCHECK_EQ(kData, details.kind());
@@ -595,10 +594,11 @@ RUNTIME_FUNCTION(Runtime_CreateObjectLiteral) {
CONVERT_SMI_ARG_CHECKED(literals_index, 1);
CONVERT_ARG_HANDLE_CHECKED(ObjectBoilerplateDescription, description, 2);
CONVERT_SMI_ARG_CHECKED(flags, 3);
- Handle<FeedbackVector> vector = Handle<FeedbackVector>();
- if (!maybe_vector->IsUndefined()) {
- DCHECK(maybe_vector->IsFeedbackVector());
+ Handle<FeedbackVector> vector;
+ if (maybe_vector->IsFeedbackVector()) {
vector = Handle<FeedbackVector>::cast(maybe_vector);
+ } else {
+ DCHECK(maybe_vector->IsUndefined());
}
RETURN_RESULT_OR_FAILURE(
isolate, CreateLiteral<ObjectLiteralHelper>(
@@ -632,10 +632,11 @@ RUNTIME_FUNCTION(Runtime_CreateArrayLiteral) {
CONVERT_SMI_ARG_CHECKED(literals_index, 1);
CONVERT_ARG_HANDLE_CHECKED(ArrayBoilerplateDescription, elements, 2);
CONVERT_SMI_ARG_CHECKED(flags, 3);
- Handle<FeedbackVector> vector = Handle<FeedbackVector>();
- if (!maybe_vector->IsUndefined()) {
- DCHECK(maybe_vector->IsFeedbackVector());
+ Handle<FeedbackVector> vector;
+ if (maybe_vector->IsFeedbackVector()) {
vector = Handle<FeedbackVector>::cast(maybe_vector);
+ } else {
+ DCHECK(maybe_vector->IsUndefined());
}
RETURN_RESULT_OR_FAILURE(
isolate, CreateLiteral<ArrayLiteralHelper>(
@@ -649,11 +650,12 @@ RUNTIME_FUNCTION(Runtime_CreateRegExpLiteral) {
CONVERT_SMI_ARG_CHECKED(index, 1);
CONVERT_ARG_HANDLE_CHECKED(String, pattern, 2);
CONVERT_SMI_ARG_CHECKED(flags, 3);
- FeedbackSlot literal_slot(FeedbackVector::ToSlot(index));
- Handle<FeedbackVector> vector = Handle<FeedbackVector>();
- if (!maybe_vector->IsUndefined()) {
- DCHECK(maybe_vector->IsFeedbackVector());
+
+ Handle<FeedbackVector> vector;
+ if (maybe_vector->IsFeedbackVector()) {
vector = Handle<FeedbackVector>::cast(maybe_vector);
+ } else {
+ DCHECK(maybe_vector->IsUndefined());
}
if (vector.is_null()) {
Handle<JSRegExp> new_regexp;
@@ -663,20 +665,21 @@ RUNTIME_FUNCTION(Runtime_CreateRegExpLiteral) {
return *new_regexp;
}
- // Check if boilerplate exists. If not, create it first.
- Handle<JSRegExp> boilerplate;
+ // This function assumes that the boilerplate does not yet exist.
+ FeedbackSlot literal_slot(FeedbackVector::ToSlot(index));
Handle<Object> literal_site(vector->Get(literal_slot)->cast<Object>(),
isolate);
- if (!HasBoilerplate(literal_site)) {
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, boilerplate,
- JSRegExp::New(isolate, pattern, JSRegExp::Flags(flags)));
- if (IsUninitializedLiteralSite(*literal_site)) {
- PreInitializeLiteralSite(vector, literal_slot);
- return *boilerplate;
- }
- vector->Set(literal_slot, *boilerplate);
+ CHECK(!HasBoilerplate(literal_site));
+
+ Handle<JSRegExp> boilerplate;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, boilerplate,
+ JSRegExp::New(isolate, pattern, JSRegExp::Flags(flags)));
+ if (IsUninitializedLiteralSite(*literal_site)) {
+ PreInitializeLiteralSite(vector, literal_slot);
+ return *boilerplate;
}
+ vector->Set(literal_slot, *boilerplate);
return *JSRegExp::Copy(boilerplate);
}
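
Runtime_CreateRegExpLiteral now checks that no boilerplate exists: the first execution of a literal only pre-initializes its feedback slot and returns a fresh object, the second installs the boilerplate, and later executions copy it on a faster path. A sketch of that two-step protocol; the states and types are illustrative, not V8's feedback-vector encoding:

#include <memory>

struct RegExpObject { /* pattern, flags, ... */ };

enum class SiteState { kUninitialized, kPreInitialized, kHasBoilerplate };

struct LiteralSite {
  SiteState state = SiteState::kUninitialized;
  std::shared_ptr<RegExpObject> boilerplate;
};

std::shared_ptr<RegExpObject> CreateLiteral(LiteralSite& site) {
  auto fresh = std::make_shared<RegExpObject>();
  if (site.state == SiteState::kUninitialized) {
    // First execution: remember we have been here, but cache nothing yet.
    site.state = SiteState::kPreInitialized;
    return fresh;
  }
  // Second execution: install the boilerplate; later hits copy it without
  // reaching this function again.
  site.state = SiteState::kHasBoilerplate;
  site.boilerplate = fresh;
  return std::make_shared<RegExpObject>(*fresh);  // like JSRegExp::Copy
}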
diff --git a/deps/v8/src/runtime/runtime-object.cc b/deps/v8/src/runtime/runtime-object.cc
index 310cdaab42..e07d91dd31 100644
--- a/deps/v8/src/runtime/runtime-object.cc
+++ b/deps/v8/src/runtime/runtime-object.cc
@@ -91,7 +91,7 @@ bool DeleteObjectPropertyFast(Isolate* isolate, Handle<JSReceiver> receiver,
// (2) The property to be deleted must be the last property.
int nof = receiver_map->NumberOfOwnDescriptors();
if (nof == 0) return false;
- int descriptor = nof - 1;
+ InternalIndex descriptor(nof - 1);
Handle<DescriptorArray> descriptors(receiver_map->instance_descriptors(),
isolate);
if (descriptors->GetKey(descriptor) != *key) return false;
@@ -132,8 +132,12 @@ bool DeleteObjectPropertyFast(Isolate* isolate, Handle<JSReceiver> receiver,
// for properties stored in the descriptor array.
if (details.location() == kField) {
DisallowHeapAllocation no_allocation;
- isolate->heap()->NotifyObjectLayoutChange(
- *receiver, receiver_map->instance_size(), no_allocation);
+
+ // Invalidate slots manually later in case we delete an in-object tagged
+ // property. In this case we might later store an untagged value in the
+ // recorded slot.
+ isolate->heap()->NotifyObjectLayoutChange(*receiver, no_allocation,
+ InvalidateRecordedSlots::kNo);
FieldIndex index =
FieldIndex::ForPropertyIndex(*receiver_map, details.field_index());
// Special case deleting the last out-of object property.
@@ -149,8 +153,13 @@ bool DeleteObjectPropertyFast(Isolate* isolate, Handle<JSReceiver> receiver,
// Slot clearing is the reason why this entire function cannot currently
// be implemented in the DeleteProperty stub.
if (index.is_inobject() && !receiver_map->IsUnboxedDoubleField(index)) {
+ // We need to clear the recorded slot in this case because in-object
+ // slack tracking might not be finished. This ensures that we don't
+ // have recorded slots in free space.
isolate->heap()->ClearRecordedSlot(*receiver,
receiver->RawField(index.offset()));
+ MemoryChunk* chunk = MemoryChunk::FromHeapObject(*receiver);
+ chunk->InvalidateRecordedSlots(*receiver);
}
}
}
diff --git a/deps/v8/src/runtime/runtime-regexp.cc b/deps/v8/src/runtime/runtime-regexp.cc
index e197e16e11..980339ee5e 100644
--- a/deps/v8/src/runtime/runtime-regexp.cc
+++ b/deps/v8/src/runtime/runtime-regexp.cc
@@ -613,20 +613,6 @@ V8_WARN_UNUSED_RESULT static Object StringReplaceGlobalRegExpWithString(
JSRegExp::Type typeTag = regexp->TypeTag();
if (typeTag == JSRegExp::IRREGEXP) {
- // Force tier up to native code for global replaces. The global replace is
- // implemented differently for native code and bytecode execution, where the
- // native code expects an array to store all the matches, and the bytecode
- // matches one at a time, so it's easier to tier-up to native code from the
- // start.
- if (FLAG_regexp_tier_up) {
- regexp->MarkTierUpForNextExec();
- if (FLAG_trace_regexp_tier_up) {
- PrintF(
- "Forcing tier-up of JSRegExp object %p in "
- "StringReplaceGlobalRegExpWithString\n",
- reinterpret_cast<void*>(regexp->ptr()));
- }
- }
// Ensure the RegExp is compiled so we can access the capture-name map.
if (RegExp::IrregexpPrepare(isolate, regexp, subject) == -1) {
DCHECK(isolate->has_pending_exception());
@@ -1349,6 +1335,19 @@ V8_WARN_UNUSED_RESULT MaybeHandle<String> RegExpReplace(
RETURN_ON_EXCEPTION(isolate, RegExpUtils::SetLastIndex(isolate, regexp, 0),
String);
+ // Force tier up to native code for global replaces. The global replace is
+ // implemented differently for native code and bytecode execution, where the
+ // native code expects an array to store all the matches, and the bytecode
+ // matches one at a time, so it's easier to tier-up to native code from the
+ // start.
+ if (FLAG_regexp_tier_up && regexp->TypeTag() == JSRegExp::IRREGEXP) {
+ regexp->MarkTierUpForNextExec();
+ if (FLAG_trace_regexp_tier_up) {
+ PrintF("Forcing tier-up of JSRegExp object %p in RegExpReplace\n",
+ reinterpret_cast<void*>(regexp->ptr()));
+ }
+ }
+
if (replace->length() == 0) {
if (string->IsOneByteRepresentation()) {
Object result =
diff --git a/deps/v8/src/runtime/runtime-test.cc b/deps/v8/src/runtime/runtime-test.cc
index a766dd5db2..a58b28ce52 100644
--- a/deps/v8/src/runtime/runtime-test.cc
+++ b/deps/v8/src/runtime/runtime-test.cc
@@ -32,6 +32,7 @@
#include "src/utils/ostreams.h"
#include "src/wasm/memory-tracing.h"
#include "src/wasm/module-compiler.h"
+#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-objects-inl.h"
@@ -676,6 +677,47 @@ RUNTIME_FUNCTION(Runtime_SetAllocationTimeout) {
return ReadOnlyRoots(isolate).undefined_value();
}
+namespace {
+
+int FixedArrayLenFromSize(int size) {
+ return Min((size - FixedArray::kHeaderSize) / kTaggedSize,
+ FixedArray::kMaxRegularLength);
+}
+
+void FillUpOneNewSpacePage(Isolate* isolate, Heap* heap) {
+ NewSpace* space = heap->new_space();
+ int space_remaining = static_cast<int>(*space->allocation_limit_address() -
+ *space->allocation_top_address());
+ while (space_remaining > 0) {
+ int length = FixedArrayLenFromSize(space_remaining);
+ if (length > 0) {
+ Handle<FixedArray> padding =
+ isolate->factory()->NewFixedArray(length, AllocationType::kYoung);
+ DCHECK(heap->new_space()->Contains(*padding));
+ space_remaining -= padding->Size();
+ } else {
+ // Not enough room to create another fixed array. Create a filler.
+ heap->CreateFillerObjectAt(*heap->new_space()->allocation_top_address(),
+ space_remaining, ClearRecordedSlots::kNo);
+ break;
+ }
+ }
+}
+
+} // namespace
+
+RUNTIME_FUNCTION(Runtime_SimulateNewspaceFull) {
+ HandleScope scope(isolate);
+ Heap* heap = isolate->heap();
+ NewSpace* space = heap->new_space();
+ PauseAllocationObserversScope pause_observers(heap);
+ AlwaysAllocateScope always_allocate(heap);
+ do {
+ FillUpOneNewSpacePage(isolate, heap);
+ } while (space->AddFreshPage());
+
+ return ReadOnlyRoots(isolate).undefined_value();
+}
RUNTIME_FUNCTION(Runtime_DebugPrint) {
SealHandleScope shs(isolate);
@@ -1008,7 +1050,7 @@ RUNTIME_FUNCTION(Runtime_GetWasmRecoveredTrapCount) {
RUNTIME_FUNCTION(Runtime_GetWasmExceptionId) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, exception, 0);
+ CONVERT_ARG_HANDLE_CHECKED(WasmExceptionPackage, exception, 0);
CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 1);
Handle<Object> tag =
WasmExceptionPackage::GetExceptionTag(isolate, exception);
@@ -1024,7 +1066,7 @@ RUNTIME_FUNCTION(Runtime_GetWasmExceptionId) {
RUNTIME_FUNCTION(Runtime_GetWasmExceptionValues) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, exception, 0);
+ CONVERT_ARG_HANDLE_CHECKED(WasmExceptionPackage, exception, 0);
Handle<Object> values_obj =
WasmExceptionPackage::GetExceptionValues(isolate, exception);
CHECK(values_obj->IsFixedArray()); // Only called with correct input.
@@ -1107,20 +1149,22 @@ RUNTIME_FUNCTION(Runtime_ArraySpeciesProtector) {
RUNTIME_FUNCTION(Runtime_MapIteratorProtector) {
SealHandleScope shs(isolate);
DCHECK_EQ(0, args.length());
- return isolate->heap()->ToBoolean(isolate->IsMapIteratorLookupChainIntact());
+ return isolate->heap()->ToBoolean(
+ Protectors::IsMapIteratorLookupChainIntact(isolate));
}
RUNTIME_FUNCTION(Runtime_SetIteratorProtector) {
SealHandleScope shs(isolate);
DCHECK_EQ(0, args.length());
- return isolate->heap()->ToBoolean(isolate->IsSetIteratorLookupChainIntact());
+ return isolate->heap()->ToBoolean(
+ Protectors::IsSetIteratorLookupChainIntact(isolate));
}
RUNTIME_FUNCTION(Runtime_StringIteratorProtector) {
SealHandleScope shs(isolate);
DCHECK_EQ(0, args.length());
return isolate->heap()->ToBoolean(
- isolate->IsStringIteratorLookupChainIntact());
+ Protectors::IsStringIteratorLookupChainIntact(isolate));
}
// Take a compiled wasm module and serialize it into an array buffer, which is
@@ -1132,17 +1176,22 @@ RUNTIME_FUNCTION(Runtime_SerializeWasmModule) {
wasm::NativeModule* native_module = module_obj->native_module();
wasm::WasmSerializer wasm_serializer(native_module);
- size_t compiled_size = wasm_serializer.GetSerializedNativeModuleSize();
- void* array_data = isolate->array_buffer_allocator()->Allocate(compiled_size);
- Handle<JSArrayBuffer> array_buffer =
- isolate->factory()->NewJSArrayBuffer(SharedFlag::kNotShared);
- JSArrayBuffer::Setup(array_buffer, isolate, false, array_data, compiled_size);
- if (!array_data ||
- !wasm_serializer.SerializeNativeModule(
- {reinterpret_cast<uint8_t*>(array_data), compiled_size})) {
- return ReadOnlyRoots(isolate).undefined_value();
+ size_t byte_length = wasm_serializer.GetSerializedNativeModuleSize();
+
+ MaybeHandle<JSArrayBuffer> result =
+ isolate->factory()->NewJSArrayBufferAndBackingStore(
+ byte_length, InitializedFlag::kUninitialized);
+
+ Handle<JSArrayBuffer> array_buffer;
+ if (result.ToHandle(&array_buffer) &&
+ wasm_serializer.SerializeNativeModule(
+ {reinterpret_cast<uint8_t*>(array_buffer->backing_store()),
+ byte_length})) {
+ return *array_buffer;
}
- return *array_buffer;
+
+ // Error. Return undefined.
+ return ReadOnlyRoots(isolate).undefined_value();
}
// Take an array buffer and attempt to reconstruct a compiled wasm module.
@@ -1210,7 +1259,8 @@ RUNTIME_FUNCTION(Runtime_WasmGetNumberOfInstances) {
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(WasmModuleObject, module_obj, 0);
int instance_count = 0;
- WeakArrayList weak_instance_list = module_obj->weak_instance_list();
+ WeakArrayList weak_instance_list =
+ module_obj->script().wasm_weak_instance_list();
for (int i = 0; i < weak_instance_list.length(); ++i) {
if (weak_instance_list.Get(i)->IsWeak()) instance_count++;
}
@@ -1226,6 +1276,22 @@ RUNTIME_FUNCTION(Runtime_WasmNumInterpretedCalls) {
return *isolate->factory()->NewNumberFromSize(static_cast<size_t>(num));
}
+RUNTIME_FUNCTION(Runtime_WasmNumCodeSpaces) {
+ DCHECK_EQ(1, args.length());
+ HandleScope scope(isolate);
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, argument, 0);
+ Handle<WasmModuleObject> module;
+ if (argument->IsWasmInstanceObject()) {
+ module = handle(Handle<WasmInstanceObject>::cast(argument)->module_object(),
+ isolate);
+ } else if (argument->IsWasmModuleObject()) {
+ module = Handle<WasmModuleObject>::cast(argument);
+ }
+ size_t num_spaces =
+ module->native_module()->GetNumberOfCodeSpacesForTesting();
+ return *isolate->factory()->NewNumberFromSize(num_spaces);
+}
+
RUNTIME_FUNCTION(Runtime_RedirectToWasmInterpreter) {
DCHECK_EQ(2, args.length());
HandleScope scope(isolate);
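
Runtime_SimulateNewspaceFull above fills the rest of the current new-space page with maximally sized fixed arrays and plugs any too-small tail with a filler object, repeating while fresh pages can be added. A standalone sketch of the sizing arithmetic; the constants are assumed stand-ins for FixedArray::kHeaderSize, kTaggedSize and FixedArray::kMaxRegularLength:

#include <algorithm>

constexpr int kHeaderSize = 16;             // assumed
constexpr int kTaggedSize = 8;              // assumed
constexpr int kMaxRegularLength = 1 << 14;  // assumed

int FixedArrayLenFromSize(int size) {
  // How many tagged slots fit in `size` bytes after the array header,
  // capped at the largest "regular" array the allocator places here.
  return std::min((size - kHeaderSize) / kTaggedSize, kMaxRegularLength);
}

void FillRemainingSpace(int space_remaining) {
  while (space_remaining > 0) {
    int length = FixedArrayLenFromSize(space_remaining);
    if (length <= 0) break;  // tail too small: a filler object plugs it
    space_remaining -= kHeaderSize + length * kTaggedSize;  // one array
  }
}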
diff --git a/deps/v8/src/runtime/runtime-typedarray.cc b/deps/v8/src/runtime/runtime-typedarray.cc
index 7fab051cbf..327c102238 100644
--- a/deps/v8/src/runtime/runtime-typedarray.cc
+++ b/deps/v8/src/runtime/runtime-typedarray.cc
@@ -27,22 +27,7 @@ RUNTIME_FUNCTION(Runtime_ArrayBufferDetach) {
isolate, NewTypeError(MessageTemplate::kNotTypedArray));
}
Handle<JSArrayBuffer> array_buffer = Handle<JSArrayBuffer>::cast(argument);
- if (!array_buffer->is_detachable()) {
- return ReadOnlyRoots(isolate).undefined_value();
- }
- if (array_buffer->backing_store() == nullptr) {
- CHECK_EQ(0, array_buffer->byte_length());
- return ReadOnlyRoots(isolate).undefined_value();
- }
- // Shared array buffers should never be detached.
- CHECK(!array_buffer->is_shared());
- DCHECK(!array_buffer->is_external());
- void* backing_store = array_buffer->backing_store();
- size_t byte_length = array_buffer->byte_length();
- array_buffer->set_is_external(true);
- isolate->heap()->UnregisterArrayBuffer(*array_buffer);
array_buffer->Detach();
- isolate->array_buffer_allocator()->Free(backing_store, byte_length);
return ReadOnlyRoots(isolate).undefined_value();
}
diff --git a/deps/v8/src/runtime/runtime-wasm.cc b/deps/v8/src/runtime/runtime-wasm.cc
index 57e59c07be..b0153b7828 100644
--- a/deps/v8/src/runtime/runtime-wasm.cc
+++ b/deps/v8/src/runtime/runtime-wasm.cc
@@ -150,7 +150,12 @@ RUNTIME_FUNCTION(Runtime_WasmExceptionGetTag) {
CONVERT_ARG_CHECKED(Object, except_obj_raw, 0);
// TODO(mstarzinger): Manually box because parameters are not visited yet.
Handle<Object> except_obj(except_obj_raw, isolate);
- return *WasmExceptionPackage::GetExceptionTag(isolate, except_obj);
+ if (!except_obj->IsWasmExceptionPackage(isolate)) {
+ return ReadOnlyRoots(isolate).undefined_value();
+ }
+ Handle<WasmExceptionPackage> exception =
+ Handle<WasmExceptionPackage>::cast(except_obj);
+ return *WasmExceptionPackage::GetExceptionTag(isolate, exception);
}
RUNTIME_FUNCTION(Runtime_WasmExceptionGetValues) {
@@ -162,7 +167,12 @@ RUNTIME_FUNCTION(Runtime_WasmExceptionGetValues) {
CONVERT_ARG_CHECKED(Object, except_obj_raw, 0);
// TODO(mstarzinger): Manually box because parameters are not visited yet.
Handle<Object> except_obj(except_obj_raw, isolate);
- return *WasmExceptionPackage::GetExceptionValues(isolate, except_obj);
+ if (!except_obj->IsWasmExceptionPackage(isolate)) {
+ return ReadOnlyRoots(isolate).undefined_value();
+ }
+ Handle<WasmExceptionPackage> exception =
+ Handle<WasmExceptionPackage>::cast(except_obj);
+ return *WasmExceptionPackage::GetExceptionValues(isolate, exception);
}
RUNTIME_FUNCTION(Runtime_WasmRunInterpreter) {
diff --git a/deps/v8/src/runtime/runtime.h b/deps/v8/src/runtime/runtime.h
index d705b05752..8319aabe2c 100644
--- a/deps/v8/src/runtime/runtime.h
+++ b/deps/v8/src/runtime/runtime.h
@@ -99,7 +99,7 @@ namespace internal {
F(WeakCollectionSet, 4, 1)
#define FOR_EACH_INTRINSIC_COMPILER(F, I) \
- F(CompileForOnStackReplacement, 1, 1) \
+ F(CompileForOnStackReplacement, 0, 1) \
F(CompileLazy, 1, 1) \
F(CompileOptimized_Concurrent, 1, 1) \
F(CompileOptimized_NotConcurrent, 1, 1) \
@@ -319,7 +319,6 @@ namespace internal {
F(ObjectValues, 1, 1) \
F(ObjectValuesSkipFastPath, 1, 1) \
F(OptimizeObjectForAddingMultipleProperties, 2, 1) \
- F(PerformSideEffectCheckForObject, 1, 1) \
F(SetDataProperties, 2, 1) \
F(SetKeyedProperty, 3, 1) \
F(SetNamedProperty, 3, 1) \
@@ -515,6 +514,7 @@ namespace internal {
F(SetWasmCompileControls, 2, 1) \
F(SetWasmInstantiateControls, 0, 1) \
F(SetWasmThreadsEnabled, 1, 1) \
+ F(SimulateNewspaceFull, 0, 1) \
F(StringIteratorProtector, 0, 1) \
F(SystemBreak, 0, 1) \
F(TraceEnter, 0, 1) \
@@ -523,6 +523,7 @@ namespace internal {
F(UnblockConcurrentRecompilation, 0, 1) \
F(WasmGetNumberOfInstances, 1, 1) \
F(WasmNumInterpretedCalls, 1, 1) \
+ F(WasmNumCodeSpaces, 1, 1) \
F(WasmTierUpFunction, 2, 1) \
F(WasmTraceMemory, 1, 1) \
I(DeoptimizeNow, 0, 1)
diff --git a/deps/v8/src/sanitizer/OWNERS b/deps/v8/src/sanitizer/OWNERS
index 96c9d10c12..29f827d160 100644
--- a/deps/v8/src/sanitizer/OWNERS
+++ b/deps/v8/src/sanitizer/OWNERS
@@ -1,3 +1,3 @@
file:../../INFRA_OWNERS
-clemensh@chromium.org
+clemensb@chromium.org
diff --git a/deps/v8/src/snapshot/deserializer.cc b/deps/v8/src/snapshot/deserializer.cc
index 25e32e2cc0..e477817d20 100644
--- a/deps/v8/src/snapshot/deserializer.cc
+++ b/deps/v8/src/snapshot/deserializer.cc
@@ -291,23 +291,30 @@ HeapObject Deserializer::PostProcessNewObject(HeapObject obj,
data_view.byte_offset());
} else if (obj.IsJSTypedArray()) {
JSTypedArray typed_array = JSTypedArray::cast(obj);
- // Only fixup for the off-heap case.
- if (!typed_array.is_on_heap()) {
- Smi store_index(
- reinterpret_cast<Address>(typed_array.external_pointer()));
- byte* backing_store = off_heap_backing_stores_[store_index.value()] +
- typed_array.byte_offset();
- typed_array.set_external_pointer(backing_store);
+ // Fixup typed array pointers.
+ if (typed_array.is_on_heap()) {
+ typed_array.SetOnHeapDataPtr(HeapObject::cast(typed_array.base_pointer()),
+ typed_array.external_pointer());
+ } else {
+ // Serializer writes backing store ref as a DataPtr() value.
+ size_t store_index = reinterpret_cast<size_t>(typed_array.DataPtr());
+ auto backing_store = backing_stores_[store_index];
+ auto start = backing_store
+ ? reinterpret_cast<byte*>(backing_store->buffer_start())
+ : nullptr;
+ typed_array.SetOffHeapDataPtr(start, typed_array.byte_offset());
}
} else if (obj.IsJSArrayBuffer()) {
JSArrayBuffer buffer = JSArrayBuffer::cast(obj);
// Only fixup for the off-heap case.
if (buffer.backing_store() != nullptr) {
- Smi store_index(reinterpret_cast<Address>(buffer.backing_store()));
- void* backing_store = off_heap_backing_stores_[store_index.value()];
-
- buffer.set_backing_store(backing_store);
- isolate_->heap()->RegisterNewArrayBuffer(buffer);
+ // Serializer writes backing store ref in |backing_store| field.
+ size_t store_index = reinterpret_cast<size_t>(buffer.backing_store());
+ auto backing_store = backing_stores_[store_index];
+ SharedFlag shared = backing_store && backing_store->is_shared()
+ ? SharedFlag::kShared
+ : SharedFlag::kNotShared;
+ buffer.Setup(shared, backing_store);
}
} else if (obj.IsBytecodeArray()) {
// TODO(mythria): Remove these once we store the default values for these
@@ -523,9 +530,10 @@ bool Deserializer::ReadData(TSlot current, TSlot limit,
// Write barrier support costs around 1% in startup time. In fact there
// are no new space objects in current boot snapshots, so it's not needed,
// but that may change.
- bool write_barrier_needed = (current_object_address != kNullAddress &&
- source_space != SnapshotSpace::kNew &&
- source_space != SnapshotSpace::kCode);
+ bool write_barrier_needed =
+ (current_object_address != kNullAddress &&
+ source_space != SnapshotSpace::kNew &&
+ source_space != SnapshotSpace::kCode && !FLAG_disable_write_barriers);
while (current < limit) {
byte data = source_.Get();
switch (data) {
@@ -669,12 +677,12 @@ bool Deserializer::ReadData(TSlot current, TSlot limit,
case kOffHeapBackingStore: {
int byte_length = source_.GetInt();
- byte* backing_store = static_cast<byte*>(
- isolate->array_buffer_allocator()->AllocateUninitialized(
- byte_length));
+ std::unique_ptr<BackingStore> backing_store =
+ BackingStore::Allocate(isolate, byte_length, SharedFlag::kNotShared,
+ InitializedFlag::kUninitialized);
CHECK_NOT_NULL(backing_store);
- source_.CopyRaw(backing_store, byte_length);
- off_heap_backing_stores_.push_back(backing_store);
+ source_.CopyRaw(backing_store->buffer_start(), byte_length);
+ backing_stores_.push_back(std::move(backing_store));
break;
}
@@ -842,6 +850,7 @@ TSlot Deserializer::ReadDataCase(Isolate* isolate, TSlot current,
// Don't update current pointer here as it may be needed for write barrier.
Write(current, heap_object_ref);
if (emit_write_barrier && write_barrier_needed) {
+ DCHECK_IMPLIES(FLAG_disable_write_barriers, !write_barrier_needed);
HeapObject host_object = HeapObject::FromAddress(current_object_address);
SLOW_DCHECK(isolate->heap()->Contains(host_object));
GenerationalBarrier(host_object, MaybeObjectSlot(current.address()),
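
The deserializer hunks above replace raw byte* backing stores with refcounted BackingStore objects: the serializer leaves a small integer ref where a data pointer belongs, and post-processing resolves that ref against backing_stores_ (whose slot 0 is reserved as a null sentinel, per the deserializer.h hunk below) before rebuilding the typed array or array buffer. A simplified sketch of the fixup step, with hypothetical stand-in types:

```cpp
// Sketch only: resolves a serialized backing-store index back to a real
// data pointer. Types and names are stand-ins, not V8's actual API.
#include <cstdint>
#include <memory>
#include <vector>

struct BackingStore {
  void* buffer_start;
};

uint8_t* ResolveBackingStoreRef(
    const std::vector<std::shared_ptr<BackingStore>>& stores,
    size_t store_index, size_t byte_offset) {
  const auto& store = stores[store_index];  // slot 0 holds the null sentinel
  if (!store) return nullptr;               // detached / empty buffer
  return static_cast<uint8_t*>(store->buffer_start) + byte_offset;
}
```
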
diff --git a/deps/v8/src/snapshot/deserializer.h b/deps/v8/src/snapshot/deserializer.h
index 8dce1b3f3f..9f66c37ac5 100644
--- a/deps/v8/src/snapshot/deserializer.h
+++ b/deps/v8/src/snapshot/deserializer.h
@@ -10,6 +10,7 @@
#include "src/objects/allocation-site.h"
#include "src/objects/api-callbacks.h"
+#include "src/objects/backing-store.h"
#include "src/objects/code.h"
#include "src/objects/js-array.h"
#include "src/objects/map.h"
@@ -56,7 +57,7 @@ class V8_EXPORT_PRIVATE Deserializer : public SerializerDeserializer {
allocator()->DecodeReservation(data->Reservations());
// We start the indices here at 1, so that we can distinguish between an
// actual index and a nullptr in a deserialized object requiring fix-up.
- off_heap_backing_stores_.push_back(nullptr);
+ backing_stores_.push_back({});
}
void Initialize(Isolate* isolate);
@@ -173,7 +174,7 @@ class V8_EXPORT_PRIVATE Deserializer : public SerializerDeserializer {
std::vector<CallHandlerInfo> call_handler_infos_;
std::vector<Handle<String>> new_internalized_strings_;
std::vector<Handle<Script>> new_scripts_;
- std::vector<byte*> off_heap_backing_stores_;
+ std::vector<std::shared_ptr<BackingStore>> backing_stores_;
DeserializerAllocator allocator_;
const bool deserializing_user_code_;
diff --git a/deps/v8/src/snapshot/embedded/embedded-file-writer.h b/deps/v8/src/snapshot/embedded/embedded-file-writer.h
index e487b9be9b..75fdb2eac3 100644
--- a/deps/v8/src/snapshot/embedded/embedded-file-writer.h
+++ b/deps/v8/src/snapshot/embedded/embedded-file-writer.h
@@ -8,6 +8,7 @@
#include <cinttypes>
#include <cstdio>
#include <cstring>
+#include <memory>
#include "src/common/globals.h"
#include "src/snapshot/embedded/embedded-data.h"
diff --git a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-base.cc b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-base.cc
index a17f039fa2..7a04a9dfab 100644
--- a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-base.cc
+++ b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-base.cc
@@ -136,16 +136,16 @@ std::unique_ptr<PlatformEmbeddedFileWriterBase> NewPlatformEmbeddedFileWriter(
auto embedded_target_os = ToEmbeddedTargetOs(target_os);
if (embedded_target_os == EmbeddedTargetOs::kAIX) {
- return base::make_unique<PlatformEmbeddedFileWriterAIX>(
- embedded_target_arch, embedded_target_os);
+ return std::make_unique<PlatformEmbeddedFileWriterAIX>(embedded_target_arch,
+ embedded_target_os);
} else if (embedded_target_os == EmbeddedTargetOs::kMac) {
- return base::make_unique<PlatformEmbeddedFileWriterMac>(
- embedded_target_arch, embedded_target_os);
+ return std::make_unique<PlatformEmbeddedFileWriterMac>(embedded_target_arch,
+ embedded_target_os);
} else if (embedded_target_os == EmbeddedTargetOs::kWin) {
- return base::make_unique<PlatformEmbeddedFileWriterWin>(
- embedded_target_arch, embedded_target_os);
+ return std::make_unique<PlatformEmbeddedFileWriterWin>(embedded_target_arch,
+ embedded_target_os);
} else {
- return base::make_unique<PlatformEmbeddedFileWriterGeneric>(
+ return std::make_unique<PlatformEmbeddedFileWriterGeneric>(
embedded_target_arch, embedded_target_os);
}
diff --git a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-win.cc b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-win.cc
index 688255352d..9a9a26fbd0 100644
--- a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-win.cc
+++ b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-win.cc
@@ -11,7 +11,6 @@
#if defined(V8_OS_WIN64)
#include "src/builtins/builtins.h"
#include "src/diagnostics/unwinding-info-win64.h"
-#include "src/objects/objects-inl.h"
#include "src/snapshot/embedded/embedded-data.h"
#include "src/snapshot/embedded/embedded-file-writer.h"
#endif // V8_OS_WIN64
@@ -670,11 +669,7 @@ void PlatformEmbeddedFileWriterWin::DeclareExternalFilename(
// Replace any Windows style paths (backslashes) with forward
// slashes.
std::string fixed_filename(filename);
- for (auto& c : fixed_filename) {
- if (c == '\\') {
- c = '/';
- }
- }
+ std::replace(fixed_filename.begin(), fixed_filename.end(), '\\', '/');
fprintf(fp_, ".file %d \"%s\"\n", fileid, fixed_filename.c_str());
}
diff --git a/deps/v8/src/snapshot/natives-external.cc b/deps/v8/src/snapshot/natives-external.cc
index fe67f33087..4aa411dd92 100644
--- a/deps/v8/src/snapshot/natives-external.cc
+++ b/deps/v8/src/snapshot/natives-external.cc
@@ -61,9 +61,10 @@ class NativesStore {
// We expect the libraries in the following format:
// int: # of sources.
// 2N blobs: N pairs of source name + actual source.
- int library_count = source->GetInt();
- for (int i = 0; i < library_count; ++i)
+ int library_count = source->GetIntSlow();
+ for (int i = 0; i < library_count; ++i) {
store->ReadNameAndContentPair(source);
+ }
return store;
}
diff --git a/deps/v8/src/snapshot/object-deserializer.cc b/deps/v8/src/snapshot/object-deserializer.cc
index 63a0cfca17..daada252ba 100644
--- a/deps/v8/src/snapshot/object-deserializer.cc
+++ b/deps/v8/src/snapshot/object-deserializer.cc
@@ -60,7 +60,9 @@ void ObjectDeserializer::FlushICache() {
DCHECK(deserializing_user_code());
for (Code code : new_code_objects()) {
// Record all references to embedded objects in the new code object.
+#ifndef V8_DISABLE_WRITE_BARRIERS
WriteBarrierForCode(code);
+#endif
FlushInstructionCache(code.raw_instruction_start(),
code.raw_instruction_size());
}
diff --git a/deps/v8/src/snapshot/partial-serializer.cc b/deps/v8/src/snapshot/partial-serializer.cc
index 7b4ffbb2bf..c362fdb0ce 100644
--- a/deps/v8/src/snapshot/partial-serializer.cc
+++ b/deps/v8/src/snapshot/partial-serializer.cc
@@ -176,7 +176,8 @@ bool PartialSerializer::SerializeJSObjectWithEmbedderFields(Object obj) {
} else {
// If no serializer is provided and the field was empty, we serialize it
// by default to nullptr.
- if (serialize_embedder_fields_.callback == nullptr && object.ptr() == 0) {
+ if (serialize_embedder_fields_.callback == nullptr &&
+ object == Smi::zero()) {
serialized_data.push_back({nullptr, 0});
} else {
DCHECK_NOT_NULL(serialize_embedder_fields_.callback);
diff --git a/deps/v8/src/snapshot/serializer.cc b/deps/v8/src/snapshot/serializer.cc
index 5b68aaa87b..f009f08fc7 100644
--- a/deps/v8/src/snapshot/serializer.cc
+++ b/deps/v8/src/snapshot/serializer.cc
@@ -342,7 +342,7 @@ void Serializer::ObjectSerializer::SerializePrologue(SnapshotSpace space,
serializer_->SerializeObject(map);
}
-int32_t Serializer::ObjectSerializer::SerializeBackingStore(
+uint32_t Serializer::ObjectSerializer::SerializeBackingStore(
void* backing_store, int32_t byte_length) {
SerializerReference reference =
serializer_->reference_map()->LookupReference(backing_store);
@@ -358,13 +358,15 @@ int32_t Serializer::ObjectSerializer::SerializeBackingStore(
serializer_->reference_map()->Add(backing_store, reference);
}
- return static_cast<int32_t>(reference.off_heap_backing_store_index());
+ return reference.off_heap_backing_store_index();
}
void Serializer::ObjectSerializer::SerializeJSTypedArray() {
JSTypedArray typed_array = JSTypedArray::cast(object_);
- if (!typed_array.WasDetached()) {
- if (!typed_array.is_on_heap()) {
+ if (typed_array.is_on_heap()) {
+ typed_array.RemoveExternalPointerCompensationForSerialization();
+ } else {
+ if (!typed_array.WasDetached()) {
// Explicitly serialize the backing store now.
JSArrayBuffer buffer = JSArrayBuffer::cast(typed_array.buffer());
CHECK_LE(buffer.byte_length(), Smi::kMaxValue);
@@ -372,21 +374,20 @@ void Serializer::ObjectSerializer::SerializeJSTypedArray() {
int32_t byte_length = static_cast<int32_t>(buffer.byte_length());
int32_t byte_offset = static_cast<int32_t>(typed_array.byte_offset());
- // We need to calculate the backing store from the external pointer
+ // We need to calculate the backing store from the data pointer
// because the ArrayBuffer may already have been serialized.
void* backing_store = reinterpret_cast<void*>(
- reinterpret_cast<intptr_t>(typed_array.external_pointer()) -
- byte_offset);
- int32_t ref = SerializeBackingStore(backing_store, byte_length);
-
- // The external_pointer is the backing_store + typed_array->byte_offset.
- // To properly share the buffer, we set the backing store ref here. On
- // deserialization we re-add the byte_offset to external_pointer.
- typed_array.set_external_pointer(
- reinterpret_cast<void*>(Smi::FromInt(ref).ptr()));
+ reinterpret_cast<Address>(typed_array.DataPtr()) - byte_offset);
+
+ uint32_t ref = SerializeBackingStore(backing_store, byte_length);
+      // To properly share the buffer, we set the backing store ref as an
+      // off-heap offset from nullptr. On deserialization we reset the data
+      // pointer to its proper value.
+ typed_array.SetOffHeapDataPtr(nullptr, ref);
+ DCHECK_EQ(ref, reinterpret_cast<Address>(typed_array.DataPtr()));
+ } else {
+ typed_array.SetOffHeapDataPtr(nullptr, 0);
}
- } else {
- typed_array.set_external_pointer(nullptr);
}
SerializeObject();
}
@@ -400,8 +401,11 @@ void Serializer::ObjectSerializer::SerializeJSArrayBuffer() {
// The embedder-allocated backing store only exists for the off-heap case.
if (backing_store != nullptr) {
- int32_t ref = SerializeBackingStore(backing_store, byte_length);
- buffer.set_backing_store(reinterpret_cast<void*>(Smi::FromInt(ref).ptr()));
+ uint32_t ref = SerializeBackingStore(backing_store, byte_length);
+    // To properly share the buffer, we set the backing store ref as a
+    // backing store address. On deserialization we reset the data pointer
+    // to its proper value.
+ buffer.set_backing_store(reinterpret_cast<void*>(static_cast<size_t>(ref)));
}
SerializeObject();
buffer.set_backing_store(backing_store);
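
On the serializer side, SerializeBackingStore deduplicates buffers through the reference map: the first object that mentions a backing store serializes its contents and records an index, and every later user (an ArrayBuffer plus any typed arrays viewing it) reuses that index, which is what the comments above mean by properly sharing the buffer. A sketch of that lookup-or-add pattern under hypothetical simplified types:

```cpp
// Sketch of the dedup pattern behind SerializeBackingStore.
#include <cstdint>
#include <map>

struct ExampleReferenceMap {
  std::map<void*, uint32_t> refs;
  uint32_t next_index = 1;  // 0 stays reserved for "no backing store"

  uint32_t LookupOrAdd(void* backing_store) {
    auto it = refs.find(backing_store);
    if (it != refs.end()) return it->second;  // already serialized once
    uint32_t index = next_index++;
    refs.emplace(backing_store, index);
    // A real serializer would emit the buffer contents here, exactly once.
    return index;
  }
};
```
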
diff --git a/deps/v8/src/snapshot/serializer.h b/deps/v8/src/snapshot/serializer.h
index fad2ec8a88..73a9a1eaac 100644
--- a/deps/v8/src/snapshot/serializer.h
+++ b/deps/v8/src/snapshot/serializer.h
@@ -328,7 +328,7 @@ class Serializer::ObjectSerializer : public ObjectVisitor {
void SerializeContent(Map map, int size);
void OutputRawData(Address up_to);
void OutputCode(int size);
- int32_t SerializeBackingStore(void* backing_store, int32_t byte_length);
+ uint32_t SerializeBackingStore(void* backing_store, int32_t byte_length);
void SerializeJSTypedArray();
void SerializeJSArrayBuffer();
void SerializeExternalString();
diff --git a/deps/v8/src/snapshot/snapshot-source-sink.h b/deps/v8/src/snapshot/snapshot-source-sink.h
index f20f2ad33f..9cdb85089e 100644
--- a/deps/v8/src/snapshot/snapshot-source-sink.h
+++ b/deps/v8/src/snapshot/snapshot-source-sink.h
@@ -63,6 +63,24 @@ class SnapshotByteSource final {
return answer;
}
+ int GetIntSlow() {
+    // Unlike GetInt, this reads only up to the end of the blob, even if fewer
+    // than 4 bytes remain.
+ // TODO(jgruber): Remove once the use in MakeFromScriptsSource is gone.
+ DCHECK(position_ < length_);
+ uint32_t answer = data_[position_];
+ if (position_ + 1 < length_) answer |= data_[position_ + 1] << 8;
+ if (position_ + 2 < length_) answer |= data_[position_ + 2] << 16;
+ if (position_ + 3 < length_) answer |= data_[position_ + 3] << 24;
+ int bytes = (answer & 3) + 1;
+ Advance(bytes);
+ uint32_t mask = 0xffffffffu;
+ mask >>= 32 - (bytes << 3);
+ answer &= mask;
+ answer >>= 2;
+ return answer;
+ }
+
// Returns length.
int GetBlob(const byte** data);
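
Judging by the bit manipulation, GetIntSlow decodes the same variable-length format as GetInt: the low two bits of the first byte hold the byte count minus one (1-4 bytes, little-endian), and the remaining bits, shifted right by two, are the value, leaving 30 bits of payload. The slow part is only the bounds check on each optional trailing byte. A round-trip sketch of that inferred encoding:

```cpp
// Encoder/decoder pair for the inferred format; assumes value < 2^30.
#include <cassert>
#include <cstdint>
#include <vector>

std::vector<uint8_t> EncodeInt(uint32_t value) {
  assert(value < (1u << 30));
  for (uint32_t n = 1; n <= 4; ++n) {
    uint64_t encoded = (static_cast<uint64_t>(value) << 2) | (n - 1);
    if (n == 4 || encoded < (1ull << (8 * n))) {
      std::vector<uint8_t> out;
      for (uint32_t i = 0; i < n; ++i) {
        out.push_back(static_cast<uint8_t>(encoded >> (8 * i)));
      }
      return out;
    }
  }
  return {};  // unreachable: 4 bytes always suffice for a 30-bit payload
}

uint32_t DecodeInt(const std::vector<uint8_t>& data) {
  uint32_t answer = 0;
  for (size_t i = 0; i < data.size() && i < 4; ++i) {
    answer |= static_cast<uint32_t>(data[i]) << (8 * i);
  }
  uint32_t bytes = (answer & 3) + 1;            // low 2 bits: length - 1
  uint32_t mask = 0xffffffffu >> (32 - 8 * bytes);
  return (answer & mask) >> 2;                  // drop the length tag
}
```
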
diff --git a/deps/v8/src/snapshot/snapshot.h b/deps/v8/src/snapshot/snapshot.h
index f7e1e86b84..4a4da9f755 100644
--- a/deps/v8/src/snapshot/snapshot.h
+++ b/deps/v8/src/snapshot/snapshot.h
@@ -8,7 +8,6 @@
#include "src/snapshot/partial-serializer.h"
#include "src/snapshot/startup-serializer.h"
-#include "src/objects/objects-inl.h"
#include "src/utils/utils.h"
namespace v8 {
diff --git a/deps/v8/src/strings/string-hasher-inl.h b/deps/v8/src/strings/string-hasher-inl.h
index b547d0a78d..0c69e6c7ba 100644
--- a/deps/v8/src/strings/string-hasher-inl.h
+++ b/deps/v8/src/strings/string-hasher-inl.h
@@ -34,32 +34,63 @@ uint32_t StringHasher::GetHashCore(uint32_t running_hash) {
uint32_t StringHasher::GetTrivialHash(int length) {
DCHECK_GT(length, String::kMaxHashCalcLength);
// String hash of a large string is simply the length.
- return (length << String::kHashShift) | String::kIsNotArrayIndexMask;
+ return (static_cast<uint32_t>(length) << String::kHashShift) |
+ String::kIsNotArrayIndexMask | String::kIsNotIntegerIndexMask;
}
template <typename schar>
uint32_t StringHasher::HashSequentialString(const schar* chars, int length,
uint64_t seed) {
- // Check whether the string is a valid array index. In that case, compute the
- // array index hash. It'll fall through to compute a regular string hash from
- // the start if it turns out that the string isn't a valid array index.
- if (IsInRange(length, 1, String::kMaxArrayIndexSize)) {
+ DCHECK_LE(0, length);
+ DCHECK_IMPLIES(0 < length, chars != nullptr);
+ if (length >= 1) {
if (IsDecimalDigit(chars[0]) && (length == 1 || chars[0] != '0')) {
- uint32_t index = chars[0] - '0';
- int i = 1;
- do {
- if (i == length) {
- return MakeArrayIndexHash(index, length);
+ uint32_t index = 0;
+ if (length <= String::kMaxArrayIndexSize) {
+ // Possible array index; try to compute the array index hash.
+ index = chars[0] - '0';
+ int i = 1;
+ do {
+ if (i == length) {
+ return MakeArrayIndexHash(index, length);
+ }
+ } while (TryAddIndexChar(&index, chars[i++]));
+ }
+ // The following block wouldn't do anything on 32-bit platforms,
+ // because kMaxArrayIndexSize == kMaxIntegerIndexSize there, and
+ // if we wanted to compile it everywhere, then {index_big} would
+ // have to be a {size_t}, which the Mac compiler doesn't like to
+ // implicitly cast to uint64_t for the {TryAddIndexChar} call.
+#if V8_HOST_ARCH_64_BIT
+ // No "else" here: if the block above was entered and fell through,
+ // we'll have to take this branch.
+ if (length <= String::kMaxIntegerIndexSize) {
+ // Not an array index, but it could still be an integer index.
+ // Perform a regular hash computation, and additionally check
+ // if there are non-digit characters.
+ uint32_t is_integer_index = 0;
+ uint32_t running_hash = static_cast<uint32_t>(seed);
+ uint64_t index_big = index;
+ const schar* end = &chars[length];
+ while (chars != end) {
+ if (is_integer_index == 0 && !TryAddIndexChar(&index_big, *chars)) {
+ is_integer_index = String::kIsNotIntegerIndexMask;
+ }
+ running_hash = AddCharacterCore(running_hash, *chars++);
}
- } while (TryAddIndexChar(&index, chars[i++]));
+ return (GetHashCore(running_hash) << String::kHashShift) |
+ String::kIsNotArrayIndexMask | is_integer_index;
+ }
+#endif
+ }
+ // No "else" here: if the first character was a decimal digit, we might
+ // still have to take this branch.
+ if (length > String::kMaxHashCalcLength) {
+ return GetTrivialHash(length);
}
- } else if (length > String::kMaxHashCalcLength) {
- return GetTrivialHash(length);
}
- // Non-array-index hash.
- DCHECK_LE(0, length);
- DCHECK_IMPLIES(0 < length, chars != nullptr);
+ // Non-index hash.
uint32_t running_hash = static_cast<uint32_t>(seed);
const schar* end = &chars[length];
while (chars != end) {
@@ -67,7 +98,7 @@ uint32_t StringHasher::HashSequentialString(const schar* chars, int length,
}
return (GetHashCore(running_hash) << String::kHashShift) |
- String::kIsNotArrayIndexMask;
+ String::kIsNotArrayIndexMask | String::kIsNotIntegerIndexMask;
}
std::size_t SeededStringHasher::operator()(const char* name) const {
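
The rewritten hasher distinguishes three cases: strings of up to kMaxArrayIndexSize digits hash to an encoding of the array index itself; on 64-bit hosts, all-digit strings up to kMaxIntegerIndexSize get a regular hash with only kIsNotArrayIndexMask set, so integer-indexed lookups can still recognize them; everything else gets both not-an-index bits. The digit accumulation runs through TryAddIndexChar, which must reject non-digits and overflow. A hypothetical sketch of such a helper for the 32-bit array-index case (V8's actual helper may differ, and the integer-index path uses a 64-bit accumulator with a larger bound):

```cpp
#include <cstdint>

// Hypothetical digit-accumulation step: returns false on a non-digit or
// when the accumulated value would exceed the maximum array index, 2^32 - 2.
bool TryAddIndexChar(uint32_t* index, char c) {
  if (c < '0' || c > '9') return false;
  uint32_t d = static_cast<uint32_t>(c - '0');
  if (*index > (0xFFFFFFFEu - d) / 10) return false;  // would overflow
  *index = *index * 10 + d;
  return true;
}
```
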
diff --git a/deps/v8/src/strings/string-stream.cc b/deps/v8/src/strings/string-stream.cc
index 25a8ffc3c1..5747f66bba 100644
--- a/deps/v8/src/strings/string-stream.cc
+++ b/deps/v8/src/strings/string-stream.cc
@@ -298,9 +298,8 @@ void StringStream::PrintName(Object name) {
void StringStream::PrintUsingMap(JSObject js_object) {
Map map = js_object.map();
- int real_size = map.NumberOfOwnDescriptors();
DescriptorArray descs = map.instance_descriptors();
- for (int i = 0; i < real_size; i++) {
+ for (InternalIndex i : map.IterateOwnDescriptors()) {
PropertyDetails details = descs.GetDetails(i);
if (details.location() == kField) {
DCHECK_EQ(kData, details.kind());
diff --git a/deps/v8/src/strings/string-stream.h b/deps/v8/src/strings/string-stream.h
index d7b616c6ff..3a2ba0dd35 100644
--- a/deps/v8/src/strings/string-stream.h
+++ b/deps/v8/src/strings/string-stream.h
@@ -5,6 +5,8 @@
#ifndef V8_STRINGS_STRING_STREAM_H_
#define V8_STRINGS_STRING_STREAM_H_
+#include <memory>
+
#include "src/base/small-vector.h"
#include "src/handles/handles.h"
#include "src/objects/heap-object.h"
diff --git a/deps/v8/src/strings/uri.cc b/deps/v8/src/strings/uri.cc
index 430c8dd0eb..de4e339b39 100644
--- a/deps/v8/src/strings/uri.cc
+++ b/deps/v8/src/strings/uri.cc
@@ -195,10 +195,14 @@ MaybeHandle<String> Uri::Decode(Isolate* isolate, Handle<String> uri,
String);
DisallowHeapAllocation no_gc;
- CopyChars(result->GetChars(no_gc), one_byte_buffer.data(),
- one_byte_buffer.size());
- CopyChars(result->GetChars(no_gc) + one_byte_buffer.size(),
- two_byte_buffer.data(), two_byte_buffer.size());
+ uc16* chars = result->GetChars(no_gc);
+ if (!one_byte_buffer.empty()) {
+ CopyChars(chars, one_byte_buffer.data(), one_byte_buffer.size());
+ chars += one_byte_buffer.size();
+ }
+ if (!two_byte_buffer.empty()) {
+ CopyChars(chars, two_byte_buffer.data(), two_byte_buffer.size());
+ }
return result;
}
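
The uri.cc change guards each CopyChars call behind an emptiness check. A plausible motivation (an assumption, not stated in the diff): std::vector::data() may legally return nullptr for an empty vector, and passing a null pointer to a memcpy-style helper is undefined behavior even when the count is zero. The same guard pattern in isolation:

```cpp
// Assumed rationale: skip the copy entirely when the source is empty so a
// possibly-null data() pointer never reaches memcpy.
#include <cstring>
#include <vector>

void SafeAppend(char* dst, const std::vector<char>& src) {
  if (!src.empty()) {
    std::memcpy(dst, src.data(), src.size());
  }
}
```
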
diff --git a/deps/v8/src/tasks/OWNERS b/deps/v8/src/tasks/OWNERS
index 2c6630da0c..d31f346b03 100644
--- a/deps/v8/src/tasks/OWNERS
+++ b/deps/v8/src/tasks/OWNERS
@@ -1,5 +1,5 @@
ahaas@chromium.org
-clemensh@chromium.org
+clemensb@chromium.org
mlippautz@chromium.org
mstarzinger@chromium.org
rmcilroy@chromium.org
diff --git a/deps/v8/src/tasks/task-utils.cc b/deps/v8/src/tasks/task-utils.cc
index 2b75c4549c..50edeccf14 100644
--- a/deps/v8/src/tasks/task-utils.cc
+++ b/deps/v8/src/tasks/task-utils.cc
@@ -42,22 +42,22 @@ class CancelableIdleFuncTask final : public CancelableIdleTask {
std::unique_ptr<CancelableTask> MakeCancelableTask(Isolate* isolate,
std::function<void()> func) {
- return base::make_unique<CancelableFuncTask>(isolate, std::move(func));
+ return std::make_unique<CancelableFuncTask>(isolate, std::move(func));
}
std::unique_ptr<CancelableTask> MakeCancelableTask(
CancelableTaskManager* manager, std::function<void()> func) {
- return base::make_unique<CancelableFuncTask>(manager, std::move(func));
+ return std::make_unique<CancelableFuncTask>(manager, std::move(func));
}
std::unique_ptr<CancelableIdleTask> MakeCancelableIdleTask(
Isolate* isolate, std::function<void(double)> func) {
- return base::make_unique<CancelableIdleFuncTask>(isolate, std::move(func));
+ return std::make_unique<CancelableIdleFuncTask>(isolate, std::move(func));
}
std::unique_ptr<CancelableIdleTask> MakeCancelableIdleTask(
CancelableTaskManager* manager, std::function<void(double)> func) {
- return base::make_unique<CancelableIdleFuncTask>(manager, std::move(func));
+ return std::make_unique<CancelableIdleFuncTask>(manager, std::move(func));
}
} // namespace internal
diff --git a/deps/v8/src/torque/ast.h b/deps/v8/src/torque/ast.h
index 5ce25cf13a..fcbb02124d 100644
--- a/deps/v8/src/torque/ast.h
+++ b/deps/v8/src/torque/ast.h
@@ -90,7 +90,8 @@ namespace torque {
AST_STATEMENT_NODE_KIND_LIST(V) \
AST_DECLARATION_NODE_KIND_LIST(V) \
V(Identifier) \
- V(LabelBlock)
+ V(LabelBlock) \
+ V(ClassBody)
struct AstNode {
public:
@@ -792,6 +793,12 @@ struct TypeDeclaration : Declaration {
Identifier* name;
};
+struct InstanceTypeConstraints {
+ InstanceTypeConstraints() : value(-1), num_flags_bits(-1) {}
+ int value;
+ int num_flags_bits;
+};
+
struct AbstractTypeDeclaration : TypeDeclaration {
DEFINE_AST_NODE_LEAF_BOILERPLATE(AbstractTypeDeclaration)
AbstractTypeDeclaration(SourcePosition pos, Identifier* name, bool transient,
@@ -1069,24 +1076,38 @@ struct StructDeclaration : TypeDeclaration {
bool IsGeneric() const { return !generic_parameters.empty(); }
};
+struct ClassBody : AstNode {
+ DEFINE_AST_NODE_LEAF_BOILERPLATE(ClassBody)
+ ClassBody(SourcePosition pos, std::vector<Declaration*> methods,
+ std::vector<ClassFieldExpression> fields)
+ : AstNode(kKind, pos),
+ methods(std::move(methods)),
+ fields(std::move(fields)) {}
+ std::vector<Declaration*> methods;
+ std::vector<ClassFieldExpression> fields;
+};
+
struct ClassDeclaration : TypeDeclaration {
DEFINE_AST_NODE_LEAF_BOILERPLATE(ClassDeclaration)
ClassDeclaration(SourcePosition pos, Identifier* name, ClassFlags flags,
base::Optional<TypeExpression*> super,
base::Optional<std::string> generates,
std::vector<Declaration*> methods,
- std::vector<ClassFieldExpression> fields)
+ std::vector<ClassFieldExpression> fields,
+ InstanceTypeConstraints instance_type_constraints)
: TypeDeclaration(kKind, pos, name),
flags(flags),
super(super),
generates(std::move(generates)),
methods(std::move(methods)),
- fields(std::move(fields)) {}
+ fields(std::move(fields)),
+ instance_type_constraints(std::move(instance_type_constraints)) {}
ClassFlags flags;
base::Optional<TypeExpression*> super;
base::Optional<std::string> generates;
std::vector<Declaration*> methods;
std::vector<ClassFieldExpression> fields;
+ InstanceTypeConstraints instance_type_constraints;
};
struct CppIncludeDeclaration : Declaration {
diff --git a/deps/v8/src/torque/class-debug-reader-generator.cc b/deps/v8/src/torque/class-debug-reader-generator.cc
index 6abdffcc91..fca2409959 100644
--- a/deps/v8/src/torque/class-debug-reader-generator.cc
+++ b/deps/v8/src/torque/class-debug-reader-generator.cc
@@ -10,12 +10,20 @@ namespace v8 {
namespace internal {
namespace torque {
+const char* tq_object_override_decls =
+ R"( std::vector<std::unique_ptr<ObjectProperty>> GetProperties(
+ d::MemoryAccessor accessor) const override;
+ const char* GetName() const override;
+ void Visit(TqObjectVisitor* visitor) const override;
+ bool IsSuperclassOf(const TqObject* other) const override;
+)";
+
namespace {
void GenerateClassDebugReader(const ClassType& type, std::ostream& h_contents,
std::ostream& cc_contents, std::ostream& visitor,
std::unordered_set<const ClassType*>* done) {
// Make sure each class only gets generated once.
- if (!type.IsExtern() || !done->insert(&type).second) return;
+ if (!done->insert(&type).second) return;
const ClassType* super_type = type.GetSuperClass();
// We must emit the classes in dependency order. If the super class hasn't
@@ -25,6 +33,10 @@ void GenerateClassDebugReader(const ClassType& type, std::ostream& h_contents,
done);
}
+  // Classes with undefined layout don't provide any particular value here
+  // and may not correspond to actual C++ classes, so skip them.
+ if (type.HasUndefinedLayout()) return;
+
const std::string name = type.name();
const std::string super_name =
super_type == nullptr ? "Object" : super_type->name();
@@ -32,10 +44,7 @@ void GenerateClassDebugReader(const ClassType& type, std::ostream& h_contents,
h_contents << " public:\n";
h_contents << " inline Tq" << name << "(uintptr_t address) : Tq"
<< super_name << "(address) {}\n";
- h_contents << " std::vector<std::unique_ptr<ObjectProperty>> "
- "GetProperties(d::MemoryAccessor accessor) const override;\n";
- h_contents << " const char* GetName() const override;\n";
- h_contents << " void Visit(TqObjectVisitor* visitor) const override;\n";
+ h_contents << tq_object_override_decls;
cc_contents << "\nconst char* Tq" << name << "::GetName() const {\n";
cc_contents << " return \"v8::internal::" << name << "\";\n";
@@ -46,6 +55,13 @@ void GenerateClassDebugReader(const ClassType& type, std::ostream& h_contents,
cc_contents << " visitor->Visit" << name << "(this);\n";
cc_contents << "}\n";
+ cc_contents << "\nbool Tq" << name
+ << "::IsSuperclassOf(const TqObject* other) const {\n";
+ cc_contents
+ << " return GetName() != other->GetName() && dynamic_cast<const Tq"
+ << name << "*>(other) != nullptr;\n";
+ cc_contents << "}\n";
+
visitor << " virtual void Visit" << name << "(const Tq" << name
<< "* object) {\n";
visitor << " Visit" << super_name << "(object);\n";
@@ -71,9 +87,10 @@ void GenerateClassDebugReader(const ClassType& type, std::ostream& h_contents,
if (is_field_tagged) {
field_value_type = "uintptr_t";
field_value_type_compressed = "i::Tagged_t";
- field_cc_type = "v8::internal::" + (field_class_type.has_value()
- ? (*field_class_type)->name()
- : "Object");
+ field_cc_type = "v8::internal::" +
+ (field_class_type.has_value()
+ ? (*field_class_type)->GetGeneratedTNodeTypeName()
+ : "Object");
field_cc_type_compressed =
COMPRESS_POINTERS_BOOL ? "v8::internal::TaggedValue" : field_cc_type;
} else {
@@ -107,7 +124,7 @@ void GenerateClassDebugReader(const ClassType& type, std::ostream& h_contents,
std::string index_param;
std::string index_offset;
if (field.index) {
- const Type* index_type = (*field.index)->name_and_type.type;
+ const Type* index_type = field.index->type;
std::string index_type_name;
std::string index_value;
if (index_type == TypeOracle::GetSmiType()) {
@@ -129,18 +146,17 @@ void GenerateClassDebugReader(const ClassType& type, std::ostream& h_contents,
}
get_props_impl << " Value<" << index_type_name
<< "> indexed_field_count = Get"
- << CamelifyString((*field.index)->name_and_type.name)
+ << CamelifyString(field.index->name)
<< "Value(accessor);\n";
indexed_field_info =
", " + index_value + ", GetArrayKind(indexed_field_count.validity)";
index_param = ", size_t offset";
index_offset = " + offset * sizeof(value)";
}
- get_props_impl
- << " result.push_back(v8::base::make_unique<ObjectProperty>(\""
- << field_name << "\", \"" << field_cc_type_compressed << "\", \""
- << field_cc_type << "\", " << address_getter << "()"
- << indexed_field_info << "));\n";
+ get_props_impl << " result.push_back(std::make_unique<ObjectProperty>(\""
+ << field_name << "\", \"" << field_cc_type_compressed
+ << "\", \"" << field_cc_type << "\", " << address_getter
+ << "()" << indexed_field_info << "));\n";
h_contents << " uintptr_t " << address_getter << "() const;\n";
h_contents << " Value<" << field_value_type << "> " << field_getter
@@ -158,7 +174,8 @@ void GenerateClassDebugReader(const ClassType& type, std::ostream& h_contents,
<< address_getter << "()" << index_offset
<< ", reinterpret_cast<uint8_t*>(&value), sizeof(value));\n";
cc_contents << " return {validity, "
- << (is_field_tagged ? "Decompress(value, address_)" : "value")
+ << (is_field_tagged ? "EnsureDecompressed(value, address_)"
+ : "value")
<< "};\n";
cc_contents << "}\n";
}
@@ -192,6 +209,11 @@ void ImplementationVisitor::GenerateClassDebugReaders(
h_contents
<< "\n#include \"tools/debug_helper/debug-helper-internal.h\"\n\n";
+  h_contents << "// Unset a wingdi.h macro that causes conflicts.\n";
+ h_contents << "#ifdef GetBValue\n";
+ h_contents << "#undef GetBValue\n";
+ h_contents << "#endif\n\n";
+
cc_contents << "#include \"torque-generated/" << file_name << ".h\"\n";
cc_contents << "#include \"include/v8-internal.h\"\n\n";
cc_contents << "namespace i = v8::internal;\n\n";
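
Taken together, these hunks extend the generated debug-reader classes with an IsSuperclassOf override (via the shared tq_object_override_decls string) and stop restricting generation to extern classes, skipping only classes with undefined layout. Reconstructed from the stream statements above, the generated code for a hypothetical class Foo deriving directly from Object would look roughly like this; treat it as an approximation, not verbatim output:

```cpp
class TqFoo : public TqObject {
 public:
  inline TqFoo(uintptr_t address) : TqObject(address) {}
  std::vector<std::unique_ptr<ObjectProperty>> GetProperties(
      d::MemoryAccessor accessor) const override;
  const char* GetName() const override;
  void Visit(TqObjectVisitor* visitor) const override;
  bool IsSuperclassOf(const TqObject* other) const override;
};

const char* TqFoo::GetName() const { return "v8::internal::Foo"; }

bool TqFoo::IsSuperclassOf(const TqObject* other) const {
  // True only for strict subtypes: |other| casts to TqFoo but is not
  // exactly a Foo itself (the name check excludes the exact type).
  return GetName() != other->GetName() &&
         dynamic_cast<const TqFoo*>(other) != nullptr;
}
```
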
diff --git a/deps/v8/src/torque/constants.h b/deps/v8/src/torque/constants.h
index efbbf9588e..ebfbab0cba 100644
--- a/deps/v8/src/torque/constants.h
+++ b/deps/v8/src/torque/constants.h
@@ -55,6 +55,25 @@ static const char* const REFERENCE_TYPE_STRING = "Reference";
static const char* const SLICE_TYPE_STRING = "Slice";
static const char* const STRUCT_NAMESPACE_STRING = "_struct";
+static const char* const ANNOTATION_GENERATE_PRINT = "@generatePrint";
+static const char* const ANNOTATION_NO_VERIFIER = "@noVerifier";
+static const char* const ANNOTATION_ABSTRACT = "@abstract";
+static const char* const ANNOTATION_INSTANTIATED_ABSTRACT_CLASS =
+ "@dirtyInstantiatedAbstractClass";
+static const char* const ANNOTATION_HAS_SAME_INSTANCE_TYPE_AS_PARENT =
+ "@hasSameInstanceTypeAsParent";
+static const char* const ANNOTATION_GENERATE_CPP_CLASS = "@generateCppClass";
+static const char* const ANNOTATION_HIGHEST_INSTANCE_TYPE_WITHIN_PARENT =
+ "@highestInstanceTypeWithinParentClassRange";
+static const char* const ANNOTATION_LOWEST_INSTANCE_TYPE_WITHIN_PARENT =
+ "@lowestInstanceTypeWithinParentClassRange";
+static const char* const ANNOTATION_RESERVE_BITS_IN_INSTANCE_TYPE =
+ "@reserveBitsInInstanceType";
+static const char* const ANNOTATION_INSTANCE_TYPE_VALUE =
+ "@apiExposedInstanceTypeValue";
+static const char* const ANNOTATION_IF = "@if";
+static const char* const ANNOTATION_IFNOT = "@ifnot";
+
inline bool IsConstexprName(const std::string& name) {
return name.substr(0, std::strlen(CONSTEXPR_TYPE_PREFIX)) ==
CONSTEXPR_TYPE_PREFIX;
@@ -80,7 +99,10 @@ enum class ClassFlag {
kInstantiatedAbstractClass = 1 << 5,
kHasSameInstanceTypeAsParent = 1 << 6,
kGenerateCppClassDefinitions = 1 << 7,
- kHasIndexedField = 1 << 8
+ kHasIndexedField = 1 << 8,
+ kHighestInstanceTypeWithinParent = 1 << 9,
+ kLowestInstanceTypeWithinParent = 1 << 10,
+ kUndefinedLayout = 1 << 11,
};
using ClassFlags = base::Flags<ClassFlag>;
diff --git a/deps/v8/src/torque/csa-generator.cc b/deps/v8/src/torque/csa-generator.cc
index 7925783914..2a10e4f353 100644
--- a/deps/v8/src/torque/csa-generator.cc
+++ b/deps/v8/src/torque/csa-generator.cc
@@ -41,7 +41,7 @@ Stack<std::string> CSAGenerator::EmitBlock(const Block* block) {
Stack<std::string> stack;
for (const Type* t : block->InputTypes()) {
stack.Push(FreshNodeName());
- out_ << " compiler::TNode<" << t->GetGeneratedTNodeTypeName() << "> "
+ out_ << " TNode<" << t->GetGeneratedTNodeTypeName() << "> "
<< stack.Top() << ";\n";
}
out_ << " ca_.Bind(&" << BlockName(block);
@@ -56,14 +56,10 @@ Stack<std::string> CSAGenerator::EmitBlock(const Block* block) {
}
void CSAGenerator::EmitSourcePosition(SourcePosition pos, bool always_emit) {
- std::string file = SourceFileMap::AbsolutePath(pos.source);
+ const std::string& file = SourceFileMap::AbsolutePath(pos.source);
if (always_emit || !previous_position_.CompareStartIgnoreColumn(pos)) {
// Lines in Torque SourcePositions are zero-based, while the
// CodeStubAssembler and downwind systems are one-based.
- for (auto& c : file) {
- if (c == '\\')
- c = '/';
- }
out_ << " ca_.SetSourcePosition(\"" << file << "\", "
<< (pos.start.line + 1) << ");\n";
previous_position_ = pos;
@@ -123,8 +119,8 @@ void CSAGenerator::EmitInstruction(
for (const Type* lowered : LowerType(type)) {
results.push_back(FreshNodeName());
stack->Push(results.back());
- out_ << " compiler::TNode<" << lowered->GetGeneratedTNodeTypeName()
- << "> " << stack->Top() << ";\n";
+ out_ << " TNode<" << lowered->GetGeneratedTNodeTypeName() << "> "
+ << stack->Top() << ";\n";
out_ << " USE(" << stack->Top() << ");\n";
}
out_ << " ";
@@ -179,7 +175,7 @@ void CSAGenerator::EmitInstruction(const CallIntrinsicInstruction& instruction,
for (const Type* type : LowerType(return_type)) {
results.push_back(FreshNodeName());
stack->Push(results.back());
- out_ << " compiler::TNode<" << type->GetGeneratedTNodeTypeName() << "> "
+ out_ << " TNode<" << type->GetGeneratedTNodeTypeName() << "> "
<< stack->Top() << ";\n";
out_ << " USE(" << stack->Top() << ");\n";
}
@@ -302,7 +298,7 @@ void CSAGenerator::EmitInstruction(const CallCsaMacroInstruction& instruction,
for (const Type* type : LowerType(return_type)) {
results.push_back(FreshNodeName());
stack->Push(results.back());
- out_ << " compiler::TNode<" << type->GetGeneratedTNodeTypeName() << "> "
+ out_ << " TNode<" << type->GetGeneratedTNodeTypeName() << "> "
<< stack->Top() << ";\n";
out_ << " USE(" << stack->Top() << ");\n";
}
@@ -354,8 +350,8 @@ void CSAGenerator::EmitInstruction(
for (const Type* type :
LowerType(instruction.macro->signature().return_type)) {
results.push_back(FreshNodeName());
- out_ << " compiler::TNode<" << type->GetGeneratedTNodeTypeName()
- << "> " << results.back() << ";\n";
+ out_ << " TNode<" << type->GetGeneratedTNodeTypeName() << "> "
+ << results.back() << ";\n";
out_ << " USE(" << results.back() << ");\n";
}
}
@@ -453,9 +449,8 @@ void CSAGenerator::EmitInstruction(const CallBuiltinInstruction& instruction,
} else {
std::string result_name = FreshNodeName();
if (result_types.size() == 1) {
- out_ << " compiler::TNode<"
- << result_types[0]->GetGeneratedTNodeTypeName() << "> "
- << result_name << ";\n";
+ out_ << " TNode<" << result_types[0]->GetGeneratedTNodeTypeName()
+ << "> " << result_name << ";\n";
}
std::string catch_name =
PreCallableExceptionPreparation(instruction.catch_block);
@@ -503,8 +498,7 @@ void CSAGenerator::EmitInstruction(
stack->Push(FreshNodeName());
std::string generated_type = result_types[0]->GetGeneratedTNodeTypeName();
- out_ << " compiler::TNode<" << generated_type << "> " << stack->Top()
- << " = ";
+ out_ << " TNode<" << generated_type << "> " << stack->Top() << " = ";
if (generated_type != "Object") out_ << "TORQUE_CAST(";
out_ << "CodeStubAssembler(state_).CallBuiltinPointer(Builtins::"
"CallableFor(ca_."
@@ -543,8 +537,7 @@ void CSAGenerator::PostCallableExceptionPreparation(
if (!return_type->IsNever()) {
out_ << " ca_.Goto(&" << catch_name << "_skip);\n";
}
- out_ << " compiler::TNode<Object> " << catch_name
- << "_exception_object;\n";
+ out_ << " TNode<Object> " << catch_name << "_exception_object;\n";
out_ << " ca_.Bind(&" << catch_name << "__label, &" << catch_name
<< "_exception_object);\n";
out_ << " ca_.Goto(&" << block_name;
@@ -579,9 +572,8 @@ void CSAGenerator::EmitInstruction(const CallRuntimeInstruction& instruction,
} else {
std::string result_name = FreshNodeName();
if (result_types.size() == 1) {
- out_ << " compiler::TNode<"
- << result_types[0]->GetGeneratedTNodeTypeName() << "> "
- << result_name << ";\n";
+ out_ << " TNode<" << result_types[0]->GetGeneratedTNodeTypeName()
+ << "> " << result_name << ";\n";
}
std::string catch_name =
PreCallableExceptionPreparation(instruction.catch_block);
@@ -722,10 +714,9 @@ void CSAGenerator::EmitInstruction(
std::string offset_name = FreshNodeName();
stack->Push(offset_name);
- out_ << " compiler::TNode<IntPtrT> " << offset_name
- << " = ca_.IntPtrConstant(";
- out_ << field.aggregate->GetGeneratedTNodeTypeName() << "::k"
- << CamelifyString(field.name_and_type.name) << "Offset";
+ out_ << " TNode<IntPtrT> " << offset_name << " = ca_.IntPtrConstant(";
+ out_ << field.aggregate->GetGeneratedTNodeTypeName() << "::k"
+ << CamelifyString(field.name_and_type.name) << "Offset";
out_ << ");\n"
<< " USE(" << stack->Top() << ");\n";
}
@@ -776,8 +767,8 @@ void CSAGenerator::EmitCSAValue(VisitResult result,
out << "}";
} else {
DCHECK_EQ(1, result.stack_range().Size());
- out << "compiler::TNode<" << result.type()->GetGeneratedTNodeTypeName()
- << ">{" << values.Peek(result.stack_range().begin()) << "}";
+ out << "TNode<" << result.type()->GetGeneratedTNodeTypeName() << ">{"
+ << values.Peek(result.stack_range().begin()) << "}";
}
}
diff --git a/deps/v8/src/torque/declaration-visitor.cc b/deps/v8/src/torque/declaration-visitor.cc
index f762337463..c2fa1af98e 100644
--- a/deps/v8/src/torque/declaration-visitor.cc
+++ b/deps/v8/src/torque/declaration-visitor.cc
@@ -93,20 +93,6 @@ Builtin* DeclarationVisitor::CreateBuiltin(BuiltinDeclaration* decl,
}
}
- if (TorqueBuiltinDeclaration::DynamicCast(decl)) {
- for (size_t i = 0; i < signature.types().size(); ++i) {
- const Type* type = signature.types()[i];
- if (!type->IsSubtypeOf(TypeOracle::GetTaggedType())) {
- const Identifier* id = signature.parameter_names.size() > i
- ? signature.parameter_names[i]
- : nullptr;
- Error("Untagged argument ", id ? (id->value + " ") : "", "at position ",
- i, " to builtin ", decl->name, " is not supported.")
- .Position(id ? id->pos : decl->pos);
- }
- }
- }
-
if (const StructType* struct_type =
StructType::DynamicCast(signature.return_type)) {
Error("Builtins ", decl->name, " cannot return structs ",
diff --git a/deps/v8/src/torque/declarations.h b/deps/v8/src/torque/declarations.h
index 240680fa1e..f3d2544ae1 100644
--- a/deps/v8/src/torque/declarations.h
+++ b/deps/v8/src/torque/declarations.h
@@ -5,6 +5,7 @@
#ifndef V8_TORQUE_DECLARATIONS_H_
#define V8_TORQUE_DECLARATIONS_H_
+#include <memory>
#include <string>
#include "src/torque/declarable.h"
diff --git a/deps/v8/src/torque/earley-parser.h b/deps/v8/src/torque/earley-parser.h
index 9f7ba6a7ae..43ad6eb452 100644
--- a/deps/v8/src/torque/earley-parser.h
+++ b/deps/v8/src/torque/earley-parser.h
@@ -6,6 +6,7 @@
#define V8_TORQUE_EARLEY_PARSER_H_
#include <map>
+#include <memory>
#include <vector>
#include "src/base/optional.h"
@@ -82,6 +83,7 @@ enum class ParseResultHolderBase::TypeId {
kTypeswitchCase,
kStdVectorOfTypeswitchCase,
kStdVectorOfIdentifierPtr,
+ kOptionalClassBody,
kJsonValue,
kJsonMember,
@@ -248,7 +250,7 @@ class Symbol {
size_t rule_number() const { return rules_.size(); }
void AddRule(const Rule& rule) {
- rules_.push_back(base::make_unique<Rule>(rule));
+ rules_.push_back(std::make_unique<Rule>(rule));
rules_.back()->SetLeftHandSide(this);
}
diff --git a/deps/v8/src/torque/global-context.cc b/deps/v8/src/torque/global-context.cc
index 13503038c5..e236de5a93 100644
--- a/deps/v8/src/torque/global-context.cc
+++ b/deps/v8/src/torque/global-context.cc
@@ -19,7 +19,7 @@ GlobalContext::GlobalContext(Ast ast)
CurrentSourcePosition::Scope current_source_position(
SourcePosition{CurrentSourceFile::Get(), {-1, -1}, {-1, -1}});
default_namespace_ =
- RegisterDeclarable(base::make_unique<Namespace>(kBaseNamespaceName));
+ RegisterDeclarable(std::make_unique<Namespace>(kBaseNamespaceName));
}
TargetArchitecture::TargetArchitecture(bool force_32bit)
diff --git a/deps/v8/src/torque/global-context.h b/deps/v8/src/torque/global-context.h
index e1106adbd1..940325b51a 100644
--- a/deps/v8/src/torque/global-context.h
+++ b/deps/v8/src/torque/global-context.h
@@ -6,6 +6,7 @@
#define V8_TORQUE_GLOBAL_CONTEXT_H_
#include <map>
+#include <memory>
#include "src/common/globals.h"
#include "src/torque/ast.h"
diff --git a/deps/v8/src/torque/implementation-visitor.cc b/deps/v8/src/torque/implementation-visitor.cc
index 8f36afd020..0c50a70099 100644
--- a/deps/v8/src/torque/implementation-visitor.cc
+++ b/deps/v8/src/torque/implementation-visitor.cc
@@ -526,7 +526,6 @@ void ImplementationVisitor::Visit(Builtin* builtin) {
source_out() << " USE(" << parameter0 << ");\n";
for (size_t i = 1; i < signature.parameter_names.size(); ++i) {
- const std::string& parameter_name = signature.parameter_names[i]->value;
const Type* type = signature.types()[i];
const bool mark_as_used = signature.implicit_count > i;
std::string var = AddParameter(i, builtin, &parameters, &parameter_types,
@@ -534,8 +533,8 @@ void ImplementationVisitor::Visit(Builtin* builtin) {
source_out() << " " << type->GetGeneratedTypeName() << " " << var
<< " = "
<< "UncheckedCast<" << type->GetGeneratedTNodeTypeName()
- << ">(Parameter(Descriptor::k"
- << CamelifyString(parameter_name) << "));\n";
+ << ">(Parameter(Descriptor::ParameterIndex<" << (i - 1)
+ << ">()));\n";
source_out() << " USE(" << var << ");\n";
}
}
@@ -1008,48 +1007,40 @@ const Type* ImplementationVisitor::Visit(AssertStatement* stmt) {
#if defined(DEBUG)
do_check = true;
#endif
- if (do_check) {
- // CSA_ASSERT & co. are not used here on purpose for two reasons. First,
- // Torque allows and handles two types of expressions in the if protocol
- // automagically, ones that return TNode<BoolT> and those that use the
- // BranchIf(..., Label* true, Label* false) idiom. Because the machinery to
- // handle this is embedded in the expression handling and to it's not
- // possible to make the decision to use CSA_ASSERT or CSA_ASSERT_BRANCH
- // isn't trivial up-front. Secondly, on failure, the assert text should be
- // the corresponding Torque code, not the -gen.cc code, which would be the
- // case when using CSA_ASSERT_XXX.
- Block* true_block = assembler().NewBlock(assembler().CurrentStack());
- Block* false_block = assembler().NewBlock(assembler().CurrentStack(), true);
- GenerateExpressionBranch(stmt->expression, true_block, false_block);
+ Block* resume_block;
+
+ if (!do_check) {
+ Block* unreachable_block = assembler().NewBlock(assembler().CurrentStack());
+ resume_block = assembler().NewBlock(assembler().CurrentStack());
+ assembler().Goto(resume_block);
+ assembler().Bind(unreachable_block);
+ }
+
+  // CSA_ASSERT & co. are not used here on purpose for two reasons. First,
+  // Torque allows and handles two types of expressions in the if protocol
+  // automagically, ones that return TNode<BoolT> and those that use the
+  // BranchIf(..., Label* true, Label* false) idiom. Because the machinery to
+  // handle this is embedded in the expression handling, the decision to use
+  // CSA_ASSERT or CSA_ASSERT_BRANCH isn't trivial to make up-front. Second,
+  // on failure, the assert text should be the corresponding Torque code, not
+  // the -gen.cc code, which would be the case when using CSA_ASSERT_XXX.
+ Block* true_block = assembler().NewBlock(assembler().CurrentStack());
+ Block* false_block = assembler().NewBlock(assembler().CurrentStack(), true);
+ GenerateExpressionBranch(stmt->expression, true_block, false_block);
- assembler().Bind(false_block);
+ assembler().Bind(false_block);
- assembler().Emit(AbortInstruction{
- AbortInstruction::Kind::kAssertionFailure,
- "Torque assert '" + FormatAssertSource(stmt->source) + "' failed"});
+ assembler().Emit(AbortInstruction{
+ AbortInstruction::Kind::kAssertionFailure,
+ "Torque assert '" + FormatAssertSource(stmt->source) + "' failed"});
- assembler().Bind(true_block);
- } else {
- // Visit the expression so bindings only used in asserts are marked
- // as such. Otherwise they might be wrongly reported as unused bindings
- // in release builds.
- stmt->expression->VisitAllSubExpressions([](Expression* expression) {
- if (auto id = IdentifierExpression::DynamicCast(expression)) {
- ValueBindingsManager::Get().TryLookup(id->name->value);
- } else if (auto call = CallExpression::DynamicCast(expression)) {
- for (Identifier* label : call->labels) {
- LabelBindingsManager::Get().TryLookup(label->value);
- }
- // TODO(szuend): In case the call expression resolves to a macro
- // callable, mark the macro as used as well.
- } else if (auto call = CallMethodExpression::DynamicCast(expression)) {
- for (Identifier* label : call->labels) {
- LabelBindingsManager::Get().TryLookup(label->value);
- }
- // TODO(szuend): Mark the underlying macro as used.
- }
- });
+ assembler().Bind(true_block);
+
+ if (!do_check) {
+ assembler().Bind(resume_block);
}
+
return TypeOracle::GetVoidType();
}
@@ -1214,16 +1205,16 @@ InitializerResults ImplementationVisitor::VisitInitializerResults(
result.names.push_back(initializer.name);
Expression* e = initializer.expression;
const Field& field = class_type->LookupField(initializer.name->value);
- auto field_index = field.index;
+ bool has_index = field.index.has_value();
if (SpreadExpression* s = SpreadExpression::DynamicCast(e)) {
- if (!field_index) {
+ if (!has_index) {
ReportError(
"spread expressions can only be used to initialize indexed class "
"fields ('",
initializer.name->value, "' is not)");
}
e = s->spreadee;
- } else if (field_index) {
+ } else if (has_index) {
ReportError("the indexed class field '", initializer.name->value,
"' must be initialized with a spread operator");
}
@@ -1261,7 +1252,7 @@ void ImplementationVisitor::InitializeClass(
void ImplementationVisitor::InitializeFieldFromSpread(
VisitResult object, const Field& field,
const InitializerResults& initializer_results) {
- NameAndType index = (*field.index)->name_and_type;
+ const NameAndType& index = *field.index;
VisitResult iterator =
initializer_results.field_value_map.at(field.name_and_type.name);
VisitResult length = initializer_results.field_value_map.at(index.name);
@@ -1289,15 +1280,14 @@ VisitResult ImplementationVisitor::AddVariableObjectSize(
}
VisitResult index_field_size =
VisitResult(TypeOracle::GetConstInt31Type(), "kTaggedSize");
- VisitResult initializer_value = initializer_results.field_value_map.at(
- (*current_field->index)->name_and_type.name);
+ VisitResult initializer_value =
+ initializer_results.field_value_map.at(current_field->index->name);
Arguments args;
args.parameters.push_back(object_size);
args.parameters.push_back(initializer_value);
args.parameters.push_back(index_field_size);
- object_size =
- GenerateCall("%AddIndexedFieldSizeToObjectSize", args,
- {(*current_field->index)->name_and_type.type}, false);
+ object_size = GenerateCall("%AddIndexedFieldSizeToObjectSize", args,
+ {current_field->index->type}, false);
}
++current_field;
}
@@ -1860,12 +1850,12 @@ LocationReference ImplementationVisitor::GetLocationReference(
{
StackScope length_scope(this);
// Get a reference to the length
- const Field* index_field = field.index.value();
+ const NameAndType& index_field = field.index.value();
GenerateCopy(object_result);
- assembler().Emit(CreateFieldReferenceInstruction{
- object_result.type(), index_field->name_and_type.name});
+ assembler().Emit(CreateFieldReferenceInstruction{object_result.type(),
+ index_field.name});
VisitResult length_reference(
- TypeOracle::GetReferenceType(index_field->name_and_type.type),
+ TypeOracle::GetReferenceType(index_field.type),
assembler().TopRange(2));
// Load the length from the reference and convert it to intptr
@@ -2670,13 +2660,34 @@ void ImplementationVisitor::Visit(Declarable* declarable) {
}
}
-void ImplementationVisitor::GenerateBuiltinDefinitions(
+std::string MachineTypeString(const Type* type) {
+ if (type->IsSubtypeOf(TypeOracle::GetSmiType())) {
+ return "MachineType::TaggedSigned()";
+ }
+ if (type->IsSubtypeOf(TypeOracle::GetHeapObjectType())) {
+ return "MachineType::TaggedPointer()";
+ }
+ if (type->IsSubtypeOf(TypeOracle::GetTaggedType())) {
+ return "MachineType::AnyTagged()";
+ }
+ return "MachineTypeOf<" + type->GetGeneratedTNodeTypeName() + ">::value";
+}
+
+void ImplementationVisitor::GenerateBuiltinDefinitionsAndInterfaceDescriptors(
const std::string& output_directory) {
- std::stringstream new_contents_stream;
- std::string file_name = "builtin-definitions-tq.h";
+ std::stringstream builtin_definitions;
+ std::string builtin_definitions_file_name = "builtin-definitions-tq.h";
+
+ // This file contains plain interface descriptor definitions and has to be
+ // included in the middle of interface-descriptors.h. Thus it is not a normal
+ // header file and uses the .inc suffix instead of the .h suffix.
+ std::stringstream interface_descriptors;
+ std::string interface_descriptors_file_name = "interface-descriptors-tq.inc";
{
- IncludeGuardScope include_guard(new_contents_stream, file_name);
- new_contents_stream
+ IncludeGuardScope builtin_definitions_include_guard(
+ builtin_definitions, builtin_definitions_file_name);
+
+ builtin_definitions
<< "\n"
"#define BUILTIN_LIST_FROM_TORQUE(CPP, TFJ, TFC, TFS, TFH, "
"ASM) "
@@ -2684,40 +2695,67 @@ void ImplementationVisitor::GenerateBuiltinDefinitions(
for (auto& declarable : GlobalContext::AllDeclarables()) {
Builtin* builtin = Builtin::DynamicCast(declarable.get());
if (!builtin || builtin->IsExternal()) continue;
- size_t firstParameterIndex = 1;
- bool declareParameters = true;
if (builtin->IsStub()) {
- new_contents_stream << "TFS(" << builtin->ExternalName();
+ builtin_definitions << "TFC(" << builtin->ExternalName() << ", "
+ << builtin->ExternalName();
+ std::string descriptor_name = builtin->ExternalName() + "Descriptor";
+ constexpr size_t kFirstNonContextParameter = 1;
+ size_t parameter_count =
+ builtin->parameter_names().size() - kFirstNonContextParameter;
+
+ interface_descriptors << "class " << descriptor_name
+ << " : public TorqueInterfaceDescriptor<"
+ << parameter_count << "> {\n";
+ interface_descriptors << " DECLARE_DESCRIPTOR_WITH_BASE("
+ << descriptor_name
+ << ", TorqueInterfaceDescriptor)\n";
+
+ interface_descriptors << " MachineType ReturnType() override {\n";
+ interface_descriptors
+ << " return "
+ << MachineTypeString(builtin->signature().return_type) << ";\n";
+ interface_descriptors << " }\n";
+
+ interface_descriptors << " std::array<MachineType, " << parameter_count
+ << "> ParameterTypes() override {\n";
+ interface_descriptors << " return {";
+ for (size_t i = kFirstNonContextParameter;
+ i < builtin->parameter_names().size(); ++i) {
+ bool last = i + 1 == builtin->parameter_names().size();
+ const Type* type = builtin->signature().parameter_types.types[i];
+ interface_descriptors << MachineTypeString(type)
+ << (last ? "" : ", ");
+ }
+ interface_descriptors << "};\n";
+
+ interface_descriptors << " }\n";
+ interface_descriptors << "};\n\n";
} else {
- new_contents_stream << "TFJ(" << builtin->ExternalName();
+ builtin_definitions << "TFJ(" << builtin->ExternalName();
if (builtin->IsVarArgsJavaScript()) {
- new_contents_stream
+ builtin_definitions
<< ", SharedFunctionInfo::kDontAdaptArgumentsSentinel";
- declareParameters = false;
} else {
DCHECK(builtin->IsFixedArgsJavaScript());
// FixedArg javascript builtins need to offer the parameter
// count.
int parameter_count =
static_cast<int>(builtin->signature().ExplicitCount());
- new_contents_stream << ", " << parameter_count;
+ builtin_definitions << ", " << parameter_count;
// And the receiver is explicitly declared.
- new_contents_stream << ", kReceiver";
- firstParameterIndex = builtin->signature().implicit_count;
- }
- }
- if (declareParameters) {
- for (size_t i = firstParameterIndex;
- i < builtin->parameter_names().size(); ++i) {
- Identifier* parameter = builtin->parameter_names()[i];
- new_contents_stream << ", k" << CamelifyString(parameter->value);
+ builtin_definitions << ", kReceiver";
+ for (size_t i = builtin->signature().implicit_count;
+ i < builtin->parameter_names().size(); ++i) {
+ Identifier* parameter = builtin->parameter_names()[i];
+ builtin_definitions << ", k" << CamelifyString(parameter->value);
+ }
}
}
- new_contents_stream << ") \\\n";
+ builtin_definitions << ") \\\n";
}
- new_contents_stream << "\n";
+ builtin_definitions << "\n";
- new_contents_stream
+ builtin_definitions
<< "#define TORQUE_FUNCTION_POINTER_TYPE_TO_BUILTIN_MAP(V) \\\n";
for (const BuiltinPointerType* type :
TypeOracle::AllBuiltinPointerTypes()) {
@@ -2728,13 +2766,15 @@ void ImplementationVisitor::GenerateBuiltinDefinitions(
SourcePosition{CurrentSourceFile::Get(), {-1, -1}, {-1, -1}});
ReportError("unable to find any builtin with type \"", *type, "\"");
}
- new_contents_stream << " V(" << type->function_pointer_type_id() << ","
+ builtin_definitions << " V(" << type->function_pointer_type_id() << ","
<< example_builtin->ExternalName() << ")\\\n";
}
- new_contents_stream << "\n";
+ builtin_definitions << "\n";
}
- std::string new_contents(new_contents_stream.str());
- WriteFile(output_directory + "/" + file_name, new_contents);
+ WriteFile(output_directory + "/" + builtin_definitions_file_name,
+ builtin_definitions.str());
+ WriteFile(output_directory + "/" + interface_descriptors_file_name,
+ interface_descriptors.str());
}
namespace {
@@ -2894,40 +2934,8 @@ class MacroFieldOffsetsGenerator : public FieldOffsetsGenerator {
private:
std::ostream& out_;
};
-} // namespace
-
-void ImplementationVisitor::GenerateInstanceTypes(
- const std::string& output_directory) {
- std::stringstream header;
- std::string file_name = "instance-types-tq.h";
- {
- IncludeGuardScope(header, file_name);
- header << "#define TORQUE_DEFINED_INSTANCE_TYPES(V) \\\n";
- for (const TypeAlias* alias : GlobalContext::GetClasses()) {
- const ClassType* type = ClassType::DynamicCast(alias->type());
- if (type->IsExtern()) continue;
- std::string type_name =
- CapifyStringWithUnderscores(type->name()) + "_TYPE";
- header << " V(" << type_name << ") \\\n";
- }
- header << "\n\n";
-
- header << "#define TORQUE_STRUCT_LIST_GENERATOR(V, _) \\\n";
- for (const TypeAlias* alias : GlobalContext::GetClasses()) {
- const ClassType* type = ClassType::DynamicCast(alias->type());
- if (type->IsExtern()) continue;
- std::string type_name =
- CapifyStringWithUnderscores(type->name()) + "_TYPE";
- std::string variable_name = SnakeifyString(type->name());
- header << " V(_, " << type_name << ", " << type->name() << ", "
- << variable_name << ") \\\n";
- }
- header << "\n";
- }
- std::string output_header_path = output_directory + "/" + file_name;
- WriteFile(output_header_path, header.str());
-}
+} // namespace
void ImplementationVisitor::GenerateCppForInternalClasses(
const std::string& output_directory) {
@@ -3148,7 +3156,7 @@ void CppClassGenerator::GenerateClassConstructors() {
if (type_->IsInstantiatedAbstractClass()) {
// This is a hack to prevent wrong instance type checks.
inl_ << " // Instance check omitted because class is annotated with "
- "@dirtyInstantiatedAbstractClass.\n";
+ << ANNOTATION_INSTANTIATED_ABSTRACT_CLASS << ".\n";
} else {
inl_ << " SLOW_DCHECK(this->Is" << name_ << "());\n";
}
@@ -3241,7 +3249,8 @@ void CppClassGenerator::GenerateFieldAccessorForObject(const Field& f) {
const std::string offset = "k" + CamelifyString(name) + "Offset";
base::Optional<const ClassType*> class_type = field_type->ClassSupertype();
- std::string type = class_type ? (*class_type)->name() : "Object";
+ std::string type =
+ class_type ? (*class_type)->GetGeneratedTNodeTypeName() : "Object";
// Generate declarations in header.
if (!class_type && field_type != TypeOracle::GetObjectType()) {
@@ -3302,7 +3311,6 @@ void ImplementationVisitor::GenerateClassDefinitions(
{
IncludeGuardScope header_guard(header, basename + ".h");
- header << "#include \"src/objects/heap-number.h\"\n";
header << "#include \"src/objects/objects.h\"\n";
header << "#include \"src/objects/smi.h\"\n";
header << "#include \"torque-generated/field-offsets-tq.h\"\n";
@@ -3314,9 +3322,11 @@ void ImplementationVisitor::GenerateClassDefinitions(
IncludeGuardScope inline_header_guard(inline_header, basename + "-inl.h");
inline_header << "#include \"torque-generated/class-definitions-tq.h\"\n\n";
inline_header << "#include \"src/objects/js-promise.h\"\n";
+ inline_header << "#include \"src/objects/js-weak-refs.h\"\n";
inline_header << "#include \"src/objects/module.h\"\n";
inline_header << "#include \"src/objects/objects-inl.h\"\n";
- inline_header << "#include \"src/objects/script.h\"\n\n";
+ inline_header << "#include \"src/objects/script.h\"\n";
+ inline_header << "#include \"src/objects/shared-function-info.h\"\n\n";
IncludeObjectMacrosScope inline_header_macros(inline_header);
NamespaceScope inline_header_namespaces(inline_header, {"v8", "internal"});
@@ -3328,6 +3338,7 @@ void ImplementationVisitor::GenerateClassDefinitions(
implementation << "#include \"src/objects/embedder-data-array-inl.h\"\n";
implementation << "#include \"src/objects/js-generator-inl.h\"\n";
implementation << "#include \"src/objects/js-regexp-inl.h\"\n";
+ implementation << "#include \"src/objects/js-weak-refs-inl.h\"\n";
implementation
<< "#include \"src/objects/js-regexp-string-iterator-inl.h\"\n";
implementation << "#include \"src/objects/literal-objects-inl.h\"\n";
@@ -3346,7 +3357,7 @@ void ImplementationVisitor::GenerateClassDefinitions(
// Generate forward declarations for every class.
for (const TypeAlias* alias : GlobalContext::GetClasses()) {
const ClassType* type = ClassType::DynamicCast(alias->type());
- header << "class " << type->name() << ";\n";
+ header << "class " << type->GetGeneratedTNodeTypeName() << ";\n";
}
for (const TypeAlias* alias : GlobalContext::GetClasses()) {
@@ -3439,13 +3450,13 @@ void GenerateClassFieldVerifier(const std::string& class_name,
if (!field_type->IsSubtypeOf(TypeOracle::GetObjectType())) return;
if (f.index) {
- if ((*f.index)->name_and_type.type != TypeOracle::GetSmiType()) {
+ if (f.index->type != TypeOracle::GetSmiType()) {
ReportError("Non-SMI values are not (yet) supported as indexes.");
}
// We already verified the index field because it was listed earlier, so we
// can assume it's safe to read here.
cc_contents << " for (int i = 0; i < TaggedField<Smi, " << class_name
- << "::k" << CamelifyString((*f.index)->name_and_type.name)
+ << "::k" << CamelifyString(f.index->name)
<< "Offset>::load(o).value(); ++i) {\n";
} else {
cc_contents << " {\n";
diff --git a/deps/v8/src/torque/implementation-visitor.h b/deps/v8/src/torque/implementation-visitor.h
index eb1a6c4452..e1ebfeeb17 100644
--- a/deps/v8/src/torque/implementation-visitor.h
+++ b/deps/v8/src/torque/implementation-visitor.h
@@ -5,6 +5,7 @@
#ifndef V8_TORQUE_IMPLEMENTATION_VISITOR_H_
#define V8_TORQUE_IMPLEMENTATION_VISITOR_H_
+#include <memory>
#include <string>
#include "src/base/macros.h"
@@ -260,7 +261,7 @@ class BlockBindings {
void Add(std::string name, T value, bool mark_as_used = false) {
ReportErrorIfAlreadyBound(name);
auto binding =
- base::make_unique<Binding<T>>(manager_, name, std::move(value));
+ std::make_unique<Binding<T>>(manager_, name, std::move(value));
if (mark_as_used) binding->SetUsed();
bindings_.push_back(std::move(binding));
}
@@ -268,7 +269,7 @@ class BlockBindings {
void Add(const Identifier* name, T value, bool mark_as_used = false) {
ReportErrorIfAlreadyBound(name->value);
auto binding =
- base::make_unique<Binding<T>>(manager_, name, std::move(value));
+ std::make_unique<Binding<T>>(manager_, name, std::move(value));
if (mark_as_used) binding->SetUsed();
bindings_.push_back(std::move(binding));
}
@@ -342,7 +343,8 @@ bool IsCompatibleSignature(const Signature& sig, const TypeVector& types,
class ImplementationVisitor {
public:
- void GenerateBuiltinDefinitions(const std::string& output_directory);
+ void GenerateBuiltinDefinitionsAndInterfaceDescriptors(
+ const std::string& output_directory);
void GenerateClassFieldOffsets(const std::string& output_directory);
void GeneratePrintDefinitions(const std::string& output_directory);
void GenerateClassDefinitions(const std::string& output_directory);
diff --git a/deps/v8/src/torque/instance-type-generator.cc b/deps/v8/src/torque/instance-type-generator.cc
new file mode 100644
index 0000000000..275e706485
--- /dev/null
+++ b/deps/v8/src/torque/instance-type-generator.cc
@@ -0,0 +1,376 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/torque/implementation-visitor.h"
+
+namespace v8 {
+namespace internal {
+namespace torque {
+
+namespace {
+
+// Contains all necessary state for a single class type during the process of
+// assigning instance types, and provides a convenient way to access the list of
+// types that inherit from this one.
+struct InstanceTypeTree {
+ explicit InstanceTypeTree(const ClassType* type)
+ : type(type),
+ parent(nullptr),
+ start(INT_MAX),
+ end(INT_MIN),
+ value(-1),
+ num_values(0),
+ num_own_values(0) {}
+ const ClassType* type;
+ InstanceTypeTree* parent;
+ std::vector<std::unique_ptr<InstanceTypeTree>> children;
+ int start; // Start of range for this and subclasses, or INT_MAX.
+ int end; // End of range for this and subclasses, or INT_MIN.
+ int value; // Assigned value for this class itself, or -1 when unassigned.
+ int num_values; // Number of values assigned for this and subclasses.
+ int num_own_values; // How many values this needs (not including subclasses).
+};
+
+// Assembles all class types into a tree, but doesn't yet attempt to assign
+// instance types for them.
+std::unique_ptr<InstanceTypeTree> BuildInstanceTypeTree() {
+ // First, build InstanceTypeTree instances for every class but don't try to
+ // attach them to their subclasses yet.
+ std::unordered_map<const ClassType*, InstanceTypeTree*> map_by_type;
+ std::vector<std::unique_ptr<InstanceTypeTree>> unparented_types;
+ for (auto& p : GlobalContext::AllDeclarables()) {
+ if (const TypeAlias* alias = TypeAlias::DynamicCast(p.get())) {
+ const Type* type = alias->type();
+ const ClassType* class_type = ClassType::DynamicCast(type);
+ if (class_type == nullptr) {
+ continue;
+ }
+ auto& map_slot = map_by_type[class_type];
+ if (map_slot != nullptr) {
+ continue; // We already encountered this type.
+ }
+ std::unique_ptr<InstanceTypeTree> type_tree =
+ std::make_unique<InstanceTypeTree>(class_type);
+ map_slot = type_tree.get();
+ unparented_types.push_back(std::move(type_tree));
+ }
+ }
+
+ // Second, assemble them all into a tree following the inheritance hierarchy.
+ std::unique_ptr<InstanceTypeTree> root;
+ for (auto& type_tree : unparented_types) {
+ const ClassType* parent = type_tree->type->GetSuperClass();
+ if (parent == nullptr) {
+ if (root != nullptr)
+ Error("Expected only one root class type. Found: ", root->type->name(),
+ " and ", type_tree->type->name())
+ .Position(type_tree->type->GetPosition());
+ root = std::move(type_tree);
+ } else {
+ map_by_type[parent]->children.push_back(std::move(type_tree));
+ }
+ }
+ return root;
+}
+
+// Propagates constraints about instance types from children to their parents.
+void PropagateInstanceTypeConstraints(InstanceTypeTree* root) {
+ for (auto& child : root->children) {
+ PropagateInstanceTypeConstraints(child.get());
+ if (child->start < root->start) root->start = child->start;
+ if (child->end > root->end) root->end = child->end;
+ root->num_values += child->num_values;
+ }
+ const InstanceTypeConstraints& constraints =
+ root->type->GetInstanceTypeConstraints();
+ if ((!root->type->IsAbstract() ||
+ root->type->IsInstantiatedAbstractClass()) &&
+ !root->type->HasSameInstanceTypeAsParent()) {
+ root->num_own_values = 1;
+ }
+ root->num_values += root->num_own_values;
+ if (constraints.num_flags_bits != -1) {
+ // Children won't get any types assigned; must be done manually in C++.
+ root->children.clear();
+ root->num_values = 1 << constraints.num_flags_bits;
+ root->num_own_values = root->num_values;
+ root->start = 0;
+ root->end = root->num_values - 1;
+ }
+ if (constraints.value != -1) {
+ if (root->num_own_values != 1) {
+ Error("Instance type value requested for abstract class ",
+ root->type->name())
+ .Position(root->type->GetPosition());
+ }
+ root->value = constraints.value;
+ if (constraints.value < root->start) root->start = constraints.value;
+ if (constraints.value > root->end) root->end = constraints.value;
+ }
+}
+
+// Assigns values for the type itself, not including any children. Returns the
+// next available value.
+int SelectOwnValues(InstanceTypeTree* root, int start_value) {
+ if (root->value == -1) {
+ root->value = start_value;
+ } else if (root->value < start_value) {
+ Error("Failed to assign instance type ", root->value, " to ",
+ root->type->name())
+ .Position(root->type->GetPosition());
+ }
+ return root->value + root->num_own_values;
+}
+
+// Sorting function for types that don't have specific values they must include.
+// Prioritizes bigger type ranges (those with more subtypes) first, and
+// then sorts alphabetically within each size category.
+struct CompareUnconstrainedTypes {
+ constexpr bool operator()(const InstanceTypeTree* a,
+ const InstanceTypeTree* b) const {
+ return (a->num_values > b->num_values)
+ ? true
+ : (a->num_values < b->num_values)
+ ? false
+ : std::less<std::string>()(a->type->name(),
+ b->type->name());
+ }
+};
+
+// Assigns concrete values for every instance type range, and sorts the children
+// at each layer of the tree into increasing order. Appends the newly-assigned
+// tree to the destination vector. Returns the first unassigned value after
+// those that have been used.
+int SolveInstanceTypeConstraints(
+ std::unique_ptr<InstanceTypeTree> root, int start_value,
+ std::vector<std::unique_ptr<InstanceTypeTree>>* destination) {
+ if (root->start < start_value) {
+ Error("Failed to assign instance type ", root->start, " to ",
+ root->type->name())
+ .Position(root->type->GetPosition());
+ }
+
+ // First, separate the children into four groups:
+ // - The one child that must go first, if it exists;
+ // - Children with specific value requirements ("constrained");
+ // - Children without specific value requirements ("unconstrained");
+ // - The one child that must go last, if it exists.
+ std::unique_ptr<InstanceTypeTree> lowest_child;
+ std::unique_ptr<InstanceTypeTree> highest_child;
+ std::multimap<int, std::unique_ptr<InstanceTypeTree>>
+ constrained_children_by_start;
+ // Using std::map because you can't std::move out of a std::set until C++17.
+ std::map<InstanceTypeTree*, std::unique_ptr<InstanceTypeTree>,
+ CompareUnconstrainedTypes>
+ unconstrained_children_by_size;
+ for (auto& child : root->children) {
+ if (child->type->IsHighestInstanceTypeWithinParent()) {
+ if (highest_child) {
+ Error("Two classes requested to be the highest instance type: ",
+ highest_child->type->name(), " and ", child->type->name(),
+ " within range for parent class ", root->type->name())
+ .Position(child->type->GetPosition());
+ }
+ if (child->type->IsLowestInstanceTypeWithinParent()) {
+ Error(
+ "Class requested to be both highest and lowest instance type "
+ "within its parent range: ",
+ child->type->name())
+ .Position(child->type->GetPosition());
+ }
+ highest_child = std::move(child);
+ } else if (child->type->IsLowestInstanceTypeWithinParent()) {
+ if (lowest_child) {
+ Error("Two classes requested to be the lowest instance type: ",
+ lowest_child->type->name(), " and ", child->type->name(),
+ " within range for parent class ", root->type->name())
+ .Position(child->type->GetPosition());
+ }
+ lowest_child = std::move(child);
+ } else if (child->start > child->end) {
+ unconstrained_children_by_size.insert(
+ std::make_pair(child.get(), std::move(child)));
+ } else {
+ constrained_children_by_start.insert(
+ std::make_pair(child->start, std::move(child)));
+ }
+ }
+ root->children.clear();
+
+ bool own_type_pending = root->num_own_values > 0;
+
+ // Second, iterate and place the children in ascending order.
+ if (lowest_child != nullptr) {
+ start_value = SolveInstanceTypeConstraints(std::move(lowest_child),
+ start_value, &root->children);
+ }
+ for (auto& constrained_child_pair : constrained_children_by_start) {
+ // Select the next constrained child type in ascending order.
+ std::unique_ptr<InstanceTypeTree> constrained_child =
+ std::move(constrained_child_pair.second);
+
+ // Try to place the root type before the constrained child type if it fits.
+ if (own_type_pending) {
+ if ((root->value != -1 && root->value < constrained_child->start) ||
+ (root->value == -1 &&
+ start_value + root->num_own_values <= constrained_child->start)) {
+ start_value = SelectOwnValues(root.get(), start_value);
+ own_type_pending = false;
+ }
+ }
+
+ // Try to find any unconstrained children that fit before the constrained
+ // one. This simple greedy algorithm just puts the biggest unconstrained
+ // children in first, which might not fill the space as efficiently as
+ // possible but is good enough for our needs.
+ for (auto it = unconstrained_children_by_size.begin();
+ it != unconstrained_children_by_size.end();) {
+ if (it->second->num_values + start_value <= constrained_child->start) {
+ start_value = SolveInstanceTypeConstraints(
+ std::move(it->second), start_value, &root->children);
+ it = unconstrained_children_by_size.erase(it);
+ } else {
+ ++it;
+ }
+ }
+
+ // Place the constrained child type.
+ start_value = SolveInstanceTypeConstraints(std::move(constrained_child),
+ start_value, &root->children);
+ }
+ if (own_type_pending) {
+ start_value = SelectOwnValues(root.get(), start_value);
+ own_type_pending = false;
+ }
+ for (auto& child_pair : unconstrained_children_by_size) {
+ start_value = SolveInstanceTypeConstraints(std::move(child_pair.second),
+ start_value, &root->children);
+ }
+ if (highest_child != nullptr) {
+ start_value = SolveInstanceTypeConstraints(std::move(highest_child),
+ start_value, &root->children);
+ }
+
+ // Finally, set the range for this class to include all placed subclasses.
+ root->end = start_value - 1;
+ root->start =
+ root->children.empty() ? start_value : root->children.front()->start;
+ if (root->value != -1 && root->value < root->start) {
+ root->start = root->value;
+ }
+ root->num_values = root->end - root->start + 1;
+
+ if (root->num_values > 0) {
+ destination->push_back(std::move(root));
+ }
+ return start_value;
+}
+
+std::unique_ptr<InstanceTypeTree> SolveInstanceTypeConstraints(
+ std::unique_ptr<InstanceTypeTree> root) {
+ std::vector<std::unique_ptr<InstanceTypeTree>> destination;
+ SolveInstanceTypeConstraints(std::move(root), 0, &destination);
+ return destination.empty() ? nullptr : std::move(destination.front());
+}
+
+std::unique_ptr<InstanceTypeTree> AssignInstanceTypes() {
+ std::unique_ptr<InstanceTypeTree> root = BuildInstanceTypeTree();
+ if (root != nullptr) {
+ PropagateInstanceTypeConstraints(root.get());
+ root = SolveInstanceTypeConstraints(std::move(root));
+ }
+ return root;
+}
+
+// Prints items in macro lists for the given type and its descendants.
+// - definitions: This list is pairs of instance type name and assigned value,
+// such as V(ODDBALL_TYPE, 67). It includes FIRST_* and LAST_* items for each
+// type that has more than one associated InstanceType. Items within those
+// ranges are indented for readability.
+// - values: This list is just instance type names, like V(ODDBALL_TYPE). It
+// does not include any FIRST_* and LAST_* range markers.
+void PrintInstanceTypes(InstanceTypeTree* root, std::ostream& definitions,
+ std::ostream& values, const std::string& indent) {
+ std::string type_name =
+ CapifyStringWithUnderscores(root->type->name()) + "_TYPE";
+ std::string inner_indent = indent;
+
+ if (root->num_values > 1) {
+ definitions << indent << "V(FIRST_" << type_name << ", " << root->start
+ << ") \\\n";
+ inner_indent += " ";
+ }
+ if (root->num_own_values == 1) {
+ definitions << inner_indent << "V(" << type_name << ", " << root->value
+ << ") \\\n";
+ values << " V(" << type_name << ") \\\n";
+ }
+ for (auto& child : root->children) {
+ PrintInstanceTypes(child.get(), definitions, values, inner_indent);
+ }
+ // We can't emit LAST_STRING_TYPE because it's not a valid flags combination.
+ // So if the class type has multiple own values, which only happens when using
+// ANNOTATION_RESERVE_BITS_IN_INSTANCE_TYPE, we omit the end marker.
+ if (root->num_values > 1 && root->num_own_values <= 1) {
+ definitions << indent << "V(LAST_" << type_name << ", " << root->end
+ << ") \\\n";
+ }
+}
+
+} // namespace
+
+void ImplementationVisitor::GenerateInstanceTypes(
+ const std::string& output_directory) {
+ std::stringstream header;
+ std::string file_name = "instance-types-tq.h";
+ {
+ IncludeGuardScope guard(header, file_name);
+
+ header << "// Instance types for all classes except for those that use "
+ "InstanceType as flags.\n";
+ header << "#define TORQUE_ASSIGNED_INSTANCE_TYPES(V) \\\n";
+ std::unique_ptr<InstanceTypeTree> instance_types = AssignInstanceTypes();
+ std::stringstream values_list;
+ if (instance_types != nullptr) {
+ PrintInstanceTypes(instance_types.get(), header, values_list, " ");
+ }
+ header << "\n\n";
+
+ header << "// Instance types for all classes except for those that use "
+ "InstanceType as flags.\n";
+ header << "#define TORQUE_ASSIGNED_INSTANCE_TYPE_LIST(V) \\\n";
+ header << values_list.str();
+ header << "\n\n";
+
+ header << "// Instance types for Torque-internal classes.\n";
+ header << "#define TORQUE_INTERNAL_INSTANCE_TYPES(V) \\\n";
+ for (const TypeAlias* alias : GlobalContext::GetClasses()) {
+ const ClassType* type = ClassType::DynamicCast(alias->type());
+ if (type->IsExtern()) continue;
+ std::string type_name =
+ CapifyStringWithUnderscores(type->name()) + "_TYPE";
+ header << " V(" << type_name << ") \\\n";
+ }
+ header << "\n\n";
+
+ header << "// Struct list entries for Torque-internal classes.\n";
+ header << "#define TORQUE_STRUCT_LIST_GENERATOR(V, _) \\\n";
+ for (const TypeAlias* alias : GlobalContext::GetClasses()) {
+ const ClassType* type = ClassType::DynamicCast(alias->type());
+ if (type->IsExtern()) continue;
+ std::string type_name =
+ CapifyStringWithUnderscores(type->name()) + "_TYPE";
+ std::string variable_name = SnakeifyString(type->name());
+ header << " V(_, " << type_name << ", " << type->name() << ", "
+ << variable_name << ") \\\n";
+ }
+ header << "\n";
+ }
+ std::string output_header_path = output_directory + "/" + file_name;
+ WriteFile(output_header_path, header.str());
+}
+
+} // namespace torque
+} // namespace internal
+} // namespace v8
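
For readers skimming the new generator: SolveInstanceTypeConstraints above is essentially a greedy interval-packing pass over the class tree. A standalone sketch of that idea, simplified to a single tree level with hypothetical names (not part of this patch; the real code recurses and reports errors instead of clamping):

    #include <algorithm>
    #include <iostream>
    #include <string>
    #include <vector>

    struct Node {
      std::string name;
      int num_values;      // Values this subtree needs.
      int required_start;  // Fixed start value, or -1 if unconstrained.
      int assigned_start = -1;
    };

    int main() {
      std::vector<Node> nodes = {
          {"A", 2, -1}, {"B", 1, 10}, {"C", 3, -1}, {"D", 1, -1}};
      std::vector<Node*> constrained, unconstrained;
      for (Node& n : nodes)
        (n.required_start >= 0 ? constrained : unconstrained).push_back(&n);
      // Constrained nodes in ascending start order; unconstrained biggest
      // first, mirroring CompareUnconstrainedTypes.
      std::sort(constrained.begin(), constrained.end(), [](Node* a, Node* b) {
        return a->required_start < b->required_start;
      });
      std::sort(unconstrained.begin(), unconstrained.end(),
                [](Node* a, Node* b) { return a->num_values > b->num_values; });
      int next = 0;
      for (Node* c : constrained) {
        // Greedily pack unconstrained nodes into the gap below the fixed start.
        for (auto it = unconstrained.begin(); it != unconstrained.end();) {
          if (next + (*it)->num_values <= c->required_start) {
            (*it)->assigned_start = next;
            next += (*it)->num_values;
            it = unconstrained.erase(it);
          } else {
            ++it;
          }
        }
        c->assigned_start = c->required_start;  // Real code errors if next > this.
        next = c->assigned_start + c->num_values;
      }
      for (Node* u : unconstrained) {
        u->assigned_start = next;
        next += u->num_values;
      }
      // Prints A: [3, 4], B: [10, 10], C: [0, 2], D: [5, 5].
      for (const Node& n : nodes)
        std::cout << n.name << ": [" << n.assigned_start << ", "
                  << n.assigned_start + n.num_values - 1 << "]\n";
    }
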
diff --git a/deps/v8/src/torque/ls/json.h b/deps/v8/src/torque/ls/json.h
index 43d9f7ab11..1a033cf614 100644
--- a/deps/v8/src/torque/ls/json.h
+++ b/deps/v8/src/torque/ls/json.h
@@ -6,11 +6,11 @@
#define V8_TORQUE_LS_JSON_H_
#include <map>
+#include <memory>
#include <string>
#include <vector>
#include "src/base/logging.h"
-#include "src/base/template-utils.h"
namespace v8 {
namespace internal {
@@ -44,7 +44,7 @@ struct JsonValue {
static JsonValue From(JsonObject object) {
JsonValue result;
result.tag = JsonValue::OBJECT;
- result.object_ = base::make_unique<JsonObject>(std::move(object));
+ result.object_ = std::make_unique<JsonObject>(std::move(object));
return result;
}
@@ -65,7 +65,7 @@ struct JsonValue {
static JsonValue From(JsonArray array) {
JsonValue result;
result.tag = JsonValue::ARRAY;
- result.array_ = base::make_unique<JsonArray>(std::move(array));
+ result.array_ = std::make_unique<JsonArray>(std::move(array));
return result;
}
diff --git a/deps/v8/src/torque/ls/message-handler.cc b/deps/v8/src/torque/ls/message-handler.cc
index 6ec124b5a2..becc97c9dc 100644
--- a/deps/v8/src/torque/ls/message-handler.cc
+++ b/deps/v8/src/torque/ls/message-handler.cc
@@ -87,6 +87,8 @@ void ResetCompilationErrorDiagnostics(MessageWriter writer) {
class DiagnosticCollector {
public:
void AddTorqueMessage(const TorqueMessage& message) {
+ if (!ShouldAddMessageOfKind(message.kind)) return;
+
SourceId id =
message.position ? message.position->source : SourceId::Invalid();
auto& notification = GetOrCreateNotificationForSource(id);
@@ -120,6 +122,20 @@ class DiagnosticCollector {
return notification;
}
+ bool ShouldAddMessageOfKind(TorqueMessage::Kind kind) {
+ // An error can easily cause a lot of false positive lint messages, due to
+ // unused variables, macros, etc. Thus we suppress subsequent lint messages
+ // when there are errors.
+ switch (kind) {
+ case TorqueMessage::Kind::kError:
+ suppress_lint_messages_ = true;
+ return true;
+ case TorqueMessage::Kind::kLint:
+ if (suppress_lint_messages_) return false;
+ return true;
+ }
+ }
+
void PopulateRangeFromSourcePosition(Range range,
const SourcePosition& position) {
range.start().set_line(position.start.line);
@@ -138,6 +154,7 @@ class DiagnosticCollector {
}
std::map<SourceId, PublishDiagnosticsNotification> notifications_;
+ bool suppress_lint_messages_ = false;
};
void SendCompilationDiagnostics(const TorqueCompilerResult& result,
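
The suppression policy added above is stateful: the first kError flips a latch that drops every subsequent kLint. A minimal model of that behavior (hypothetical names, not part of this patch):

    #include <cassert>

    enum class Kind { kError, kLint };

    struct Collector {
      bool suppress_lints = false;
      bool ShouldAdd(Kind kind) {
        if (kind == Kind::kError) {
          suppress_lints = true;  // Errors are always reported...
          return true;
        }
        return !suppress_lints;   // ...but they mute all later lints.
      }
    };

    int main() {
      Collector c;
      assert(c.ShouldAdd(Kind::kLint));   // Lint before any error: kept.
      assert(c.ShouldAdd(Kind::kError));  // Error: kept, latches suppression.
      assert(!c.ShouldAdd(Kind::kLint));  // Lint after an error: dropped.
    }
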
diff --git a/deps/v8/src/torque/server-data.h b/deps/v8/src/torque/server-data.h
index 04cd0b317f..b80d1b67f4 100644
--- a/deps/v8/src/torque/server-data.h
+++ b/deps/v8/src/torque/server-data.h
@@ -6,6 +6,7 @@
#define V8_TORQUE_SERVER_DATA_H_
#include <map>
+#include <memory>
#include <vector>
#include "src/base/macros.h"
@@ -47,12 +48,12 @@ class LanguageServerData : public ContextualClass<LanguageServerData> {
static void SetGlobalContext(GlobalContext global_context) {
Get().global_context_ =
- base::make_unique<GlobalContext>(std::move(global_context));
+ std::make_unique<GlobalContext>(std::move(global_context));
Get().PrepareAllDeclarableSymbols();
}
static void SetTypeOracle(TypeOracle type_oracle) {
- Get().type_oracle_ = base::make_unique<TypeOracle>(std::move(type_oracle));
+ Get().type_oracle_ = std::make_unique<TypeOracle>(std::move(type_oracle));
}
static const Symbols& SymbolsForSourceId(SourceId id) {
diff --git a/deps/v8/src/torque/torque-compiler.cc b/deps/v8/src/torque/torque-compiler.cc
index 3968b001fb..6d2b14fc18 100644
--- a/deps/v8/src/torque/torque-compiler.cc
+++ b/deps/v8/src/torque/torque-compiler.cc
@@ -79,7 +79,8 @@ void CompileCurrentAst(TorqueCompilerOptions options) {
ReportAllUnusedMacros();
- implementation_visitor.GenerateBuiltinDefinitions(output_directory);
+ implementation_visitor.GenerateBuiltinDefinitionsAndInterfaceDescriptors(
+ output_directory);
implementation_visitor.GenerateClassFieldOffsets(output_directory);
implementation_visitor.GeneratePrintDefinitions(output_directory);
implementation_visitor.GenerateClassDefinitions(output_directory);
diff --git a/deps/v8/src/torque/torque-parser.cc b/deps/v8/src/torque/torque-parser.cc
index d9973dde3c..3639bef97c 100644
--- a/deps/v8/src/torque/torque-parser.cc
+++ b/deps/v8/src/torque/torque-parser.cc
@@ -214,6 +214,10 @@ template <>
V8_EXPORT_PRIVATE const ParseResultTypeId
ParseResultHolder<std::vector<Identifier*>>::id =
ParseResultTypeId::kStdVectorOfIdentifierPtr;
+template <>
+V8_EXPORT_PRIVATE const ParseResultTypeId
+ ParseResultHolder<base::Optional<ClassBody*>>::id =
+ ParseResultTypeId::kOptionalClassBody;
namespace {
@@ -675,7 +679,9 @@ class AnnotationSet {
Lint("Annotation ", a.name->value, error_message)
.Position(a.name->pos);
}
- map_[a.name->value].push_back(*a.param);
+ if (!map_.insert({a.name->value, {*a.param, a.name->pos}}).second) {
+ Lint("Duplicate annotation ", a.name->value).Position(a.name->pos);
+ }
} else {
if (allowed_without_param.find(a.name->value) ==
allowed_without_param.end()) {
@@ -693,41 +699,104 @@ class AnnotationSet {
}
}
- bool Contains(const std::string& s) { return set_.find(s) != set_.end(); }
- const std::vector<std::string>& GetParams(const std::string& s) {
- return map_[s];
+ bool Contains(const std::string& s) const {
+ return set_.find(s) != set_.end();
+ }
+ base::Optional<std::pair<std::string, SourcePosition>> GetParam(
+ const std::string& s) const {
+ auto it = map_.find(s);
+ return it == map_.end()
+ ? base::Optional<std::pair<std::string, SourcePosition>>()
+ : it->second;
}
private:
std::set<std::string> set_;
- std::map<std::string, std::vector<std::string>> map_;
+ std::map<std::string, std::pair<std::string, SourcePosition>> map_;
};
+int GetAnnotationValue(const AnnotationSet& annotations, const char* name,
+ int default_value) {
+ auto value_and_pos = annotations.GetParam(name);
+ if (!value_and_pos.has_value()) return default_value;
+ const std::string& value = value_and_pos->first;
+ SourcePosition pos = value_and_pos->second;
+ if (value.empty()) {
+ Error("Annotation ", name, " requires an integer parameter").Position(pos);
+ }
+ size_t num_chars_converted = 0;
+ int result = default_value;
+ try {
+ result = std::stoi(value, &num_chars_converted, 0);
+ } catch (const std::invalid_argument&) {
+ Error("Expected an integer for annotation ", name).Position(pos);
+ return result;
+ } catch (const std::out_of_range&) {
+ Error("Integer out of 32-bit range in annotation ", name).Position(pos);
+ return result;
+ }
+ if (num_chars_converted != value.size()) {
+ Error("Parameter for annotation ", name,
+ " must be an integer with no trailing characters")
+ .Position(pos);
+ }
+ return result;
+}
+
+InstanceTypeConstraints MakeInstanceTypeConstraints(
+ const AnnotationSet& annotations) {
+ InstanceTypeConstraints result;
+ result.value =
+ GetAnnotationValue(annotations, ANNOTATION_INSTANCE_TYPE_VALUE, -1);
+ result.num_flags_bits = GetAnnotationValue(
+ annotations, ANNOTATION_RESERVE_BITS_IN_INSTANCE_TYPE, -1);
+ return result;
+}
+
+base::Optional<ParseResult> MakeClassBody(ParseResultIterator* child_results) {
+ auto methods = child_results->NextAs<std::vector<Declaration*>>();
+ auto fields = child_results->NextAs<std::vector<ClassFieldExpression>>();
+ base::Optional<ClassBody*> result =
+ MakeNode<ClassBody>(std::move(methods), std::move(fields));
+ return ParseResult(result);
+}
+
base::Optional<ParseResult> MakeClassDeclaration(
ParseResultIterator* child_results) {
AnnotationSet annotations(
child_results,
- {"@generatePrint", "@noVerifier", "@abstract",
- "@dirtyInstantiatedAbstractClass", "@hasSameInstanceTypeAsParent",
- "@generateCppClass"},
- {});
+ {ANNOTATION_GENERATE_PRINT, ANNOTATION_NO_VERIFIER, ANNOTATION_ABSTRACT,
+ ANNOTATION_INSTANTIATED_ABSTRACT_CLASS,
+ ANNOTATION_HAS_SAME_INSTANCE_TYPE_AS_PARENT,
+ ANNOTATION_GENERATE_CPP_CLASS,
+ ANNOTATION_HIGHEST_INSTANCE_TYPE_WITHIN_PARENT,
+ ANNOTATION_LOWEST_INSTANCE_TYPE_WITHIN_PARENT},
+ {ANNOTATION_RESERVE_BITS_IN_INSTANCE_TYPE,
+ ANNOTATION_INSTANCE_TYPE_VALUE});
ClassFlags flags = ClassFlag::kNone;
- bool generate_print = annotations.Contains("@generatePrint");
+ bool generate_print = annotations.Contains(ANNOTATION_GENERATE_PRINT);
if (generate_print) flags |= ClassFlag::kGeneratePrint;
- bool generate_verify = !annotations.Contains("@noVerifier");
+ bool generate_verify = !annotations.Contains(ANNOTATION_NO_VERIFIER);
if (generate_verify) flags |= ClassFlag::kGenerateVerify;
- if (annotations.Contains("@abstract")) {
+ if (annotations.Contains(ANNOTATION_ABSTRACT)) {
flags |= ClassFlag::kAbstract;
}
- if (annotations.Contains("@dirtyInstantiatedAbstractClass")) {
+ if (annotations.Contains(ANNOTATION_INSTANTIATED_ABSTRACT_CLASS)) {
flags |= ClassFlag::kInstantiatedAbstractClass;
}
- if (annotations.Contains("@hasSameInstanceTypeAsParent")) {
+ if (annotations.Contains(ANNOTATION_HAS_SAME_INSTANCE_TYPE_AS_PARENT)) {
flags |= ClassFlag::kHasSameInstanceTypeAsParent;
}
- if (annotations.Contains("@generateCppClass")) {
+ if (annotations.Contains(ANNOTATION_GENERATE_CPP_CLASS)) {
flags |= ClassFlag::kGenerateCppClassDefinitions;
}
+ if (annotations.Contains(ANNOTATION_HIGHEST_INSTANCE_TYPE_WITHIN_PARENT)) {
+ flags |= ClassFlag::kHighestInstanceTypeWithinParent;
+ }
+ if (annotations.Contains(ANNOTATION_LOWEST_INSTANCE_TYPE_WITHIN_PARENT)) {
+ flags |= ClassFlag::kLowestInstanceTypeWithinParent;
+ }
+
auto is_extern = child_results->NextAs<bool>();
if (is_extern) flags |= ClassFlag::kExtern;
auto transient = child_results->NextAs<bool>();
@@ -741,8 +810,15 @@ base::Optional<ParseResult> MakeClassDeclaration(
ReportError("Expected type name in extends clause.");
}
auto generates = child_results->NextAs<base::Optional<std::string>>();
- auto methods = child_results->NextAs<std::vector<Declaration*>>();
- auto fields_raw = child_results->NextAs<std::vector<ClassFieldExpression>>();
+ auto body = child_results->NextAs<base::Optional<ClassBody*>>();
+ std::vector<Declaration*> methods;
+ std::vector<ClassFieldExpression> fields_raw;
+ if (body.has_value()) {
+ methods = (*body)->methods;
+ fields_raw = (*body)->fields;
+ } else {
+ flags |= ClassFlag::kUndefinedLayout;
+ }
// Filter to only include fields that should be present based on decoration.
std::vector<ClassFieldExpression> fields;
@@ -751,8 +827,9 @@ base::Optional<ParseResult> MakeClassDeclaration(
[](const ClassFieldExpression& exp) {
for (const ConditionalAnnotation& condition : exp.conditions) {
if (condition.type == ConditionalAnnotationType::kPositive
- ? !BuildFlags::GetFlag(condition.condition, "@if")
- : BuildFlags::GetFlag(condition.condition, "@ifnot")) {
+ ? !BuildFlags::GetFlag(condition.condition, ANNOTATION_IF)
+ : BuildFlags::GetFlag(condition.condition,
+ ANNOTATION_IFNOT)) {
return false;
}
}
@@ -761,7 +838,7 @@ base::Optional<ParseResult> MakeClassDeclaration(
Declaration* result = MakeNode<ClassDeclaration>(
name, flags, std::move(extends), std::move(generates), std::move(methods),
- fields);
+ fields, MakeInstanceTypeConstraints(annotations));
return ParseResult{result};
}
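
A note on GetAnnotationValue above: the std::stoi call passes base 0, so annotation parameters may be written in decimal, hex, or octal, and the num_chars_converted check rejects trailing garbage. A quick demonstration of those std::stoi semantics (illustrative only):

    #include <iostream>
    #include <string>

    int main() {
      for (std::string value : {"128", "0x80", "0200", "12abc"}) {
        size_t num_chars_converted = 0;
        int result = std::stoi(value, &num_chars_converted, 0);  // Base 0.
        bool clean = num_chars_converted == value.size();
        std::cout << value << " -> " << result
                  << (clean ? "" : "  (would be rejected: trailing characters)")
                  << "\n";
      }
      // Prints: 128 -> 128, 0x80 -> 128, 0200 -> 128, 12abc -> 12 (rejected).
    }
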
@@ -1358,14 +1435,21 @@ base::Optional<ParseResult> MakeAnnotation(ParseResultIterator* child_results) {
}
base::Optional<ParseResult> MakeClassField(ParseResultIterator* child_results) {
- AnnotationSet annotations(child_results, {"@noVerifier"}, {"@if", "@ifnot"});
- bool generate_verify = !annotations.Contains("@noVerifier");
+ AnnotationSet annotations(child_results, {ANNOTATION_NO_VERIFIER},
+ {ANNOTATION_IF, ANNOTATION_IFNOT});
+ bool generate_verify = !annotations.Contains(ANNOTATION_NO_VERIFIER);
std::vector<ConditionalAnnotation> conditions;
- for (const std::string& condition : annotations.GetParams("@if")) {
- conditions.push_back({condition, ConditionalAnnotationType::kPositive});
+ base::Optional<std::pair<std::string, SourcePosition>> if_condition =
+ annotations.GetParam(ANNOTATION_IF);
+ base::Optional<std::pair<std::string, SourcePosition>> ifnot_condition =
+ annotations.GetParam(ANNOTATION_IFNOT);
+ if (if_condition.has_value()) {
+ conditions.push_back(
+ {if_condition->first, ConditionalAnnotationType::kPositive});
}
- for (const std::string& condition : annotations.GetParams("@ifnot")) {
- conditions.push_back({condition, ConditionalAnnotationType::kNegative});
+ if (ifnot_condition.has_value()) {
+ conditions.push_back(
+ {ifnot_condition->first, ConditionalAnnotationType::kNegative});
}
auto weak = child_results->NextAs<bool>();
auto const_qualified = child_results->NextAs<bool>();
@@ -1892,6 +1976,13 @@ struct TorqueGrammar : Grammar {
&block},
MakeMethodDeclaration)};
+ // Result: base::Optional<ClassBody*>
+ Symbol optionalClassBody = {
+ Rule({Token("{"), List<Declaration*>(&method),
+ List<ClassFieldExpression>(&classField), Token("}")},
+ MakeClassBody),
+ Rule({Token(";")}, YieldDefaultValue<base::Optional<ClassBody*>>)};
+
// Result: std::vector<Declaration*>
Symbol declaration = {
Rule({Token("const"), &name, Token(":"), &type, Token("="), expression,
@@ -1905,8 +1996,7 @@ struct TorqueGrammar : Grammar {
Optional<TypeExpression*>(Sequence({Token("extends"), &type})),
Optional<std::string>(
Sequence({Token("generates"), &externalString})),
- Token("{"), List<Declaration*>(&method),
- List<ClassFieldExpression>(&classField), Token("}")},
+ &optionalClassBody},
AsSingletonVector<Declaration*, MakeClassDeclaration>()),
Rule({Token("struct"), &name,
TryOrDefault<GenericParameters>(&genericParameters), Token("{"),
diff --git a/deps/v8/src/torque/type-oracle.h b/deps/v8/src/torque/type-oracle.h
index 643c78c030..03aad34d2a 100644
--- a/deps/v8/src/torque/type-oracle.h
+++ b/deps/v8/src/torque/type-oracle.h
@@ -5,6 +5,8 @@
#ifndef V8_TORQUE_TYPE_ORACLE_H_
#define V8_TORQUE_TYPE_ORACLE_H_
+#include <memory>
+
#include "src/torque/contextual.h"
#include "src/torque/declarable.h"
#include "src/torque/declarations.h"
diff --git a/deps/v8/src/torque/type-visitor.cc b/deps/v8/src/torque/type-visitor.cc
index 9b5c96ee40..b45452058d 100644
--- a/deps/v8/src/torque/type-visitor.cc
+++ b/deps/v8/src/torque/type-visitor.cc
@@ -165,6 +165,12 @@ const ClassType* TypeVisitor::ComputeType(ClassDeclaration* decl) {
"class \"", decl->name->value,
"\" must extend either Tagged or an already declared class");
}
+ if (super_class->HasUndefinedLayout() &&
+ !(decl->flags & ClassFlag::kUndefinedLayout)) {
+ Error("Class \"", decl->name->value,
+ "\" defines its layout but extends a class which does not")
+ .Position(decl->pos);
+ }
}
std::string generates = decl->name->value;
@@ -282,8 +288,9 @@ void TypeVisitor::VisitClassFieldsAndMethods(
"only one indexable field is currently supported per class");
}
seen_indexed_field = true;
- const Field* index_field =
- &(class_type->LookupFieldInternal(*field_expression.index));
+ const NameAndType& index_field =
+ class_type->LookupFieldInternal(*field_expression.index)
+ .name_and_type;
class_type->RegisterField(
{field_expression.name_and_type.name->pos,
class_type,
diff --git a/deps/v8/src/torque/types.cc b/deps/v8/src/torque/types.cc
index fe792401f6..022649e065 100644
--- a/deps/v8/src/torque/types.cc
+++ b/deps/v8/src/torque/types.cc
@@ -50,7 +50,9 @@ bool Type::IsSubtypeOf(const Type* supertype) const {
base::Optional<const ClassType*> Type::ClassSupertype() const {
for (const Type* t = this; t != nullptr; t = t->parent()) {
- if (auto* class_type = ClassType::DynamicCast(t)) return class_type;
+ if (auto* class_type = ClassType::DynamicCast(t)) {
+ return class_type;
+ }
}
return base::nullopt;
}
@@ -86,7 +88,7 @@ bool Type::IsAbstractName(const std::string& name) const {
std::string Type::GetGeneratedTypeName() const {
std::string result = GetGeneratedTypeNameImpl();
- if (result.empty() || result == "compiler::TNode<>") {
+ if (result.empty() || result == "TNode<>") {
ReportError("Generated type is required for type '", ToString(),
"'. Use 'generates' clause in definition.");
}
@@ -382,7 +384,7 @@ std::string ClassType::GetGeneratedTNodeTypeNameImpl() const {
std::string ClassType::GetGeneratedTypeNameImpl() const {
return IsConstexpr() ? GetGeneratedTNodeTypeName()
- : "compiler::TNode<" + GetGeneratedTNodeTypeName() + ">";
+ : "TNode<" + GetGeneratedTNodeTypeName() + ">";
}
std::string ClassType::ToExplicitString() const {
@@ -404,11 +406,11 @@ void ClassType::Finalize() const {
if (const ClassType* super_class = ClassType::DynamicCast(parent())) {
if (super_class->HasIndexedField()) flags_ |= ClassFlag::kHasIndexedField;
if (!super_class->IsAbstract() && !HasSameInstanceTypeAsParent()) {
- Error(
- "Super class must either be abstract (annotate super class with "
- "@abstract) "
- "or this class must have the same instance type as the super class "
- "(annotate this class with @hasSameInstanceTypeAsParent).")
+ Error("Super class must either be abstract (annotate super class with ",
+ ANNOTATION_ABSTRACT,
+ ") or this class must have the same instance type as the super "
+ "class (annotate this class with ",
+ ANNOTATION_HAS_SAME_INSTANCE_TYPE_AS_PARENT, ").")
.Position(this->decl_->name->pos);
}
}
diff --git a/deps/v8/src/torque/types.h b/deps/v8/src/torque/types.h
index d2198d50c3..0102cf31d5 100644
--- a/deps/v8/src/torque/types.h
+++ b/deps/v8/src/torque/types.h
@@ -156,7 +156,7 @@ struct Field {
SourcePosition pos;
const AggregateType* aggregate;
- base::Optional<const Field*> index;
+ base::Optional<NameAndType> index;
NameAndType name_and_type;
size_t offset;
bool is_weak;
@@ -204,8 +204,7 @@ class AbstractType final : public Type {
return "AT" + str;
}
std::string GetGeneratedTypeNameImpl() const override {
- return IsConstexpr() ? generated_type_
- : "compiler::TNode<" + generated_type_ + ">";
+ return IsConstexpr() ? generated_type_ : "TNode<" + generated_type_ + ">";
}
std::string GetGeneratedTNodeTypeNameImpl() const override;
bool IsConstexpr() const override {
@@ -316,7 +315,7 @@ class V8_EXPORT_PRIVATE UnionType final : public Type {
std::string ToExplicitString() const override;
std::string MangledName() const override;
std::string GetGeneratedTypeNameImpl() const override {
- return "compiler::TNode<" + GetGeneratedTNodeTypeName() + ">";
+ return "TNode<" + GetGeneratedTNodeTypeName() + ">";
}
std::string GetGeneratedTNodeTypeNameImpl() const override;
@@ -514,10 +513,12 @@ class ClassType final : public AggregateType {
std::string GetGeneratedTNodeTypeNameImpl() const override;
bool IsExtern() const { return flags_ & ClassFlag::kExtern; }
bool ShouldGeneratePrint() const {
- return flags_ & ClassFlag::kGeneratePrint || !IsExtern();
+ return (flags_ & ClassFlag::kGeneratePrint || !IsExtern()) &&
+ !HasUndefinedLayout();
}
bool ShouldGenerateVerify() const {
- return flags_ & ClassFlag::kGenerateVerify || !IsExtern();
+ return (flags_ & ClassFlag::kGenerateVerify || !IsExtern()) &&
+ !HasUndefinedLayout();
}
bool IsTransient() const override { return flags_ & ClassFlag::kTransient; }
bool IsAbstract() const { return flags_ & ClassFlag::kAbstract; }
@@ -549,6 +550,20 @@ class ClassType final : public AggregateType {
std::vector<Field> ComputeAllFields() const;
+ const InstanceTypeConstraints& GetInstanceTypeConstraints() const {
+ return decl_->instance_type_constraints;
+ }
+ bool IsHighestInstanceTypeWithinParent() const {
+ return flags_ & ClassFlag::kHighestInstanceTypeWithinParent;
+ }
+ bool IsLowestInstanceTypeWithinParent() const {
+ return flags_ & ClassFlag::kLowestInstanceTypeWithinParent;
+ }
+ bool HasUndefinedLayout() const {
+ return flags_ & ClassFlag::kUndefinedLayout;
+ }
+ SourcePosition GetPosition() const { return decl_->pos; }
+
private:
friend class TypeOracle;
friend class TypeVisitor;
diff --git a/deps/v8/src/torque/utils.cc b/deps/v8/src/torque/utils.cc
index 38862b31b0..4e757ac9e8 100644
--- a/deps/v8/src/torque/utils.cc
+++ b/deps/v8/src/torque/utils.cc
@@ -212,19 +212,25 @@ bool IsValidTypeName(const std::string& s) {
}
std::string CapifyStringWithUnderscores(const std::string& camellified_string) {
+ // Special case: JSAbc yields JS_ABC, not JSABC, for any Abc.
+ size_t js_position = camellified_string.find("JS");
+
std::string result;
- bool previousWasLower = false;
- for (auto current : camellified_string) {
- if (previousWasLower && isupper(current)) {
+ bool previousWasLowerOrDigit = false;
+ for (size_t index = 0; index < camellified_string.size(); ++index) {
+ char current = camellified_string[index];
+ if ((previousWasLowerOrDigit && isupper(current)) ||
+ (js_position != std::string::npos &&
+ index == js_position + strlen("JS"))) {
result += "_";
}
if (current == '.' || current == '-') {
result += "_";
- previousWasLower = false;
+ previousWasLowerOrDigit = false;
continue;
}
result += toupper(current);
- previousWasLower = (islower(current));
+ previousWasLowerOrDigit = islower(current) || isdigit(current);
}
return result;
}
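
The two behavioral changes in CapifyStringWithUnderscores above are the underscore after digit runs (Float64Array now yields FLOAT64_ARRAY rather than FLOAT64ARRAY) and the split after a leading JS. A re-implementation mirroring the new logic, with sample outputs, for illustration only (not the V8 source itself):

    #include <cctype>
    #include <iostream>
    #include <string>

    std::string Capify(const std::string& in) {
      size_t js_position = in.find("JS");
      std::string result;
      bool previous_was_lower_or_digit = false;
      for (size_t index = 0; index < in.size(); ++index) {
        char current = in[index];
        if ((previous_was_lower_or_digit && isupper(current)) ||
            (js_position != std::string::npos &&
             index == js_position + 2)) {  // strlen("JS") == 2.
          result += "_";
        }
        if (current == '.' || current == '-') {
          result += "_";
          previous_was_lower_or_digit = false;
          continue;
        }
        result += toupper(current);
        previous_was_lower_or_digit = islower(current) || isdigit(current);
      }
      return result;
    }

    int main() {
      std::cout << Capify("JSArrayBuffer") << "\n";  // JS_ARRAY_BUFFER
      std::cout << Capify("Float64Array") << "\n";   // FLOAT64_ARRAY
      std::cout << Capify("Oddball") << "\n";        // ODDBALL
    }
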
diff --git a/deps/v8/src/utils/allocation.cc b/deps/v8/src/utils/allocation.cc
index f44b3c42ea..c89f83ba85 100644
--- a/deps/v8/src/utils/allocation.cc
+++ b/deps/v8/src/utils/allocation.cc
@@ -10,6 +10,7 @@
#include "src/base/logging.h"
#include "src/base/page-allocator.h"
#include "src/base/platform/platform.h"
+#include "src/flags/flags.h"
#include "src/init/v8.h"
#include "src/sanitizer/lsan-page-allocator.h"
#include "src/utils/memcopy.h"
@@ -166,6 +167,9 @@ void* AllocatePages(v8::PageAllocator* page_allocator, void* hint, size_t size,
DCHECK_NOT_NULL(page_allocator);
DCHECK_EQ(hint, AlignedAddress(hint, alignment));
DCHECK(IsAligned(size, page_allocator->AllocatePageSize()));
+ if (FLAG_randomize_all_allocations) {
+ hint = page_allocator->GetRandomMmapAddr();
+ }
void* result = nullptr;
for (int i = 0; i < kAllocationTries; ++i) {
result = page_allocator->AllocatePages(hint, size, alignment, access);
diff --git a/deps/v8/src/utils/memcopy.cc b/deps/v8/src/utils/memcopy.cc
index 1cac2189d0..c67d1d359a 100644
--- a/deps/v8/src/utils/memcopy.cc
+++ b/deps/v8/src/utils/memcopy.cc
@@ -25,18 +25,8 @@ V8_EXPORT_PRIVATE void MemMove(void* dest, const void* src, size_t size) {
(*memmove_function)(dest, src, size);
}
#elif V8_OS_POSIX && V8_HOST_ARCH_ARM
-void MemCopyUint16Uint8Wrapper(uint16_t* dest, const uint8_t* src,
- size_t chars) {
- uint16_t* limit = dest + chars;
- while (dest < limit) {
- *dest++ = static_cast<uint16_t>(*src++);
- }
-}
-
V8_EXPORT_PRIVATE MemCopyUint8Function memcopy_uint8_function =
&MemCopyUint8Wrapper;
-MemCopyUint16Uint8Function memcopy_uint16_uint8_function =
- &MemCopyUint16Uint8Wrapper;
#elif V8_OS_POSIX && V8_HOST_ARCH_MIPS
V8_EXPORT_PRIVATE MemCopyUint8Function memcopy_uint8_function =
&MemCopyUint8Wrapper;
@@ -54,9 +44,6 @@ void init_memcopy_functions() {
EmbeddedData d = EmbeddedData::FromBlob();
memcopy_uint8_function = reinterpret_cast<MemCopyUint8Function>(
d.InstructionStartOfBuiltin(Builtins::kMemCopyUint8Uint8));
- memcopy_uint16_uint8_function =
- reinterpret_cast<MemCopyUint16Uint8Function>(
- d.InstructionStartOfBuiltin(Builtins::kMemCopyUint16Uint8));
}
#elif V8_OS_POSIX && V8_HOST_ARCH_MIPS
if (Isolate::CurrentEmbeddedBlobIsBinaryEmbedded()) {
diff --git a/deps/v8/src/utils/memcopy.h b/deps/v8/src/utils/memcopy.h
index c1a0afbcb4..7e1b8539df 100644
--- a/deps/v8/src/utils/memcopy.h
+++ b/deps/v8/src/utils/memcopy.h
@@ -8,6 +8,7 @@
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
+#include <algorithm>
#include "src/base/logging.h"
#include "src/base/macros.h"
@@ -55,17 +56,8 @@ V8_EXPORT_PRIVATE V8_INLINE void MemMove(void* dest, const void* src,
memmove(dest, src, size);
}
-using MemCopyUint16Uint8Function = void (*)(uint16_t* dest, const uint8_t* src,
- size_t size);
-extern MemCopyUint16Uint8Function memcopy_uint16_uint8_function;
-void MemCopyUint16Uint8Wrapper(uint16_t* dest, const uint8_t* src,
- size_t chars);
// For values < 12, the assembler function is slower than the inlined C code.
const int kMinComplexConvertMemCopy = 12;
-V8_INLINE void MemCopyUint16Uint8(uint16_t* dest, const uint8_t* src,
- size_t size) {
- (*memcopy_uint16_uint8_function)(dest, src, size);
-}
#elif defined(V8_HOST_ARCH_MIPS)
using MemCopyUint8Function = void (*)(uint8_t* dest, const uint8_t* src,
size_t size);
@@ -109,6 +101,7 @@ inline void CopyImpl(T* dst_ptr, const T* src_ptr, size_t count) {
DCHECK(((src <= dst) && ((src + count * kTWordSize) <= dst)) ||
((dst <= src) && ((dst + count * kTWordSize) <= src)));
#endif
+ if (count == 0) return;
// Use block copying MemCopy if the segment we're copying is
// enough to justify the extra call/setup overhead.
@@ -204,308 +197,32 @@ inline void MemsetPointer(T** dest, U* value, size_t counter) {
reinterpret_cast<Address>(value), counter);
}
-template <typename sourcechar, typename sinkchar>
-V8_INLINE static void CopyCharsUnsigned(sinkchar* dest, const sourcechar* src,
- size_t chars);
-#if defined(V8_HOST_ARCH_ARM)
-V8_INLINE void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src,
- size_t chars);
-V8_INLINE void CopyCharsUnsigned(uint16_t* dest, const uint8_t* src,
- size_t chars);
-V8_INLINE void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src,
- size_t chars);
-#elif defined(V8_HOST_ARCH_MIPS)
-V8_INLINE void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src,
- size_t chars);
-V8_INLINE void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src,
- size_t chars);
-#elif defined(V8_HOST_ARCH_PPC) || defined(V8_HOST_ARCH_S390)
-V8_INLINE void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src,
- size_t chars);
-V8_INLINE void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src,
- size_t chars);
-#endif
-
-// Copy from 8bit/16bit chars to 8bit/16bit chars.
-template <typename sourcechar, typename sinkchar>
-V8_INLINE void CopyChars(sinkchar* dest, const sourcechar* src, size_t chars);
-
-template <typename sourcechar, typename sinkchar>
-void CopyChars(sinkchar* dest, const sourcechar* src, size_t chars) {
- DCHECK_LE(sizeof(sourcechar), 2);
- DCHECK_LE(sizeof(sinkchar), 2);
- if (sizeof(sinkchar) == 1) {
- if (sizeof(sourcechar) == 1) {
- CopyCharsUnsigned(reinterpret_cast<uint8_t*>(dest),
- reinterpret_cast<const uint8_t*>(src), chars);
- } else {
- CopyCharsUnsigned(reinterpret_cast<uint8_t*>(dest),
- reinterpret_cast<const uint16_t*>(src), chars);
- }
- } else {
- if (sizeof(sourcechar) == 1) {
- CopyCharsUnsigned(reinterpret_cast<uint16_t*>(dest),
- reinterpret_cast<const uint8_t*>(src), chars);
- } else {
- CopyCharsUnsigned(reinterpret_cast<uint16_t*>(dest),
- reinterpret_cast<const uint16_t*>(src), chars);
- }
- }
-}
-
-template <typename sourcechar, typename sinkchar>
-void CopyCharsUnsigned(sinkchar* dest, const sourcechar* src, size_t chars) {
- sinkchar* limit = dest + chars;
- if ((sizeof(*dest) == sizeof(*src)) &&
- (chars >= kMinComplexMemCopy / sizeof(*dest))) {
- MemCopy(dest, src, chars * sizeof(*dest));
- } else {
- while (dest < limit) *dest++ = static_cast<sinkchar>(*src++);
- }
-}
+// Copy from 8bit/16bit chars to 8bit/16bit chars. Values are zero-extended if
+// needed. Ranges are not allowed to overlap.
+// The separate declaration is needed for the V8_NONNULL, which is not allowed
+// on a definition.
+template <typename SrcType, typename DstType>
+void CopyChars(DstType* dst, const SrcType* src, size_t count) V8_NONNULL(1, 2);
-#if defined(V8_HOST_ARCH_ARM)
-void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src, size_t chars) {
- switch (static_cast<unsigned>(chars)) {
- case 0:
- break;
- case 1:
- *dest = *src;
- break;
- case 2:
- memcpy(dest, src, 2);
- break;
- case 3:
- memcpy(dest, src, 3);
- break;
- case 4:
- memcpy(dest, src, 4);
- break;
- case 5:
- memcpy(dest, src, 5);
- break;
- case 6:
- memcpy(dest, src, 6);
- break;
- case 7:
- memcpy(dest, src, 7);
- break;
- case 8:
- memcpy(dest, src, 8);
- break;
- case 9:
- memcpy(dest, src, 9);
- break;
- case 10:
- memcpy(dest, src, 10);
- break;
- case 11:
- memcpy(dest, src, 11);
- break;
- case 12:
- memcpy(dest, src, 12);
- break;
- case 13:
- memcpy(dest, src, 13);
- break;
- case 14:
- memcpy(dest, src, 14);
- break;
- case 15:
- memcpy(dest, src, 15);
- break;
- default:
- MemCopy(dest, src, chars);
- break;
- }
-}
-
-void CopyCharsUnsigned(uint16_t* dest, const uint8_t* src, size_t chars) {
- if (chars >= static_cast<size_t>(kMinComplexConvertMemCopy)) {
- MemCopyUint16Uint8(dest, src, chars);
- } else {
- MemCopyUint16Uint8Wrapper(dest, src, chars);
- }
-}
+template <typename SrcType, typename DstType>
+void CopyChars(DstType* dst, const SrcType* src, size_t count) {
+ STATIC_ASSERT(std::is_integral<SrcType>::value);
+ STATIC_ASSERT(std::is_integral<DstType>::value);
-void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src, size_t chars) {
- switch (static_cast<unsigned>(chars)) {
- case 0:
- break;
- case 1:
- *dest = *src;
- break;
- case 2:
- memcpy(dest, src, 4);
- break;
- case 3:
- memcpy(dest, src, 6);
- break;
- case 4:
- memcpy(dest, src, 8);
- break;
- case 5:
- memcpy(dest, src, 10);
- break;
- case 6:
- memcpy(dest, src, 12);
- break;
- case 7:
- memcpy(dest, src, 14);
- break;
- default:
- MemCopy(dest, src, chars * sizeof(*dest));
- break;
- }
-}
-
-#elif defined(V8_HOST_ARCH_MIPS)
-void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src, size_t chars) {
- if (chars < kMinComplexMemCopy) {
- memcpy(dest, src, chars);
- } else {
- MemCopy(dest, src, chars);
- }
-}
-
-void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src, size_t chars) {
- if (chars < kMinComplexMemCopy) {
- memcpy(dest, src, chars * sizeof(*dest));
- } else {
- MemCopy(dest, src, chars * sizeof(*dest));
- }
-}
-#elif defined(V8_HOST_ARCH_PPC) || defined(V8_HOST_ARCH_S390)
-#define CASE(n) \
- case n: \
- memcpy(dest, src, n); \
- break
-void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src, size_t chars) {
- switch (static_cast<unsigned>(chars)) {
- case 0:
- break;
- case 1:
- *dest = *src;
- break;
- CASE(2);
- CASE(3);
- CASE(4);
- CASE(5);
- CASE(6);
- CASE(7);
- CASE(8);
- CASE(9);
- CASE(10);
- CASE(11);
- CASE(12);
- CASE(13);
- CASE(14);
- CASE(15);
- CASE(16);
- CASE(17);
- CASE(18);
- CASE(19);
- CASE(20);
- CASE(21);
- CASE(22);
- CASE(23);
- CASE(24);
- CASE(25);
- CASE(26);
- CASE(27);
- CASE(28);
- CASE(29);
- CASE(30);
- CASE(31);
- CASE(32);
- CASE(33);
- CASE(34);
- CASE(35);
- CASE(36);
- CASE(37);
- CASE(38);
- CASE(39);
- CASE(40);
- CASE(41);
- CASE(42);
- CASE(43);
- CASE(44);
- CASE(45);
- CASE(46);
- CASE(47);
- CASE(48);
- CASE(49);
- CASE(50);
- CASE(51);
- CASE(52);
- CASE(53);
- CASE(54);
- CASE(55);
- CASE(56);
- CASE(57);
- CASE(58);
- CASE(59);
- CASE(60);
- CASE(61);
- CASE(62);
- CASE(63);
- CASE(64);
- default:
- memcpy(dest, src, chars);
- break;
- }
-}
-#undef CASE
+#ifdef DEBUG
+ // Check for no overlap, otherwise {std::copy_n} cannot be used.
+ Address src_start = reinterpret_cast<Address>(src);
+ Address src_end = src_start + count * sizeof(SrcType);
+ Address dst_start = reinterpret_cast<Address>(dst);
+ Address dst_end = dst_start + count * sizeof(DstType);
+ DCHECK(src_end <= dst_start || dst_end <= src_start);
+#endif
-#define CASE(n) \
- case n: \
- memcpy(dest, src, n * 2); \
- break
-void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src, size_t chars) {
- switch (static_cast<unsigned>(chars)) {
- case 0:
- break;
- case 1:
- *dest = *src;
- break;
- CASE(2);
- CASE(3);
- CASE(4);
- CASE(5);
- CASE(6);
- CASE(7);
- CASE(8);
- CASE(9);
- CASE(10);
- CASE(11);
- CASE(12);
- CASE(13);
- CASE(14);
- CASE(15);
- CASE(16);
- CASE(17);
- CASE(18);
- CASE(19);
- CASE(20);
- CASE(21);
- CASE(22);
- CASE(23);
- CASE(24);
- CASE(25);
- CASE(26);
- CASE(27);
- CASE(28);
- CASE(29);
- CASE(30);
- CASE(31);
- CASE(32);
- default:
- memcpy(dest, src, chars * 2);
- break;
- }
+ using SrcTypeUnsigned = typename std::make_unsigned<SrcType>::type;
+ using DstTypeUnsigned = typename std::make_unsigned<DstType>::type;
+ std::copy_n(reinterpret_cast<const SrcTypeUnsigned*>(src), count,
+ reinterpret_cast<DstTypeUnsigned*>(dst));
}
-#undef CASE
-#endif
} // namespace internal
} // namespace v8
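
The replacement CopyChars above funnels every 8/16-bit combination through one std::copy_n over unsigned types, which zero-extends on widening and lets the standard library pick the fast path. A standalone usage sketch with a simplified stand-in (the overlap DCHECK omitted; illustrative only):

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>
    #include <iostream>
    #include <type_traits>

    template <typename SrcType, typename DstType>
    void CopyChars(DstType* dst, const SrcType* src, size_t count) {
      using SrcU = typename std::make_unsigned<SrcType>::type;
      using DstU = typename std::make_unsigned<DstType>::type;
      std::copy_n(reinterpret_cast<const SrcU*>(src), count,
                  reinterpret_cast<DstU*>(dst));
    }

    int main() {
      const uint8_t latin1[] = {'V', '8', 0xE9};  // 0xE9 is 'é' in Latin-1.
      uint16_t uc16[3];
      CopyChars(uc16, latin1, 3);  // Widening copy, values zero-extended.
      for (uint16_t c : uc16) std::cout << c << " ";  // Prints: 86 56 233
      std::cout << "\n";
    }
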
diff --git a/deps/v8/src/utils/utils-inl.h b/deps/v8/src/utils/utils-inl.h
index e88055023e..0c8af7cb88 100644
--- a/deps/v8/src/utils/utils-inl.h
+++ b/deps/v8/src/utils/utils-inl.h
@@ -36,13 +36,30 @@ template <typename Char>
bool TryAddIndexChar(uint32_t* index, Char c) {
if (!IsDecimalDigit(c)) return false;
int d = c - '0';
+ // The maximum index is 4294967294; for the computation below to not
+ // exceed that, the previous index value must be <= 429496729 if d <= 4,
+ // or <= 429496728 if d >= 5. The (d+3)>>3 computation is a branch-free
+ // way to express that.
if (*index > 429496729U - ((d + 3) >> 3)) return false;
*index = (*index) * 10 + d;
return true;
}
-template <typename Stream>
-bool StringToArrayIndex(Stream* stream, uint32_t* index) {
+template <typename Char>
+bool TryAddIndexChar(uint64_t* index, Char c) {
+ if (!IsDecimalDigit(c)) return false;
+ int d = c - '0';
+ // The maximum uint64_t is 18446744073709551615; for the computation below to
+ // not exceed that, the previous index value must be <= 1844674407370955161
+ // if d <= 5, or <= 1844674407370955160 if d >= 6. The (d+2)>>3 computation
+ // is a branch-free way to express that.
+ if (*index > 1844674407370955161ull - ((d + 2) >> 3)) return false;
+ *index = (*index) * 10 + d;
+ return true;
+}
+
+template <typename Stream, typename index_t>
+bool StringToArrayIndex(Stream* stream, index_t* index) {
uint16_t ch = stream->GetNext();
// If the string begins with a '0' character, it must only consist
@@ -55,9 +72,20 @@ bool StringToArrayIndex(Stream* stream, uint32_t* index) {
// Convert string to uint32 array index; character by character.
if (!IsDecimalDigit(ch)) return false;
int d = ch - '0';
- uint32_t result = d;
+ index_t result = d;
while (stream->HasMore()) {
- if (!TryAddIndexChar(&result, stream->GetNext())) return false;
+ // Clang on Mac doesn't think that size_t and uint*_t should be
+ // implicitly convertible.
+ if (sizeof(index_t) == 8) {
+ if (!TryAddIndexChar(reinterpret_cast<uint64_t*>(&result),
+ stream->GetNext())) {
+ return false;
+ }
+ } else {
+ if (!TryAddIndexChar(reinterpret_cast<uint32_t*>(&result),
+ stream->GetNext()))
+ return false;
+ }
}
*index = result;
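
The overflow guards in both TryAddIndexChar overloads are worth checking by hand, since the shift trick encodes a digit-dependent threshold. For the 32-bit case (maximum array index 4294967294), a quick verification (illustrative only):

    #include <cassert>

    int main() {
      // d <= 4: (d + 3) >> 3 == 0, so the previous index may be 429496729.
      assert(((4 + 3) >> 3) == 0);
      assert(429496729ull * 10 + 4 == 4294967294ull);  // Exactly the maximum.
      // d >= 5: (d + 3) >> 3 == 1, so the previous index must be <= 429496728.
      assert(((5 + 3) >> 3) == 1);
      assert(429496729ull * 10 + 5 == 4294967295ull);  // kMaxUInt32: too big.
      assert(429496728ull * 10 + 9 == 4294967289ull);  // Still in range.
      // Same pattern for the uint64_t overload: (d + 2) >> 3 flips at d == 6.
      assert(((5 + 2) >> 3) == 0 && ((6 + 2) >> 3) == 1);
    }
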
diff --git a/deps/v8/src/utils/utils.h b/deps/v8/src/utils/utils.h
index 27d3d5ef21..b414a4c52b 100644
--- a/deps/v8/src/utils/utils.h
+++ b/deps/v8/src/utils/utils.h
@@ -760,13 +760,8 @@ inline uint64_t unsigned_bitextract_64(int msb, int lsb, uint64_t x) {
return (x >> lsb) & ((static_cast<uint64_t>(1) << (1 + msb - lsb)) - 1);
}
-inline int32_t signed_bitextract_32(int msb, int lsb, int32_t x) {
- return (x << (31 - msb)) >> (lsb + 31 - msb);
-}
-
-inline int signed_bitextract_64(int msb, int lsb, int x) {
- // TODO(jbramley): This is broken for big bitfields.
- return (x << (63 - msb)) >> (lsb + 63 - msb);
+inline int32_t signed_bitextract_32(int msb, int lsb, uint32_t x) {
+ return static_cast<int32_t>(x << (31 - msb)) >> (lsb + 31 - msb);
}
// Check number width.
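
The signed_bitextract_32 fix takes the argument as uint32_t so the left shift cannot hit signed-overflow UB; sign extension then comes from the arithmetic right shift after the cast. A quick check of the revised helper (illustrative; the cast to int32_t is implementation-defined before C++20 but is the wraparound behavior on the toolchains V8 targets):

    #include <cassert>
    #include <cstdint>

    int32_t signed_bitextract_32(int msb, int lsb, uint32_t x) {
      return static_cast<int32_t>(x << (31 - msb)) >> (lsb + 31 - msb);
    }

    int main() {
      // Bits [7:4] of 0xF0 are 0b1111, i.e. -1 as a signed 4-bit field.
      assert(signed_bitextract_32(7, 4, 0xF0u) == -1);
      // Bits [7:4] of 0x70 are 0b0111, i.e. 7.
      assert(signed_bitextract_32(7, 4, 0x70u) == 7);
    }
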
@@ -978,8 +973,8 @@ bool DoubleToBoolean(double d);
template <typename Char>
bool TryAddIndexChar(uint32_t* index, Char c);
-template <typename Stream>
-bool StringToArrayIndex(Stream* stream, uint32_t* index);
+template <typename Stream, typename index_t>
+bool StringToArrayIndex(Stream* stream, index_t* index);
// Returns the current stack top. Works correctly with ASAN and SafeStack.
// GetCurrentStackPosition() should not be inlined, because it works on stack
diff --git a/deps/v8/src/utils/vector.h b/deps/v8/src/utils/vector.h
index dd5c59e553..e0c13afc90 100644
--- a/deps/v8/src/utils/vector.h
+++ b/deps/v8/src/utils/vector.h
@@ -8,6 +8,7 @@
#include <algorithm>
#include <cstring>
#include <iterator>
+#include <memory>
#include "src/common/checks.h"
#include "src/common/globals.h"
diff --git a/deps/v8/src/wasm/DEPS b/deps/v8/src/wasm/DEPS
index eb0780f5e3..2d310c631c 100644
--- a/deps/v8/src/wasm/DEPS
+++ b/deps/v8/src/wasm/DEPS
@@ -1,4 +1,11 @@
specific_include_rules = {
+ "jump-table-assembler\.(cc|h)": [
+ # The JumpTableAssembler should not depend on any wasm-specific headers.
+ # The only allowed include is 'src/codegen' for assembler headers.
+ "-src",
+ "+src/codegen",
+ "+src/wasm/jump-table-assembler.h",
+ ],
"c-api\.cc": [
"+include/libplatform/libplatform.h",
"+third_party/wasm-api/wasm.h",
diff --git a/deps/v8/src/wasm/OWNERS b/deps/v8/src/wasm/OWNERS
index 8aa6e24739..bc9ec357df 100644
--- a/deps/v8/src/wasm/OWNERS
+++ b/deps/v8/src/wasm/OWNERS
@@ -1,7 +1,7 @@
ahaas@chromium.org
bbudge@chromium.org
binji@chromium.org
-clemensh@chromium.org
+clemensb@chromium.org
gdeepti@chromium.org
mstarzinger@chromium.org
titzer@chromium.org
diff --git a/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h b/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
index 834eb181d8..e6c46e4a09 100644
--- a/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
+++ b/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
@@ -46,10 +46,12 @@ constexpr int32_t kConstantStackSpace = kSystemPointerSize;
// Three instructions are required to sub a large constant, movw + movt + sub.
constexpr int32_t kPatchInstructionsRequired = 3;
+inline int GetStackSlotOffset(uint32_t index) {
+ return kFirstStackSlotOffset + index * LiftoffAssembler::kStackSlotSize;
+}
+
inline MemOperand GetStackSlot(uint32_t index) {
- int32_t offset =
- kFirstStackSlotOffset + index * LiftoffAssembler::kStackSlotSize;
- return MemOperand(fp, -offset);
+ return MemOperand(fp, -GetStackSlotOffset(index));
}
inline MemOperand GetHalfStackSlot(uint32_t index, RegPairHalf half) {
@@ -635,6 +637,44 @@ void LiftoffAssembler::FillI64Half(Register reg, uint32_t index,
ldr(reg, liftoff::GetHalfStackSlot(index, half));
}
+void LiftoffAssembler::FillStackSlotsWithZero(uint32_t index, uint32_t count) {
+ DCHECK_LT(0, count);
+ uint32_t last_stack_slot = index + count - 1;
+ RecordUsedSpillSlot(last_stack_slot);
+
+ // We need a zero reg. Always use r0 for that, and push it before to restore
+ // its value afterwards.
+ push(r0);
+ mov(r0, Operand(0));
+
+ if (count <= 5) {
+ // Special straight-line code for up to five slots. Generates two
+ // instructions per slot.
+ for (uint32_t offset = 0; offset < count; ++offset) {
+ str(r0, liftoff::GetHalfStackSlot(index + offset, kLowWord));
+ str(r0, liftoff::GetHalfStackSlot(index + offset, kHighWord));
+ }
+ } else {
+ // General case for bigger counts (9 instructions).
+ // Use r1 for start address (inclusive), r2 for end address (exclusive).
+ push(r1);
+ push(r2);
+ sub(r1, fp, Operand(liftoff::GetStackSlotOffset(last_stack_slot)));
+ sub(r2, fp, Operand(liftoff::GetStackSlotOffset(index) - kStackSlotSize));
+
+ Label loop;
+ bind(&loop);
+ str(r0, MemOperand(r1, /* offset */ kSystemPointerSize, PostIndex));
+ cmp(r1, r2);
+ b(&loop, ne);
+
+ pop(r2);
+ pop(r1);
+ }
+
+ pop(r0);
+}
+
#define I32_BINOP(name, instruction) \
void LiftoffAssembler::emit_##name(Register dst, Register lhs, \
Register rhs) { \
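
The new FillStackSlotsWithZero implementations (here and in the arm64 and ia32 files below) share one shape: a short unrolled fast path for small counts, and a pointer-walk loop otherwise (arm64 additionally pairs stores with stp). A C-level model of what the general-case loop computes, given that slot i lives at fp - (kFirstStackSlotOffset + i * kStackSlotSize); this is a sketch of the addressing, not the emitted assembly:

    #include <cstdint>
    #include <cstring>

    void FillStackSlotsWithZero(uint8_t* fp, uint32_t index, uint32_t count,
                                int slot_size, int first_slot_offset) {
      uint32_t last_stack_slot = index + count - 1;
      // Lowest address first: larger slot indices sit further below fp.
      uint8_t* start = fp - (first_slot_offset + last_stack_slot * slot_size);
      uint8_t* end = fp - (first_slot_offset + index * slot_size) + slot_size;
      for (uint8_t* p = start; p < end; p += slot_size) {
        memset(p, 0, slot_size);  // The generated code stores a zero register.
      }
    }
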
diff --git a/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h b/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
index dc68267825..dede53b7a4 100644
--- a/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
+++ b/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
@@ -43,10 +43,12 @@ constexpr int32_t kInstanceOffset = 2 * kSystemPointerSize;
constexpr int32_t kFirstStackSlotOffset = kInstanceOffset + kSystemPointerSize;
constexpr int32_t kConstantStackSpace = 0;
+inline int GetStackSlotOffset(uint32_t index) {
+ return kFirstStackSlotOffset + index * LiftoffAssembler::kStackSlotSize;
+}
+
inline MemOperand GetStackSlot(uint32_t index) {
- int32_t offset =
- kFirstStackSlotOffset + index * LiftoffAssembler::kStackSlotSize;
- return MemOperand(fp, -offset);
+ return MemOperand(fp, -GetStackSlotOffset(index));
}
inline MemOperand GetInstanceOperand() {
@@ -398,6 +400,38 @@ void LiftoffAssembler::FillI64Half(Register, uint32_t index, RegPairHalf) {
UNREACHABLE();
}
+void LiftoffAssembler::FillStackSlotsWithZero(uint32_t index, uint32_t count) {
+ DCHECK_LT(0, count);
+ uint32_t last_stack_slot = index + count - 1;
+ RecordUsedSpillSlot(last_stack_slot);
+
+ int max_stp_offset = -liftoff::GetStackSlotOffset(last_stack_slot);
+ if (count <= 20 && IsImmLSPair(max_stp_offset, kXRegSizeLog2)) {
+ // Special straight-line code for up to 20 slots. Generates one
+ // instruction per two slots (<= 10 instructions total).
+ for (; count > 1; count -= 2) {
+ STATIC_ASSERT(kStackSlotSize == kSystemPointerSize);
+ stp(xzr, xzr, liftoff::GetStackSlot(index + count - 1));
+ }
+ DCHECK(count == 0 || count == 1);
+ if (count) str(xzr, liftoff::GetStackSlot(index));
+ } else {
+ // General case for bigger counts (7 instructions).
+ // Use x0 for start address (inclusive), x1 for end address (exclusive).
+ Push(x1, x0);
+ Sub(x0, fp, Operand(liftoff::GetStackSlotOffset(last_stack_slot)));
+ Sub(x1, fp, Operand(liftoff::GetStackSlotOffset(index) - kStackSlotSize));
+
+ Label loop;
+ bind(&loop);
+ str(xzr, MemOperand(x0, /* offset */ kSystemPointerSize, PostIndex));
+ cmp(x0, x1);
+ b(&loop, ne);
+
+ Pop(x0, x1);
+ }
+}
+
#define I32_BINOP(name, instruction) \
void LiftoffAssembler::emit_##name(Register dst, Register lhs, \
Register rhs) { \
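The arm64 straight-line path pairs slots so that one stp (store pair) instruction clears 16 bytes at a time. A rough host-side model of the pairing, under the same illustrative assumptions as the sketch above:

    #include <cstdint>

    // slots[0] is the first slot to clear; one stp covers two adjacent slots,
    // and an odd remainder gets a single str. This models the loop
    // "for (; count > 1; count -= 2) stp(...)" followed by "if (count) str(...)".
    void ZeroSlotsPairwise(uint64_t* slots, uint32_t count) {
      uint32_t remaining = count;
      for (; remaining > 1; remaining -= 2) {
        slots[remaining - 1] = 0;  // the stp writes two adjacent 8-byte slots
        slots[remaining - 2] = 0;
      }
      if (remaining) slots[0] = 0;  // single str for the odd slot
    }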
diff --git a/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h b/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
index 7bc3596d2e..fa88d20df6 100644
--- a/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
+++ b/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
@@ -41,7 +41,7 @@ inline MemOperand GetHalfStackSlot(uint32_t index, RegPairHalf half) {
return Operand(ebp, -kFirstStackSlotOffset - offset);
}
-// TODO(clemensh): Make this a constexpr variable once Operand is constexpr.
+// TODO(clemensb): Make this a constexpr variable once Operand is constexpr.
inline Operand GetInstanceOperand() { return Operand(ebp, -8); }
static constexpr LiftoffRegList kByteRegs =
@@ -511,6 +511,37 @@ void LiftoffAssembler::FillI64Half(Register reg, uint32_t index,
mov(reg, liftoff::GetHalfStackSlot(index, half));
}
+void LiftoffAssembler::FillStackSlotsWithZero(uint32_t index, uint32_t count) {
+ DCHECK_LT(0, count);
+ uint32_t last_stack_slot = index + count - 1;
+ RecordUsedSpillSlot(last_stack_slot);
+
+ if (count <= 2) {
+ // Special straight-line code for up to two slots (6-9 bytes per word:
+ // C7 <1-4 bytes operand> <4 bytes imm>, makes 12-18 bytes per slot).
+ for (uint32_t offset = 0; offset < count; ++offset) {
+ mov(liftoff::GetHalfStackSlot(index + offset, kLowWord), Immediate(0));
+ mov(liftoff::GetHalfStackSlot(index + offset, kHighWord), Immediate(0));
+ }
+ } else {
+ // General case for bigger counts.
+ // This sequence takes 18-21 bytes (3 for pushes, 3-6 for lea, 2 for xor, 5
+ // for mov, 2 for rep_stos, 3 for pops).
+ // Note: rep_stos fills ECX doublewords at [EDI] with EAX.
+ push(eax);
+ push(ecx);
+ push(edi);
+ lea(edi, liftoff::GetStackSlot(last_stack_slot));
+ xor_(eax, eax);
+ // Number of words is number of slots times two.
+ mov(ecx, Immediate(count * 2));
+ rep_stos();
+ pop(edi);
+ pop(ecx);
+ pop(eax);
+ }
+}
+
void LiftoffAssembler::emit_i32_add(Register dst, Register lhs, Register rhs) {
if (lhs != dst) {
lea(dst, Operand(lhs, rhs, times_1, 0));
diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler.cc b/deps/v8/src/wasm/baseline/liftoff-assembler.cc
index 0fcfb8dbfc..389c065507 100644
--- a/deps/v8/src/wasm/baseline/liftoff-assembler.cc
+++ b/deps/v8/src/wasm/baseline/liftoff-assembler.cc
@@ -297,7 +297,7 @@ class StackTransferRecipe {
// process all remaining moves in that cycle. Repeat for all cycles.
uint32_t next_spill_slot = asm_->cache_state()->stack_height();
while (!move_dst_regs_.is_empty()) {
- // TODO(clemensh): Use an unused register if available.
+ // TODO(clemensb): Use an unused register if available.
LiftoffRegister dst = move_dst_regs_.GetFirstRegSet();
RegisterMove* move = register_move(dst);
LiftoffRegister spill_reg = move->src;
@@ -412,7 +412,7 @@ void InitMergeRegion(LiftoffAssembler::CacheState* state,
} // namespace
-// TODO(clemensh): Don't copy the full parent state (this makes us N^2).
+// TODO(clemensb): Don't copy the full parent state (this makes us N^2).
void LiftoffAssembler::CacheState::InitMerge(const CacheState& source,
uint32_t num_locals,
uint32_t arity,
@@ -484,7 +484,7 @@ constexpr AssemblerOptions DefaultLiftoffOptions() {
} // namespace
-// TODO(clemensh): Provide a reasonably sized buffer, based on wasm function
+// TODO(clemensb): Provide a reasonably sized buffer, based on wasm function
// size.
LiftoffAssembler::LiftoffAssembler(std::unique_ptr<AssemblerBuffer> buffer)
: TurboAssembler(nullptr, DefaultLiftoffOptions(), CodeObjectRequired::kNo,
@@ -526,7 +526,7 @@ LiftoffRegister LiftoffAssembler::PopToRegister(LiftoffRegList pinned) {
void LiftoffAssembler::MergeFullStackWith(const CacheState& target,
const CacheState& source) {
DCHECK_EQ(source.stack_height(), target.stack_height());
- // TODO(clemensh): Reuse the same StackTransferRecipe object to save some
+ // TODO(clemensb): Reuse the same StackTransferRecipe object to save some
// allocations.
StackTransferRecipe transfers(this);
for (uint32_t i = 0, e = source.stack_height(); i < e; ++i) {
diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler.h b/deps/v8/src/wasm/baseline/liftoff-assembler.h
index 766ce71db1..f0d49a8782 100644
--- a/deps/v8/src/wasm/baseline/liftoff-assembler.h
+++ b/deps/v8/src/wasm/baseline/liftoff-assembler.h
@@ -228,7 +228,7 @@ class LiftoffAssembler : public TurboAssembler {
return reg;
}
- // TODO(clemensh): Don't copy the full parent state (this makes us N^2).
+ // TODO(clemensb): Don't copy the full parent state (this makes us N^2).
void InitMerge(const CacheState& source, uint32_t num_locals,
uint32_t arity, uint32_t stack_depth);
@@ -386,6 +386,7 @@ class LiftoffAssembler : public TurboAssembler {
// Only used on 32-bit systems: Fill a register from a "half stack slot", i.e.
// 4 bytes on the stack holding half of a 64-bit value.
inline void FillI64Half(Register, uint32_t index, RegPairHalf);
+ inline void FillStackSlotsWithZero(uint32_t index, uint32_t count);
// i32 binops.
inline void emit_i32_add(Register dst, Register lhs, Register rhs);
diff --git a/deps/v8/src/wasm/baseline/liftoff-compiler.cc b/deps/v8/src/wasm/baseline/liftoff-compiler.cc
index 02de06763c..997c8ff52b 100644
--- a/deps/v8/src/wasm/baseline/liftoff-compiler.cc
+++ b/deps/v8/src/wasm/baseline/liftoff-compiler.cc
@@ -6,7 +6,7 @@
#include "src/base/optional.h"
#include "src/codegen/assembler-inl.h"
-// TODO(clemensh): Remove dependences on compiler stuff.
+// TODO(clemensb): Remove dependences on compiler stuff.
#include "src/codegen/interface-descriptors.h"
#include "src/codegen/macro-assembler-inl.h"
#include "src/compiler/linkage.h"
@@ -121,7 +121,7 @@ constexpr Vector<const ValueType> kSupportedTypes =
class LiftoffCompiler {
public:
- // TODO(clemensh): Make this a template parameter.
+ // TODO(clemensb): Make this a template parameter.
static constexpr Decoder::ValidateFlag validate = Decoder::kValidate;
using Value = ValueBase;
@@ -341,6 +341,24 @@ class LiftoffCompiler {
__ bind(ool.continuation.get());
}
+ bool SpillLocalsInitially(FullDecoder* decoder, uint32_t num_params) {
+ int actual_locals = __ num_locals() - num_params;
+ DCHECK_LE(0, actual_locals);
+ constexpr int kNumCacheRegisters = NumRegs(kLiftoffAssemblerGpCacheRegs);
+ // If we have many locals, we put them on the stack initially. This avoids
+ // having to spill them at merge points. Use of these initial values should
+ // be rare anyway.
+ if (actual_locals > kNumCacheRegisters / 2) return true;
+ // If there are locals which are not i32 or i64, we also spill all locals,
+ // because other types cannot be initialized to constants.
+ for (uint32_t param_idx = num_params; param_idx < __ num_locals();
+ ++param_idx) {
+ ValueType type = decoder->GetLocalType(param_idx);
+ if (type != kWasmI32 && type != kWasmI64) return true;
+ }
+ return false;
+ }
+
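SpillLocalsInitially chooses between two representations for locals beyond the parameters: constant stack-state entries (cheap, but only possible for i32/i64) or a bulk zero-fill of real stack slots. A standalone sketch of the same decision, with hypothetical names (num_cache_registers stands in for NumRegs(kLiftoffAssemblerGpCacheRegs)):

    #include <vector>

    enum SketchValueType { kSketchI32, kSketchI64, kSketchF32, kSketchF64 };

    bool SpillLocalsInitiallySketch(
        const std::vector<SketchValueType>& extra_locals,
        int num_cache_registers) {
      // Many locals: keep them in memory so merge points need no spilling.
      if (static_cast<int>(extra_locals.size()) > num_cache_registers / 2)
        return true;
      // Float locals cannot be represented as stack-state constants.
      for (SketchValueType t : extra_locals)
        if (t != kSketchI32 && t != kSketchI64) return true;
      return false;
    }

With 8 gp cache registers, for example, up to four extra i32/i64 locals stay as constants; a fifth local, or any f32/f64 local, switches to the FillStackSlotsWithZero path.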
void StartFunctionBody(FullDecoder* decoder, Control* block) {
for (uint32_t i = 0; i < __ num_locals(); ++i) {
if (!CheckSupportedType(decoder, kSupportedTypes, __ local_type(i),
@@ -373,6 +391,7 @@ class LiftoffCompiler {
// LiftoffAssembler methods.
if (DidAssemblerBailout(decoder)) return;
+ // Process parameters.
__ SpillInstance(instance_reg);
// Input 0 is the code target, 1 is the instance. First parameter at 2.
uint32_t input_idx = kInstanceParameterIndex + 1;
@@ -380,32 +399,20 @@ class LiftoffCompiler {
input_idx += ProcessParameter(__ local_type(param_idx), input_idx);
}
DCHECK_EQ(input_idx, descriptor_->InputCount());
- // Set to a gp register, to mark this uninitialized.
- LiftoffRegister zero_double_reg = kGpCacheRegList.GetFirstRegSet();
- DCHECK(zero_double_reg.is_gp());
- for (uint32_t param_idx = num_params; param_idx < __ num_locals();
- ++param_idx) {
- ValueType type = decoder->GetLocalType(param_idx);
- switch (type) {
- case kWasmI32:
- __ cache_state()->stack_state.emplace_back(kWasmI32, uint32_t{0});
- break;
- case kWasmI64:
- __ cache_state()->stack_state.emplace_back(kWasmI64, uint32_t{0});
- break;
- case kWasmF32:
- case kWasmF64:
- if (zero_double_reg.is_gp()) {
- // Note: This might spill one of the registers used to hold
- // parameters.
- zero_double_reg = __ GetUnusedRegister(kFpReg);
- // Zero is represented by the bit pattern 0 for both f32 and f64.
- __ LoadConstant(zero_double_reg, WasmValue(0.));
- }
- __ PushRegister(type, zero_double_reg);
- break;
- default:
- UNIMPLEMENTED();
+
+ // Initialize locals beyond parameters.
+ if (SpillLocalsInitially(decoder, num_params)) {
+ __ FillStackSlotsWithZero(num_params, __ num_locals() - num_params);
+ for (uint32_t param_idx = num_params; param_idx < __ num_locals();
+ ++param_idx) {
+ ValueType type = decoder->GetLocalType(param_idx);
+ __ cache_state()->stack_state.emplace_back(type);
+ }
+ } else {
+ for (uint32_t param_idx = num_params; param_idx < __ num_locals();
+ ++param_idx) {
+ ValueType type = decoder->GetLocalType(param_idx);
+ __ cache_state()->stack_state.emplace_back(type, int32_t{0});
}
}
@@ -488,7 +495,7 @@ class LiftoffCompiler {
// Before entering a loop, spill all locals to the stack, in order to free
// the cache registers, and to avoid unnecessarily reloading stack values
// into registers at branches.
- // TODO(clemensh): Come up with a better strategy here, involving
+ // TODO(clemensb): Come up with a better strategy here, involving
// pre-analysis of the function.
__ SpillLocals();
@@ -519,7 +526,7 @@ class LiftoffCompiler {
}
// Allocate the else state.
- if_block->else_state = base::make_unique<ElseState>();
+ if_block->else_state = std::make_unique<ElseState>();
// Test the condition, jump to else if zero.
Register value = __ PopToRegister().gp();
@@ -617,8 +624,8 @@ class LiftoffCompiler {
template <ValueType src_type, ValueType result_type, class EmitFn>
void EmitUnOp(EmitFn fn) {
- static RegClass src_rc = reg_class_for(src_type);
- static RegClass result_rc = reg_class_for(result_type);
+ constexpr RegClass src_rc = reg_class_for(src_type);
+ constexpr RegClass result_rc = reg_class_for(result_type);
LiftoffRegister src = __ PopToRegister();
LiftoffRegister dst = src_rc == result_rc
? __ GetUnusedRegister(result_rc, {src})
@@ -693,45 +700,44 @@ class LiftoffCompiler {
void UnOp(FullDecoder* decoder, WasmOpcode opcode, const Value& value,
Value* result) {
#define CASE_I32_UNOP(opcode, fn) \
- case WasmOpcode::kExpr##opcode: \
+ case kExpr##opcode: \
EmitUnOp<kWasmI32, kWasmI32>( \
[=](LiftoffRegister dst, LiftoffRegister src) { \
__ emit_##fn(dst.gp(), src.gp()); \
}); \
break;
#define CASE_I32_SIGN_EXTENSION(opcode, fn) \
- case WasmOpcode::kExpr##opcode: \
+ case kExpr##opcode: \
EmitUnOp<kWasmI32, kWasmI32>( \
[=](LiftoffRegister dst, LiftoffRegister src) { \
__ emit_##fn(dst.gp(), src.gp()); \
}); \
break;
#define CASE_I64_SIGN_EXTENSION(opcode, fn) \
- case WasmOpcode::kExpr##opcode: \
+ case kExpr##opcode: \
EmitUnOp<kWasmI64, kWasmI64>( \
[=](LiftoffRegister dst, LiftoffRegister src) { \
__ emit_##fn(dst, src); \
}); \
break;
#define CASE_FLOAT_UNOP(opcode, type, fn) \
- case WasmOpcode::kExpr##opcode: \
+ case kExpr##opcode: \
EmitUnOp<kWasm##type, kWasm##type>( \
[=](LiftoffRegister dst, LiftoffRegister src) { \
__ emit_##fn(dst.fp(), src.fp()); \
}); \
break;
#define CASE_FLOAT_UNOP_WITH_CFALLBACK(opcode, type, fn) \
- case WasmOpcode::kExpr##opcode: \
+ case kExpr##opcode: \
EmitFloatUnOpWithCFallback<kWasm##type>(&LiftoffAssembler::emit_##fn, \
&ExternalReference::wasm_##fn); \
break;
#define CASE_TYPE_CONVERSION(opcode, dst_type, src_type, ext_ref, can_trap) \
- case WasmOpcode::kExpr##opcode: \
+ case kExpr##opcode: \
EmitTypeConversion<kWasm##dst_type, kWasm##src_type, can_trap>( \
kExpr##opcode, ext_ref, can_trap ? decoder->position() : 0); \
break;
switch (opcode) {
- CASE_I32_UNOP(I32Eqz, i32_eqz)
CASE_I32_UNOP(I32Clz, i32_clz)
CASE_I32_UNOP(I32Ctz, i32_ctz)
CASE_FLOAT_UNOP(F32Abs, F32, f32_abs)
@@ -786,29 +792,41 @@ class LiftoffCompiler {
CASE_I64_SIGN_EXTENSION(I64SExtendI8, i64_signextend_i8)
CASE_I64_SIGN_EXTENSION(I64SExtendI16, i64_signextend_i16)
CASE_I64_SIGN_EXTENSION(I64SExtendI32, i64_signextend_i32)
+ case kExprI32Eqz:
+ DCHECK(decoder->lookahead(0, kExprI32Eqz));
+ if (decoder->lookahead(1, kExprBrIf)) {
+ DCHECK(!has_outstanding_op());
+ outstanding_op_ = kExprI32Eqz;
+ break;
+ }
+ EmitUnOp<kWasmI32, kWasmI32>(
+ [=](LiftoffRegister dst, LiftoffRegister src) {
+ __ emit_i32_eqz(dst.gp(), src.gp());
+ });
+ break;
case kExprI32Popcnt:
EmitI32UnOpWithCFallback(&LiftoffAssembler::emit_i32_popcnt,
&ExternalReference::wasm_word32_popcnt);
break;
- case WasmOpcode::kExprI64Eqz:
+ case kExprI64Eqz:
EmitUnOp<kWasmI64, kWasmI32>(
[=](LiftoffRegister dst, LiftoffRegister src) {
__ emit_i64_eqz(dst.gp(), src);
});
break;
- case WasmOpcode::kExprI64Clz:
- case WasmOpcode::kExprI64Ctz:
- case WasmOpcode::kExprI64Popcnt:
+ case kExprI64Clz:
+ case kExprI64Ctz:
+ case kExprI64Popcnt:
return unsupported(decoder, kComplexOperation,
WasmOpcodes::OpcodeName(opcode));
- case WasmOpcode::kExprI32SConvertSatF32:
- case WasmOpcode::kExprI32UConvertSatF32:
- case WasmOpcode::kExprI32SConvertSatF64:
- case WasmOpcode::kExprI32UConvertSatF64:
- case WasmOpcode::kExprI64SConvertSatF32:
- case WasmOpcode::kExprI64UConvertSatF32:
- case WasmOpcode::kExprI64SConvertSatF64:
- case WasmOpcode::kExprI64UConvertSatF64:
+ case kExprI32SConvertSatF32:
+ case kExprI32UConvertSatF32:
+ case kExprI32SConvertSatF64:
+ case kExprI32UConvertSatF64:
+ case kExprI64SConvertSatF32:
+ case kExprI64UConvertSatF32:
+ case kExprI64SConvertSatF64:
+ case kExprI64UConvertSatF64:
return unsupported(decoder, kNonTrappingFloatToInt,
WasmOpcodes::OpcodeName(opcode));
default:
@@ -1224,7 +1242,7 @@ class LiftoffCompiler {
ReturnImpl(decoder);
}
- void GetLocal(FullDecoder* decoder, Value* result,
+ void LocalGet(FullDecoder* decoder, Value* result,
const LocalIndexImmediate<validate>& imm) {
auto& slot = __ cache_state()->stack_state[imm.index];
DCHECK_EQ(slot.type(), imm.type);
@@ -1245,7 +1263,7 @@ class LiftoffCompiler {
}
}
- void SetLocalFromStackSlot(LiftoffAssembler::VarState* dst_slot,
+ void LocalSetFromStackSlot(LiftoffAssembler::VarState* dst_slot,
uint32_t local_index) {
auto& state = *__ cache_state();
ValueType type = dst_slot->type();
@@ -1266,7 +1284,7 @@ class LiftoffCompiler {
__ cache_state()->inc_used(dst_reg);
}
- void SetLocal(uint32_t local_index, bool is_tee) {
+ void LocalSet(uint32_t local_index, bool is_tee) {
auto& state = *__ cache_state();
auto& source_slot = state.stack_state.back();
auto& target_slot = state.stack_state[local_index];
@@ -1281,20 +1299,20 @@ class LiftoffCompiler {
target_slot = source_slot;
break;
case kStack:
- SetLocalFromStackSlot(&target_slot, local_index);
+ LocalSetFromStackSlot(&target_slot, local_index);
break;
}
if (!is_tee) __ cache_state()->stack_state.pop_back();
}
- void SetLocal(FullDecoder* decoder, const Value& value,
+ void LocalSet(FullDecoder* decoder, const Value& value,
const LocalIndexImmediate<validate>& imm) {
- SetLocal(imm.index, false);
+ LocalSet(imm.index, false);
}
- void TeeLocal(FullDecoder* decoder, const Value& value, Value* result,
+ void LocalTee(FullDecoder* decoder, const Value& value, Value* result,
const LocalIndexImmediate<validate>& imm) {
- SetLocal(imm.index, true);
+ LocalSet(imm.index, true);
}
Register GetGlobalBaseAndOffset(const WasmGlobal* global,
@@ -1312,7 +1330,7 @@ class LiftoffCompiler {
return addr;
}
- void GetGlobal(FullDecoder* decoder, Value* result,
+ void GlobalGet(FullDecoder* decoder, Value* result,
const GlobalIndexImmediate<validate>& imm) {
const auto* global = &env_->module->globals[imm.index];
if (!CheckSupportedType(decoder, kSupportedTypes, global->type, "global"))
@@ -1327,7 +1345,7 @@ class LiftoffCompiler {
__ PushRegister(global->type, value);
}
- void SetGlobal(FullDecoder* decoder, const Value& value,
+ void GlobalSet(FullDecoder* decoder, const Value& value,
const GlobalIndexImmediate<validate>& imm) {
auto* global = &env_->module->globals[imm.index];
if (!CheckSupportedType(decoder, kSupportedTypes, global->type, "global"))
@@ -1402,10 +1420,18 @@ class LiftoffCompiler {
}
}
- void BrIf(FullDecoder* decoder, const Value& cond, uint32_t depth) {
+ void BrIf(FullDecoder* decoder, const Value& /* cond */, uint32_t depth) {
+ Condition cond = kEqual; // Unary "equal" means "equals zero".
+
+ if (has_outstanding_op()) {
+ DCHECK_EQ(kExprI32Eqz, outstanding_op_);
+ cond = kUnequal; // Unary "unequal" means "not equals zero".
+ outstanding_op_ = kNoOutstandingOp;
+ }
+
Label cont_false;
Register value = __ PopToRegister().gp();
- __ emit_cond_jump(kEqual, &cont_false, kWasmI32, value);
+ __ emit_cond_jump(cond, &cont_false, kWasmI32, value);
BrOrRet(decoder, depth);
__ bind(&cont_false);
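Together with the kExprI32Eqz case above, this implements a two-opcode fusion: i32.eqz followed by br_if emits a single conditional branch with an inverted condition instead of materializing the boolean. A minimal sketch of the pattern, using hypothetical types rather than the Liftoff classes:

    // First opcode defers itself; the second checks for it and fuses.
    enum SketchOp { kSketchNone, kSketchEqz };

    struct FusionSketch {
      SketchOp outstanding = kSketchNone;

      void OnI32Eqz(bool next_is_br_if) {
        if (next_is_br_if) {
          outstanding = kSketchEqz;  // emit nothing yet
          return;
        }
        EmitEqz();  // normal path: materialize the boolean
      }

      void OnBrIf() {
        // Plain br_if skips the branch when the input is zero (kEqual).
        // With a pending eqz, it skips when the input is non-zero
        // (kUnequal), i.e. branches exactly when the original value was zero.
        bool skip_if_nonzero = (outstanding == kSketchEqz);
        outstanding = kSketchNone;
        EmitCondJumpToSkipLabel(skip_if_nonzero);
      }

      void EmitEqz() {}
      void EmitCondJumpToSkipLabel(bool) {}
    };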
@@ -2056,7 +2082,14 @@ class LiftoffCompiler {
}
private:
+ static constexpr WasmOpcode kNoOutstandingOp = kExprUnreachable;
+
LiftoffAssembler asm_;
+
+ // Used for merging code generation of subsequent operations (via look-ahead).
+ // Set by the first opcode, reset by the second.
+ WasmOpcode outstanding_op_ = kNoOutstandingOp;
+
compiler::CallDescriptor* const descriptor_;
CompilationEnv* const env_;
LiftoffBailoutReason bailout_reason_ = kSuccess;
@@ -2072,6 +2105,10 @@ class LiftoffCompiler {
// patch the actually needed stack size in the end.
uint32_t pc_offset_stack_frame_construction_ = 0;
+ bool has_outstanding_op() const {
+ return outstanding_op_ != kNoOutstandingOp;
+ }
+
void TraceCacheState(FullDecoder* decoder) const {
#ifdef DEBUG
if (!FLAG_trace_liftoff || !FLAG_trace_wasm_decoder) return;
diff --git a/deps/v8/src/wasm/baseline/liftoff-register.h b/deps/v8/src/wasm/baseline/liftoff-register.h
index 267a005547..b322f7eb68 100644
--- a/deps/v8/src/wasm/baseline/liftoff-register.h
+++ b/deps/v8/src/wasm/baseline/liftoff-register.h
@@ -32,15 +32,18 @@ static inline constexpr bool needs_reg_pair(ValueType type) {
return kNeedI64RegPair && type == kWasmI64;
}
-// TODO(clemensh): Use a switch once we require C++14 support.
static inline constexpr RegClass reg_class_for(ValueType type) {
- return needs_reg_pair(type) // i64 on 32 bit
- ? kGpRegPair
- : type == kWasmI32 || type == kWasmI64 // int types
- ? kGpReg
- : type == kWasmF32 || type == kWasmF64 // float types
- ? kFpReg
- : kNoReg; // other (unsupported) types
+ switch (type) {
+ case kWasmF32:
+ case kWasmF64:
+ return kFpReg;
+ case kWasmI32:
+ return kGpReg;
+ case kWasmI64:
+ return kNeedI64RegPair ? kGpRegPair : kGpReg;
+ default:
+ return kNoReg; // unsupported type
+ }
}
// Maximum code of a gp cache register.
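Since the file is now C++14, the switch can appear directly in the constexpr function, and the mapping can be checked at compile time. A couple of illustrative assertions, assuming the declarations from this header:

    static_assert(reg_class_for(kWasmI32) == kGpReg,
                  "int32 uses a gp register");
    static_assert(reg_class_for(kWasmF64) == kFpReg,
                  "float64 uses an fp register");
    static_assert(reg_class_for(kWasmI64) ==
                      (kNeedI64RegPair ? kGpRegPair : kGpReg),
                  "int64 needs a register pair only on 32-bit targets");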
diff --git a/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h b/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
index e82ffe8f67..4c69e423c1 100644
--- a/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
+++ b/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
@@ -13,6 +13,28 @@ namespace wasm {
namespace liftoff {
+// half
+// slot Frame
+// -----+--------------------+---------------------------
+// n+3 | parameter n |
+// ... | ... |
+// 4 | parameter 1 | or parameter 2
+// 3 | parameter 0 | or parameter 1
+// 2 | (result address) | or parameter 0
+// -----+--------------------+---------------------------
+// 1 | return addr (ra) |
+// 0 | previous frame (fp)|
+// -----+--------------------+ <-- frame ptr (fp)
+// -1 | 0xa: WASM_COMPILED |
+// -2 | instance |
+// -----+--------------------+---------------------------
+// -3 | slot 0 (high) | ^
+// -4 | slot 0 (low) | |
+// -5 | slot 1 (high) | Frame slots
+// -6 | slot 1 (low) | |
+// | | v
+// -----+--------------------+ <-- stack ptr (sp)
+//
#if defined(V8_TARGET_BIG_ENDIAN)
constexpr int32_t kLowWordOffset = 4;
constexpr int32_t kHighWordOffset = 0;
@@ -27,9 +49,12 @@ constexpr int32_t kConstantStackSpace = 8;
constexpr int32_t kFirstStackSlotOffset =
kConstantStackSpace + LiftoffAssembler::kStackSlotSize;
+inline int GetStackSlotOffset(uint32_t index) {
+ return kFirstStackSlotOffset + index * LiftoffAssembler::kStackSlotSize;
+}
+
inline MemOperand GetStackSlot(uint32_t index) {
- int32_t offset = index * LiftoffAssembler::kStackSlotSize;
- return MemOperand(fp, -kFirstStackSlotOffset - offset);
+ return MemOperand(fp, -GetStackSlotOffset(index));
}
inline MemOperand GetHalfStackSlot(uint32_t index, RegPairHalf half) {
@@ -583,6 +608,34 @@ void LiftoffAssembler::FillI64Half(Register reg, uint32_t index,
lw(reg, liftoff::GetHalfStackSlot(index, half));
}
+void LiftoffAssembler::FillStackSlotsWithZero(uint32_t index, uint32_t count) {
+ DCHECK_LT(0, count);
+ uint32_t last_stack_slot = index + count - 1;
+ RecordUsedSpillSlot(last_stack_slot);
+
+ if (count <= 12) {
+ // Special straight-line code for up to 12 slots. Generates one
+ // instruction per slot (<=12 instructions total).
+ for (uint32_t offset = 0; offset < count; ++offset) {
+ Sw(zero_reg, liftoff::GetStackSlot(index + offset));
+ }
+ } else {
+ // General case for bigger counts (12 instructions).
+ // Use a0 for start address (inclusive), a1 for end address (exclusive).
+ Push(a1, a0);
+ Addu(a0, fp, Operand(-liftoff::GetStackSlotOffset(last_stack_slot)));
+ Addu(a1, fp, Operand(-liftoff::GetStackSlotOffset(index) + kStackSlotSize));
+
+ Label loop;
+ bind(&loop);
+ Sw(zero_reg, MemOperand(a0));
+ addiu(a0, a0, kSystemPointerSize);
+ BranchShort(&loop, ne, a0, Operand(a1));
+
+ Pop(a1, a0);
+ }
+}
+
void LiftoffAssembler::emit_i32_mul(Register dst, Register lhs, Register rhs) {
TurboAssembler::Mul(dst, lhs, rhs);
}
diff --git a/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h b/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
index 9c87dca733..5314a65da5 100644
--- a/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
+++ b/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
@@ -13,15 +13,44 @@ namespace wasm {
namespace liftoff {
+// Liftoff Frames.
+//
+// slot Frame
+// +--------------------+---------------------------
+// n+4 | optional padding slot to keep the stack 16 byte aligned.
+// n+3 | parameter n |
+// ... | ... |
+// 4 | parameter 1 | or parameter 2
+// 3 | parameter 0 | or parameter 1
+// 2 | (result address) | or parameter 0
+// -----+--------------------+---------------------------
+// 1 | return addr (ra) |
+// 0 | previous frame (fp)|
+// -----+--------------------+ <-- frame ptr (fp)
+// -1 | 0xa: WASM_COMPILED |
+// -2 | instance |
+// -----+--------------------+---------------------------
+// -3 | slot 0 | ^
+// -4 | slot 1 | |
+// | | Frame slots
+// | | |
+// | | v
+// | optional padding slot to keep the stack 16 byte aligned.
+// -----+--------------------+ <-- stack ptr (sp)
+//
+
// fp-8 holds the stack marker, fp-16 is the instance parameter, first stack
// slot is located at fp-24.
constexpr int32_t kConstantStackSpace = 16;
constexpr int32_t kFirstStackSlotOffset =
kConstantStackSpace + LiftoffAssembler::kStackSlotSize;
+inline int GetStackSlotOffset(uint32_t index) {
+ return kFirstStackSlotOffset + index * LiftoffAssembler::kStackSlotSize;
+}
+
inline MemOperand GetStackSlot(uint32_t index) {
- int32_t offset = index * LiftoffAssembler::kStackSlotSize;
- return MemOperand(fp, -kFirstStackSlotOffset - offset);
+ return MemOperand(fp, -GetStackSlotOffset(index));
}
inline MemOperand GetInstanceOperand() { return MemOperand(fp, -16); }
@@ -498,6 +527,35 @@ void LiftoffAssembler::FillI64Half(Register, uint32_t index, RegPairHalf) {
UNREACHABLE();
}
+void LiftoffAssembler::FillStackSlotsWithZero(uint32_t index, uint32_t count) {
+ DCHECK_LT(0, count);
+ uint32_t last_stack_slot = index + count - 1;
+ RecordUsedSpillSlot(last_stack_slot);
+
+ if (count <= 12) {
+ // Special straight-line code for up to 12 slots. Generates one
+ // instruction per slot (<= 12 instructions total).
+ for (uint32_t offset = 0; offset < count; ++offset) {
+ Sd(zero_reg, liftoff::GetStackSlot(index + offset));
+ }
+ } else {
+ // General case for bigger counts (12 instructions).
+ // Use a0 for start address (inclusive), a1 for end address (exclusive).
+ Push(a1, a0);
+ Daddu(a0, fp, Operand(-liftoff::GetStackSlotOffset(last_stack_slot)));
+ Daddu(a1, fp,
+ Operand(-liftoff::GetStackSlotOffset(index) + kStackSlotSize));
+
+ Label loop;
+ bind(&loop);
+ Sd(zero_reg, MemOperand(a0));
+ daddiu(a0, a0, kSystemPointerSize);
+ BranchShort(&loop, ne, a0, Operand(a1));
+
+ Pop(a1, a0);
+ }
+}
+
void LiftoffAssembler::emit_i32_mul(Register dst, Register lhs, Register rhs) {
TurboAssembler::Mul(dst, lhs, rhs);
}
diff --git a/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h b/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
index a690a1c090..3b436a96d5 100644
--- a/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
+++ b/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
@@ -12,6 +12,49 @@ namespace v8 {
namespace internal {
namespace wasm {
+namespace liftoff {
+
+// half
+// slot Frame
+// -----+--------------------+---------------------------
+// n+3 | parameter n |
+// ... | ... |
+// 4 | parameter 1 | or parameter 2
+// 3 | parameter 0 | or parameter 1
+// 2 | (result address) | or parameter 0
+// -----+--------------------+---------------------------
+// 1 | return addr (lr) |
+// 0 | previous frame (fp)|
+// -----+--------------------+ <-- frame ptr (fp)
+// -1 | 0xa: WASM_COMPILED |
+// -2 | instance |
+// -----+--------------------+---------------------------
+// -3 | slot 0 (high) | ^
+// -4 | slot 0 (low) | |
+// -5 | slot 1 (high) | Frame slots
+// -6 | slot 1 (low) | |
+// | | v
+// -----+--------------------+ <-- stack ptr (sp)
+//
+
+constexpr int32_t kInstanceOffset = 2 * kSystemPointerSize;
+constexpr int32_t kFirstStackSlotOffset =
+ kInstanceOffset + 2 * kSystemPointerSize;
+
+inline int GetStackSlotOffset(uint32_t index) {
+ return kFirstStackSlotOffset + index * LiftoffAssembler::kStackSlotSize;
+}
+
+inline MemOperand GetHalfStackSlot(uint32_t index, RegPairHalf half) {
+ int32_t half_offset =
+ half == kLowWord ? 0 : LiftoffAssembler::kStackSlotSize / 2;
+ int32_t offset = kFirstStackSlotOffset +
+ index * LiftoffAssembler::kStackSlotSize - half_offset;
+ return MemOperand(fp, -offset);
+}
+
+} // namespace liftoff
+
int LiftoffAssembler::PrepareStackFrame() {
bailout(kUnsupportedArchitecture, "PrepareStackFrame");
return 0;
@@ -108,6 +151,45 @@ void LiftoffAssembler::FillI64Half(Register, uint32_t index, RegPairHalf) {
bailout(kUnsupportedArchitecture, "FillI64Half");
}
+void LiftoffAssembler::FillStackSlotsWithZero(uint32_t index, uint32_t count) {
+ DCHECK_LT(0, count);
+ uint32_t last_stack_slot = index + count - 1;
+ RecordUsedSpillSlot(last_stack_slot);
+
+ // We need a zero register. Always use r0 for that, and push it beforehand
+ // so that we can restore its value afterwards.
+ push(r0);
+ mov(r0, Operand(0));
+
+ if (count <= 5) {
+ // Special straight-line code for up to five slots. Generates two
+ // instructions per slot.
+ for (uint32_t offset = 0; offset < count; ++offset) {
+ StoreP(r0, liftoff::GetHalfStackSlot(index + offset, kLowWord));
+ StoreP(r0, liftoff::GetHalfStackSlot(index + offset, kHighWord));
+ }
+ } else {
+ // General case for bigger counts (10 instructions).
+ // Use r4 for start address (inclusive), r5 for end address (exclusive).
+ push(r4);
+ push(r5);
+ subi(r4, fp, Operand(liftoff::GetStackSlotOffset(last_stack_slot)));
+ subi(r5, fp, Operand(liftoff::GetStackSlotOffset(index) - kStackSlotSize));
+
+ Label loop;
+ bind(&loop);
+ StoreP(r0, MemOperand(r4));
+ addi(r4, r4, Operand(kSystemPointerSize));
+ cmp(r4, r5);
+ bne(&loop);
+
+ pop(r5);
+ pop(r4);
+ }
+
+ pop(r0);
+}
+
#define UNIMPLEMENTED_I32_BINOP(name) \
void LiftoffAssembler::emit_##name(Register dst, Register lhs, \
Register rhs) { \
diff --git a/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h b/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
index d17c7dada1..36267560dd 100644
--- a/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
+++ b/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
@@ -12,6 +12,48 @@ namespace v8 {
namespace internal {
namespace wasm {
+namespace liftoff {
+
+// half
+// slot Frame
+// -----+--------------------+---------------------------
+// n+3 | parameter n |
+// ... | ... |
+// 4 | parameter 1 | or parameter 2
+// 3 | parameter 0 | or parameter 1
+// 2 | (result address) | or parameter 0
+// -----+--------------------+---------------------------
+// 1 | return addr (lr) |
+// 0 | previous frame (fp)|
+// -----+--------------------+ <-- frame ptr (fp)
+// -1 | 0xa: WASM_COMPILED |
+// -2 | instance |
+// -----+--------------------+---------------------------
+// -3 | slot 0 (high) | ^
+// -4 | slot 0 (low) | |
+// -5 | slot 1 (high) | Frame slots
+// -6 | slot 1 (low) | |
+// | | v
+// -----+--------------------+ <-- stack ptr (sp)
+//
+constexpr int32_t kInstanceOffset = 2 * kSystemPointerSize;
+constexpr int32_t kFirstStackSlotOffset =
+ kInstanceOffset + 2 * kSystemPointerSize;
+
+inline int GetStackSlotOffset(uint32_t index) {
+ return kFirstStackSlotOffset + index * LiftoffAssembler::kStackSlotSize;
+}
+
+inline MemOperand GetHalfStackSlot(uint32_t index, RegPairHalf half) {
+ int32_t half_offset =
+ half == kLowWord ? 0 : LiftoffAssembler::kStackSlotSize / 2;
+ int32_t offset = kFirstStackSlotOffset +
+ index * LiftoffAssembler::kStackSlotSize - half_offset;
+ return MemOperand(fp, -offset);
+}
+
+} // namespace liftoff
+
int LiftoffAssembler::PrepareStackFrame() {
bailout(kUnsupportedArchitecture, "PrepareStackFrame");
return 0;
@@ -108,6 +150,45 @@ void LiftoffAssembler::FillI64Half(Register, uint32_t index, RegPairHalf) {
bailout(kUnsupportedArchitecture, "FillI64Half");
}
+void LiftoffAssembler::FillStackSlotsWithZero(uint32_t index, uint32_t count) {
+ DCHECK_LT(0, count);
+ uint32_t last_stack_slot = index + count - 1;
+ RecordUsedSpillSlot(last_stack_slot);
+
+ // We need a zero register. Always use r0 for that, and push it beforehand
+ // so that we can restore its value afterwards.
+ push(r0);
+ mov(r0, Operand(0));
+
+ if (count <= 5) {
+ // Special straight-line code for up to five slots. Generates two
+ // instructions per slot.
+ for (uint32_t offset = 0; offset < count; ++offset) {
+ StoreP(r0, liftoff::GetHalfStackSlot(index + offset, kLowWord));
+ StoreP(r0, liftoff::GetHalfStackSlot(index + offset, kHighWord));
+ }
+ } else {
+ // General case for bigger counts (10 instructions).
+ // Use r3 for start address (inclusive), r4 for end address (exclusive).
+ push(r3);
+ push(r4);
+ SubP(r3, fp, Operand(liftoff::GetStackSlotOffset(last_stack_slot)));
+ SubP(r4, fp, Operand(liftoff::GetStackSlotOffset(index) - kStackSlotSize));
+
+ Label loop;
+ bind(&loop);
+ StoreP(r0, MemOperand(r3));
+ la(r3, MemOperand(r3, kSystemPointerSize));
+ CmpLogicalP(r3, r4);
+ bne(&loop);
+
+ pop(r4);
+ pop(r3);
+ }
+
+ pop(r0);
+}
+
#define UNIMPLEMENTED_I32_BINOP(name) \
void LiftoffAssembler::emit_##name(Register dst, Register lhs, \
Register rhs) { \
diff --git a/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h b/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
index 43637985d0..f4185de070 100644
--- a/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
+++ b/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
@@ -47,7 +47,7 @@ inline Operand GetStackSlot(uint32_t index) {
return Operand(rbp, -kFirstStackSlotOffset - offset);
}
-// TODO(clemensh): Make this a constexpr variable once Operand is constexpr.
+// TODO(clemensb): Make this a constexpr variable once Operand is constexpr.
inline Operand GetInstanceOperand() { return Operand(rbp, -16); }
inline Operand GetMemOp(LiftoffAssembler* assm, Register addr, Register offset,
@@ -452,6 +452,35 @@ void LiftoffAssembler::FillI64Half(Register, uint32_t index, RegPairHalf) {
UNREACHABLE();
}
+void LiftoffAssembler::FillStackSlotsWithZero(uint32_t index, uint32_t count) {
+ DCHECK_LT(0, count);
+ uint32_t last_stack_slot = index + count - 1;
+ RecordUsedSpillSlot(last_stack_slot);
+
+ if (count <= 3) {
+ // Special straight-line code for up to three slots
+ // (7-10 bytes per slot: REX C7 <1-4 bytes op> <4 bytes imm>).
+ for (uint32_t offset = 0; offset < count; ++offset) {
+ movq(liftoff::GetStackSlot(index + offset), Immediate(0));
+ }
+ } else {
+ // General case for bigger counts.
+ // This sequence takes 20-23 bytes (3 for pushes, 4-7 for lea, 2 for xor, 5
+ // for mov, 3 for repstosq, 3 for pops).
+ // From the Intel manual: rep stosq fills RCX quadwords at [RDI] with RAX.
+ pushq(rax);
+ pushq(rcx);
+ pushq(rdi);
+ leaq(rdi, liftoff::GetStackSlot(last_stack_slot));
+ xorl(rax, rax);
+ movl(rcx, Immediate(count));
+ repstosq();
+ popq(rdi);
+ popq(rcx);
+ popq(rax);
+ }
+}
+
void LiftoffAssembler::emit_i32_add(Register dst, Register lhs, Register rhs) {
if (lhs != dst) {
leal(dst, Operand(lhs, rhs, times_1, 0));
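Both the ia32 and x64 variants lean on the string-store instruction for larger counts: after zeroing the accumulator and loading the element count, rep stos writes count elements starting at the destination pointer, advancing it as it goes. A host-side model of what "xorl(rax, rax); movl(rcx, Immediate(count)); repstosq()" computes, assuming the direction flag is clear (forward stores):

    #include <cstdint>

    // Models "rep stosq": write rcx zero quadwords at [rdi], post-incrementing
    // rdi. On ia32 the same idea runs with doublewords (rep stosd), which is
    // why that variant loads ecx with count * 2.
    void RepStosqModel(uint64_t* rdi, uint64_t rcx) {
      while (rcx--) *rdi++ = 0;  // rax holds the zero being stored
    }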
diff --git a/deps/v8/src/wasm/c-api.cc b/deps/v8/src/wasm/c-api.cc
index e812dd7994..31b68e9cdc 100644
--- a/deps/v8/src/wasm/c-api.cc
+++ b/deps/v8/src/wasm/c-api.cc
@@ -1692,17 +1692,17 @@ auto Global::type() const -> own<GlobalType> {
auto Global::get() const -> Val {
i::Handle<i::WasmGlobalObject> v8_global = impl(this)->v8_object();
- switch (type()->content()->kind()) {
- case I32:
+ switch (v8_global->type()) {
+ case i::wasm::kWasmI32:
return Val(v8_global->GetI32());
- case I64:
+ case i::wasm::kWasmI64:
return Val(v8_global->GetI64());
- case F32:
+ case i::wasm::kWasmF32:
return Val(v8_global->GetF32());
- case F64:
+ case i::wasm::kWasmF64:
return Val(v8_global->GetF64());
- case ANYREF:
- case FUNCREF: {
+ case i::wasm::kWasmAnyRef:
+ case i::wasm::kWasmFuncRef: {
StoreImpl* store = impl(this)->store();
i::HandleScope scope(store->i_isolate());
return Val(V8RefValueToWasm(store, v8_global->GetRef()));
@@ -1883,9 +1883,10 @@ auto Memory::make(Store* store_abs, const MemoryType* type) -> own<Memory> {
if (maximum < minimum) return nullptr;
if (maximum > i::wasm::kSpecMaxWasmMemoryPages) return nullptr;
}
- bool is_shared = false; // TODO(wasm+): Support shared memory.
+ // TODO(wasm+): Support shared memory.
+ i::SharedFlag shared = i::SharedFlag::kNotShared;
i::Handle<i::WasmMemoryObject> memory_obj;
- if (!i::WasmMemoryObject::New(isolate, minimum, maximum, is_shared)
+ if (!i::WasmMemoryObject::New(isolate, minimum, maximum, shared)
.ToHandle(&memory_obj)) {
return own<Memory>();
}
diff --git a/deps/v8/src/wasm/decoder.h b/deps/v8/src/wasm/decoder.h
index abb7b8ee86..71c06467f1 100644
--- a/deps/v8/src/wasm/decoder.h
+++ b/deps/v8/src/wasm/decoder.h
@@ -267,6 +267,12 @@ class Decoder {
}
const byte* end() const { return end_; }
+ // Check if the byte at {offset} from the current pc equals {expected}.
+ bool lookahead(int offset, byte expected) {
+ DCHECK_LE(pc_, end_);
+ return end_ - pc_ > offset && pc_[offset] == expected;
+ }
+
protected:
const byte* start_;
const byte* pc_;
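This lookahead helper is what makes the i32.eqz/br_if fusion in liftoff-compiler.cc safe: the compiler only defers the eqz if the very next byte is the br_if opcode. A hedged usage sketch, where the hypothetical helper assumes a Decoder positioned at the current opcode and 0x45/0x0D are the standard wasm encodings of i32.eqz and br_if:

    bool CurrentIsEqzFollowedByBrIf(Decoder* decoder) {
      // The internal bounds check (end_ - pc_ > offset) keeps both probes
      // safe near the end of the function body.
      return decoder->lookahead(0, 0x45 /* i32.eqz */) &&
             decoder->lookahead(1, 0x0D /* br_if */);
    }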
diff --git a/deps/v8/src/wasm/function-body-decoder-impl.h b/deps/v8/src/wasm/function-body-decoder-impl.h
index 582934e19f..1f29571e40 100644
--- a/deps/v8/src/wasm/function-body-decoder-impl.h
+++ b/deps/v8/src/wasm/function-body-decoder-impl.h
@@ -714,12 +714,12 @@ struct ControlBase {
F(RefFunc, uint32_t function_index, Value* result) \
F(Drop, const Value& value) \
F(DoReturn, Vector<Value> values) \
- F(GetLocal, Value* result, const LocalIndexImmediate<validate>& imm) \
- F(SetLocal, const Value& value, const LocalIndexImmediate<validate>& imm) \
- F(TeeLocal, const Value& value, Value* result, \
+ F(LocalGet, Value* result, const LocalIndexImmediate<validate>& imm) \
+ F(LocalSet, const Value& value, const LocalIndexImmediate<validate>& imm) \
+ F(LocalTee, const Value& value, Value* result, \
const LocalIndexImmediate<validate>& imm) \
- F(GetGlobal, Value* result, const GlobalIndexImmediate<validate>& imm) \
- F(SetGlobal, const Value& value, const GlobalIndexImmediate<validate>& imm) \
+ F(GlobalGet, Value* result, const GlobalIndexImmediate<validate>& imm) \
+ F(GlobalSet, const Value& value, const GlobalIndexImmediate<validate>& imm) \
F(TableGet, const Value& index, Value* result, \
const TableIndexImmediate<validate>& imm) \
F(TableSet, const Value& index, const Value& value, \
@@ -910,8 +910,8 @@ class WasmDecoder : public Decoder {
length = OpcodeLength(decoder, pc);
depth++;
break;
- case kExprSetLocal: // fallthru
- case kExprTeeLocal: {
+ case kExprLocalSet: // fallthru
+ case kExprLocalTee: {
LocalIndexImmediate<validate> imm(decoder, pc);
if (assigned->length() > 0 &&
imm.index < static_cast<uint32_t>(assigned->length())) {
@@ -1045,8 +1045,8 @@ class WasmDecoder : public Decoder {
bool Validate(const byte* pc, BranchTableImmediate<validate>& imm,
size_t block_depth) {
- if (!VALIDATE(imm.table_count < kV8MaxWasmFunctionSize)) {
- errorf(pc + 1, "invalid table count (> max function size): %u",
+ if (!VALIDATE(imm.table_count <= kV8MaxWasmFunctionBrTableSize)) {
+ errorf(pc + 1, "invalid table count (> max br_table size): %u",
imm.table_count);
return false;
}
@@ -1069,11 +1069,13 @@ class WasmDecoder : public Decoder {
case kExprI32x4ReplaceLane:
num_lanes = 4;
break;
- case kExprI16x8ExtractLane:
+ case kExprI16x8ExtractLaneS:
+ case kExprI16x8ExtractLaneU:
case kExprI16x8ReplaceLane:
num_lanes = 8;
break;
- case kExprI8x16ExtractLane:
+ case kExprI8x16ExtractLaneS:
+ case kExprI8x16ExtractLaneU:
case kExprI8x16ReplaceLane:
num_lanes = 16;
break;
@@ -1252,8 +1254,8 @@ class WasmDecoder : public Decoder {
BranchDepthImmediate<validate> imm(decoder, pc);
return 1 + imm.length;
}
- case kExprGetGlobal:
- case kExprSetGlobal: {
+ case kExprGlobalGet:
+ case kExprGlobalSet: {
GlobalIndexImmediate<validate> imm(decoder, pc);
return 1 + imm.length;
}
@@ -1291,9 +1293,9 @@ class WasmDecoder : public Decoder {
return 1 + imm.length;
}
- case kExprSetLocal:
- case kExprTeeLocal:
- case kExprGetLocal: {
+ case kExprLocalGet:
+ case kExprLocalSet:
+ case kExprLocalTee: {
LocalIndexImmediate<validate> imm(decoder, pc);
return 1 + imm.length;
}
@@ -1458,19 +1460,19 @@ class WasmDecoder : public Decoder {
return {2, 0};
FOREACH_LOAD_MEM_OPCODE(DECLARE_OPCODE_CASE)
case kExprTableGet:
- case kExprTeeLocal:
+ case kExprLocalTee:
case kExprMemoryGrow:
return {1, 1};
- case kExprSetLocal:
- case kExprSetGlobal:
+ case kExprLocalSet:
+ case kExprGlobalSet:
case kExprDrop:
case kExprBrIf:
case kExprBrTable:
case kExprIf:
case kExprRethrow:
return {1, 0};
- case kExprGetLocal:
- case kExprGetGlobal:
+ case kExprLocalGet:
+ case kExprGlobalGet:
case kExprI32Const:
case kExprI64Const:
case kExprF32Const:
@@ -2125,28 +2127,28 @@ class WasmFullDecoder : public WasmDecoder<validate> {
len = 1 + imm.length;
break;
}
- case kExprGetLocal: {
+ case kExprLocalGet: {
LocalIndexImmediate<validate> imm(this, this->pc_);
if (!this->Validate(this->pc_, imm)) break;
auto* value = Push(imm.type);
- CALL_INTERFACE_IF_REACHABLE(GetLocal, value, imm);
+ CALL_INTERFACE_IF_REACHABLE(LocalGet, value, imm);
len = 1 + imm.length;
break;
}
- case kExprSetLocal: {
+ case kExprLocalSet: {
LocalIndexImmediate<validate> imm(this, this->pc_);
if (!this->Validate(this->pc_, imm)) break;
auto value = Pop(0, local_type_vec_[imm.index]);
- CALL_INTERFACE_IF_REACHABLE(SetLocal, value, imm);
+ CALL_INTERFACE_IF_REACHABLE(LocalSet, value, imm);
len = 1 + imm.length;
break;
}
- case kExprTeeLocal: {
+ case kExprLocalTee: {
LocalIndexImmediate<validate> imm(this, this->pc_);
if (!this->Validate(this->pc_, imm)) break;
auto value = Pop(0, local_type_vec_[imm.index]);
auto* result = Push(value.type);
- CALL_INTERFACE_IF_REACHABLE(TeeLocal, value, result, imm);
+ CALL_INTERFACE_IF_REACHABLE(LocalTee, value, result, imm);
len = 1 + imm.length;
break;
}
@@ -2155,15 +2157,15 @@ class WasmFullDecoder : public WasmDecoder<validate> {
CALL_INTERFACE_IF_REACHABLE(Drop, value);
break;
}
- case kExprGetGlobal: {
+ case kExprGlobalGet: {
GlobalIndexImmediate<validate> imm(this, this->pc_);
len = 1 + imm.length;
if (!this->Validate(this->pc_, imm)) break;
auto* result = Push(imm.type);
- CALL_INTERFACE_IF_REACHABLE(GetGlobal, result, imm);
+ CALL_INTERFACE_IF_REACHABLE(GlobalGet, result, imm);
break;
}
- case kExprSetGlobal: {
+ case kExprGlobalSet: {
GlobalIndexImmediate<validate> imm(this, this->pc_);
len = 1 + imm.length;
if (!this->Validate(this->pc_, imm)) break;
@@ -2173,7 +2175,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
break;
}
auto value = Pop(0, imm.type);
- CALL_INTERFACE_IF_REACHABLE(SetGlobal, value, imm);
+ CALL_INTERFACE_IF_REACHABLE(GlobalSet, value, imm);
break;
}
case kExprTableGet: {
@@ -2447,15 +2449,15 @@ class WasmFullDecoder : public WasmDecoder<validate> {
TRACE_PART("[%d]", imm.value);
break;
}
- case kExprGetLocal:
- case kExprSetLocal:
- case kExprTeeLocal: {
+ case kExprLocalGet:
+ case kExprLocalSet:
+ case kExprLocalTee: {
LocalIndexImmediate<Decoder::kNoValidate> imm(this, val.pc);
TRACE_PART("[%u]", imm.index);
break;
}
- case kExprGetGlobal:
- case kExprSetGlobal: {
+ case kExprGlobalGet:
+ case kExprGlobalSet: {
GlobalIndexImmediate<Decoder::kNoValidate> imm(this, val.pc);
TRACE_PART("[%u]", imm.index);
break;
@@ -2700,8 +2702,10 @@ class WasmFullDecoder : public WasmDecoder<validate> {
break;
}
case kExprI32x4ExtractLane:
- case kExprI16x8ExtractLane:
- case kExprI8x16ExtractLane: {
+ case kExprI16x8ExtractLaneS:
+ case kExprI16x8ExtractLaneU:
+ case kExprI8x16ExtractLaneS:
+ case kExprI8x16ExtractLaneU: {
len = SimdExtractLane(opcode, kWasmI32);
break;
}
diff --git a/deps/v8/src/wasm/function-compiler.cc b/deps/v8/src/wasm/function-compiler.cc
index 4940134d53..e89c31d729 100644
--- a/deps/v8/src/wasm/function-compiler.cc
+++ b/deps/v8/src/wasm/function-compiler.cc
@@ -49,7 +49,7 @@ class WasmInstructionBufferImpl {
holder_->old_buffer_ = std::move(holder_->buffer_);
holder_->buffer_ = OwnedVector<uint8_t>::New(new_size);
- return base::make_unique<View>(holder_->buffer_.as_vector(), holder_);
+ return std::make_unique<View>(holder_->buffer_.as_vector(), holder_);
}
private:
@@ -59,7 +59,7 @@ class WasmInstructionBufferImpl {
std::unique_ptr<AssemblerBuffer> CreateView() {
DCHECK_NOT_NULL(buffer_);
- return base::make_unique<View>(buffer_.as_vector(), this);
+ return std::make_unique<View>(buffer_.as_vector(), this);
}
std::unique_ptr<uint8_t[]> ReleaseBuffer() {
@@ -170,7 +170,7 @@ WasmCompilationResult WasmCompilationUnit::ExecuteFunctionCompilation(
TimedHistogramScope wasm_compile_function_time_scope(timed_histogram);
if (FLAG_trace_wasm_compiler) {
- PrintF("Compiling wasm function %d with %s\n\n", func_index_,
+ PrintF("Compiling wasm function %d with %s\n", func_index_,
ExecutionTierToString(tier_));
}
diff --git a/deps/v8/src/wasm/function-compiler.h b/deps/v8/src/wasm/function-compiler.h
index 2da028a047..bdebfebe14 100644
--- a/deps/v8/src/wasm/function-compiler.h
+++ b/deps/v8/src/wasm/function-compiler.h
@@ -5,6 +5,8 @@
#ifndef V8_WASM_FUNCTION_COMPILER_H_
#define V8_WASM_FUNCTION_COMPILER_H_
+#include <memory>
+
#include "src/codegen/code-desc.h"
#include "src/trap-handler/trap-handler.h"
#include "src/wasm/compilation-environment.h"
diff --git a/deps/v8/src/wasm/graph-builder-interface.cc b/deps/v8/src/wasm/graph-builder-interface.cc
index 923e1154ea..b08aa9215e 100644
--- a/deps/v8/src/wasm/graph-builder-interface.cc
+++ b/deps/v8/src/wasm/graph-builder-interface.cc
@@ -166,7 +166,8 @@ class WasmGraphBuildingInterface {
// Wrap input merge into phis.
for (uint32_t i = 0; i < block->start_merge.arity; ++i) {
Value& val = block->start_merge[i];
- val.node = builder_->Phi(val.type, 1, &val.node, block->end_env->control);
+ TFNode* inputs[] = {val.node, block->end_env->control};
+ val.node = builder_->Phi(val.type, 1, inputs);
}
}
@@ -212,7 +213,10 @@ class WasmGraphBuildingInterface {
if (block->is_onearmed_if()) {
// Merge the else branch into the end merge.
SetEnv(block->false_env);
- MergeValuesInto(decoder, block, &block->end_merge);
+ DCHECK_EQ(block->start_merge.arity, block->end_merge.arity);
+ Value* values =
+ block->start_merge.arity > 0 ? &block->start_merge[0] : nullptr;
+ MergeValuesInto(decoder, block, &block->end_merge, values);
}
// Now continue with the merged environment.
SetEnv(block->end_env);
@@ -258,37 +262,38 @@ class WasmGraphBuildingInterface {
void Drop(FullDecoder* decoder, const Value& value) {}
void DoReturn(FullDecoder* decoder, Vector<Value> values) {
- Vector<TFNode*> nodes = GetNodes(values);
- BUILD(Return, nodes);
+ base::SmallVector<TFNode*, 8> nodes(values.size());
+ GetNodes(nodes.begin(), values);
+ BUILD(Return, VectorOf(nodes));
}
- void GetLocal(FullDecoder* decoder, Value* result,
+ void LocalGet(FullDecoder* decoder, Value* result,
const LocalIndexImmediate<validate>& imm) {
if (!ssa_env_->locals) return; // unreachable
result->node = ssa_env_->locals[imm.index];
}
- void SetLocal(FullDecoder* decoder, const Value& value,
+ void LocalSet(FullDecoder* decoder, const Value& value,
const LocalIndexImmediate<validate>& imm) {
if (!ssa_env_->locals) return; // unreachable
ssa_env_->locals[imm.index] = value.node;
}
- void TeeLocal(FullDecoder* decoder, const Value& value, Value* result,
+ void LocalTee(FullDecoder* decoder, const Value& value, Value* result,
const LocalIndexImmediate<validate>& imm) {
result->node = value.node;
if (!ssa_env_->locals) return; // unreachable
ssa_env_->locals[imm.index] = value.node;
}
- void GetGlobal(FullDecoder* decoder, Value* result,
+ void GlobalGet(FullDecoder* decoder, Value* result,
const GlobalIndexImmediate<validate>& imm) {
- result->node = BUILD(GetGlobal, imm.index);
+ result->node = BUILD(GlobalGet, imm.index);
}
- void SetGlobal(FullDecoder* decoder, const Value& value,
+ void GlobalSet(FullDecoder* decoder, const Value& value,
const GlobalIndexImmediate<validate>& imm) {
- BUILD(SetGlobal, imm.index, value.node);
+ BUILD(GlobalSet, imm.index, value.node);
}
void TableGet(FullDecoder* decoder, const Value& index, Value* result,
@@ -310,8 +315,8 @@ class WasmGraphBuildingInterface {
TFNode* controls[2];
BUILD(BranchNoHint, cond.node, &controls[0], &controls[1]);
TFNode* merge = BUILD(Merge, 2, controls);
- TFNode* vals[2] = {tval.node, fval.node};
- TFNode* phi = BUILD(Phi, tval.type, 2, vals, merge);
+ TFNode* inputs[] = {tval.node, fval.node, merge};
+ TFNode* phi = BUILD(Phi, tval.type, 2, inputs);
result->node = phi;
ssa_env_->control = merge;
}
@@ -319,10 +324,11 @@ class WasmGraphBuildingInterface {
void BrOrRet(FullDecoder* decoder, uint32_t depth) {
if (depth == decoder->control_depth() - 1) {
uint32_t ret_count = static_cast<uint32_t>(decoder->sig_->return_count());
- Vector<TFNode*> values =
- ret_count == 0 ? Vector<TFNode*>{}
- : GetNodes(decoder->stack_value(ret_count), ret_count);
- BUILD(Return, values);
+ base::SmallVector<TFNode*, 8> values(ret_count);
+ if (ret_count > 0) {
+ GetNodes(values.begin(), decoder->stack_value(ret_count), ret_count);
+ }
+ BUILD(Return, VectorOf(values));
} else {
Br(decoder, decoder->control_at(depth));
}
@@ -431,7 +437,8 @@ class WasmGraphBuildingInterface {
void SimdOp(FullDecoder* decoder, WasmOpcode opcode, Vector<Value> args,
Value* result) {
- Vector<TFNode*> inputs = GetNodes(args);
+ base::SmallVector<TFNode*, 8> inputs(args.size());
+ GetNodes(inputs.begin(), args);
TFNode* node = BUILD(SimdOp, opcode, inputs.begin());
if (result) result->node = node;
}
@@ -439,7 +446,8 @@ class WasmGraphBuildingInterface {
void SimdLaneOp(FullDecoder* decoder, WasmOpcode opcode,
const SimdLaneImmediate<validate> imm, Vector<Value> inputs,
Value* result) {
- Vector<TFNode*> nodes = GetNodes(inputs);
+ base::SmallVector<TFNode*, 8> nodes(inputs.size());
+ GetNodes(nodes.begin(), inputs);
result->node = BUILD(SimdLaneOp, opcode, imm.lane, nodes.begin());
}
@@ -486,12 +494,11 @@ class WasmGraphBuildingInterface {
// If the tags match we extract the values from the exception object and
// push them onto the operand stack using the passed {values} vector.
SetEnv(if_match_env);
- // TODO(mstarzinger): Can't use BUILD() here, GetExceptionValues() returns
- // TFNode** rather than TFNode*. Fix to add landing pads.
- Vector<TFNode*> caught_values =
- builder_->GetExceptionValues(exception.node, imm.exception);
+ base::SmallVector<TFNode*, 8> caught_values(values.size());
+ Vector<TFNode*> caught_vector = VectorOf(caught_values);
+ BUILD(GetExceptionValues, exception.node, imm.exception, caught_vector);
for (size_t i = 0, e = values.size(); i < e; ++i) {
- values[i].node = caught_values[i];
+ values[i].node = caught_vector[i];
}
BrOrRet(decoder, depth);
@@ -519,7 +526,8 @@ class WasmGraphBuildingInterface {
void AtomicOp(FullDecoder* decoder, WasmOpcode opcode, Vector<Value> args,
const MemoryAccessImmediate<validate>& imm, Value* result) {
- Vector<TFNode*> inputs = GetNodes(args);
+ base::SmallVector<TFNode*, 8> inputs(args.size());
+ GetNodes(inputs.begin(), args);
TFNode* node = BUILD(AtomicOp, opcode, inputs.begin(), imm.alignment,
imm.offset, decoder->position());
if (result) result->node = node;
@@ -591,16 +599,14 @@ class WasmGraphBuildingInterface {
->try_info;
}
- Vector<TFNode*> GetNodes(Value* values, size_t count) {
- Vector<TFNode*> nodes = builder_->Buffer(count);
+ void GetNodes(TFNode** nodes, Value* values, size_t count) {
for (size_t i = 0; i < count; ++i) {
nodes[i] = values[i].node;
}
- return nodes;
}
- Vector<TFNode*> GetNodes(Vector<Value> values) {
- return GetNodes(values.begin(), values.size());
+ void GetNodes(TFNode** nodes, Vector<Value> values) {
+ GetNodes(nodes, values.begin(), values.size());
}
void SetEnv(SsaEnv* env) {
@@ -656,10 +662,10 @@ class WasmGraphBuildingInterface {
SsaEnv* exception_env = Split(decoder, success_env);
exception_env->control = if_exception;
+ exception_env->effect = if_exception;
TryInfo* try_info = current_try_info(decoder);
Goto(decoder, exception_env, try_info->catch_env);
- TFNode* exception = try_info->exception;
- if (exception == nullptr) {
+ if (try_info->exception == nullptr) {
DCHECK_EQ(SsaEnv::kReached, try_info->catch_env->state);
try_info->exception = if_exception;
} else {
@@ -694,7 +700,8 @@ class WasmGraphBuildingInterface {
}
}
- void MergeValuesInto(FullDecoder* decoder, Control* c, Merge<Value>* merge) {
+ void MergeValuesInto(FullDecoder* decoder, Control* c, Merge<Value>* merge,
+ Value* values) {
DCHECK(merge == &c->start_merge || merge == &c->end_merge);
SsaEnv* target = c->end_env;
@@ -703,13 +710,8 @@ class WasmGraphBuildingInterface {
if (merge->arity == 0) return;
- uint32_t avail =
- decoder->stack_size() - decoder->control_at(0)->stack_depth;
- DCHECK_GE(avail, merge->arity);
- uint32_t start = avail >= merge->arity ? 0 : merge->arity - avail;
- Value* stack_values = decoder->stack_value(merge->arity);
- for (uint32_t i = start; i < merge->arity; ++i) {
- Value& val = stack_values[i];
+ for (uint32_t i = 0; i < merge->arity; ++i) {
+ Value& val = values[i];
Value& old = (*merge)[i];
DCHECK_NOT_NULL(val.node);
DCHECK(val.type == kWasmBottom ||
@@ -722,6 +724,17 @@ class WasmGraphBuildingInterface {
}
}
+ void MergeValuesInto(FullDecoder* decoder, Control* c, Merge<Value>* merge) {
+#ifdef DEBUG
+ uint32_t avail =
+ decoder->stack_size() - decoder->control_at(0)->stack_depth;
+ DCHECK_GE(avail, merge->arity);
+#endif
+ Value* stack_values =
+ merge->arity > 0 ? decoder->stack_value(merge->arity) : nullptr;
+ MergeValuesInto(decoder, c, merge, stack_values);
+ }
+
void Goto(FullDecoder* decoder, SsaEnv* from, SsaEnv* to) {
DCHECK_NOT_NULL(to);
switch (to->state) {
@@ -741,17 +754,16 @@ class WasmGraphBuildingInterface {
to->control = merge;
// Merge effects.
if (from->effect != to->effect) {
- TFNode* effects[] = {to->effect, from->effect, merge};
- to->effect = builder_->EffectPhi(2, effects, merge);
+ TFNode* inputs[] = {to->effect, from->effect, merge};
+ to->effect = builder_->EffectPhi(2, inputs);
}
// Merge SSA values.
for (int i = decoder->num_locals() - 1; i >= 0; i--) {
TFNode* a = to->locals[i];
TFNode* b = from->locals[i];
if (a != b) {
- TFNode* vals[] = {a, b};
- to->locals[i] =
- builder_->Phi(decoder->GetLocalType(i), 2, vals, merge);
+ TFNode* inputs[] = {a, b, merge};
+ to->locals[i] = builder_->Phi(decoder->GetLocalType(i), 2, inputs);
}
}
// Start a new merge from the instance cache.
@@ -787,7 +799,8 @@ class WasmGraphBuildingInterface {
env->state = SsaEnv::kMerged;
env->control = builder_->Loop(env->control);
- env->effect = builder_->EffectPhi(1, &env->effect, env->control);
+ TFNode* effect_inputs[] = {env->effect, env->control};
+ env->effect = builder_->EffectPhi(1, effect_inputs);
builder_->TerminateLoop(env->effect, env->control);
// The '+ 1' here is to be able to set the instance cache as assigned.
BitVector* assigned = WasmDecoder<validate>::AnalyzeLoopAssignment(
@@ -798,8 +811,8 @@ class WasmGraphBuildingInterface {
int instance_cache_index = decoder->total_locals();
for (int i = decoder->num_locals() - 1; i >= 0; i--) {
if (!assigned->Contains(i)) continue;
- env->locals[i] = builder_->Phi(decoder->GetLocalType(i), 1,
- &env->locals[i], env->control);
+ TFNode* inputs[] = {env->locals[i], env->control};
+ env->locals[i] = builder_->Phi(decoder->GetLocalType(i), 1, inputs);
}
// Introduce phis for instance cache pointers if necessary.
if (assigned->Contains(instance_cache_index)) {
@@ -815,8 +828,8 @@ class WasmGraphBuildingInterface {
// Conservatively introduce phis for all local variables.
for (int i = decoder->num_locals() - 1; i >= 0; i--) {
- env->locals[i] = builder_->Phi(decoder->GetLocalType(i), 1,
- &env->locals[i], env->control);
+ TFNode* inputs[] = {env->locals[i], env->control};
+ env->locals[i] = builder_->Phi(decoder->GetLocalType(i), 1, inputs);
}
// Conservatively introduce phis for instance cache.
@@ -877,22 +890,22 @@ class WasmGraphBuildingInterface {
void DoCall(FullDecoder* decoder, uint32_t table_index, TFNode* index_node,
FunctionSig* sig, uint32_t sig_index, const Value args[],
Value returns[]) {
- int param_count = static_cast<int>(sig->parameter_count());
- Vector<TFNode*> arg_nodes = builder_->Buffer(param_count + 1);
- TFNode** return_nodes = nullptr;
+ size_t param_count = sig->parameter_count();
+ size_t return_count = sig->return_count();
+ base::SmallVector<TFNode*, 16> arg_nodes(param_count + 1);
+ base::SmallVector<TFNode*, 1> return_nodes(return_count);
arg_nodes[0] = index_node;
- for (int i = 0; i < param_count; ++i) {
+ for (size_t i = 0; i < param_count; ++i) {
arg_nodes[i + 1] = args[i].node;
}
if (index_node) {
- BUILD(CallIndirect, table_index, sig_index, arg_nodes.begin(),
- &return_nodes, decoder->position());
+ BUILD(CallIndirect, table_index, sig_index, VectorOf(arg_nodes),
+ VectorOf(return_nodes), decoder->position());
} else {
- BUILD(CallDirect, sig_index, arg_nodes.begin(), &return_nodes,
+ BUILD(CallDirect, sig_index, VectorOf(arg_nodes), VectorOf(return_nodes),
decoder->position());
}
- int return_count = static_cast<int>(sig->return_count());
- for (int i = 0; i < return_count; ++i) {
+ for (size_t i = 0; i < return_count; ++i) {
returns[i].node = return_nodes[i];
}
// The invoked function could have used grow_memory, so we need to
@@ -903,17 +916,17 @@ class WasmGraphBuildingInterface {
void DoReturnCall(FullDecoder* decoder, uint32_t table_index,
TFNode* index_node, FunctionSig* sig, uint32_t sig_index,
const Value args[]) {
- int arg_count = static_cast<int>(sig->parameter_count());
- Vector<TFNode*> arg_nodes = builder_->Buffer(arg_count + 1);
+ size_t arg_count = sig->parameter_count();
+ base::SmallVector<TFNode*, 16> arg_nodes(arg_count + 1);
arg_nodes[0] = index_node;
- for (int i = 0; i < arg_count; ++i) {
+ for (size_t i = 0; i < arg_count; ++i) {
arg_nodes[i + 1] = args[i].node;
}
if (index_node) {
- BUILD(ReturnCallIndirect, table_index, sig_index, arg_nodes.begin(),
+ BUILD(ReturnCallIndirect, table_index, sig_index, VectorOf(arg_nodes),
decoder->position());
} else {
- BUILD(ReturnCall, sig_index, arg_nodes.begin(), decoder->position());
+ BUILD(ReturnCall, sig_index, VectorOf(arg_nodes), decoder->position());
}
}
};
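
Editor's note: the interface changes above switch {Phi} to take the control node as the last element of the {inputs} array instead of a separate argument, and replace the builder-allocated {Buffer} with {base::SmallVector}, which keeps typical argument lists on the stack. A minimal, self-contained sketch of the new calling convention; {TFNode} and {Phi} here are simplified stand-ins, not the real TurboFan API:

    #include <cassert>

    struct TFNode {};

    // Sketch only: mirrors the convention where the control node is appended
    // as the last element of {inputs}, at index {count}.
    TFNode* Phi(int /*type*/, int count, TFNode** inputs_and_control) {
      TFNode* control = inputs_and_control[count];  // control sits at index {count}
      assert(control != nullptr);
      return inputs_and_control[0];  // placeholder result node
    }

    int main() {
      TFNode a, b, merge;
      TFNode* inputs[] = {&a, &b, &merge};  // two values + control, as in the patch
      Phi(/*type=*/0, /*count=*/2, inputs);
    }
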
diff --git a/deps/v8/src/wasm/jump-table-assembler.cc b/deps/v8/src/wasm/jump-table-assembler.cc
index 7c41c0a209..adb7e19158 100644
--- a/deps/v8/src/wasm/jump-table-assembler.cc
+++ b/deps/v8/src/wasm/jump-table-assembler.cc
@@ -21,17 +21,37 @@ void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
EmitJumpSlot(lazy_compile_target); // 5 bytes
}
-void JumpTableAssembler::EmitRuntimeStubSlot(Address builtin_target) {
- JumpToInstructionStream(builtin_target);
-}
-
-void JumpTableAssembler::EmitJumpSlot(Address target) {
- // On x64, all code is allocated within a single code section, so we can use
- // relative jumps.
- static_assert(kMaxWasmCodeMemory <= size_t{2} * GB, "can use relative jump");
+bool JumpTableAssembler::EmitJumpSlot(Address target) {
intptr_t displacement = static_cast<intptr_t>(
reinterpret_cast<byte*>(target) - pc_ - kNearJmpInstrSize);
- near_jmp(displacement, RelocInfo::NONE);
+ if (!is_int32(displacement)) return false;
+ near_jmp(displacement, RelocInfo::NONE); // 5 bytes
+ return true;
+}
+
+void JumpTableAssembler::EmitFarJumpSlot(Address target) {
+ Label data;
+ int start_offset = pc_offset();
+ jmp(Operand(&data)); // 6 bytes
+ Nop(2); // 2 bytes
+ // The data must be properly aligned, so it can be patched atomically (see
+ // {PatchFarJumpSlot}).
+ DCHECK_EQ(start_offset + kSystemPointerSize, pc_offset());
+ USE(start_offset);
+ bind(&data);
+ dq(target); // 8 bytes
+}
+
+// static
+void JumpTableAssembler::PatchFarJumpSlot(Address slot, Address target) {
+ // The slot needs to be pointer-size aligned so we can atomically update it.
+ DCHECK(IsAligned(slot, kSystemPointerSize));
+  // The target address is stored at offset 8 bytes, see {EmitFarJumpSlot}.
+ reinterpret_cast<std::atomic<Address>*>(slot + kSystemPointerSize)
+ ->store(target, std::memory_order_relaxed);
+ // The update is atomic because the address is properly aligned.
+ // Because of cache coherence, the data update will eventually be seen by all
+ // cores. It's ok if they temporarily jump to the old target.
}
void JumpTableAssembler::NopBytes(int bytes) {
@@ -46,14 +66,20 @@ void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
jmp(lazy_compile_target, RelocInfo::NONE); // 5 bytes
}
-void JumpTableAssembler::EmitRuntimeStubSlot(Address builtin_target) {
- JumpToInstructionStream(builtin_target);
+bool JumpTableAssembler::EmitJumpSlot(Address target) {
+ jmp(target, RelocInfo::NONE);
+ return true;
}
-void JumpTableAssembler::EmitJumpSlot(Address target) {
+void JumpTableAssembler::EmitFarJumpSlot(Address target) {
jmp(target, RelocInfo::NONE);
}
+// static
+void JumpTableAssembler::PatchFarJumpSlot(Address slot, Address target) {
+ UNREACHABLE();
+}
+
void JumpTableAssembler::NopBytes(int bytes) {
DCHECK_LE(0, bytes);
Nop(bytes);
@@ -74,16 +100,26 @@ void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
EmitJumpSlot(lazy_compile_target);
}
-void JumpTableAssembler::EmitRuntimeStubSlot(Address builtin_target) {
- JumpToInstructionStream(builtin_target);
- CheckConstPool(true, false); // force emit of const pool
-}
-
-void JumpTableAssembler::EmitJumpSlot(Address target) {
+bool JumpTableAssembler::EmitJumpSlot(Address target) {
// Note that {Move32BitImmediate} emits [ldr, constant] for the relocation
  // mode used below; we need this to allow concurrent patching of this slot.
Move32BitImmediate(pc, Operand(target, RelocInfo::WASM_CALL));
CheckConstPool(true, false); // force emit of const pool
+ return true;
+}
+
+void JumpTableAssembler::EmitFarJumpSlot(Address target) {
+ // Load from [pc + kInstrSize] to pc. Note that {pc} points two instructions
+ // after the currently executing one.
+ ldr_pcrel(pc, -kInstrSize); // 1 instruction
+ dd(target); // 4 bytes (== 1 instruction)
+ STATIC_ASSERT(kInstrSize == kInt32Size);
+ STATIC_ASSERT(kFarJumpTableSlotSize == 2 * kInstrSize);
+}
+
+// static
+void JumpTableAssembler::PatchFarJumpSlot(Address slot, Address target) {
+ UNREACHABLE();
}
void JumpTableAssembler::NopBytes(int bytes) {
@@ -105,19 +141,43 @@ void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
if (nop_bytes) nop();
}
-void JumpTableAssembler::EmitRuntimeStubSlot(Address builtin_target) {
- JumpToInstructionStream(builtin_target);
- ForceConstantPoolEmissionWithoutJump();
+bool JumpTableAssembler::EmitJumpSlot(Address target) {
+ if (!TurboAssembler::IsNearCallOffset(
+ (reinterpret_cast<byte*>(target) - pc_) / kInstrSize)) {
+ return false;
+ }
+
+ Jump(target, RelocInfo::NONE);
+ return true;
}
-void JumpTableAssembler::EmitJumpSlot(Address target) {
- // TODO(wasm): Currently this is guaranteed to be a {near_call} and hence is
- // patchable concurrently. Once {kMaxWasmCodeMemory} is raised on ARM64, make
- // sure concurrent patching is still supported.
- DCHECK(TurboAssembler::IsNearCallOffset(
- (reinterpret_cast<byte*>(target) - pc_) / kInstrSize));
+void JumpTableAssembler::EmitFarJumpSlot(Address target) {
+ // This code uses hard-coded registers and instructions (and avoids
+  // {UseScratchRegisterScope} or {InstructionAccurateScope}) because it is
+  // only called for the very specific runtime slot table, and we want maximum
+  // control over the generated code.
+ // Do not reuse this code without validating that the same assumptions hold.
+ constexpr Register kTmpReg = x16;
+ DCHECK(TmpList()->IncludesAliasOf(kTmpReg));
+ // Load from [pc + 2 * kInstrSize] to {kTmpReg}, then branch there.
+ ldr_pcrel(kTmpReg, 2); // 1 instruction
+ br(kTmpReg); // 1 instruction
+ dq(target); // 8 bytes (== 2 instructions)
+ STATIC_ASSERT(2 * kInstrSize == kSystemPointerSize);
+ STATIC_ASSERT(kFarJumpTableSlotSize == 4 * kInstrSize);
+}
- Jump(target, RelocInfo::NONE);
+// static
+void JumpTableAssembler::PatchFarJumpSlot(Address slot, Address target) {
+ // The slot needs to be pointer-size aligned so we can atomically update it.
+ DCHECK(IsAligned(slot, kSystemPointerSize));
+  // The target address is stored at offset 8 bytes, see {EmitFarJumpSlot}.
+ reinterpret_cast<std::atomic<Address>*>(slot + kSystemPointerSize)
+ ->store(target, std::memory_order_relaxed);
+  // The data update is guaranteed to be atomic since it is a properly aligned
+  // store of a single machine word. This update will eventually be observed
+  // by any concurrent [ldr] on the same address because of data cache
+  // coherence. It's ok if other cores temporarily jump to the old target.
}
void JumpTableAssembler::NopBytes(int bytes) {
@@ -138,13 +198,19 @@ void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
b(r1); // 2 bytes
}
-void JumpTableAssembler::EmitRuntimeStubSlot(Address builtin_target) {
- JumpToInstructionStream(builtin_target);
-}
-
-void JumpTableAssembler::EmitJumpSlot(Address target) {
+bool JumpTableAssembler::EmitJumpSlot(Address target) {
mov(r1, Operand(target));
b(r1);
+ return true;
+}
+
+void JumpTableAssembler::EmitFarJumpSlot(Address target) {
+ JumpToInstructionStream(target);
+}
+
+// static
+void JumpTableAssembler::PatchFarJumpSlot(Address slot, Address target) {
+ UNREACHABLE();
}
void JumpTableAssembler::NopBytes(int bytes) {
@@ -168,12 +234,18 @@ void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
for (int i = 0; i < nop_bytes; i += kInstrSize) nop();
}
-void JumpTableAssembler::EmitRuntimeStubSlot(Address builtin_target) {
- JumpToInstructionStream(builtin_target);
+bool JumpTableAssembler::EmitJumpSlot(Address target) {
+ Jump(target, RelocInfo::NONE);
+ return true;
}
-void JumpTableAssembler::EmitJumpSlot(Address target) {
- Jump(target, RelocInfo::NONE);
+void JumpTableAssembler::EmitFarJumpSlot(Address target) {
+ JumpToInstructionStream(target);
+}
+
+// static
+void JumpTableAssembler::PatchFarJumpSlot(Address slot, Address target) {
+ UNREACHABLE();
}
void JumpTableAssembler::NopBytes(int bytes) {
@@ -199,14 +271,20 @@ void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
for (int i = 0; i < nop_bytes; i += kInstrSize) nop();
}
-void JumpTableAssembler::EmitRuntimeStubSlot(Address builtin_target) {
- JumpToInstructionStream(builtin_target);
-}
-
-void JumpTableAssembler::EmitJumpSlot(Address target) {
+bool JumpTableAssembler::EmitJumpSlot(Address target) {
mov(r0, Operand(target));
mtctr(r0);
bctr();
+ return true;
+}
+
+void JumpTableAssembler::EmitFarJumpSlot(Address target) {
+ JumpToInstructionStream(target);
+}
+
+// static
+void JumpTableAssembler::PatchFarJumpSlot(Address slot, Address target) {
+ UNREACHABLE();
}
void JumpTableAssembler::NopBytes(int bytes) {
@@ -218,21 +296,7 @@ void JumpTableAssembler::NopBytes(int bytes) {
}
#else
-void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
- Address lazy_compile_target) {
- UNIMPLEMENTED();
-}
-
-void JumpTableAssembler::EmitRuntimeStubSlot(Address builtin_target) {
- UNIMPLEMENTED();
-}
-
-void JumpTableAssembler::EmitJumpSlot(Address target) { UNIMPLEMENTED(); }
-
-void JumpTableAssembler::NopBytes(int bytes) {
- DCHECK_LE(0, bytes);
- UNIMPLEMENTED();
-}
+#error Unknown architecture.
#endif
} // namespace wasm
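
Editor's note: taken together, the per-architecture changes above implement a two-level patching scheme. {EmitJumpSlot} now reports whether the target is reachable with a near jump; if not, the pointer-size target word inside a far jump slot is rewritten with a relaxed atomic store and the near slot is redirected there (see {PatchJumpTableSlot} in the header below). A self-contained sketch of the word patch; as in the patch, it relies on an aligned pointer-size store being atomic on the supported architectures:

    #include <atomic>
    #include <cassert>
    #include <cstdint>

    using Address = uintptr_t;

    // Patch the pointer-size target word of a far jump slot. As in the patch,
    // the cast assumes std::atomic<Address> is layout-compatible with a raw
    // Address; readers see either the old target or the new one, never a torn
    // value.
    void PatchTargetWord(Address data_address, Address new_target) {
      assert(data_address % sizeof(Address) == 0);  // must be pointer-aligned
      reinterpret_cast<std::atomic<Address>*>(data_address)
          ->store(new_target, std::memory_order_relaxed);
    }

    int main() {
      alignas(sizeof(Address)) Address word = 0x1000;
      PatchTargetWord(reinterpret_cast<Address>(&word), 0x2000);
      assert(word == 0x2000);
    }
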
diff --git a/deps/v8/src/wasm/jump-table-assembler.h b/deps/v8/src/wasm/jump-table-assembler.h
index 8889c18e9c..2100e44199 100644
--- a/deps/v8/src/wasm/jump-table-assembler.h
+++ b/deps/v8/src/wasm/jump-table-assembler.h
@@ -6,7 +6,6 @@
#define V8_WASM_JUMP_TABLE_ASSEMBLER_H_
#include "src/codegen/macro-assembler.h"
-#include "src/wasm/wasm-code-manager.h"
namespace v8 {
namespace internal {
@@ -19,9 +18,11 @@ namespace wasm {
//
// Additionally to this main jump table, there exist special jump tables for
// other purposes:
-// - the runtime stub table contains one entry per wasm runtime stub (see
+// - the far stub table contains one entry per wasm runtime stub (see
// {WasmCode::RuntimeStubId}, which jumps to the corresponding embedded
-// builtin.
+//   builtin), plus (if {FLAG_wasm_far_jump_table} is enabled and the full
+//   address space cannot be reached via the jump table) one entry per wasm
+// function.
// - the lazy compile table contains one entry per wasm function which jumps to
// the common {WasmCompileLazy} builtin and passes the function index that was
// invoked.
@@ -73,16 +74,28 @@ class V8_EXPORT_PRIVATE JumpTableAssembler : public MacroAssembler {
// Determine the size of a jump table containing the given number of slots.
static constexpr uint32_t SizeForNumberOfSlots(uint32_t slot_count) {
- // TODO(wasm): Once the {RoundUp} utility handles non-powers of two values,
- // use: {RoundUp<kJumpTableSlotsPerLine>(slot_count) * kJumpTableLineSize}
return ((slot_count + kJumpTableSlotsPerLine - 1) /
kJumpTableSlotsPerLine) *
kJumpTableLineSize;
}
- // Translate a stub slot index to an offset into the continuous jump table.
- static uint32_t StubSlotIndexToOffset(uint32_t slot_index) {
- return slot_index * kJumpTableStubSlotSize;
+ // Translate a far jump table index to an offset into the table.
+ static uint32_t FarJumpSlotIndexToOffset(uint32_t slot_index) {
+ return slot_index * kFarJumpTableSlotSize;
+ }
+
+ // Translate a far jump table offset to the index into the table.
+ static uint32_t FarJumpSlotOffsetToIndex(uint32_t offset) {
+ DCHECK_EQ(0, offset % kFarJumpTableSlotSize);
+ return offset / kFarJumpTableSlotSize;
+ }
+
+ // Determine the size of a far jump table containing the given number of
+ // slots.
+ static constexpr uint32_t SizeForNumberOfFarJumpSlots(
+ int num_runtime_slots, int num_function_slots) {
+ int num_entries = num_runtime_slots + num_function_slots;
+ return num_entries * kFarJumpTableSlotSize;
}
// Translate a slot index to an offset into the lazy compile table.
@@ -90,11 +103,6 @@ class V8_EXPORT_PRIVATE JumpTableAssembler : public MacroAssembler {
return slot_index * kLazyCompileTableSlotSize;
}
- // Determine the size of a jump table containing only runtime stub slots.
- static constexpr uint32_t SizeForNumberOfStubSlots(uint32_t slot_count) {
- return slot_count * kJumpTableStubSlotSize;
- }
-
// Determine the size of a lazy compile table.
static constexpr uint32_t SizeForNumberOfLazyFunctions(uint32_t slot_count) {
return slot_count * kLazyCompileTableSlotSize;
@@ -115,32 +123,41 @@ class V8_EXPORT_PRIVATE JumpTableAssembler : public MacroAssembler {
FlushInstructionCache(base, lazy_compile_table_size);
}
- static void GenerateRuntimeStubTable(Address base, Address* targets,
- int num_stubs) {
- uint32_t table_size = num_stubs * kJumpTableStubSlotSize;
+ static void GenerateFarJumpTable(Address base, Address* stub_targets,
+ int num_runtime_slots,
+ int num_function_slots) {
+ uint32_t table_size =
+ SizeForNumberOfFarJumpSlots(num_runtime_slots, num_function_slots);
// Assume enough space, so the Assembler does not try to grow the buffer.
JumpTableAssembler jtasm(base, table_size + 256);
int offset = 0;
- for (int index = 0; index < num_stubs; ++index) {
- DCHECK_EQ(offset, StubSlotIndexToOffset(index));
+ for (int index = 0; index < num_runtime_slots + num_function_slots;
+ ++index) {
+ DCHECK_EQ(offset, FarJumpSlotIndexToOffset(index));
+      // Function slots initially jump to themselves. They are patched before
+ // being used.
+ Address target =
+ index < num_runtime_slots ? stub_targets[index] : base + offset;
+ jtasm.EmitFarJumpSlot(target);
+ offset += kFarJumpTableSlotSize;
DCHECK_EQ(offset, jtasm.pc_offset());
- jtasm.EmitRuntimeStubSlot(targets[index]);
- offset += kJumpTableStubSlotSize;
- jtasm.NopBytes(offset - jtasm.pc_offset());
}
FlushInstructionCache(base, table_size);
}
- static void PatchJumpTableSlot(Address base, uint32_t slot_index,
- Address new_target,
- WasmCode::FlushICache flush_i_cache) {
- Address slot = base + JumpSlotIndexToOffset(slot_index);
- JumpTableAssembler jtasm(slot);
- jtasm.EmitJumpSlot(new_target);
- jtasm.NopBytes(kJumpTableSlotSize - jtasm.pc_offset());
- if (flush_i_cache) {
- FlushInstructionCache(slot, kJumpTableSlotSize);
+ static void PatchJumpTableSlot(Address jump_table_slot,
+ Address far_jump_table_slot, Address target) {
+ // First, try to patch the jump table slot.
+ JumpTableAssembler jtasm(jump_table_slot);
+ if (!jtasm.EmitJumpSlot(target)) {
+ // If that fails, we need to patch the far jump table slot, and then
+ // update the jump table slot to jump to this far jump table slot.
+ DCHECK_NE(kNullAddress, far_jump_table_slot);
+ JumpTableAssembler::PatchFarJumpSlot(far_jump_table_slot, target);
+ CHECK(jtasm.EmitJumpSlot(far_jump_table_slot));
}
+ jtasm.NopBytes(kJumpTableSlotSize - jtasm.pc_offset());
+ FlushInstructionCache(jump_table_slot, kJumpTableSlotSize);
}
private:
@@ -157,48 +174,45 @@ class V8_EXPORT_PRIVATE JumpTableAssembler : public MacroAssembler {
#if V8_TARGET_ARCH_X64
static constexpr int kJumpTableLineSize = 64;
static constexpr int kJumpTableSlotSize = 5;
+ static constexpr int kFarJumpTableSlotSize = 16;
static constexpr int kLazyCompileTableSlotSize = 10;
- static constexpr int kJumpTableStubSlotSize = 18;
#elif V8_TARGET_ARCH_IA32
static constexpr int kJumpTableLineSize = 64;
static constexpr int kJumpTableSlotSize = 5;
+ static constexpr int kFarJumpTableSlotSize = 5;
static constexpr int kLazyCompileTableSlotSize = 10;
- static constexpr int kJumpTableStubSlotSize = 10;
#elif V8_TARGET_ARCH_ARM
static constexpr int kJumpTableLineSize = 3 * kInstrSize;
static constexpr int kJumpTableSlotSize = 3 * kInstrSize;
+ static constexpr int kFarJumpTableSlotSize = 2 * kInstrSize;
static constexpr int kLazyCompileTableSlotSize = 5 * kInstrSize;
- static constexpr int kJumpTableStubSlotSize = 5 * kInstrSize;
#elif V8_TARGET_ARCH_ARM64
static constexpr int kJumpTableLineSize = 1 * kInstrSize;
static constexpr int kJumpTableSlotSize = 1 * kInstrSize;
+ static constexpr int kFarJumpTableSlotSize = 4 * kInstrSize;
static constexpr int kLazyCompileTableSlotSize = 3 * kInstrSize;
- static constexpr int kJumpTableStubSlotSize = 6 * kInstrSize;
#elif V8_TARGET_ARCH_S390X
static constexpr int kJumpTableLineSize = 128;
static constexpr int kJumpTableSlotSize = 14;
+ static constexpr int kFarJumpTableSlotSize = 14;
static constexpr int kLazyCompileTableSlotSize = 20;
- static constexpr int kJumpTableStubSlotSize = 14;
#elif V8_TARGET_ARCH_PPC64
static constexpr int kJumpTableLineSize = 64;
static constexpr int kJumpTableSlotSize = 7 * kInstrSize;
+ static constexpr int kFarJumpTableSlotSize = 7 * kInstrSize;
static constexpr int kLazyCompileTableSlotSize = 12 * kInstrSize;
- static constexpr int kJumpTableStubSlotSize = 7 * kInstrSize;
#elif V8_TARGET_ARCH_MIPS
static constexpr int kJumpTableLineSize = 6 * kInstrSize;
static constexpr int kJumpTableSlotSize = 4 * kInstrSize;
+ static constexpr int kFarJumpTableSlotSize = 4 * kInstrSize;
static constexpr int kLazyCompileTableSlotSize = 6 * kInstrSize;
- static constexpr int kJumpTableStubSlotSize = 4 * kInstrSize;
#elif V8_TARGET_ARCH_MIPS64
static constexpr int kJumpTableLineSize = 8 * kInstrSize;
static constexpr int kJumpTableSlotSize = 6 * kInstrSize;
+ static constexpr int kFarJumpTableSlotSize = 6 * kInstrSize;
static constexpr int kLazyCompileTableSlotSize = 8 * kInstrSize;
- static constexpr int kJumpTableStubSlotSize = 6 * kInstrSize;
#else
- static constexpr int kJumpTableLineSize = 1;
- static constexpr int kJumpTableSlotSize = 1;
- static constexpr int kLazyCompileTableSlotSize = 1;
- static constexpr int kJumpTableStubSlotSize = 1;
+#error Unknown architecture.
#endif
static constexpr int kJumpTableSlotsPerLine =
@@ -218,9 +232,15 @@ class V8_EXPORT_PRIVATE JumpTableAssembler : public MacroAssembler {
void EmitLazyCompileJumpSlot(uint32_t func_index,
Address lazy_compile_target);
- void EmitRuntimeStubSlot(Address builtin_target);
+ // Returns {true} if the jump fits in the jump table slot, {false} otherwise.
+ bool EmitJumpSlot(Address target);
+
+ // Initially emit a far jump slot.
+ void EmitFarJumpSlot(Address target);
- void EmitJumpSlot(Address target);
+  // Patch an existing far jump slot, and make sure that this update eventually
+ // becomes available to all execution units that might execute this code.
+ static void PatchFarJumpSlot(Address slot, Address target);
void NopBytes(int bytes);
};
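
Editor's note: a worked example of the size helpers above, using the x64 constants from the table (kJumpTableLineSize = 64, kJumpTableSlotSize = 5, kFarJumpTableSlotSize = 16); the rounding in {SizeForNumberOfSlots} keeps every near-jump slot within one cache line, while the far jump table is laid out densely:

    #include <cstdint>

    constexpr uint32_t kJumpTableLineSize = 64;
    constexpr uint32_t kJumpTableSlotSize = 5;
    constexpr uint32_t kFarJumpTableSlotSize = 16;
    constexpr uint32_t kJumpTableSlotsPerLine =
        kJumpTableLineSize / kJumpTableSlotSize;  // 12 slots per 64-byte line

    constexpr uint32_t SizeForNumberOfSlots(uint32_t slot_count) {
      // Round up to whole lines so no near-jump slot straddles a cache line.
      return ((slot_count + kJumpTableSlotsPerLine - 1) / kJumpTableSlotsPerLine) *
             kJumpTableLineSize;
    }

    constexpr uint32_t SizeForNumberOfFarJumpSlots(int num_runtime_slots,
                                                   int num_function_slots) {
      return (num_runtime_slots + num_function_slots) * kFarJumpTableSlotSize;
    }

    static_assert(SizeForNumberOfSlots(12) == 64, "exactly one line");
    static_assert(SizeForNumberOfSlots(13) == 128, "13th slot starts a new line");
    static_assert(SizeForNumberOfFarJumpSlots(5, 100) == 1680, "dense layout");

    int main() {}
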
diff --git a/deps/v8/src/wasm/memory-tracing.cc b/deps/v8/src/wasm/memory-tracing.cc
index b11a557195..300c7afcf9 100644
--- a/deps/v8/src/wasm/memory-tracing.cc
+++ b/deps/v8/src/wasm/memory-tracing.cc
@@ -16,7 +16,7 @@ namespace wasm {
void TraceMemoryOperation(ExecutionTier tier, const MemoryTracingInfo* info,
int func_index, int position, uint8_t* mem_start) {
- EmbeddedVector<char, 64> value;
+ EmbeddedVector<char, 91> value;
auto mem_rep = static_cast<MachineRepresentation>(info->mem_rep);
switch (mem_rep) {
#define TRACE_TYPE(rep, str, format, ctype1, ctype2) \
@@ -34,6 +34,25 @@ void TraceMemoryOperation(ExecutionTier tier, const MemoryTracingInfo* info,
TRACE_TYPE(kFloat32, "f32", "%f / %08x", float, uint32_t)
TRACE_TYPE(kFloat64, "f64", "%f / %016" PRIx64, double, uint64_t)
#undef TRACE_TYPE
+ case MachineRepresentation::kSimd128:
+ SNPrintF(value, "s128:%d %d %d %d / %08x %08x %08x %08x",
+ base::ReadLittleEndianValue<uint32_t>(
+ reinterpret_cast<Address>(mem_start) + info->address),
+ base::ReadLittleEndianValue<uint32_t>(
+ reinterpret_cast<Address>(mem_start) + info->address + 4),
+ base::ReadLittleEndianValue<uint32_t>(
+ reinterpret_cast<Address>(mem_start) + info->address + 8),
+ base::ReadLittleEndianValue<uint32_t>(
+ reinterpret_cast<Address>(mem_start) + info->address + 12),
+ base::ReadLittleEndianValue<uint32_t>(
+ reinterpret_cast<Address>(mem_start) + info->address),
+ base::ReadLittleEndianValue<uint32_t>(
+ reinterpret_cast<Address>(mem_start) + info->address + 4),
+ base::ReadLittleEndianValue<uint32_t>(
+ reinterpret_cast<Address>(mem_start) + info->address + 8),
+ base::ReadLittleEndianValue<uint32_t>(
+ reinterpret_cast<Address>(mem_start) + info->address + 12));
+ break;
default:
SNPrintF(value, "???");
}
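
Editor's note: the value buffer grows from 64 to 91 bytes to fit the new s128 line. A quick check of the worst case, under my reading of the format string (four signed 32-bit decimals of up to 11 characters, four 8-digit hex groups, the separators, and the NUL terminator):

    #include <cstdio>

    int main() {
      // "s128:" (5) + 4 x "%d" (11 each) + 3 spaces (47) + " / " (3)
      // + 4 x "%08x" (8 each) + 3 spaces (35) = 90 chars, + 1 for NUL = 91.
      char buf[91];
      int n = snprintf(buf, sizeof buf, "s128:%d %d %d %d / %08x %08x %08x %08x",
                       -2147483647 - 1, -2147483647 - 1, -2147483647 - 1,
                       -2147483647 - 1, 0xffffffffu, 0xffffffffu, 0xffffffffu,
                       0xffffffffu);
      printf("%d chars + NUL = %d bytes\n", n, n + 1);  // prints: 90 + 1 = 91
    }
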
diff --git a/deps/v8/src/wasm/module-compiler.cc b/deps/v8/src/wasm/module-compiler.cc
index 2847b02c64..9e08f8d109 100644
--- a/deps/v8/src/wasm/module-compiler.cc
+++ b/deps/v8/src/wasm/module-compiler.cc
@@ -14,7 +14,6 @@
#include "src/base/platform/mutex.h"
#include "src/base/platform/semaphore.h"
#include "src/base/platform/time.h"
-#include "src/base/template-utils.h"
#include "src/base/utils/random-number-generator.h"
#include "src/compiler/wasm-compiler.h"
#include "src/heap/heap-inl.h" // For CodeSpaceMemoryModificationScope.
@@ -31,7 +30,6 @@
#include "src/wasm/wasm-import-wrapper-cache.h"
#include "src/wasm/wasm-js.h"
#include "src/wasm/wasm-limits.h"
-#include "src/wasm/wasm-memory.h"
#include "src/wasm/wasm-objects-inl.h"
#include "src/wasm/wasm-opcodes.h"
#include "src/wasm/wasm-result.h"
@@ -152,6 +150,9 @@ class CompilationUnitQueues {
for (int task_id = 0; task_id < max_tasks; ++task_id) {
queues_[task_id].next_steal_task_id = next_task_id(task_id);
}
+ for (auto& atomic_counter : num_units_) {
+ std::atomic_init(&atomic_counter, size_t{0});
+ }
}
base::Optional<WasmCompilationUnit> GetNextUnit(
@@ -254,15 +255,14 @@ class CompilationUnitQueues {
};
struct BigUnitsQueue {
- BigUnitsQueue() = default;
+ BigUnitsQueue() {
+ for (auto& atomic : has_units) std::atomic_init(&atomic, false);
+ }
base::Mutex mutex;
// Can be read concurrently to check whether any elements are in the queue.
- std::atomic_bool has_units[kNumTiers] = {
- ATOMIC_VAR_INIT(false),
- ATOMIC_VAR_INIT(false)
- };
+ std::atomic<bool> has_units[kNumTiers];
// Protected by {mutex}:
std::priority_queue<BigUnit> units[kNumTiers];
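
Editor's note: the {ATOMIC_VAR_INIT} aggregate initializers above are replaced by explicit {std::atomic_init} loops; before C++20, a default-constructed {std::atomic<T>} holds an indeterminate value, so each array element must be initialized before any concurrent access. A minimal sketch of the pattern:

    #include <atomic>
    #include <cstddef>
    #include <cstdio>

    struct Queues {
      std::atomic<size_t> num_units[2];  // one counter per tier, as in the patch
      Queues() {
        // Non-atomic initialization is fine here: no other thread can see the
        // object before the constructor finishes.
        for (auto& counter : num_units) std::atomic_init(&counter, size_t{0});
      }
    };

    int main() {
      Queues q;
      q.num_units[0].fetch_add(3, std::memory_order_relaxed);
      printf("%zu\n", q.num_units[0].load(std::memory_order_relaxed));  // 3
    }
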
@@ -271,11 +271,8 @@ class CompilationUnitQueues {
std::vector<Queue> queues_;
BigUnitsQueue big_units_queue_;
- std::atomic_size_t num_units_[kNumTiers] = {
- ATOMIC_VAR_INIT(0),
- ATOMIC_VAR_INIT(0)
- };
- std::atomic_int next_queue_to_add{0};
+ std::atomic<size_t> num_units_[kNumTiers];
+ std::atomic<int> next_queue_to_add{0};
int next_task_id(int task_id) const {
int next = task_id + 1;
@@ -482,7 +479,7 @@ class CompilationStateImpl {
// Compilation error, atomically updated. This flag can be updated and read
// using relaxed semantics.
- std::atomic_bool compile_failed_{false};
+ std::atomic<bool> compile_failed_{false};
const int max_background_tasks_ = 0;
@@ -967,6 +964,10 @@ bool ExecuteJSToWasmWrapperCompilationUnits(
return true;
}
+bool NeedsDeterministicCompile() {
+ return FLAG_trace_wasm_decoder || FLAG_wasm_num_compilation_tasks <= 1;
+}
+
// Run by the main thread and background tasks to take part in compilation.
// Returns whether any units were executed.
bool ExecuteCompilationUnits(
@@ -994,6 +995,7 @@ bool ExecuteCompilationUnits(
// These fields are initialized in a {BackgroundCompileScope} before
// starting compilation.
double deadline = 0;
+ const bool deterministic = NeedsDeterministicCompile();
base::Optional<CompilationEnv> env;
std::shared_ptr<WireBytesStorage> wire_bytes;
std::shared_ptr<const WasmModule> module;
@@ -1087,7 +1089,7 @@ bool ExecuteCompilationUnits(
}
// Get next unit.
- if (deadline < platform->MonotonicallyIncreasingTime()) {
+ if (deterministic || deadline < platform->MonotonicallyIncreasingTime()) {
unit = {};
} else {
unit = compile_scope.compilation_state()->GetNextCompilationUnit(
@@ -1199,10 +1201,6 @@ void InitializeCompilationUnits(Isolate* isolate, NativeModule* native_module) {
builder.Commit();
}
-bool NeedsDeterministicCompile() {
- return FLAG_trace_wasm_decoder || FLAG_wasm_num_compilation_tasks <= 1;
-}
-
bool MayCompriseLazyFunctions(const WasmModule* module,
const WasmFeatures& enabled_features,
bool lazy_module) {
@@ -1373,7 +1371,6 @@ std::shared_ptr<NativeModule> CompileToNativeModule(
auto native_module = isolate->wasm_engine()->NewNativeModule(
isolate, enabled, std::move(module));
native_module->SetWireBytes(std::move(wire_bytes_copy));
- native_module->SetRuntimeStubs(isolate);
CompileNativeModule(isolate, thrower, wasm_module, native_module.get());
if (thrower->error()) return {};
@@ -1468,7 +1465,7 @@ class AsyncStreamingProcessor final : public StreamingProcessor {
std::shared_ptr<StreamingDecoder> AsyncCompileJob::CreateStreamingDecoder() {
DCHECK_NULL(stream_);
stream_.reset(
- new StreamingDecoder(base::make_unique<AsyncStreamingProcessor>(this)));
+ new StreamingDecoder(std::make_unique<AsyncStreamingProcessor>(this)));
return stream_;
}
@@ -1504,7 +1501,7 @@ void AsyncCompileJob::CreateNativeModule(
// Create the module object and populate with compiled functions and
// information needed at instantiation time.
- // TODO(clemensh): For the same module (same bytes / same hash), we should
+ // TODO(clemensb): For the same module (same bytes / same hash), we should
// only have one {WasmModuleObject}. Otherwise, we might only set
// breakpoints on a (potentially empty) subset of the instances.
// Create the module object.
@@ -1512,7 +1509,6 @@ void AsyncCompileJob::CreateNativeModule(
native_module_ = isolate_->wasm_engine()->NewNativeModule(
isolate_, enabled_features_, std::move(module));
native_module_->SetWireBytes({std::move(bytes_copy_), wire_bytes_.length()});
- native_module_->SetRuntimeStubs(isolate_);
if (stream_) stream_->NotifyNativeModuleCreated(native_module_);
}
@@ -1707,7 +1703,7 @@ class AsyncCompileJob::CompileTask : public CancelableTask {
void AsyncCompileJob::StartForegroundTask() {
DCHECK_NULL(pending_foreground_task_);
- auto new_task = base::make_unique<CompileTask>(this, true);
+ auto new_task = std::make_unique<CompileTask>(this, true);
pending_foreground_task_ = new_task.get();
foreground_task_runner_->PostTask(std::move(new_task));
}
@@ -1715,7 +1711,7 @@ void AsyncCompileJob::StartForegroundTask() {
void AsyncCompileJob::ExecuteForegroundTaskImmediately() {
DCHECK_NULL(pending_foreground_task_);
- auto new_task = base::make_unique<CompileTask>(this, true);
+ auto new_task = std::make_unique<CompileTask>(this, true);
pending_foreground_task_ = new_task.get();
new_task->Run();
}
@@ -1727,7 +1723,7 @@ void AsyncCompileJob::CancelPendingForegroundTask() {
}
void AsyncCompileJob::StartBackgroundTask() {
- auto task = base::make_unique<CompileTask>(this, false);
+ auto task = std::make_unique<CompileTask>(this, false);
  // If --wasm-num-compilation-tasks=0 is passed, only spawn foreground
// tasks. This is used to make timing deterministic.
@@ -2210,11 +2206,9 @@ bool AsyncStreamingProcessor::Deserialize(Vector<const uint8_t> module_bytes,
}
int GetMaxBackgroundTasks() {
- if (NeedsDeterministicCompile()) return 1;
+ if (NeedsDeterministicCompile()) return 0;
int num_worker_threads = V8::GetCurrentPlatform()->NumberOfWorkerThreads();
- int num_compile_tasks =
- std::min(FLAG_wasm_num_compilation_tasks, num_worker_threads);
- return std::max(1, num_compile_tasks);
+ return std::min(FLAG_wasm_num_compilation_tasks, num_worker_threads);
}
CompilationStateImpl::CompilationStateImpl(
@@ -2228,7 +2222,7 @@ CompilationStateImpl::CompilationStateImpl(
? CompileMode::kTiering
: CompileMode::kRegular),
async_counters_(std::move(async_counters)),
- max_background_tasks_(GetMaxBackgroundTasks()),
+ max_background_tasks_(std::max(GetMaxBackgroundTasks(), 1)),
compilation_unit_queues_(max_background_tasks_),
available_task_ids_(max_background_tasks_) {
for (int i = 0; i < max_background_tasks_; ++i) {
@@ -2617,7 +2611,7 @@ void CompileJsToWasmWrappers(Isolate* isolate, const WasmModule* module,
auto& function = module->functions[exp.index];
JSToWasmWrapperKey key(function.imported, *function.sig);
if (queue.insert(key)) {
- auto unit = base::make_unique<JSToWasmWrapperCompilationUnit>(
+ auto unit = std::make_unique<JSToWasmWrapperCompilationUnit>(
isolate, isolate->wasm_engine(), function.sig, function.imported,
enabled_features);
compilation_units.emplace(key, std::move(unit));
@@ -2628,7 +2622,7 @@ void CompileJsToWasmWrappers(Isolate* isolate, const WasmModule* module,
CancelableTaskManager task_manager;
const int max_background_tasks = GetMaxBackgroundTasks();
for (int i = 0; i < max_background_tasks; ++i) {
- auto task = base::make_unique<CompileJSToWasmWrapperTask>(
+ auto task = std::make_unique<CompileJSToWasmWrapperTask>(
&task_manager, &queue, &compilation_units);
V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task));
}
@@ -2699,12 +2693,21 @@ Handle<Script> CreateWasmScript(Isolate* isolate,
const int kBufferSize = 32;
char buffer[kBufferSize];
+ Handle<String> url_prefix =
+ isolate->factory()->InternalizeString(StaticCharVector("wasm://wasm/"));
+
int name_chars = SNPrintF(ArrayVector(buffer), "wasm-%08x", hash);
DCHECK(name_chars >= 0 && name_chars < kBufferSize);
- MaybeHandle<String> name_str = isolate->factory()->NewStringFromOneByte(
- VectorOf(reinterpret_cast<uint8_t*>(buffer), name_chars),
- AllocationType::kOld);
- script->set_name(*name_str.ToHandleChecked());
+ Handle<String> name_str =
+ isolate->factory()
+ ->NewStringFromOneByte(
+ VectorOf(reinterpret_cast<uint8_t*>(buffer), name_chars),
+ AllocationType::kOld)
+ .ToHandleChecked();
+ script->set_name(*name_str);
+ MaybeHandle<String> url_str =
+ isolate->factory()->NewConsString(url_prefix, name_str);
+ script->set_source_url(*url_str.ToHandleChecked());
if (source_map_url.size() != 0) {
MaybeHandle<String> src_map_str = isolate->factory()->NewStringFromUtf8(
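
Editor's note: with this change every wasm {Script} gets a synthetic source URL of the form wasm://wasm/wasm-<hash> in addition to its name. A plain-C++ sketch of the resulting string, with a hypothetical hash value; the factory and handle plumbing are V8-internal:

    #include <cstdio>

    int main() {
      unsigned hash = 0xdeadbeef;  // hypothetical module hash
      char name[32];
      snprintf(name, sizeof name, "wasm-%08x", hash);  // the script name
      char url[64];
      snprintf(url, sizeof url, "wasm://wasm/%s", name);  // prefix + name
      printf("%s\n", url);  // wasm://wasm/wasm-deadbeef
    }
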
diff --git a/deps/v8/src/wasm/module-decoder.cc b/deps/v8/src/wasm/module-decoder.cc
index 56712977b1..b89d06b881 100644
--- a/deps/v8/src/wasm/module-decoder.cc
+++ b/deps/v8/src/wasm/module-decoder.cc
@@ -6,7 +6,6 @@
#include "src/base/functional.h"
#include "src/base/platform/platform.h"
-#include "src/base/template-utils.h"
#include "src/flags/flags.h"
#include "src/init/v8.h"
#include "src/logging/counters.h"
@@ -31,6 +30,7 @@ namespace {
constexpr char kNameString[] = "name";
constexpr char kSourceMappingURLString[] = "sourceMappingURL";
constexpr char kCompilationHintsString[] = "compilationHints";
+constexpr char kDebugInfoString[] = ".debug_info";
template <size_t N>
constexpr size_t num_chars(const char (&)[N]) {
@@ -89,6 +89,8 @@ const char* SectionName(SectionCode code) {
return kNameString;
case kSourceMappingURLSectionCode:
return kSourceMappingURLString;
+ case kDebugInfoSectionCode:
+ return kDebugInfoString;
case kCompilationHintsSectionCode:
return kCompilationHintsString;
default:
@@ -304,7 +306,7 @@ class ModuleDecoderImpl : public Decoder {
CHECK_NULL(module_);
SetCounters(counters);
module_.reset(
- new WasmModule(base::make_unique<Zone>(allocator, "signatures")));
+ new WasmModule(std::make_unique<Zone>(allocator, "signatures")));
module_->initial_pages = 0;
module_->maximum_pages = 0;
module_->mem_export = false;
@@ -399,6 +401,10 @@ class ModuleDecoderImpl : public Decoder {
// sourceMappingURL is a custom section and currently can occur anywhere
// in the module. In case of multiple sourceMappingURL sections, all
// except the first occurrence are ignored.
+ case kDebugInfoSectionCode:
+ // .debug_info is a custom section containing core DWARF information
+        // if produced by the compiler. Its presence likely means that the
+        // Wasm module was built in debug mode.
case kCompilationHintsSectionCode:
// TODO(frgossen): report out of place compilation hints section as a
// warning.
@@ -453,6 +459,13 @@ class ModuleDecoderImpl : public Decoder {
case kSourceMappingURLSectionCode:
DecodeSourceMappingURLSection();
break;
+ case kDebugInfoSectionCode:
+ // If there is an explicit source map, prefer it over DWARF info.
+ if (!has_seen_unordered_section(kSourceMappingURLSectionCode)) {
+ module_->source_map_url.assign("wasm://dwarf");
+ }
+ consume_bytes(static_cast<uint32_t>(end_ - start_), ".debug_info");
+ break;
case kCompilationHintsSectionCode:
if (enabled_features_.compilation_hints) {
DecodeCompilationHintsSection();
@@ -798,9 +811,11 @@ class ModuleDecoderImpl : public Decoder {
const byte* pos = pc();
bool is_active;
+ bool functions_as_elements;
uint32_t table_index;
WasmInitExpr offset;
- consume_segment_header("table index", &is_active, &table_index, &offset);
+ consume_element_segment_header(&is_active, &functions_as_elements,
+ &table_index, &offset);
if (failed()) return;
if (is_active) {
@@ -815,12 +830,6 @@ class ModuleDecoderImpl : public Decoder {
table_index);
break;
}
- } else {
- ValueType type = consume_reference_type();
- if (!ValueTypes::IsSubType(kWasmFuncRef, type)) {
- error(pc_ - 1, "invalid element segment type");
- break;
- }
}
uint32_t num_elem =
@@ -833,8 +842,8 @@ class ModuleDecoderImpl : public Decoder {
WasmElemSegment* init = &module_->elem_segments.back();
for (uint32_t j = 0; j < num_elem; j++) {
- uint32_t index = is_active ? consume_element_func_index()
- : consume_passive_element();
+ uint32_t index = functions_as_elements ? consume_element_expr()
+ : consume_element_func_index();
if (failed()) break;
init->entries.push_back(index);
}
@@ -911,8 +920,7 @@ class ModuleDecoderImpl : public Decoder {
bool is_active;
uint32_t memory_index;
WasmInitExpr dest_addr;
- consume_segment_header("memory index", &is_active, &memory_index,
- &dest_addr);
+ consume_data_segment_header(&is_active, &memory_index, &dest_addr);
if (failed()) break;
if (is_active && memory_index != 0) {
@@ -1483,7 +1491,7 @@ class ModuleDecoderImpl : public Decoder {
WasmInitExpr expr;
uint32_t len = 0;
switch (opcode) {
- case kExprGetGlobal: {
+ case kExprGlobalGet: {
GlobalIndexImmediate<Decoder::kValidate> imm(this, pc() - 1);
if (module->globals.size() <= imm.index) {
error("global index is out of bounds");
@@ -1544,6 +1552,10 @@ class ModuleDecoderImpl : public Decoder {
case kExprRefFunc: {
if (enabled_features_.anyref) {
FunctionIndexImmediate<Decoder::kValidate> imm(this, pc() - 1);
+ if (module->functions.size() <= imm.index) {
+ errorf(pc() - 1, "invalid function index: %u", imm.index);
+ break;
+ }
expr.kind = WasmInitExpr::kRefFuncConst;
expr.val.function_index = imm.index;
len = imm.length;
@@ -1678,8 +1690,103 @@ class ModuleDecoderImpl : public Decoder {
return attribute;
}
- void consume_segment_header(const char* name, bool* is_active,
- uint32_t* index, WasmInitExpr* offset) {
+ void consume_element_segment_header(bool* is_active,
+ bool* functions_as_elements,
+ uint32_t* table_index,
+ WasmInitExpr* offset) {
+ const byte* pos = pc();
+ uint8_t flag;
+ if (enabled_features_.bulk_memory || enabled_features_.anyref) {
+ flag = consume_u8("flag");
+ } else {
+ uint32_t table_index = consume_u32v("table index");
+ // The only valid flag value without bulk_memory or anyref is '0'.
+ if (table_index != 0) {
+ error(
+ "Element segments with table indices require "
+ "--experimental-wasm-bulk-memory or --experimental-wasm-anyref");
+ return;
+ }
+ flag = 0;
+ }
+
+ // The mask for the bit in the flag which indicates if the segment is
+ // active or not.
+ constexpr uint8_t kIsPassiveMask = 0x01;
+ // The mask for the bit in the flag which indicates if the segment has an
+ // explicit table index field.
+ constexpr uint8_t kHasTableIndexMask = 0x02;
+ // The mask for the bit in the flag which indicates if the functions of this
+  // segment are defined as function indices (=0) or as elements (=1).
+ constexpr uint8_t kFunctionsAsElementsMask = 0x04;
+ constexpr uint8_t kFullMask =
+ kIsPassiveMask | kHasTableIndexMask | kFunctionsAsElementsMask;
+
+ bool is_passive = flag & kIsPassiveMask;
+ *is_active = !is_passive;
+ *functions_as_elements = flag & kFunctionsAsElementsMask;
+ bool has_table_index = flag & kHasTableIndexMask;
+
+ if (is_passive && !enabled_features_.bulk_memory) {
+ error("Passive element segments require --experimental-wasm-bulk-memory");
+ return;
+ }
+ if (*functions_as_elements && !enabled_features_.bulk_memory) {
+ error(
+ "Illegal segment flag. Did you forget "
+ "--experimental-wasm-bulk-memory?");
+ return;
+ }
+ if (flag != 0 && !enabled_features_.bulk_memory &&
+ !enabled_features_.anyref) {
+ error(
+ "Invalid segment flag. Did you forget "
+ "--experimental-wasm-bulk-memory or --experimental-wasm-anyref?");
+ return;
+ }
+ if ((flag & kFullMask) != flag || (!(*is_active) && has_table_index)) {
+ errorf(pos, "illegal flag value %u. Must be 0, 1, 2, 4, 5 or 6", flag);
+ }
+
+ if (has_table_index) {
+ *table_index = consume_u32v("table index");
+ } else {
+ *table_index = 0;
+ }
+
+ if (*is_active) {
+ *offset = consume_init_expr(module_.get(), kWasmI32);
+ }
+
+ if (*is_active && !has_table_index) {
+ // Active segments without table indices are a special case for backwards
+ // compatibility. These cases have an implicit element kind or element
+    // type, so we are already done with the segment header.
+ return;
+ }
+
+ if (*functions_as_elements) {
+    // We have to check that the element type is FuncRef. No other element
+    // types are valid yet.
+ ValueType type = consume_reference_type();
+ if (!ValueTypes::IsSubType(kWasmFuncRef, type)) {
+ error(pc_ - 1, "invalid element segment type");
+ return;
+ }
+ } else {
+    // We have to check that the element kind is Function. No other element
+    // kinds are valid yet.
+ uint8_t val = consume_u8("element kind");
+ ImportExportKindCode kind = static_cast<ImportExportKindCode>(val);
+ if (kind != kExternalFunction) {
+ errorf(pos, "illegal element kind %x. Must be 0x00", val);
+ return;
+ }
+ }
+ }
+
+ void consume_data_segment_header(bool* is_active, uint32_t* index,
+ WasmInitExpr* offset) {
const byte* pos = pc();
uint32_t flag = consume_u32v("flag");
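
Editor's note: the element segment flag is now a bit field. The sketch below enumerates how the checks in {consume_element_segment_header} above classify each value in range; 3 and 7 are rejected because a passive segment cannot carry a table index, matching the "Must be 0, 1, 2, 4, 5 or 6" error (values with bits outside kFullMask are rejected by the mask check):

    #include <cstdio>

    constexpr unsigned kIsPassiveMask = 0x01;
    constexpr unsigned kHasTableIndexMask = 0x02;
    constexpr unsigned kFunctionsAsElementsMask = 0x04;

    int main() {
      for (unsigned flag = 0; flag <= 7; ++flag) {
        bool is_active = !(flag & kIsPassiveMask);
        bool has_table_index = flag & kHasTableIndexMask;
        bool exprs = flag & kFunctionsAsElementsMask;
        bool valid = is_active || !has_table_index;  // passive + index is illegal
        printf("flag %u: %s, %s, %s -> %s\n", flag,
               is_active ? "active" : "passive",
               has_table_index ? "explicit table index" : "table 0",
               exprs ? "element expressions" : "function indices",
               valid ? "ok" : "rejected");
      }
    }
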
@@ -1715,7 +1822,7 @@ class ModuleDecoderImpl : public Decoder {
}
if (flag == SegmentFlags::kActiveWithIndex) {
*is_active = true;
- *index = consume_u32v(name);
+ *index = consume_u32v("memory index");
*offset = consume_init_expr(module_.get(), kWasmI32);
}
}
@@ -1731,7 +1838,7 @@ class ModuleDecoderImpl : public Decoder {
return index;
}
- uint32_t consume_passive_element() {
+ uint32_t consume_element_expr() {
uint32_t index = WasmElemSegment::kNullIndex;
uint8_t opcode = consume_u8("element opcode");
if (failed()) return index;
@@ -1857,6 +1964,10 @@ SectionCode ModuleDecoder::IdentifyUnknownSection(Decoder* decoder,
kCompilationHintsString,
num_chars(kCompilationHintsString)) == 0) {
return kCompilationHintsSectionCode;
+ } else if (string.length() == num_chars(kDebugInfoString) &&
+ strncmp(reinterpret_cast<const char*>(section_name_start),
+ kDebugInfoString, num_chars(kDebugInfoString)) == 0) {
+ return kDebugInfoSectionCode;
}
return kUnknownSectionCode;
}
@@ -1895,7 +2006,7 @@ FunctionResult DecodeWasmFunctionForTesting(
ModuleDecoderImpl decoder(enabled, function_start, function_end, kWasmOrigin);
decoder.SetCounters(counters);
return decoder.DecodeSingleFunction(zone, wire_bytes, module,
- base::make_unique<WasmFunction>());
+ std::make_unique<WasmFunction>());
}
AsmJsOffsetsResult DecodeAsmJsOffsets(const byte* tables_start,
diff --git a/deps/v8/src/wasm/module-decoder.h b/deps/v8/src/wasm/module-decoder.h
index 8e121c9d30..5ee324b109 100644
--- a/deps/v8/src/wasm/module-decoder.h
+++ b/deps/v8/src/wasm/module-decoder.h
@@ -5,6 +5,8 @@
#ifndef V8_WASM_MODULE_DECODER_H_
#define V8_WASM_MODULE_DECODER_H_
+#include <memory>
+
#include "src/common/globals.h"
#include "src/wasm/function-body-decoder.h"
#include "src/wasm/wasm-constants.h"
diff --git a/deps/v8/src/wasm/module-instantiate.cc b/deps/v8/src/wasm/module-instantiate.cc
index 976c3cde00..95d892ab50 100644
--- a/deps/v8/src/wasm/module-instantiate.cc
+++ b/deps/v8/src/wasm/module-instantiate.cc
@@ -93,7 +93,7 @@ class InstanceBuilder {
InstanceBuilder(Isolate* isolate, ErrorThrower* thrower,
Handle<WasmModuleObject> module_object,
MaybeHandle<JSReceiver> ffi,
- MaybeHandle<JSArrayBuffer> memory);
+ MaybeHandle<JSArrayBuffer> memory_buffer);
// Build an instance, in all of its glory.
MaybeHandle<WasmInstanceObject> Build();
@@ -114,7 +114,8 @@ class InstanceBuilder {
ErrorThrower* thrower_;
Handle<WasmModuleObject> module_object_;
MaybeHandle<JSReceiver> ffi_;
- MaybeHandle<JSArrayBuffer> memory_;
+ MaybeHandle<JSArrayBuffer> memory_buffer_;
+ Handle<WasmMemoryObject> memory_object_;
Handle<JSArrayBuffer> untagged_globals_;
Handle<FixedArray> tagged_globals_;
std::vector<Handle<WasmExceptionObject>> exception_wrappers_;
@@ -165,9 +166,11 @@ class InstanceBuilder {
void SanitizeImports();
- // Find the imported memory buffer if there is one. This is used to see if we
- // need to recompile with bounds checks before creating the instance.
- MaybeHandle<JSArrayBuffer> FindImportedMemoryBuffer() const;
+ // Find the imported memory if there is one.
+ bool FindImportedMemory();
+
+ // Allocate the memory.
+ bool AllocateMemory();
// Processes a single imported function.
bool ProcessImportedFunction(Handle<WasmInstanceObject> instance,
@@ -221,9 +224,6 @@ class InstanceBuilder {
// Process initialization of globals.
void InitGlobals(Handle<WasmInstanceObject> instance);
- // Allocate memory for a module instance as a new JSArrayBuffer.
- Handle<JSArrayBuffer> AllocateMemory(uint32_t initial_pages,
- uint32_t maximum_pages);
bool NeedsWrappers() const;
@@ -243,8 +243,9 @@ class InstanceBuilder {
MaybeHandle<WasmInstanceObject> InstantiateToInstanceObject(
Isolate* isolate, ErrorThrower* thrower,
Handle<WasmModuleObject> module_object, MaybeHandle<JSReceiver> imports,
- MaybeHandle<JSArrayBuffer> memory) {
- InstanceBuilder builder(isolate, thrower, module_object, imports, memory);
+ MaybeHandle<JSArrayBuffer> memory_buffer) {
+ InstanceBuilder builder(isolate, thrower, module_object, imports,
+ memory_buffer);
auto instance = builder.Build();
if (!instance.is_null() && builder.ExecuteStartFunction()) {
return instance;
@@ -256,14 +257,14 @@ MaybeHandle<WasmInstanceObject> InstantiateToInstanceObject(
InstanceBuilder::InstanceBuilder(Isolate* isolate, ErrorThrower* thrower,
Handle<WasmModuleObject> module_object,
MaybeHandle<JSReceiver> ffi,
- MaybeHandle<JSArrayBuffer> memory)
+ MaybeHandle<JSArrayBuffer> memory_buffer)
: isolate_(isolate),
enabled_(module_object->native_module()->enabled_features()),
module_(module_object->module()),
thrower_(thrower),
module_object_(module_object),
ffi_(ffi),
- memory_(memory) {
+ memory_buffer_(memory_buffer) {
sanitized_imports_.reserve(module_->import_table.size());
}
@@ -289,7 +290,7 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
NativeModule* native_module = module_object_->native_module();
//--------------------------------------------------------------------------
- // Allocate the memory array buffer.
+ // Set up the memory buffer and memory objects.
//--------------------------------------------------------------------------
uint32_t initial_pages = module_->initial_pages;
auto initial_pages_counter = SELECT_WASM_COUNTER(
@@ -301,29 +302,41 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
isolate_->counters()->wasm_wasm_max_mem_pages_count();
max_pages_counter->AddSample(module_->maximum_pages);
}
- // Asm.js has memory_ already set at this point, so we don't want to
- // overwrite it.
- if (memory_.is_null()) {
- memory_ = FindImportedMemoryBuffer();
- }
- if (!memory_.is_null()) {
- // Set externally passed ArrayBuffer non detachable.
- Handle<JSArrayBuffer> memory = memory_.ToHandleChecked();
- memory->set_is_detachable(false);
-
- DCHECK_IMPLIES(native_module->use_trap_handler(),
- is_asmjs_module(module_) || memory->is_wasm_memory() ||
- memory->backing_store() == nullptr);
- } else if (initial_pages > 0 || native_module->use_trap_handler()) {
- // We need to unconditionally create a guard region if using trap handlers,
- // even when the size is zero to prevent null-dereference issues
- // (e.g. https://crbug.com/769637).
- // Allocate memory if the initial size is more than 0 pages.
- memory_ = AllocateMemory(initial_pages, module_->maximum_pages);
- if (memory_.is_null()) {
- // failed to allocate memory
- DCHECK(isolate_->has_pending_exception() || thrower_->error());
- return {};
+
+ if (is_asmjs_module(module_)) {
+ Handle<JSArrayBuffer> buffer;
+ if (memory_buffer_.ToHandle(&buffer)) {
+ // asm.js instantiation should have changed the state of the buffer.
+ CHECK(!buffer->is_detachable());
+ CHECK(buffer->is_asmjs_memory());
+ } else {
+ // Use an empty JSArrayBuffer for degenerate asm.js modules.
+ memory_buffer_ = isolate_->factory()->NewJSArrayBufferAndBackingStore(
+ 0, InitializedFlag::kUninitialized);
+ if (!memory_buffer_.ToHandle(&buffer)) {
+ thrower_->RangeError("Out of memory: asm.js memory");
+ return {};
+ }
+ buffer->set_is_asmjs_memory(true);
+ buffer->set_is_detachable(false);
+ }
+
+ // The maximum number of pages isn't strictly necessary for memory
+ // objects used for asm.js, as they are never visible, but we might
+ // as well make it accurate.
+ auto maximum_pages = static_cast<uint32_t>(
+ RoundUp(buffer->byte_length(), wasm::kWasmPageSize) /
+ wasm::kWasmPageSize);
+ memory_object_ =
+ WasmMemoryObject::New(isolate_, memory_buffer_, maximum_pages);
+ } else {
+    // An actual wasm module must either import its memory or create it here.
+ CHECK(memory_buffer_.is_null());
+ if (!FindImportedMemory()) {
+ if (module_->has_memory && !AllocateMemory()) {
+ DCHECK(isolate_->has_pending_exception() || thrower_->error());
+ return {};
+ }
}
}
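
Editor's note: for asm.js, the maximum page count above is just the buffer length rounded up to whole 64 KiB wasm pages. A quick check of the arithmetic; {kWasmPageSize} is the only constant assumed here:

    #include <cstddef>
    #include <cstdio>

    constexpr size_t kWasmPageSize = 64 * 1024;

    constexpr size_t MaxPages(size_t byte_length) {
      // RoundUp(byte_length, kWasmPageSize) / kWasmPageSize
      return (byte_length + kWasmPageSize - 1) / kWasmPageSize;
    }

    static_assert(MaxPages(0) == 0, "empty buffer");
    static_assert(MaxPages(65536) == 1, "exactly one page");
    static_assert(MaxPages(65537) == 2, "partial page rounds up");

    int main() { printf("%zu\n", MaxPages(3 * 65536 + 1)); }  // prints 4
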
@@ -333,33 +346,42 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
TRACE("New module instantiation for %p\n", native_module);
Handle<WasmInstanceObject> instance =
WasmInstanceObject::New(isolate_, module_object_);
- NativeModuleModificationScope native_modification_scope(native_module);
+
+ //--------------------------------------------------------------------------
+ // Attach the memory to the instance.
+ //--------------------------------------------------------------------------
+ if (module_->has_memory) {
+ DCHECK(!memory_object_.is_null());
+ if (!instance->has_memory_object()) {
+ instance->set_memory_object(*memory_object_);
+ }
+ // Add the instance object to the list of instances for this memory.
+ WasmMemoryObject::AddInstance(isolate_, memory_object_, instance);
+
+ // Double-check the {memory} array buffer matches the instance.
+ Handle<JSArrayBuffer> memory = memory_buffer_.ToHandleChecked();
+ CHECK_EQ(instance->memory_size(), memory->byte_length());
+ CHECK_EQ(instance->memory_start(), memory->backing_store());
+ }
//--------------------------------------------------------------------------
// Set up the globals for the new instance.
//--------------------------------------------------------------------------
uint32_t untagged_globals_buffer_size = module_->untagged_globals_buffer_size;
if (untagged_globals_buffer_size > 0) {
- void* backing_store = isolate_->array_buffer_allocator()->Allocate(
- untagged_globals_buffer_size);
- if (backing_store == nullptr) {
- thrower_->RangeError("Out of memory: wasm globals");
- return {};
- }
- untagged_globals_ = isolate_->factory()->NewJSArrayBuffer(
- SharedFlag::kNotShared, AllocationType::kOld);
- constexpr bool is_external = false;
- constexpr bool is_wasm_memory = false;
- JSArrayBuffer::Setup(untagged_globals_, isolate_, is_external,
- backing_store, untagged_globals_buffer_size,
- SharedFlag::kNotShared, is_wasm_memory);
- if (untagged_globals_.is_null()) {
+ MaybeHandle<JSArrayBuffer> result =
+ isolate_->factory()->NewJSArrayBufferAndBackingStore(
+ untagged_globals_buffer_size, InitializedFlag::kZeroInitialized,
+ AllocationType::kOld);
+
+ if (!result.ToHandle(&untagged_globals_)) {
thrower_->RangeError("Out of memory: wasm globals");
return {};
}
+
+ instance->set_untagged_globals_buffer(*untagged_globals_);
instance->set_globals_start(
reinterpret_cast<byte*>(untagged_globals_->backing_store()));
- instance->set_untagged_globals_buffer(*untagged_globals_);
}
uint32_t tagged_globals_buffer_size = module_->tagged_globals_buffer_size;
@@ -425,6 +447,8 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
instance->set_indirect_function_tables(*tables);
}
+ NativeModuleModificationScope native_modification_scope(native_module);
+
//--------------------------------------------------------------------------
// Process the imports for the module.
//--------------------------------------------------------------------------
@@ -450,30 +474,6 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
InitializeExceptions(instance);
}
- //--------------------------------------------------------------------------
- // Create the WebAssembly.Memory object.
- //--------------------------------------------------------------------------
- if (module_->has_memory) {
- if (!instance->has_memory_object()) {
- // No memory object exists. Create one.
- Handle<WasmMemoryObject> memory_object = WasmMemoryObject::New(
- isolate_, memory_,
- module_->maximum_pages != 0 ? module_->maximum_pages : -1);
- instance->set_memory_object(*memory_object);
- }
-
- // Add the instance object to the list of instances for this memory.
- Handle<WasmMemoryObject> memory_object(instance->memory_object(), isolate_);
- WasmMemoryObject::AddInstance(isolate_, memory_object, instance);
-
- if (!memory_.is_null()) {
- // Double-check the {memory} array buffer matches the instance.
- Handle<JSArrayBuffer> memory = memory_.ToHandleChecked();
- CHECK_EQ(instance->memory_size(), memory->byte_length());
- CHECK_EQ(instance->memory_start(), memory->backing_store());
- }
- }
-
// The bulk memory proposal changes the MVP behavior here; the segments are
// written as if `memory.init` and `table.init` are executed directly, and
// not bounds checked ahead of time.
@@ -536,7 +536,8 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
// Debugging support.
//--------------------------------------------------------------------------
// Set all breakpoints that were set on the shared module.
- WasmModuleObject::SetBreakpointsOnNewInstance(module_object_, instance);
+ WasmModuleObject::SetBreakpointsOnNewInstance(
+ handle(module_object_->script(), isolate_), instance);
//--------------------------------------------------------------------------
// Create a wrapper for the start function.
@@ -547,7 +548,7 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
Handle<Code> wrapper_code =
JSToWasmWrapperCompilationUnit::CompileJSToWasmWrapper(
isolate_, function.sig, function.imported);
- // TODO(clemensh): Don't generate an exported function for the start
+ // TODO(clemensb): Don't generate an exported function for the start
// function. Use CWasmEntry instead.
start_function_ = WasmExportedFunction::New(
isolate_, instance, start_index,
@@ -807,22 +808,21 @@ void InstanceBuilder::SanitizeImports() {
}
}
-MaybeHandle<JSArrayBuffer> InstanceBuilder::FindImportedMemoryBuffer() const {
+bool InstanceBuilder::FindImportedMemory() {
DCHECK_EQ(module_->import_table.size(), sanitized_imports_.size());
for (size_t index = 0; index < module_->import_table.size(); index++) {
- const WasmImport& import = module_->import_table[index];
+ WasmImport import = module_->import_table[index];
if (import.kind == kExternalMemory) {
- const auto& value = sanitized_imports_[index].value;
- if (!value->IsWasmMemoryObject()) {
- return {};
- }
- auto memory = Handle<WasmMemoryObject>::cast(value);
- Handle<JSArrayBuffer> buffer(memory->array_buffer(), isolate_);
- return buffer;
+ auto& value = sanitized_imports_[index].value;
+ if (!value->IsWasmMemoryObject()) return false;
+ memory_object_ = Handle<WasmMemoryObject>::cast(value);
+ memory_buffer_ =
+ Handle<JSArrayBuffer>(memory_object_->array_buffer(), isolate_);
+ return true;
}
}
- return {};
+ return false;
}
bool InstanceBuilder::ProcessImportedFunction(
@@ -1016,19 +1016,19 @@ bool InstanceBuilder::ProcessImportedMemory(Handle<WasmInstanceObject> instance,
Handle<String> module_name,
Handle<String> import_name,
Handle<Object> value) {
- // Validation should have failed if more than one memory object was
- // provided.
- DCHECK(!instance->has_memory_object());
if (!value->IsWasmMemoryObject()) {
ReportLinkError("memory import must be a WebAssembly.Memory object",
import_index, module_name, import_name);
return false;
}
- auto memory = Handle<WasmMemoryObject>::cast(value);
- instance->set_memory_object(*memory);
- Handle<JSArrayBuffer> buffer(memory->array_buffer(), isolate_);
+ auto memory_object = Handle<WasmMemoryObject>::cast(value);
+
+  // The imported memory should already have been set up earlier, in Build().
+ CHECK_EQ(instance->memory_object(), *memory_object);
+
+ Handle<JSArrayBuffer> buffer(memory_object_->array_buffer(), isolate_);
  // memory_buffer_ should have already been assigned in Build().
- DCHECK_EQ(*memory_.ToHandleChecked(), *buffer);
+ DCHECK_EQ(*memory_buffer_.ToHandleChecked(), *buffer);
uint32_t imported_cur_pages =
static_cast<uint32_t>(buffer->byte_length() / kWasmPageSize);
if (imported_cur_pages < module_->initial_pages) {
@@ -1037,7 +1037,7 @@ bool InstanceBuilder::ProcessImportedMemory(Handle<WasmInstanceObject> instance,
imported_cur_pages);
return false;
}
- int32_t imported_maximum_pages = memory->maximum_pages();
+ int32_t imported_maximum_pages = memory_object_->maximum_pages();
if (module_->has_maximum_pages) {
if (imported_maximum_pages < 0) {
thrower_->LinkError(
@@ -1186,13 +1186,8 @@ bool InstanceBuilder::ProcessImportedGlobal(Handle<WasmInstanceObject> instance,
return true;
}
- if (enabled_.bigint && global.type == kWasmI64) {
- Handle<BigInt> bigint;
-
- if (!BigInt::FromObject(isolate_, value).ToHandle(&bigint)) {
- return false;
- }
- WriteGlobalValue(global, bigint->AsInt64());
+ if (enabled_.bigint && global.type == kWasmI64 && value->IsBigInt()) {
+ WriteGlobalValue(global, BigInt::cast(*value).AsInt64());
return true;
}
@@ -1241,7 +1236,7 @@ void InstanceBuilder::CompileImportWrappers(
CancelableTaskManager task_manager;
const int max_background_tasks = GetMaxBackgroundTasks();
for (int i = 0; i < max_background_tasks; ++i) {
- auto task = base::make_unique<CompileImportWrapperTask>(
+ auto task = std::make_unique<CompileImportWrapperTask>(
&task_manager, isolate_->wasm_engine(), isolate_->counters(),
native_module, &import_wrapper_queue, &cache_scope);
V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task));
@@ -1411,27 +1406,28 @@ void InstanceBuilder::InitGlobals(Handle<WasmInstanceObject> instance) {
}
// Allocate memory for a module instance as a new JSArrayBuffer.
-Handle<JSArrayBuffer> InstanceBuilder::AllocateMemory(uint32_t initial_pages,
- uint32_t maximum_pages) {
+bool InstanceBuilder::AllocateMemory() {
+ auto initial_pages = module_->initial_pages;
+ auto maximum_pages = module_->has_maximum_pages ? module_->maximum_pages
+ : wasm::max_mem_pages();
if (initial_pages > max_mem_pages()) {
thrower_->RangeError("Out of memory: wasm memory too large");
- return Handle<JSArrayBuffer>::null();
- }
- const bool is_shared_memory = module_->has_shared_memory && enabled_.threads;
- Handle<JSArrayBuffer> mem_buffer;
- if (is_shared_memory) {
- if (!NewSharedArrayBuffer(isolate_, initial_pages * kWasmPageSize,
- maximum_pages * kWasmPageSize)
- .ToHandle(&mem_buffer)) {
- thrower_->RangeError("Out of memory: wasm shared memory");
- }
- } else {
- if (!NewArrayBuffer(isolate_, initial_pages * kWasmPageSize)
- .ToHandle(&mem_buffer)) {
- thrower_->RangeError("Out of memory: wasm memory");
- }
+ return false;
}
- return mem_buffer;
+ auto shared = (module_->has_shared_memory && enabled_.threads)
+ ? SharedFlag::kShared
+ : SharedFlag::kNotShared;
+
+ MaybeHandle<WasmMemoryObject> result =
+ WasmMemoryObject::New(isolate_, initial_pages, maximum_pages, shared);
+
+ if (!result.ToHandle(&memory_object_)) {
+ thrower_->RangeError("Out of memory: wasm memory");
+ return false;
+ }
+ memory_buffer_ =
+ Handle<JSArrayBuffer>(memory_object_->array_buffer(), isolate_);
+ return true;
}
bool InstanceBuilder::NeedsWrappers() const {
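
Editor's note: {AllocateMemory} now delegates buffer creation to {WasmMemoryObject::New}, selecting a {SharedFlag} from the module and falling back to the engine-wide page limit when the module declares no maximum. A sketch of just the parameter selection; {Module} and the limit are simplified stand-ins for the V8-internal types:

    #include <cstdint>
    #include <cstdio>

    struct Module {
      uint32_t initial_pages;
      bool has_maximum_pages;
      uint32_t maximum_pages;
      bool has_shared_memory;
    };

    constexpr uint32_t kEngineMaxPages = 65536;  // stand-in for max_mem_pages()

    enum class SharedFlag { kNotShared, kShared };

    bool PickMemoryParams(const Module& m, bool threads_enabled,
                          uint32_t* max_pages, SharedFlag* shared) {
      if (m.initial_pages > kEngineMaxPages) return false;  // "memory too large"
      *max_pages = m.has_maximum_pages ? m.maximum_pages : kEngineMaxPages;
      *shared = (m.has_shared_memory && threads_enabled) ? SharedFlag::kShared
                                                         : SharedFlag::kNotShared;
      return true;
    }

    int main() {
      Module m{/*initial_pages=*/1, /*has_maximum_pages=*/false,
               /*maximum_pages=*/0, /*has_shared_memory=*/false};
      uint32_t max_pages;
      SharedFlag shared;
      if (PickMemoryParams(m, /*threads_enabled=*/false, &max_pages, &shared)) {
        printf("max_pages=%u shared=%d\n", max_pages,
               shared == SharedFlag::kShared);  // max_pages=65536 shared=0
      }
    }
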
diff --git a/deps/v8/src/wasm/streaming-decoder.cc b/deps/v8/src/wasm/streaming-decoder.cc
index 94945ea58a..37aaf05605 100644
--- a/deps/v8/src/wasm/streaming-decoder.cc
+++ b/deps/v8/src/wasm/streaming-decoder.cc
@@ -4,7 +4,6 @@
#include "src/wasm/streaming-decoder.h"
-#include "src/base/template-utils.h"
#include "src/handles/handles.h"
#include "src/objects/descriptor-array.h"
#include "src/objects/dictionary.h"
@@ -364,14 +363,14 @@ StreamingDecoder::DecodeModuleHeader::Next(StreamingDecoder* streaming) {
TRACE_STREAMING("DecodeModuleHeader\n");
streaming->ProcessModuleHeader();
if (!streaming->ok()) return nullptr;
- return base::make_unique<DecodeSectionID>(streaming->module_offset());
+ return std::make_unique<DecodeSectionID>(streaming->module_offset());
}
std::unique_ptr<StreamingDecoder::DecodingState>
StreamingDecoder::DecodeSectionID::Next(StreamingDecoder* streaming) {
TRACE_STREAMING("DecodeSectionID: %s section\n",
SectionName(static_cast<SectionCode>(id_)));
- return base::make_unique<DecodeSectionLength>(id_, module_offset_);
+ return std::make_unique<DecodeSectionLength>(id_, module_offset_);
}
std::unique_ptr<StreamingDecoder::DecodingState>
@@ -391,7 +390,7 @@ StreamingDecoder::DecodeSectionLength::NextWithValue(
streaming->ProcessSection(buf);
if (!streaming->ok()) return nullptr;
// There is no payload, we go to the next section immediately.
- return base::make_unique<DecodeSectionID>(streaming->module_offset_);
+ return std::make_unique<DecodeSectionID>(streaming->module_offset_);
} else {
if (section_id_ == SectionCode::kCodeSectionCode) {
// Explicitly check for multiple code sections as module decoder never
@@ -404,9 +403,9 @@ StreamingDecoder::DecodeSectionLength::NextWithValue(
streaming->code_section_processed_ = true;
// We reached the code section. All functions of the code section are put
// into the same SectionBuffer.
- return base::make_unique<DecodeNumberOfFunctions>(buf);
+ return std::make_unique<DecodeNumberOfFunctions>(buf);
}
- return base::make_unique<DecodeSectionPayload>(buf);
+ return std::make_unique<DecodeSectionPayload>(buf);
}
}
@@ -415,7 +414,7 @@ StreamingDecoder::DecodeSectionPayload::Next(StreamingDecoder* streaming) {
TRACE_STREAMING("DecodeSectionPayload\n");
streaming->ProcessSection(section_buffer_);
if (!streaming->ok()) return nullptr;
- return base::make_unique<DecodeSectionID>(streaming->module_offset());
+ return std::make_unique<DecodeSectionID>(streaming->module_offset());
}
std::unique_ptr<StreamingDecoder::DecodingState>
@@ -434,14 +433,14 @@ StreamingDecoder::DecodeNumberOfFunctions::NextWithValue(
if (payload_buf.size() != bytes_consumed_) {
return streaming->Error("not all code section bytes were used");
}
- return base::make_unique<DecodeSectionID>(streaming->module_offset());
+ return std::make_unique<DecodeSectionID>(streaming->module_offset());
}
DCHECK_GE(kMaxInt, value_);
streaming->StartCodeSection(static_cast<int>(value_),
streaming->section_buffers_.back());
if (!streaming->ok()) return nullptr;
- return base::make_unique<DecodeFunctionLength>(
+ return std::make_unique<DecodeFunctionLength>(
section_buffer_, section_buffer_->payload_offset() + bytes_consumed_,
value_);
}
@@ -464,7 +463,7 @@ StreamingDecoder::DecodeFunctionLength::NextWithValue(
return streaming->Error("not enough code section bytes");
}
- return base::make_unique<DecodeFunctionBody>(
+ return std::make_unique<DecodeFunctionBody>(
section_buffer_, buffer_offset_ + bytes_consumed_, value_,
num_remaining_functions_, streaming->module_offset());
}
@@ -477,14 +476,14 @@ StreamingDecoder::DecodeFunctionBody::Next(StreamingDecoder* streaming) {
size_t end_offset = buffer_offset_ + function_body_length_;
if (num_remaining_functions_ > 0) {
- return base::make_unique<DecodeFunctionLength>(section_buffer_, end_offset,
- num_remaining_functions_);
+ return std::make_unique<DecodeFunctionLength>(section_buffer_, end_offset,
+ num_remaining_functions_);
}
// We just read the last function body. Continue with the next section.
if (end_offset != section_buffer_->length()) {
return streaming->Error("not all code section bytes were used");
}
- return base::make_unique<DecodeSectionID>(streaming->module_offset());
+ return std::make_unique<DecodeSectionID>(streaming->module_offset());
}
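// Minimal sketch of the decoding state machine above (not part of the patch;
// types are hypothetical): each state consumes some input and hands back the
// next state as a std::unique_ptr, or nullptr to stop decoding.
#include <memory>
struct SketchState {
  virtual ~SketchState() = default;
  virtual std::unique_ptr<SketchState> Next() = 0;
};
struct SketchDone final : SketchState {
  std::unique_ptr<SketchState> Next() override { return nullptr; }
};
struct SketchHeader final : SketchState {
  std::unique_ptr<SketchState> Next() override {
    return std::make_unique<SketchDone>();  // advance to the next state
  }
};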
StreamingDecoder::StreamingDecoder(
diff --git a/deps/v8/src/wasm/value-type.h b/deps/v8/src/wasm/value-type.h
index bca5c2b941..49f348b714 100644
--- a/deps/v8/src/wasm/value-type.h
+++ b/deps/v8/src/wasm/value-type.h
@@ -44,7 +44,7 @@ using FunctionSig = Signature<ValueType>;
inline size_t hash_value(ValueType type) { return static_cast<size_t>(type); }
-// TODO(clemensh): Compute memtype and size from ValueType once we have c++14
+// TODO(clemensb): Compute memtype and size from ValueType once we have c++14
// constexpr support.
#define FOREACH_LOAD_TYPE(V) \
V(I32, , Int32, 2) \
diff --git a/deps/v8/src/wasm/wasm-code-manager.cc b/deps/v8/src/wasm/wasm-code-manager.cc
index 91cfc01cea..55695259f0 100644
--- a/deps/v8/src/wasm/wasm-code-manager.cc
+++ b/deps/v8/src/wasm/wasm-code-manager.cc
@@ -6,7 +6,7 @@
#include <iomanip>
-#include "src/base/adapters.h"
+#include "src/base/iterator.h"
#include "src/base/macros.h"
#include "src/base/platform/platform.h"
#include "src/base/small-vector.h"
@@ -192,7 +192,7 @@ void WasmCode::LogCode(Isolate* isolate) const {
Local<v8::String> source_map_str =
load_wasm_source_map(v8_isolate, source_map_url.c_str());
native_module()->SetWasmSourceMap(
- base::make_unique<WasmModuleSourceMap>(v8_isolate, source_map_str));
+ std::make_unique<WasmModuleSourceMap>(v8_isolate, source_map_str));
}
if (!name_vec.empty()) {
@@ -235,7 +235,10 @@ void WasmCode::Validate() const {
switch (mode) {
case RelocInfo::WASM_CALL: {
Address target = it.rinfo()->wasm_call_address();
- DCHECK(native_module_->is_jump_table_slot(target));
+ WasmCode* code = native_module_->Lookup(target);
+ CHECK_NOT_NULL(code);
+ CHECK_EQ(WasmCode::kJumpTable, code->kind());
+ CHECK(code->contains(target));
break;
}
case RelocInfo::WASM_STUB_CALL: {
@@ -244,7 +247,6 @@ void WasmCode::Validate() const {
CHECK_NOT_NULL(code);
#ifdef V8_EMBEDDED_BUILTINS
CHECK_EQ(WasmCode::kJumpTable, code->kind());
- CHECK_EQ(native_module()->runtime_stub_table_, code);
CHECK(code->contains(target));
#else
CHECK_EQ(WasmCode::kRuntimeStub, code->kind());
@@ -385,8 +387,6 @@ const char* GetWasmCodeKindAsString(WasmCode::Kind kind) {
return "wasm-to-capi";
case WasmCode::kWasmToJsWrapper:
return "wasm-to-js";
- case WasmCode::kRuntimeStub:
- return "runtime-stub";
case WasmCode::kInterpreterEntry:
return "interpreter entry";
case WasmCode::kJumpTable:
@@ -430,6 +430,16 @@ void WasmCode::DecrementRefCount(Vector<WasmCode* const> code_vec) {
if (engine) engine->FreeDeadCode(dead_code);
}
+WasmCodeAllocator::OptionalLock::~OptionalLock() {
+ if (allocator_) allocator_->mutex_.Unlock();
+}
+
+void WasmCodeAllocator::OptionalLock::Lock(WasmCodeAllocator* allocator) {
+ DCHECK(!is_locked());
+ allocator_ = allocator;
+ allocator->mutex_.Lock();
+}
+
WasmCodeAllocator::WasmCodeAllocator(WasmCodeManager* code_manager,
VirtualMemory code_space,
bool can_request_more,
@@ -448,6 +458,11 @@ WasmCodeAllocator::~WasmCodeAllocator() {
committed_code_space());
}
+void WasmCodeAllocator::Init(NativeModule* native_module) {
+ DCHECK_EQ(1, owned_code_space_.size());
+ native_module->AddCodeSpace(owned_code_space_[0].region(), {});
+}
+
namespace {
// On Windows, we cannot commit a region that straddles different reservations
// of virtual memory. Because we bump-allocate, and because, if we need more
@@ -487,17 +502,70 @@ base::SmallVector<base::AddressRegion, 1> SplitRangeByReservationsIfNeeded(
#endif
return split_ranges;
}
+
+int NumWasmFunctionsInFarJumpTable(uint32_t num_declared_functions) {
+ return NativeModule::kNeedsFarJumpsBetweenCodeSpaces &&
+ FLAG_wasm_far_jump_table
+ ? static_cast<int>(num_declared_functions)
+ : 0;
+}
+
+// Returns an overapproximation of the code size overhead per new code space
+// created by the jump tables.
+size_t OverheadPerCodeSpace(uint32_t num_declared_functions) {
+ // Overhead for the jump table.
+ size_t overhead = RoundUp<kCodeAlignment>(
+ JumpTableAssembler::SizeForNumberOfSlots(num_declared_functions));
+
+#if defined(V8_OS_WIN64)
+ // On Win64, we need to reserve some pages at the beginning of an executable
+ // space. See {AddCodeSpace}.
+ overhead += Heap::GetCodeRangeReservedAreaSize();
+#endif // V8_OS_WIN64
+
+ // Overhead for the far jump table.
+ overhead +=
+ RoundUp<kCodeAlignment>(JumpTableAssembler::SizeForNumberOfFarJumpSlots(
+ WasmCode::kRuntimeStubCount,
+ NumWasmFunctionsInFarJumpTable(num_declared_functions)));
+
+ return overhead;
+}
+
+size_t ReservationSize(size_t code_size_estimate, int num_declared_functions,
+ size_t total_reserved) {
+ size_t overhead = OverheadPerCodeSpace(num_declared_functions);
+
+ // Reserve a power of two at least as big as any of
+ // a) needed size + overhead (this is the minimum needed)
+ // b) 2 * overhead (to not waste too much space by overhead)
+ // c) 1/4 of current total reservation size (to grow exponentially)
+ size_t reserve_size = base::bits::RoundUpToPowerOfTwo(
+ std::max(std::max(RoundUp<kCodeAlignment>(code_size_estimate) + overhead,
+ 2 * overhead),
+ total_reserved / 4));
+
+ // Limit by the maximum supported code space size.
+ return std::min(kMaxWasmCodeSpaceSize, reserve_size);
+}
+
} // namespace
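// Worked sketch of {ReservationSize} (not part of the patch; the helper below
// stands in for base::bits::RoundUpToPowerOfTwo, and the alignment rounding
// and the cap at kMaxWasmCodeSpaceSize are omitted): grow by at least the
// needed size plus overhead, at least twice the overhead, and at least a
// quarter of the space reserved so far.
#include <algorithm>
#include <cstddef>
constexpr size_t SketchRoundUpPow2(size_t v) {
  size_t p = 1;
  while (p < v) p <<= 1;
  return p;
}
constexpr size_t SketchReservationSize(size_t needed, size_t overhead,
                                       size_t total_reserved) {
  return SketchRoundUpPow2(
      std::max({needed + overhead, 2 * overhead, total_reserved / 4}));
}
// Needing 600 KB with 64 KB overhead while 16 MB is already reserved yields a
// 4 MB reservation: a quarter of the total, already a power of two.
static_assert(SketchReservationSize(600 * 1024, 64 * 1024, 16 * 1024 * 1024) ==
                  4 * 1024 * 1024,
              "reservations grow geometrically with total reserved space");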
Vector<byte> WasmCodeAllocator::AllocateForCode(NativeModule* native_module,
size_t size) {
return AllocateForCodeInRegion(
- native_module, size, {kNullAddress, std::numeric_limits<size_t>::max()});
+ native_module, size, {kNullAddress, std::numeric_limits<size_t>::max()},
+ WasmCodeAllocator::OptionalLock{});
}
Vector<byte> WasmCodeAllocator::AllocateForCodeInRegion(
- NativeModule* native_module, size_t size, base::AddressRegion region) {
- base::MutexGuard lock(&mutex_);
+ NativeModule* native_module, size_t size, base::AddressRegion region,
+ const WasmCodeAllocator::OptionalLock& optional_lock) {
+ OptionalLock new_lock;
+ if (!optional_lock.is_locked()) new_lock.Lock(this);
+ const auto& locked_lock =
+ optional_lock.is_locked() ? optional_lock : new_lock;
+ DCHECK(locked_lock.is_locked());
DCHECK_EQ(code_manager_, native_module->engine()->code_manager());
DCHECK_LT(0, size);
v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
@@ -517,12 +585,10 @@ Vector<byte> WasmCodeAllocator::AllocateForCodeInRegion(
Address hint = owned_code_space_.empty() ? kNullAddress
: owned_code_space_.back().end();
- // Reserve at least 20% of the total generated code size so far, and of
- // course at least {size}. Round up to the next power of two.
size_t total_reserved = 0;
for (auto& vmem : owned_code_space_) total_reserved += vmem.size();
- size_t reserve_size =
- base::bits::RoundUpToPowerOfTwo(std::max(size, total_reserved / 5));
+ size_t reserve_size = ReservationSize(
+ size, native_module->module()->num_declared_functions, total_reserved);
VirtualMemory new_mem =
code_manager_->TryAllocate(reserve_size, reinterpret_cast<void*>(hint));
if (!new_mem.IsReserved()) {
@@ -534,7 +600,7 @@ Vector<byte> WasmCodeAllocator::AllocateForCodeInRegion(
code_manager_->AssignRange(new_region, native_module);
free_code_space_.Merge(new_region);
owned_code_space_.emplace_back(std::move(new_mem));
- native_module->AddCodeSpace(new_region);
+ native_module->AddCodeSpace(new_region, locked_lock);
code_space = free_code_space_.Allocate(size);
DCHECK(!code_space.is_empty());
@@ -660,10 +726,9 @@ void WasmCodeAllocator::FreeCode(Vector<WasmCode* const> codes) {
}
}
-base::AddressRegion WasmCodeAllocator::GetSingleCodeRegion() const {
+size_t WasmCodeAllocator::GetNumCodeSpaces() const {
base::MutexGuard lock(&mutex_);
- DCHECK_EQ(1, owned_code_space_.size());
- return owned_code_space_[0].region();
+ return owned_code_space_.size();
}
NativeModule::NativeModule(WasmEngine* engine, const WasmFeatures& enabled,
@@ -689,27 +754,34 @@ NativeModule::NativeModule(WasmEngine* engine, const WasmFeatures& enabled,
CompilationState::New(*shared_this, std::move(async_counters));
DCHECK_NOT_NULL(module_);
if (module_->num_declared_functions > 0) {
- code_table_.reset(new WasmCode* [module_->num_declared_functions] {});
+ code_table_ =
+ std::make_unique<WasmCode*[]>(module_->num_declared_functions);
}
- AddCodeSpace(code_allocator_.GetSingleCodeRegion());
+ code_allocator_.Init(this);
}
void NativeModule::ReserveCodeTableForTesting(uint32_t max_functions) {
WasmCodeRefScope code_ref_scope;
- DCHECK_LE(num_functions(), max_functions);
- WasmCode** new_table = new WasmCode* [max_functions] {};
+ DCHECK_LE(module_->num_declared_functions, max_functions);
+ auto new_table = std::make_unique<WasmCode*[]>(max_functions);
if (module_->num_declared_functions > 0) {
- memcpy(new_table, code_table_.get(),
- module_->num_declared_functions * sizeof(*new_table));
+ memcpy(new_table.get(), code_table_.get(),
+ module_->num_declared_functions * sizeof(WasmCode*));
}
- code_table_.reset(new_table);
+ code_table_ = std::move(new_table);
- CHECK_EQ(1, code_space_data_.size());
+ base::AddressRegion single_code_space_region;
+ {
+ base::MutexGuard guard(&allocation_mutex_);
+ CHECK_EQ(1, code_space_data_.size());
+ single_code_space_region = code_space_data_[0].region;
+ }
// Re-allocate jump table.
- code_space_data_[0].jump_table = CreateEmptyJumpTableInRegion(
+ main_jump_table_ = CreateEmptyJumpTableInRegion(
JumpTableAssembler::SizeForNumberOfSlots(max_functions),
- code_space_data_[0].region);
- main_jump_table_ = code_space_data_[0].jump_table;
+ single_code_space_region, WasmCodeAllocator::OptionalLock{});
+ base::MutexGuard guard(&allocation_mutex_);
+ code_space_data_[0].jump_table = main_jump_table_;
}
void NativeModule::LogWasmCodes(Isolate* isolate) {
@@ -731,89 +803,6 @@ CompilationEnv NativeModule::CreateCompilationEnv() const {
}
WasmCode* NativeModule::AddCodeForTesting(Handle<Code> code) {
- return AddAndPublishAnonymousCode(code, WasmCode::kFunction);
-}
-
-void NativeModule::UseLazyStub(uint32_t func_index) {
- DCHECK_LE(module_->num_imported_functions, func_index);
- DCHECK_LT(func_index,
- module_->num_imported_functions + module_->num_declared_functions);
-
- if (!lazy_compile_table_) {
- uint32_t num_slots = module_->num_declared_functions;
- WasmCodeRefScope code_ref_scope;
- DCHECK_EQ(1, code_space_data_.size());
- lazy_compile_table_ = CreateEmptyJumpTableInRegion(
- JumpTableAssembler::SizeForNumberOfLazyFunctions(num_slots),
- code_space_data_[0].region);
- JumpTableAssembler::GenerateLazyCompileTable(
- lazy_compile_table_->instruction_start(), num_slots,
- module_->num_imported_functions,
- runtime_stub_entry(WasmCode::kWasmCompileLazy));
- }
-
- // Add jump table entry for jump to the lazy compile stub.
- uint32_t slot_index = func_index - module_->num_imported_functions;
- DCHECK_NE(runtime_stub_entry(WasmCode::kWasmCompileLazy), kNullAddress);
- Address lazy_compile_target =
- lazy_compile_table_->instruction_start() +
- JumpTableAssembler::LazyCompileSlotIndexToOffset(slot_index);
- JumpTableAssembler::PatchJumpTableSlot(main_jump_table_->instruction_start(),
- slot_index, lazy_compile_target,
- WasmCode::kFlushICache);
-}
-
-// TODO(mstarzinger): Remove {Isolate} parameter once {V8_EMBEDDED_BUILTINS}
-// was removed and embedded builtins are no longer optional.
-void NativeModule::SetRuntimeStubs(Isolate* isolate) {
- DCHECK_EQ(kNullAddress, runtime_stub_entries_[0]); // Only called once.
-#ifdef V8_EMBEDDED_BUILTINS
- WasmCodeRefScope code_ref_scope;
- DCHECK_EQ(1, code_space_data_.size());
- WasmCode* jump_table = CreateEmptyJumpTableInRegion(
- JumpTableAssembler::SizeForNumberOfStubSlots(WasmCode::kRuntimeStubCount),
- code_space_data_[0].region);
- Address base = jump_table->instruction_start();
- EmbeddedData embedded_data = EmbeddedData::FromBlob();
-#define RUNTIME_STUB(Name) Builtins::k##Name,
-#define RUNTIME_STUB_TRAP(Name) RUNTIME_STUB(ThrowWasm##Name)
- Builtins::Name wasm_runtime_stubs[WasmCode::kRuntimeStubCount] = {
- WASM_RUNTIME_STUB_LIST(RUNTIME_STUB, RUNTIME_STUB_TRAP)};
-#undef RUNTIME_STUB
-#undef RUNTIME_STUB_TRAP
- Address builtin_address[WasmCode::kRuntimeStubCount];
- for (int i = 0; i < WasmCode::kRuntimeStubCount; ++i) {
- Builtins::Name builtin = wasm_runtime_stubs[i];
- CHECK(embedded_data.ContainsBuiltin(builtin));
- builtin_address[i] = embedded_data.InstructionStartOfBuiltin(builtin);
- runtime_stub_entries_[i] =
- base + JumpTableAssembler::StubSlotIndexToOffset(i);
- }
- JumpTableAssembler::GenerateRuntimeStubTable(base, builtin_address,
- WasmCode::kRuntimeStubCount);
- DCHECK_NULL(runtime_stub_table_);
- runtime_stub_table_ = jump_table;
-#else // V8_EMBEDDED_BUILTINS
- HandleScope scope(isolate);
- WasmCodeRefScope code_ref_scope;
- USE(runtime_stub_table_); // Actually unused, but avoids ifdef's in header.
-#define COPY_BUILTIN(Name) \
- runtime_stub_entries_[WasmCode::k##Name] = \
- AddAndPublishAnonymousCode( \
- isolate->builtins()->builtin_handle(Builtins::k##Name), \
- WasmCode::kRuntimeStub, #Name) \
- ->instruction_start();
-#define COPY_BUILTIN_TRAP(Name) COPY_BUILTIN(ThrowWasm##Name)
- WASM_RUNTIME_STUB_LIST(COPY_BUILTIN, COPY_BUILTIN_TRAP)
-#undef COPY_BUILTIN_TRAP
-#undef COPY_BUILTIN
-#endif // V8_EMBEDDED_BUILTINS
- DCHECK_NE(kNullAddress, runtime_stub_entries_[0]);
-}
-
-WasmCode* NativeModule::AddAndPublishAnonymousCode(Handle<Code> code,
- WasmCode::Kind kind,
- const char* name) {
// For off-heap builtins, we create a copy of the off-heap instruction stream
// instead of the on-heap code object containing the trampoline. Ensure that
// we do not apply the on-heap reloc info to the off-heap instructions.
@@ -859,8 +848,10 @@ WasmCode* NativeModule::AddAndPublishAnonymousCode(Handle<Code> code,
code->InstructionStart();
int mode_mask = RelocInfo::kApplyMask |
RelocInfo::ModeMask(RelocInfo::WASM_STUB_CALL);
- Address constant_pool_start =
- reinterpret_cast<Address>(dst_code_bytes.begin()) + constant_pool_offset;
+ auto jump_tables_ref =
+ FindJumpTablesForCode(reinterpret_cast<Address>(dst_code_bytes.begin()));
+ Address dst_code_addr = reinterpret_cast<Address>(dst_code_bytes.begin());
+ Address constant_pool_start = dst_code_addr + constant_pool_offset;
RelocIterator orig_it(*code, mode_mask);
for (RelocIterator it(dst_code_bytes, reloc_info.as_vector(),
constant_pool_start, mode_mask);
@@ -869,8 +860,8 @@ WasmCode* NativeModule::AddAndPublishAnonymousCode(Handle<Code> code,
if (RelocInfo::IsWasmStubCall(mode)) {
uint32_t stub_call_tag = orig_it.rinfo()->wasm_call_tag();
DCHECK_LT(stub_call_tag, WasmCode::kRuntimeStubCount);
- Address entry = runtime_stub_entry(
- static_cast<WasmCode::RuntimeStubId>(stub_call_tag));
+ Address entry = GetNearRuntimeStubEntry(
+ static_cast<WasmCode::RuntimeStubId>(stub_call_tag), jump_tables_ref);
it.rinfo()->set_wasm_stub_call_address(entry, SKIP_ICACHE_FLUSH);
} else {
it.rinfo()->apply(delta);
@@ -880,7 +871,6 @@ WasmCode* NativeModule::AddAndPublishAnonymousCode(Handle<Code> code,
// Flush the i-cache after relocation.
FlushInstructionCache(dst_code_bytes.begin(), dst_code_bytes.size());
- DCHECK_NE(kind, WasmCode::Kind::kInterpreterEntry);
std::unique_ptr<WasmCode> new_code{new WasmCode{
this, // native_module
kAnonymousFuncIndex, // index
@@ -895,24 +885,63 @@ WasmCode* NativeModule::AddAndPublishAnonymousCode(Handle<Code> code,
OwnedVector<ProtectedInstructionData>{}, // protected_instructions
std::move(reloc_info), // reloc_info
std::move(source_pos), // source positions
- kind, // kind
+ WasmCode::kFunction, // kind
ExecutionTier::kNone}}; // tier
- new_code->MaybePrint(name);
+ new_code->MaybePrint(nullptr);
new_code->Validate();
return PublishCode(std::move(new_code));
}
+void NativeModule::UseLazyStub(uint32_t func_index) {
+ DCHECK_LE(module_->num_imported_functions, func_index);
+ DCHECK_LT(func_index,
+ module_->num_imported_functions + module_->num_declared_functions);
+
+ if (!lazy_compile_table_) {
+ uint32_t num_slots = module_->num_declared_functions;
+ WasmCodeRefScope code_ref_scope;
+ base::AddressRegion single_code_space_region;
+ {
+ base::MutexGuard guard(&allocation_mutex_);
+ DCHECK_EQ(1, code_space_data_.size());
+ single_code_space_region = code_space_data_[0].region;
+ }
+ lazy_compile_table_ = CreateEmptyJumpTableInRegion(
+ JumpTableAssembler::SizeForNumberOfLazyFunctions(num_slots),
+ single_code_space_region, WasmCodeAllocator::OptionalLock{});
+ JumpTableAssembler::GenerateLazyCompileTable(
+ lazy_compile_table_->instruction_start(), num_slots,
+ module_->num_imported_functions,
+ GetNearRuntimeStubEntry(
+ WasmCode::kWasmCompileLazy,
+ FindJumpTablesForCode(lazy_compile_table_->instruction_start())));
+ }
+
+ // Add jump table entry for jump to the lazy compile stub.
+ uint32_t slot_index = func_index - module_->num_imported_functions;
+ DCHECK_NULL(code_table_[slot_index]);
+ Address lazy_compile_target =
+ lazy_compile_table_->instruction_start() +
+ JumpTableAssembler::LazyCompileSlotIndexToOffset(slot_index);
+ base::MutexGuard guard(&allocation_mutex_);
+ PatchJumpTablesLocked(slot_index, lazy_compile_target);
+}
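// Sketch of the index arithmetic used above (not part of the patch): jump
// table slots exist only for declared (non-imported) functions, so a function
// index maps to its slot by subtracting the import count.
#include <cstdint>
constexpr uint32_t SketchSlotIndex(uint32_t func_index,
                                   uint32_t num_imported) {
  return func_index - num_imported;  // caller guarantees func_index >= imports
}
static_assert(SketchSlotIndex(5, 3) == 2, "imports occupy no jump table slots");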
+
std::unique_ptr<WasmCode> NativeModule::AddCode(
uint32_t index, const CodeDesc& desc, uint32_t stack_slots,
uint32_t tagged_parameter_slots,
OwnedVector<trap_handler::ProtectedInstructionData> protected_instructions,
OwnedVector<const byte> source_position_table, WasmCode::Kind kind,
ExecutionTier tier) {
- return AddCodeWithCodeSpace(
- index, desc, stack_slots, tagged_parameter_slots,
- std::move(protected_instructions), std::move(source_position_table), kind,
- tier, code_allocator_.AllocateForCode(this, desc.instr_size));
+ Vector<byte> code_space =
+ code_allocator_.AllocateForCode(this, desc.instr_size);
+ auto jump_table_ref =
+ FindJumpTablesForCode(reinterpret_cast<Address>(code_space.begin()));
+ return AddCodeWithCodeSpace(index, desc, stack_slots, tagged_parameter_slots,
+ std::move(protected_instructions),
+ std::move(source_position_table), kind, tier,
+ code_space, jump_table_ref);
}
std::unique_ptr<WasmCode> NativeModule::AddCodeWithCodeSpace(
@@ -920,7 +949,8 @@ std::unique_ptr<WasmCode> NativeModule::AddCodeWithCodeSpace(
uint32_t tagged_parameter_slots,
OwnedVector<ProtectedInstructionData> protected_instructions,
OwnedVector<const byte> source_position_table, WasmCode::Kind kind,
- ExecutionTier tier, Vector<uint8_t> dst_code_bytes) {
+ ExecutionTier tier, Vector<uint8_t> dst_code_bytes,
+ const JumpTablesRef& jump_tables_ref) {
OwnedVector<byte> reloc_info;
if (desc.reloc_size > 0) {
reloc_info = OwnedVector<byte>::New(desc.reloc_size);
@@ -949,21 +979,21 @@ std::unique_ptr<WasmCode> NativeModule::AddCodeWithCodeSpace(
int mode_mask = RelocInfo::kApplyMask |
RelocInfo::ModeMask(RelocInfo::WASM_CALL) |
RelocInfo::ModeMask(RelocInfo::WASM_STUB_CALL);
- Address constant_pool_start =
- reinterpret_cast<Address>(dst_code_bytes.begin()) + constant_pool_offset;
+ Address code_start = reinterpret_cast<Address>(dst_code_bytes.begin());
+ Address constant_pool_start = code_start + constant_pool_offset;
for (RelocIterator it(dst_code_bytes, reloc_info.as_vector(),
constant_pool_start, mode_mask);
!it.done(); it.next()) {
RelocInfo::Mode mode = it.rinfo()->rmode();
if (RelocInfo::IsWasmCall(mode)) {
uint32_t call_tag = it.rinfo()->wasm_call_tag();
- Address target = GetCallTargetForFunction(call_tag);
+ Address target = GetNearCallTargetForFunction(call_tag, jump_tables_ref);
it.rinfo()->set_wasm_call_address(target, SKIP_ICACHE_FLUSH);
} else if (RelocInfo::IsWasmStubCall(mode)) {
uint32_t stub_call_tag = it.rinfo()->wasm_call_tag();
DCHECK_LT(stub_call_tag, WasmCode::kRuntimeStubCount);
- Address entry = runtime_stub_entry(
- static_cast<WasmCode::RuntimeStubId>(stub_call_tag));
+ Address entry = GetNearRuntimeStubEntry(
+ static_cast<WasmCode::RuntimeStubId>(stub_call_tag), jump_tables_ref);
it.rinfo()->set_wasm_stub_call_address(entry, SKIP_ICACHE_FLUSH);
} else {
it.rinfo()->apply(delta);
@@ -1036,12 +1066,9 @@ WasmCode* NativeModule::PublishCodeLocked(std::unique_ptr<WasmCode> code) {
// Populate optimized code to the jump table unless there is an active
// redirection to the interpreter that should be preserved.
- DCHECK_IMPLIES(
- main_jump_table_ == nullptr,
- engine_->code_manager()->IsImplicitAllocationsDisabledForTesting());
- bool update_jump_table = update_code_table &&
- !has_interpreter_redirection(code->index()) &&
- main_jump_table_;
+ DCHECK_NOT_NULL(main_jump_table_);
+ bool update_jump_table =
+ update_code_table && !has_interpreter_redirection(code->index());
// Ensure that interpreter entries always populate to the jump table.
if (code->kind_ == WasmCode::Kind::kInterpreterEntry) {
@@ -1050,9 +1077,7 @@ WasmCode* NativeModule::PublishCodeLocked(std::unique_ptr<WasmCode> code) {
}
if (update_jump_table) {
- JumpTableAssembler::PatchJumpTableSlot(
- main_jump_table_->instruction_start(), slot_idx,
- code->instruction_start(), WasmCode::kFlushICache);
+ PatchJumpTablesLocked(slot_idx, code->instruction_start());
}
}
WasmCodeRefScope::AddRef(code.get());
@@ -1120,11 +1145,12 @@ WasmModuleSourceMap* NativeModule::GetWasmSourceMap() const {
}
WasmCode* NativeModule::CreateEmptyJumpTableInRegion(
- uint32_t jump_table_size, base::AddressRegion region) {
+ uint32_t jump_table_size, base::AddressRegion region,
+ const WasmCodeAllocator::OptionalLock& allocator_lock) {
// Only call this if we really need a jump table.
DCHECK_LT(0, jump_table_size);
- Vector<uint8_t> code_space =
- code_allocator_.AllocateForCodeInRegion(this, jump_table_size, region);
+ Vector<uint8_t> code_space = code_allocator_.AllocateForCodeInRegion(
+ this, jump_table_size, region, allocator_lock);
DCHECK(!code_space.empty());
ZapCode(reinterpret_cast<Address>(code_space.begin()), code_space.size());
std::unique_ptr<WasmCode> code{new WasmCode{
@@ -1146,12 +1172,63 @@ WasmCode* NativeModule::CreateEmptyJumpTableInRegion(
return PublishCode(std::move(code));
}
-void NativeModule::AddCodeSpace(base::AddressRegion region) {
+void NativeModule::PatchJumpTablesLocked(uint32_t slot_index, Address target) {
+ // The caller must hold the {allocation_mutex_}, so the TryLock below must fail.
+ DCHECK(!allocation_mutex_.TryLock());
+
+ for (auto& code_space_data : code_space_data_) {
+ DCHECK_IMPLIES(code_space_data.jump_table, code_space_data.far_jump_table);
+ if (!code_space_data.jump_table) continue;
+ PatchJumpTableLocked(code_space_data, slot_index, target);
+ }
+}
+
+void NativeModule::PatchJumpTableLocked(const CodeSpaceData& code_space_data,
+ uint32_t slot_index, Address target) {
+ // The caller must hold the {allocation_mutex_}, so the TryLock below must fail.
+ DCHECK(!allocation_mutex_.TryLock());
+
+ DCHECK_NOT_NULL(code_space_data.jump_table);
+ DCHECK_NOT_NULL(code_space_data.far_jump_table);
+
+ DCHECK_LT(slot_index, module_->num_declared_functions);
+ Address jump_table_slot =
+ code_space_data.jump_table->instruction_start() +
+ JumpTableAssembler::JumpSlotIndexToOffset(slot_index);
+ uint32_t far_jump_table_offset = JumpTableAssembler::FarJumpSlotIndexToOffset(
+ WasmCode::kRuntimeStubCount + slot_index);
+ // Only pass the far jump table start if the far jump table actually has a
+ // slot for this function index (i.e. does not only contain runtime stubs).
+ bool has_far_jump_slot =
+ far_jump_table_offset <
+ code_space_data.far_jump_table->instructions().size();
+ Address far_jump_table_start =
+ code_space_data.far_jump_table->instruction_start();
+ Address far_jump_table_slot =
+ has_far_jump_slot ? far_jump_table_start + far_jump_table_offset
+ : kNullAddress;
+ JumpTableAssembler::PatchJumpTableSlot(jump_table_slot, far_jump_table_slot,
+ target);
+}
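// Sketch of the far jump table layout assumed above (not part of the patch;
// the constants are hypothetical stand-ins): the first kRuntimeStubCount
// slots hold runtime stubs, function slots follow, so a function has a far
// slot only if the table was sized to include function slots.
#include <cstdint>
constexpr uint32_t kSketchStubCount = 5;   // stand-in for kRuntimeStubCount
constexpr uint32_t kSketchSlotSize = 16;   // stand-in for the far slot size
constexpr uint32_t SketchFarSlotOffset(uint32_t slot_index) {
  return (kSketchStubCount + slot_index) * kSketchSlotSize;
}
constexpr bool SketchHasFarSlot(uint32_t slot_index, uint32_t table_bytes) {
  return SketchFarSlotOffset(slot_index) < table_bytes;
}
static_assert(!SketchHasFarSlot(0, kSketchStubCount * kSketchSlotSize),
              "a stubs-only far jump table has no function slots");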
+
+void NativeModule::AddCodeSpace(
+ base::AddressRegion region,
+ const WasmCodeAllocator::OptionalLock& allocator_lock) {
+#ifndef V8_EMBEDDED_BUILTINS
+ // The far jump table contains far jumps to the embedded builtins. This
+ // requires a build with embedded builtins enabled.
+ FATAL(
+ "WebAssembly is not supported in no-embed builds. no-embed builds are "
+ "deprecated. See\n"
+ " - https://groups.google.com/d/msg/v8-users/9F53xqBjpkI/9WmKSbcWBAAJ\n"
+ " - https://crbug.com/v8/8519\n"
+ " - https://crbug.com/v8/8531\n");
+#endif // V8_EMBEDDED_BUILTINS
+
// Each code space must be at least twice as large as the overhead per code
// space. Otherwise, we are wasting too much memory.
- const bool is_first_code_space = code_space_data_.empty();
- const bool implicit_alloc_disabled =
- engine_->code_manager()->IsImplicitAllocationsDisabledForTesting();
+ DCHECK_GE(region.size(),
+ 2 * OverheadPerCodeSpace(module()->num_declared_functions));
#if defined(V8_OS_WIN64)
// On some platforms, specifically Win64, we need to reserve some pages at
@@ -1160,32 +1237,82 @@ void NativeModule::AddCodeSpace(base::AddressRegion region) {
// https://cs.chromium.org/chromium/src/components/crash/content/app/crashpad_win.cc?rcl=fd680447881449fba2edcf0589320e7253719212&l=204
// for details.
if (engine_->code_manager()
- ->CanRegisterUnwindInfoForNonABICompliantCodeRange() &&
- !implicit_alloc_disabled) {
+ ->CanRegisterUnwindInfoForNonABICompliantCodeRange()) {
size_t size = Heap::GetCodeRangeReservedAreaSize();
DCHECK_LT(0, size);
- Vector<byte> padding = code_allocator_.AllocateForCode(this, size);
- CHECK(region.contains(reinterpret_cast<Address>(padding.begin()),
- padding.size()));
+ Vector<byte> padding = code_allocator_.AllocateForCodeInRegion(
+ this, size, region, allocator_lock);
+ CHECK_EQ(reinterpret_cast<Address>(padding.begin()), region.begin());
+ win64_unwindinfo::RegisterNonABICompliantCodeRange(
+ reinterpret_cast<void*>(region.begin()), region.size());
}
#endif // V8_OS_WIN64
WasmCodeRefScope code_ref_scope;
WasmCode* jump_table = nullptr;
+ WasmCode* far_jump_table = nullptr;
const uint32_t num_wasm_functions = module_->num_declared_functions;
const bool has_functions = num_wasm_functions > 0;
+ const bool is_first_code_space = code_space_data_.empty();
+ // TODO(clemensb): Avoid additional jump table if the code space is close
+ // enough to another existing code space.
const bool needs_jump_table =
- has_functions && is_first_code_space && !implicit_alloc_disabled;
+ has_functions && (kNeedsFarJumpsBetweenCodeSpaces || is_first_code_space);
if (needs_jump_table) {
jump_table = CreateEmptyJumpTableInRegion(
- JumpTableAssembler::SizeForNumberOfSlots(num_wasm_functions), region);
+ JumpTableAssembler::SizeForNumberOfSlots(num_wasm_functions), region,
+ allocator_lock);
CHECK(region.contains(jump_table->instruction_start()));
}
+ // Always allocate a far jump table, because it contains the runtime stubs.
+ int num_function_slots = NumWasmFunctionsInFarJumpTable(num_wasm_functions);
+ far_jump_table = CreateEmptyJumpTableInRegion(
+ JumpTableAssembler::SizeForNumberOfFarJumpSlots(
+ WasmCode::kRuntimeStubCount, num_function_slots),
+ region, allocator_lock);
+ CHECK(region.contains(far_jump_table->instruction_start()));
+ EmbeddedData embedded_data = EmbeddedData::FromBlob();
+#define RUNTIME_STUB(Name) Builtins::k##Name,
+#define RUNTIME_STUB_TRAP(Name) RUNTIME_STUB(ThrowWasm##Name)
+ Builtins::Name stub_names[WasmCode::kRuntimeStubCount] = {
+ WASM_RUNTIME_STUB_LIST(RUNTIME_STUB, RUNTIME_STUB_TRAP)};
+#undef RUNTIME_STUB
+#undef RUNTIME_STUB_TRAP
+ Address builtin_addresses[WasmCode::kRuntimeStubCount];
+ for (int i = 0; i < WasmCode::kRuntimeStubCount; ++i) {
+ Builtins::Name builtin = stub_names[i];
+ CHECK(embedded_data.ContainsBuiltin(builtin));
+ builtin_addresses[i] = embedded_data.InstructionStartOfBuiltin(builtin);
+ }
+ JumpTableAssembler::GenerateFarJumpTable(
+ far_jump_table->instruction_start(), builtin_addresses,
+ WasmCode::kRuntimeStubCount, num_function_slots);
+
if (is_first_code_space) main_jump_table_ = jump_table;
- code_space_data_.push_back(CodeSpaceData{region, jump_table});
+ base::MutexGuard guard(&allocation_mutex_);
+ code_space_data_.push_back(CodeSpaceData{region, jump_table, far_jump_table});
+
+ if (jump_table && !is_first_code_space) {
+ // Patch the new jump table(s) with existing functions. If this is the first
+ // code space, there cannot be any functions that have been compiled yet.
+ const CodeSpaceData& new_code_space_data = code_space_data_.back();
+ for (uint32_t slot_index = 0; slot_index < num_wasm_functions;
+ ++slot_index) {
+ if (code_table_[slot_index]) {
+ PatchJumpTableLocked(new_code_space_data, slot_index,
+ code_table_[slot_index]->instruction_start());
+ } else if (lazy_compile_table_) {
+ Address lazy_compile_target =
+ lazy_compile_table_->instruction_start() +
+ JumpTableAssembler::LazyCompileSlotIndexToOffset(slot_index);
+ PatchJumpTableLocked(new_code_space_data, slot_index,
+ lazy_compile_target);
+ }
+ }
+ }
}
namespace {
@@ -1241,26 +1368,86 @@ Address NativeModule::GetCallTargetForFunction(uint32_t func_index) const {
return main_jump_table_->instruction_start() + slot_offset;
}
+NativeModule::JumpTablesRef NativeModule::FindJumpTablesForCode(
+ Address code_addr) const {
+ base::MutexGuard guard(&allocation_mutex_);
+ for (auto& code_space_data : code_space_data_) {
+ const bool jump_table_reachable =
+ !kNeedsFarJumpsBetweenCodeSpaces ||
+ code_space_data.region.contains(code_addr);
+ if (jump_table_reachable && code_space_data.far_jump_table) {
+ // We might not have a jump table if we have no functions.
+ return {code_space_data.jump_table
+ ? code_space_data.jump_table->instruction_start()
+ : kNullAddress,
+ code_space_data.far_jump_table->instruction_start()};
+ }
+ }
+ FATAL("code_addr is not part of a code space");
+}
+
+Address NativeModule::GetNearCallTargetForFunction(
+ uint32_t func_index, const JumpTablesRef& jump_tables) const {
+ uint32_t slot_offset = GetJumpTableOffset(func_index);
+ return jump_tables.jump_table_start + slot_offset;
+}
+
+Address NativeModule::GetNearRuntimeStubEntry(
+ WasmCode::RuntimeStubId index, const JumpTablesRef& jump_tables) const {
+ auto offset = JumpTableAssembler::FarJumpSlotIndexToOffset(index);
+ return jump_tables.far_jump_table_start + offset;
+}
+
uint32_t NativeModule::GetFunctionIndexFromJumpTableSlot(
Address slot_address) const {
- DCHECK(is_jump_table_slot(slot_address));
- uint32_t slot_offset = static_cast<uint32_t>(
- slot_address - main_jump_table_->instruction_start());
+ WasmCodeRefScope code_refs;
+ WasmCode* code = Lookup(slot_address);
+ DCHECK_NOT_NULL(code);
+ DCHECK_EQ(WasmCode::kJumpTable, code->kind());
+ uint32_t slot_offset =
+ static_cast<uint32_t>(slot_address - code->instruction_start());
uint32_t slot_idx = JumpTableAssembler::SlotOffsetToIndex(slot_offset);
DCHECK_LT(slot_idx, module_->num_declared_functions);
+ DCHECK_EQ(slot_address,
+ code->instruction_start() +
+ JumpTableAssembler::JumpSlotIndexToOffset(slot_idx));
return module_->num_imported_functions + slot_idx;
}
-const char* NativeModule::GetRuntimeStubName(Address runtime_stub_entry) const {
-#define RETURN_NAME(Name) \
- if (runtime_stub_entries_[WasmCode::k##Name] == runtime_stub_entry) { \
- return #Name; \
+WasmCode::RuntimeStubId NativeModule::GetRuntimeStubId(Address target) const {
+ base::MutexGuard guard(&allocation_mutex_);
+
+ for (auto& code_space_data : code_space_data_) {
+ if (code_space_data.far_jump_table->contains(target)) {
+ uint32_t offset = static_cast<uint32_t>(
+ target - code_space_data.far_jump_table->instruction_start());
+ uint32_t index = JumpTableAssembler::FarJumpSlotOffsetToIndex(offset);
+ if (index >= WasmCode::kRuntimeStubCount) continue;
+ if (JumpTableAssembler::FarJumpSlotIndexToOffset(index) != offset) {
+ continue;
+ }
+ return static_cast<WasmCode::RuntimeStubId>(index);
+ }
}
-#define RETURN_NAME_TRAP(Name) RETURN_NAME(ThrowWasm##Name)
- WASM_RUNTIME_STUB_LIST(RETURN_NAME, RETURN_NAME_TRAP)
-#undef RETURN_NAME_TRAP
-#undef RETURN_NAME
- return "<unknown>";
+
+ // Invalid address.
+ return WasmCode::kRuntimeStubCount;
+}
+
+const char* NativeModule::GetRuntimeStubName(Address target) const {
+ WasmCode::RuntimeStubId stub_id = GetRuntimeStubId(target);
+
+#define RUNTIME_STUB_NAME(Name) #Name,
+#define RUNTIME_STUB_NAME_TRAP(Name) "ThrowWasm" #Name,
+ constexpr const char* runtime_stub_names[] = {WASM_RUNTIME_STUB_LIST(
+ RUNTIME_STUB_NAME, RUNTIME_STUB_NAME_TRAP) "<unknown>"};
+#undef RUNTIME_STUB_NAME
+#undef RUNTIME_STUB_NAME_TRAP
+ STATIC_ASSERT(arraysize(runtime_stub_names) ==
+ WasmCode::kRuntimeStubCount + 1);
+
+ DCHECK_GT(arraysize(runtime_stub_names), stub_id);
+ return runtime_stub_names[stub_id];
}
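// Sketch of the X-macro trick behind {GetRuntimeStubName} (not part of the
// patch; the list is hypothetical): one macro list expands into both the enum
// and the parallel name array, keeping the two in sync by construction.
#define SKETCH_STUB_LIST(V) V(StackGuard) V(StackOverflow)
#define SKETCH_ENUM(Name) kSketch##Name,
enum SketchStubId { SKETCH_STUB_LIST(SKETCH_ENUM) kSketchStubCount };
#undef SKETCH_ENUM
#define SKETCH_NAME(Name) #Name,
constexpr const char* kSketchStubNames[] = {
    SKETCH_STUB_LIST(SKETCH_NAME) "<unknown>"};
#undef SKETCH_NAME
static_assert(sizeof(kSketchStubNames) / sizeof(kSketchStubNames[0]) ==
                  kSketchStubCount + 1,
              "one name per stub id plus the failure value");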
NativeModule::~NativeModule() {
@@ -1275,10 +1462,8 @@ NativeModule::~NativeModule() {
import_wrapper_cache_.reset();
}
-WasmCodeManager::WasmCodeManager(WasmMemoryTracker* memory_tracker,
- size_t max_committed)
- : memory_tracker_(memory_tracker),
- max_committed_code_space_(max_committed),
+WasmCodeManager::WasmCodeManager(size_t max_committed)
+ : max_committed_code_space_(max_committed),
critical_committed_code_space_(max_committed / 2) {
DCHECK_LE(max_committed, kMaxWasmCodeMemory);
}
@@ -1350,12 +1535,12 @@ VirtualMemory WasmCodeManager::TryAllocate(size_t size, void* hint) {
DCHECK_GT(size, 0);
size_t allocate_page_size = page_allocator->AllocatePageSize();
size = RoundUp(size, allocate_page_size);
- if (!memory_tracker_->ReserveAddressSpace(size)) return {};
+ if (!BackingStore::ReserveAddressSpace(size)) return {};
if (hint == nullptr) hint = page_allocator->GetRandomMmapAddr();
VirtualMemory mem(page_allocator, size, hint, allocate_page_size);
if (!mem.IsReserved()) {
- memory_tracker_->ReleaseReservation(size);
+ BackingStore::ReleaseReservation(size);
return {};
}
TRACE_HEAP("VMem alloc: 0x%" PRIxPTR ":0x%" PRIxPTR " (%zu)\n", mem.address(),
@@ -1369,13 +1554,6 @@ VirtualMemory WasmCodeManager::TryAllocate(size_t size, void* hint) {
return mem;
}
-void WasmCodeManager::SetMaxCommittedMemoryForTesting(size_t limit) {
- // This has to be set before committing any memory.
- DCHECK_EQ(0, total_committed_code_space_.load());
- max_committed_code_space_ = limit;
- critical_committed_code_space_.store(limit / 2);
-}
-
// static
size_t WasmCodeManager::EstimateNativeModuleCodeSize(const WasmModule* module) {
constexpr size_t kCodeSizeMultiplier = 4;
@@ -1387,8 +1565,6 @@ size_t WasmCodeManager::EstimateNativeModuleCodeSize(const WasmModule* module) {
for (auto& function : module->functions) {
estimate += kCodeOverhead + kCodeSizeMultiplier * function.code.length();
}
- estimate +=
- JumpTableAssembler::SizeForNumberOfSlots(module->num_declared_functions);
estimate += kImportSize * module->num_imported_functions;
return estimate;
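// Worked sketch of the estimate above (not part of the patch; the constants
// are hypothetical stand-ins for kCodeOverhead, kCodeSizeMultiplier and
// kImportSize): two 100-byte functions plus one import estimate to 928 bytes.
#include <cstddef>
constexpr size_t kSketchCodeOverhead = 32;
constexpr size_t kSketchMultiplier = 4;
constexpr size_t kSketchImportSize = 64;
static_assert(2 * (kSketchCodeOverhead + kSketchMultiplier * 100) +
                      1 * kSketchImportSize ==
                  928,
              "per-function estimate plus per-import estimate");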
@@ -1425,9 +1601,20 @@ std::shared_ptr<NativeModule> WasmCodeManager::NewNativeModule(
committed + (max_committed_code_space_ - committed) / 2);
}
- // If the code must be contiguous, reserve enough address space up front.
+ // If we cannot add code space later, reserve enough address space up front.
size_t code_vmem_size =
- kRequiresCodeRange ? kMaxWasmCodeMemory : code_size_estimate;
+ can_request_more ? ReservationSize(code_size_estimate,
+ module->num_declared_functions, 0)
+ : kMaxWasmCodeSpaceSize;
+
+ // The '--wasm-max-initial-code-space-reservation' testing flag can be used
+ // to reduce the maximum size of the initial code space reservation (in MB).
+ if (FLAG_wasm_max_initial_code_space_reservation > 0) {
+ size_t flag_max_bytes =
+ static_cast<size_t>(FLAG_wasm_max_initial_code_space_reservation) * MB;
+ if (flag_max_bytes < code_vmem_size) code_vmem_size = flag_max_bytes;
+ }
+
// Try up to two times; getting rid of dead JSArrayBuffer allocations might
// require two GCs because the first GC maybe incremental and may have
// floating garbage.
@@ -1456,14 +1643,6 @@ std::shared_ptr<NativeModule> WasmCodeManager::NewNativeModule(
TRACE_HEAP("New NativeModule %p: Mem: %" PRIuPTR ",+%zu\n", ret.get(), start,
size);
-#if defined(V8_OS_WIN64)
- if (CanRegisterUnwindInfoForNonABICompliantCodeRange() &&
- !implicit_allocations_disabled_for_testing_) {
- win64_unwindinfo::RegisterNonABICompliantCodeRange(
- reinterpret_cast<void*>(start), size);
- }
-#endif // V8_OS_WIN64
-
base::MutexGuard lock(&native_modules_mutex_);
lookup_map_.insert(std::make_pair(start, std::make_pair(end, ret.get())));
return ret;
@@ -1519,6 +1698,9 @@ std::vector<WasmCode*> NativeModule::AddCompiledCode(
}
Vector<byte> code_space =
code_allocator_.AllocateForCode(this, total_code_space);
+ // Lookup the jump tables to use once, then use for all code objects.
+ auto jump_tables_ref =
+ FindJumpTablesForCode(reinterpret_cast<Address>(code_space.begin()));
std::vector<std::unique_ptr<WasmCode>> generated_code;
generated_code.reserve(results.size());
@@ -1533,7 +1715,7 @@ std::vector<WasmCode*> NativeModule::AddCompiledCode(
result.func_index, result.code_desc, result.frame_slot_count,
result.tagged_parameter_slots, std::move(result.protected_instructions),
std::move(result.source_positions), GetCodeKind(result),
- result.result_tier, this_code_space));
+ result.result_tier, this_code_space, jump_tables_ref));
}
DCHECK_EQ(0, code_space.size());
@@ -1567,6 +1749,10 @@ void NativeModule::FreeCode(Vector<WasmCode* const> codes) {
}
}
+size_t NativeModule::GetNumberOfCodeSpacesForTesting() const {
+ return code_allocator_.GetNumCodeSpaces();
+}
+
void WasmCodeManager::FreeNativeModule(Vector<VirtualMemory> owned_code_space,
size_t committed_size) {
base::MutexGuard lock(&native_modules_mutex_);
@@ -1576,15 +1762,14 @@ void WasmCodeManager::FreeNativeModule(Vector<VirtualMemory> owned_code_space,
code_space.address(), code_space.end(), code_space.size());
#if defined(V8_OS_WIN64)
- if (CanRegisterUnwindInfoForNonABICompliantCodeRange() &&
- !implicit_allocations_disabled_for_testing_) {
+ if (CanRegisterUnwindInfoForNonABICompliantCodeRange()) {
win64_unwindinfo::UnregisterNonABICompliantCodeRange(
reinterpret_cast<void*>(code_space.address()));
}
#endif // V8_OS_WIN64
lookup_map_.erase(code_space.address());
- memory_tracker_->ReleaseReservation(code_space.size());
+ BackingStore::ReleaseReservation(code_space.size());
code_space.Free();
DCHECK(!code_space.IsReserved());
}
@@ -1616,7 +1801,7 @@ WasmCode* WasmCodeManager::LookupCode(Address pc) const {
}
// TODO(v8:7424): Code protection scopes are not yet supported with shared code
-// enabled and need to be revisited to work with --wasm-shared-code as well.
+// enabled and need to be revisited.
NativeModuleModificationScope::NativeModuleModificationScope(
NativeModule* native_module)
: native_module_(native_module) {
diff --git a/deps/v8/src/wasm/wasm-code-manager.h b/deps/v8/src/wasm/wasm-code-manager.h
index c2e5249e5e..7deea9032a 100644
--- a/deps/v8/src/wasm/wasm-code-manager.h
+++ b/deps/v8/src/wasm/wasm-code-manager.h
@@ -39,7 +39,6 @@ class NativeModule;
class WasmCodeManager;
struct WasmCompilationResult;
class WasmEngine;
-class WasmMemoryTracker;
class WasmImportWrapperCache;
struct WasmModule;
@@ -79,7 +78,6 @@ class V8_EXPORT_PRIVATE WasmCode final {
kFunction,
kWasmToCapiWrapper,
kWasmToJsWrapper,
- kRuntimeStub,
kInterpreterEntry,
kJumpTable
};
@@ -282,11 +280,33 @@ const char* GetWasmCodeKindAsString(WasmCode::Kind);
// Manages the code reservations and allocations of a single {NativeModule}.
class WasmCodeAllocator {
public:
+ // {OptionalLock} is passed between {WasmCodeAllocator} and {NativeModule} to
+ // indicate that the lock on the {WasmCodeAllocator} is already taken. It is
+ // optional so that methods can also be called without holding the lock.
+ class OptionalLock {
+ public:
+ // External users can only instantiate a non-locked {OptionalLock}.
+ OptionalLock() = default;
+ ~OptionalLock();
+ bool is_locked() const { return allocator_ != nullptr; }
+
+ private:
+ friend class WasmCodeAllocator;
+ // {Lock} is called from the {WasmCodeAllocator} if no locked {OptionalLock}
+ // is passed.
+ void Lock(WasmCodeAllocator*);
+
+ WasmCodeAllocator* allocator_ = nullptr;
+ };
+
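// Sketch of the intended call pattern (not part of the patch; hypothetical
// call site): a default-constructed {OptionalLock} is unlocked, letting the
// allocator take the mutex itself; a pre-locked one is threaded through
// nested calls to avoid re-locking:
//
//   WasmCodeAllocator::OptionalLock lock;  // not locked yet
//   allocator->AllocateForCodeInRegion(native_module, size, region, lock);
//   // Inside, the allocator calls lock.Lock(this) iff !lock.is_locked().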
WasmCodeAllocator(WasmCodeManager*, VirtualMemory code_space,
bool can_request_more,
std::shared_ptr<Counters> async_counters);
~WasmCodeAllocator();
+ // Call before use, after the {NativeModule} is set up completely.
+ void Init(NativeModule*);
+
size_t committed_code_space() const {
return committed_code_space_.load(std::memory_order_acquire);
}
@@ -303,7 +323,8 @@ class WasmCodeAllocator {
// Allocate code space within a specific region. Returns a valid buffer or
// fails with OOM (crash).
Vector<byte> AllocateForCodeInRegion(NativeModule*, size_t size,
- base::AddressRegion);
+ base::AddressRegion,
+ const WasmCodeAllocator::OptionalLock&);
// Sets permissions of all owned code space to executable, or read-write (if
// {executable} is false). Returns true on success.
@@ -312,9 +333,8 @@ class WasmCodeAllocator {
// Free memory pages of all given code objects. Used for wasm code GC.
void FreeCode(Vector<WasmCode* const>);
- // Returns the region of the single code space managed by this code allocator.
- // Will fail if more than one code space has been created.
- base::AddressRegion GetSingleCodeRegion() const;
+ // Retrieve the number of separately reserved code spaces.
+ size_t GetNumCodeSpaces() const;
private:
// The engine-wide wasm code manager.
@@ -344,6 +364,8 @@ class WasmCodeAllocator {
bool is_executable_ = false;
+ // TODO(clemensb): Remove this field once multiple code spaces are supported
+ // everywhere.
const bool can_request_more_memory_;
std::shared_ptr<Counters> async_counters_;
@@ -352,9 +374,9 @@ class WasmCodeAllocator {
class V8_EXPORT_PRIVATE NativeModule final {
public:
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_S390X || V8_TARGET_ARCH_ARM64
- static constexpr bool kCanAllocateMoreMemory = false;
+ static constexpr bool kNeedsFarJumpsBetweenCodeSpaces = true;
#else
- static constexpr bool kCanAllocateMoreMemory = true;
+ static constexpr bool kNeedsFarJumpsBetweenCodeSpaces = false;
#endif
// {AddCode} is thread safe w.r.t. other calls to {AddCode} or methods adding
@@ -394,11 +416,6 @@ class V8_EXPORT_PRIVATE NativeModule final {
// table with trampolines accordingly.
void UseLazyStub(uint32_t func_index);
- // Initializes all runtime stubs by setting up entry addresses in the runtime
- // stub table. It must be called exactly once per native module before adding
- // other WasmCode so that runtime stub ids can be resolved during relocation.
- void SetRuntimeStubs(Isolate* isolate);
-
// Creates a snapshot of the current state of the code table. This is useful
// to get a consistent view of the table (e.g. used by the serializer).
std::vector<WasmCode*> SnapshotCodeTable() const;
@@ -409,13 +426,6 @@ class V8_EXPORT_PRIVATE NativeModule final {
void SetWasmSourceMap(std::unique_ptr<WasmModuleSourceMap> source_map);
WasmModuleSourceMap* GetWasmSourceMap() const;
- Address runtime_stub_entry(WasmCode::RuntimeStubId index) const {
- DCHECK_LT(index, WasmCode::kRuntimeStubCount);
- Address entry_address = runtime_stub_entries_[index];
- DCHECK_NE(kNullAddress, entry_address);
- return entry_address;
- }
-
Address jump_table_start() const {
return main_jump_table_ ? main_jump_table_->instruction_start()
: kNullAddress;
@@ -423,16 +433,33 @@ class V8_EXPORT_PRIVATE NativeModule final {
uint32_t GetJumpTableOffset(uint32_t func_index) const;
- bool is_jump_table_slot(Address address) const {
- return main_jump_table_->contains(address);
- }
-
// Returns the canonical target to call for the given function (the slot in
// the first jump table).
Address GetCallTargetForFunction(uint32_t func_index) const;
- // Reverse lookup from a given call target (i.e. a jump table slot as the
- // above {GetCallTargetForFunction} returns) to a function index.
+ struct JumpTablesRef {
+ const Address jump_table_start;
+ const Address far_jump_table_start;
+ };
+
+ // Finds the jump tables that should be used for the code at {code_addr}. The
+ // result is passed to {GetNearCallTargetForFunction} and
+ // {GetNearRuntimeStubEntry} so that the lookup is not repeated there.
+ JumpTablesRef FindJumpTablesForCode(Address code_addr) const;
+
+ // Similar to {GetCallTargetForFunction}, but uses the jump tables previously
+ // looked up via {FindJumpTablesForCode}.
+ Address GetNearCallTargetForFunction(uint32_t func_index,
+ const JumpTablesRef&) const;
+
+ // Get a runtime stub entry (which is a far jump table slot) in the jump table
+ // previously looked up via {FindJumpTablesForCode}.
+ Address GetNearRuntimeStubEntry(WasmCode::RuntimeStubId index,
+ const JumpTablesRef&) const;
+
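// Sketch of the lookup-once pattern (not part of the patch; hypothetical call
// site): resolve the reachable jump tables once per batch of code, then
// compute near targets without re-taking the allocation mutex:
//
//   auto tables = native_module->FindJumpTablesForCode(code_start);
//   Address callee =
//       native_module->GetNearCallTargetForFunction(func_index, tables);
//   Address stub =
//       native_module->GetNearRuntimeStubEntry(stub_id, tables);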
+ // Reverse lookup from a given call target (which must be a jump table slot)
+ // to a function index.
uint32_t GetFunctionIndexFromJumpTableSlot(Address slot_address) const;
bool SetExecutable(bool executable) {
@@ -481,7 +508,11 @@ class V8_EXPORT_PRIVATE NativeModule final {
const WasmFeatures& enabled_features() const { return enabled_features_; }
- const char* GetRuntimeStubName(Address runtime_stub_entry) const;
+ // Returns the runtime stub id that corresponds to the given address (which
+ // must be a far jump table slot). Returns {kRuntimeStubCount} on failure.
+ WasmCode::RuntimeStubId GetRuntimeStubId(Address runtime_stub_target) const;
+
+ const char* GetRuntimeStubName(Address runtime_stub_target) const;
// Sample the current code size of this module to the given counters.
enum CodeSamplingTime : int8_t { kAfterBaseline, kAfterTopTier, kSampling };
@@ -501,6 +532,9 @@ class V8_EXPORT_PRIVATE NativeModule final {
// its accounting.
void FreeCode(Vector<WasmCode* const>);
+ // Retrieve the number of separately reserved code spaces for this module.
+ size_t GetNumberOfCodeSpacesForTesting() const;
+
private:
friend class WasmCode;
friend class WasmCodeAllocator;
@@ -510,6 +544,7 @@ class V8_EXPORT_PRIVATE NativeModule final {
struct CodeSpaceData {
base::AddressRegion region;
WasmCode* jump_table;
+ WasmCode* far_jump_table;
};
// Private constructor, called via {WasmCodeManager::NewNativeModule()}.
@@ -525,17 +560,23 @@ class V8_EXPORT_PRIVATE NativeModule final {
OwnedVector<trap_handler::ProtectedInstructionData>
protected_instructions,
OwnedVector<const byte> source_position_table, WasmCode::Kind kind,
- ExecutionTier tier, Vector<uint8_t> code_space);
+ ExecutionTier tier, Vector<uint8_t> code_space,
+ const JumpTablesRef& jump_tables_ref);
- // Add and publish anonymous code.
- WasmCode* AddAndPublishAnonymousCode(Handle<Code>, WasmCode::Kind kind,
- const char* name = nullptr);
+ WasmCode* CreateEmptyJumpTableInRegion(
+ uint32_t jump_table_size, base::AddressRegion,
+ const WasmCodeAllocator::OptionalLock&);
- WasmCode* CreateEmptyJumpTableInRegion(uint32_t jump_table_size,
- base::AddressRegion);
+ // Hold the {allocation_mutex_} when calling one of these methods.
+ // {slot_index} is the index in the declared functions, i.e. function index
+ // minus the number of imported functions.
+ void PatchJumpTablesLocked(uint32_t slot_index, Address target);
+ void PatchJumpTableLocked(const CodeSpaceData&, uint32_t slot_index,
+ Address target);
// Called by the {WasmCodeAllocator} to register a new code space.
- void AddCodeSpace(base::AddressRegion);
+ void AddCodeSpace(base::AddressRegion,
+ const WasmCodeAllocator::OptionalLock&);
// Hold the {allocation_mutex_} when calling this method.
bool has_interpreter_redirection(uint32_t func_index) {
@@ -580,12 +621,6 @@ class V8_EXPORT_PRIVATE NativeModule final {
// {WireBytesStorage}, held by background compile tasks.
std::shared_ptr<OwnedVector<const uint8_t>> wire_bytes_;
- // Contains entry points for runtime stub calls via {WASM_STUB_CALL}.
- Address runtime_stub_entries_[WasmCode::kRuntimeStubCount] = {kNullAddress};
-
- // Jump table used for runtime stubs (i.e. trampolines to embedded builtins).
- WasmCode* runtime_stub_table_ = nullptr;
-
// Jump table used by external calls (from JS). Wasm calls use one of the jump
// tables stored in {code_space_data_}.
WasmCode* main_jump_table_ = nullptr;
@@ -612,7 +647,11 @@ class V8_EXPORT_PRIVATE NativeModule final {
// instruction start address of the value.
std::map<Address, std::unique_ptr<WasmCode>> owned_code_;
- std::unique_ptr<WasmCode* []> code_table_;
+ // Table of the latest code object per function, updated on initial
+ // compilation and tier up. The number of entries is
+ // {WasmModule::num_declared_functions}, i.e. there are no entries for
+ // imported functions.
+ std::unique_ptr<WasmCode*[]> code_table_;
// Null if no redirections exist, otherwise a bitset over all functions in
// this module marking those functions that have been redirected.
@@ -634,8 +673,7 @@ class V8_EXPORT_PRIVATE NativeModule final {
class V8_EXPORT_PRIVATE WasmCodeManager final {
public:
- explicit WasmCodeManager(WasmMemoryTracker* memory_tracker,
- size_t max_committed);
+ explicit WasmCodeManager(size_t max_committed);
#ifdef DEBUG
~WasmCodeManager() {
@@ -654,16 +692,6 @@ class V8_EXPORT_PRIVATE WasmCodeManager final {
return total_committed_code_space_.load();
}
- void SetMaxCommittedMemoryForTesting(size_t limit);
-
- void DisableImplicitAllocationsForTesting() {
- implicit_allocations_disabled_for_testing_ = true;
- }
-
- bool IsImplicitAllocationsDisabledForTesting() const {
- return implicit_allocations_disabled_for_testing_;
- }
-
static size_t EstimateNativeModuleCodeSize(const WasmModule* module);
static size_t EstimateNativeModuleNonCodeSize(const WasmModule* module);
@@ -686,11 +714,7 @@ class V8_EXPORT_PRIVATE WasmCodeManager final {
void AssignRange(base::AddressRegion, NativeModule*);
- WasmMemoryTracker* const memory_tracker_;
-
- size_t max_committed_code_space_;
-
- bool implicit_allocations_disabled_for_testing_ = false;
+ const size_t max_committed_code_space_;
std::atomic<size_t> total_committed_code_space_{0};
// If the committed code space exceeds {critical_committed_code_space_}, then
diff --git a/deps/v8/src/wasm/wasm-constants.h b/deps/v8/src/wasm/wasm-constants.h
index fbbe19396c..2b5cb6c9ec 100644
--- a/deps/v8/src/wasm/wasm-constants.h
+++ b/deps/v8/src/wasm/wasm-constants.h
@@ -81,6 +81,7 @@ enum SectionCode : int8_t {
// to be consistent.
kNameSectionCode, // Name section (encoded as a string)
kSourceMappingURLSectionCode, // Source Map URL section
+ kDebugInfoSectionCode, // DWARF section .debug_info
kCompilationHintsSectionCode, // Compilation hints section
// Helper values
diff --git a/deps/v8/src/wasm/wasm-debug.cc b/deps/v8/src/wasm/wasm-debug.cc
index 2955bc602f..ea989c081d 100644
--- a/deps/v8/src/wasm/wasm-debug.cc
+++ b/deps/v8/src/wasm/wasm-debug.cc
@@ -184,7 +184,7 @@ class InterpreterHandle {
argument_values.begin());
bool finished = false;
while (!finished) {
- // TODO(clemensh): Add occasional StackChecks.
+ // TODO(clemensb): Add occasional StackChecks.
WasmInterpreter::State state = ContinueExecution(thread);
switch (state) {
case WasmInterpreter::State::PAUSED:
@@ -277,9 +277,10 @@ class InterpreterHandle {
if (isolate_->debug()->break_points_active()) {
Handle<WasmModuleObject> module_object(
GetInstanceObject()->module_object(), isolate_);
+ Handle<Script> script(module_object->script(), isolate_);
int position = GetTopPosition(module_object);
Handle<FixedArray> breakpoints;
- if (WasmModuleObject::CheckBreakPoints(isolate_, module_object, position)
+ if (WasmModuleObject::CheckBreakPoints(isolate_, script, position)
.ToHandle(&breakpoints)) {
// We hit one or several breakpoints. Clear stepping, notify the
// listeners and return.
@@ -318,7 +319,8 @@ class InterpreterHandle {
DCHECK_LT(0, thread->GetFrameCount());
auto frame = thread->GetFrame(thread->GetFrameCount() - 1);
- return module_object->GetFunctionOffset(frame->function()->func_index) +
+ return GetWasmFunctionOffset(module_object->module(),
+ frame->function()->func_index) +
frame->pc();
}
@@ -502,9 +504,11 @@ wasm::InterpreterHandle* GetInterpreterHandleOrNull(WasmDebugInfo debug_info) {
Handle<WasmDebugInfo> WasmDebugInfo::New(Handle<WasmInstanceObject> instance) {
DCHECK(!instance->has_debug_info());
Factory* factory = instance->GetIsolate()->factory();
+ Handle<Cell> stack_cell = factory->NewCell(factory->empty_fixed_array());
Handle<WasmDebugInfo> debug_info = Handle<WasmDebugInfo>::cast(
factory->NewStruct(WASM_DEBUG_INFO_TYPE, AllocationType::kOld));
debug_info->set_wasm_instance(*instance);
+ debug_info->set_interpreter_reference_stack(*stack_cell);
instance->set_debug_info(*debug_info);
return debug_info;
}
@@ -524,6 +528,7 @@ wasm::WasmInterpreter* WasmDebugInfo::SetupForTesting(
return interp_handle->raw()->interpreter();
}
+// static
void WasmDebugInfo::SetBreakpoint(Handle<WasmDebugInfo> debug_info,
int func_index, int offset) {
Isolate* isolate = debug_info->GetIsolate();
@@ -533,6 +538,18 @@ void WasmDebugInfo::SetBreakpoint(Handle<WasmDebugInfo> debug_info,
handle->interpreter()->SetBreakpoint(func, offset, true);
}
+// static
+void WasmDebugInfo::ClearBreakpoint(Handle<WasmDebugInfo> debug_info,
+ int func_index, int offset) {
+ Isolate* isolate = debug_info->GetIsolate();
+ auto* handle = GetOrCreateInterpreterHandle(isolate, debug_info);
+ // TODO(leese): If there are no more breakpoints left it would be good to
+ // undo redirecting to the interpreter.
+ const wasm::WasmFunction* func = &handle->module()->functions[func_index];
+ handle->interpreter()->SetBreakpoint(func, offset, false);
+}
+
+// static
void WasmDebugInfo::RedirectToInterpreter(Handle<WasmDebugInfo> debug_info,
Vector<int> func_indexes) {
Isolate* isolate = debug_info->GetIsolate();
@@ -635,8 +652,8 @@ Handle<Code> WasmDebugInfo::GetCWasmEntry(Handle<WasmDebugInfo> debug_info,
if (index == -1) {
index = static_cast<int32_t>(map->FindOrInsert(*sig));
if (index == entries->length()) {
- entries = isolate->factory()->CopyFixedArrayAndGrow(
- entries, entries->length(), AllocationType::kOld);
+ entries =
+ isolate->factory()->CopyFixedArrayAndGrow(entries, entries->length());
debug_info->set_c_wasm_entries(*entries);
}
DCHECK(entries->get(index).IsUndefined(isolate));
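
The new ClearBreakpoint mirrors SetBreakpoint: both resolve the function through the interpreter handle and call the interpreter's SetBreakpoint, differing only in the final enabled flag. A hypothetical convenience wrapper over the two static methods declared in this patch (it only compiles inside V8's internal namespaces):

// Hypothetical wrapper; SetBreakpoint/ClearBreakpoint are the static
// methods added to WasmDebugInfo above.
void ToggleInterpreterBreakpoint(Handle<WasmDebugInfo> debug_info,
                                 int func_index, int offset, bool enabled) {
  if (enabled) {
    WasmDebugInfo::SetBreakpoint(debug_info, func_index, offset);
  } else {
    WasmDebugInfo::ClearBreakpoint(debug_info, func_index, offset);
  }
}
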
diff --git a/deps/v8/src/wasm/wasm-engine.cc b/deps/v8/src/wasm/wasm-engine.cc
index 97111f8349..adb566cb41 100644
--- a/deps/v8/src/wasm/wasm-engine.cc
+++ b/deps/v8/src/wasm/wasm-engine.cc
@@ -211,8 +211,7 @@ struct WasmEngine::NativeModuleInfo {
int8_t num_code_gcs_triggered = 0;
};
-WasmEngine::WasmEngine()
- : code_manager_(&memory_tracker_, FLAG_wasm_max_code_space * MB) {}
+WasmEngine::WasmEngine() : code_manager_(FLAG_wasm_max_code_space * MB) {}
WasmEngine::~WasmEngine() {
// Synchronize on all background compile tasks.
@@ -307,7 +306,7 @@ MaybeHandle<WasmModuleObject> WasmEngine::SyncCompile(
CreateWasmScript(isolate, bytes, native_module->module()->source_map_url);
// Create the module object.
- // TODO(clemensh): For the same module (same bytes / same hash), we should
+ // TODO(clemensb): For the same module (same bytes / same hash), we should
// only have one WasmModuleObject. Otherwise, we might only set
// breakpoints on a (potentially empty) subset of the instances.
@@ -337,7 +336,7 @@ void WasmEngine::AsyncInstantiate(
ErrorThrower thrower(isolate, "WebAssembly.instantiate()");
// Instantiate a TryCatch so that caught exceptions won't propagate out.
// They will still be set as pending exceptions on the isolate.
- // TODO(clemensh): Avoid TryCatch, use Execution::TryCall internally to invoke
+ // TODO(clemensb): Avoid TryCatch, use Execution::TryCall internally to invoke
// start function and report thrown exception explicitly via out argument.
v8::TryCatch catcher(reinterpret_cast<v8::Isolate*>(isolate));
catcher.SetVerbose(false);
@@ -567,7 +566,7 @@ int GetGCTimeMicros(base::TimeTicks start) {
void WasmEngine::AddIsolate(Isolate* isolate) {
base::MutexGuard guard(&mutex_);
DCHECK_EQ(0, isolates_.count(isolate));
- isolates_.emplace(isolate, base::make_unique<IsolateInfo>(isolate));
+ isolates_.emplace(isolate, std::make_unique<IsolateInfo>(isolate));
// Install sampling GC callback.
// TODO(v8:7424): For now we sample module sizes in a GC callback. This will
@@ -631,7 +630,7 @@ void WasmEngine::LogCode(WasmCode* code) {
IsolateInfo* info = isolates_[isolate].get();
if (info->log_codes == false) continue;
if (info->log_codes_task == nullptr) {
- auto new_task = base::make_unique<LogCodesTask>(
+ auto new_task = std::make_unique<LogCodesTask>(
&mutex_, &info->log_codes_task, isolate, this);
info->log_codes_task = new_task.get();
info->foreground_task_runner->PostTask(std::move(new_task));
@@ -676,7 +675,8 @@ std::shared_ptr<NativeModule> WasmEngine::NewNativeModule(
size_t code_size_estimate =
wasm::WasmCodeManager::EstimateNativeModuleCodeSize(module.get());
return NewNativeModule(isolate, enabled, code_size_estimate,
- wasm::NativeModule::kCanAllocateMoreMemory,
+ !wasm::NativeModule::kNeedsFarJumpsBetweenCodeSpaces ||
+ FLAG_wasm_far_jump_table,
std::move(module));
}
@@ -688,7 +688,7 @@ std::shared_ptr<NativeModule> WasmEngine::NewNativeModule(
can_request_more, std::move(module));
base::MutexGuard lock(&mutex_);
auto pair = native_modules_.insert(std::make_pair(
- native_module.get(), base::make_unique<NativeModuleInfo>()));
+ native_module.get(), std::make_unique<NativeModuleInfo>()));
DCHECK(pair.second); // inserted new entry.
pair.first->second.get()->isolates.insert(isolate);
isolates_[isolate]->native_modules.insert(native_module.get());
@@ -768,7 +768,7 @@ void WasmEngine::SampleTopTierCodeSizeInAllIsolates(
DCHECK_EQ(1, isolates_.count(isolate));
IsolateInfo* info = isolates_[isolate].get();
info->foreground_task_runner->PostTask(
- base::make_unique<SampleTopTierCodeSizeTask>(isolate, native_module));
+ std::make_unique<SampleTopTierCodeSizeTask>(isolate, native_module));
}
}
@@ -880,7 +880,7 @@ void WasmEngine::TriggerGC(int8_t gc_sequence_index) {
for (auto* isolate : native_modules_[entry.first]->isolates) {
auto& gc_task = current_gc_info_->outstanding_isolates[isolate];
if (!gc_task) {
- auto new_task = base::make_unique<WasmGCForegroundTask>(isolate);
+ auto new_task = std::make_unique<WasmGCForegroundTask>(isolate);
gc_task = new_task.get();
DCHECK_EQ(1, isolates_.count(isolate));
isolates_[isolate]->foreground_task_runner->PostTask(
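
The base::make_unique to std::make_unique replacements in this file (and below in wasm-js.cc) are mechanical: base::make_unique was a pre-C++14 polyfill, and std::make_unique from <memory> is a drop-in replacement now that the codebase builds as C++14. A minimal self-contained illustration (the struct is hypothetical):

#include <memory>

struct IsolateInfoLike {
  explicit IsolateInfoLike(int id) : id(id) {}
  int id;
};

int main() {
  // Before: auto info = base::make_unique<IsolateInfoLike>(1);
  auto info = std::make_unique<IsolateInfoLike>(1);  // C++14 standard version
  return info->id == 1 ? 0 : 1;
}
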
diff --git a/deps/v8/src/wasm/wasm-engine.h b/deps/v8/src/wasm/wasm-engine.h
index 401cf2b880..424f85fa79 100644
--- a/deps/v8/src/wasm/wasm-engine.h
+++ b/deps/v8/src/wasm/wasm-engine.h
@@ -10,7 +10,6 @@
#include "src/tasks/cancelable-task.h"
#include "src/wasm/wasm-code-manager.h"
-#include "src/wasm/wasm-memory.h"
#include "src/wasm/wasm-tier.h"
#include "src/zone/accounting-allocator.h"
@@ -23,6 +22,7 @@ class CompilationStatistics;
class HeapNumber;
class WasmInstanceObject;
class WasmModuleObject;
+class JSArrayBuffer;
namespace wasm {
@@ -120,8 +120,6 @@ class V8_EXPORT_PRIVATE WasmEngine {
WasmCodeManager* code_manager() { return &code_manager_; }
- WasmMemoryTracker* memory_tracker() { return &memory_tracker_; }
-
AccountingAllocator* allocator() { return &allocator_; }
// Compilation statistics for TurboFan compilations.
@@ -156,8 +154,8 @@ class V8_EXPORT_PRIVATE WasmEngine {
template <typename T, typename... Args>
std::unique_ptr<T> NewBackgroundCompileTask(Args&&... args) {
- return base::make_unique<T>(&background_compile_task_manager_,
- std::forward<Args>(args)...);
+ return std::make_unique<T>(&background_compile_task_manager_,
+ std::forward<Args>(args)...);
}
// Trigger code logging for this WasmCode in all Isolates which have access to
@@ -243,7 +241,6 @@ class V8_EXPORT_PRIVATE WasmEngine {
// calling this method.
void PotentiallyFinishCurrentGC();
- WasmMemoryTracker memory_tracker_;
WasmCodeManager code_manager_;
AccountingAllocator allocator_;
diff --git a/deps/v8/src/wasm/wasm-external-refs.cc b/deps/v8/src/wasm/wasm-external-refs.cc
index 9ca45183ef..13c159c0ef 100644
--- a/deps/v8/src/wasm/wasm-external-refs.cc
+++ b/deps/v8/src/wasm/wasm-external-refs.cc
@@ -247,6 +247,10 @@ int32_t int64_mod_wrapper(Address data) {
if (divisor == 0) {
return 0;
}
+ if (divisor == -1 && dividend == std::numeric_limits<int64_t>::min()) {
+ WriteUnalignedValue<int64_t>(data, 0);
+ return 1;
+ }
WriteUnalignedValue<int64_t>(data, dividend % divisor);
return 1;
}
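
The new guard in int64_mod_wrapper matters because evaluating INT64_MIN % -1 in C++ is undefined behavior (the implied quotient overflows int64_t), while WebAssembly defines i64.rem_s of those operands as 0. A standalone sketch of the special case (wasm_rem_s is an illustrative helper, not the V8 function):

#include <cstdint>
#include <limits>

int64_t wasm_rem_s(int64_t dividend, int64_t divisor) {
  // The caller is assumed to have already trapped on divisor == 0.
  if (divisor == -1 && dividend == std::numeric_limits<int64_t>::min()) {
    return 0;  // Result defined by the wasm spec; avoids C++ UB.
  }
  return dividend % divisor;
}
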
diff --git a/deps/v8/src/wasm/wasm-feature-flags.h b/deps/v8/src/wasm/wasm-feature-flags.h
index 36f9ebd8a4..b18fa90acf 100644
--- a/deps/v8/src/wasm/wasm-feature-flags.h
+++ b/deps/v8/src/wasm/wasm-feature-flags.h
@@ -10,12 +10,12 @@
V(eh, "exception handling opcodes", false) \
V(threads, "thread opcodes", false) \
V(simd, "SIMD opcodes", false) \
- V(bigint, "JS BigInt support", false) \
V(return_call, "return call opcodes", false) \
V(compilation_hints, "compilation hints section", false)
#define FOREACH_WASM_STAGING_FEATURE_FLAG(V) \
V(anyref, "anyref opcodes", false) \
+ V(bigint, "JS BigInt support", false) \
V(type_reflection, "wasm type reflection in JS", false)
#define FOREACH_WASM_SHIPPED_FEATURE_FLAG(V) \
diff --git a/deps/v8/src/wasm/wasm-interpreter.cc b/deps/v8/src/wasm/wasm-interpreter.cc
index 299128860d..7c41f6a8e0 100644
--- a/deps/v8/src/wasm/wasm-interpreter.cc
+++ b/deps/v8/src/wasm/wasm-interpreter.cc
@@ -1128,13 +1128,41 @@ class ThreadImpl {
};
public:
+ // The {ReferenceStackScope} sets up the reference stack in the interpreter.
+  // The handle to the reference stack has to be re-initialized every time we
+  // call into the interpreter because there is no HandleScope that could
+  // contain that handle. A global handle is not an option because it could
+  // lead to a memory leak: a reference to the {WasmInstanceObject} put onto
+  // the reference stack would transitively keep the interpreter alive.
+ class ReferenceStackScope {
+ public:
+ explicit ReferenceStackScope(ThreadImpl* impl) : impl_(impl) {
+      // If the reference stack is already initialized, there is nothing to
+      // do.
+ if (!impl_->reference_stack_cell_.is_null()) return;
+ impl_->reference_stack_cell_ = handle(
+ impl_->instance_object_->debug_info().interpreter_reference_stack(),
+ impl_->isolate_);
+ // We initialized the reference stack, so we also have to reset it later.
+ do_reset_stack_ = true;
+ }
+
+ ~ReferenceStackScope() {
+ if (do_reset_stack_) {
+ impl_->reference_stack_cell_ = Handle<Cell>();
+ }
+ }
+
+ private:
+ ThreadImpl* impl_;
+ bool do_reset_stack_ = false;
+ };
+
ThreadImpl(Zone* zone, CodeMap* codemap,
- Handle<WasmInstanceObject> instance_object,
- Handle<Cell> reference_stack_cell)
+ Handle<WasmInstanceObject> instance_object)
: codemap_(codemap),
isolate_(instance_object->GetIsolate()),
instance_object_(instance_object),
- reference_stack_cell_(reference_stack_cell),
frames_(zone),
activations_(zone) {}
@@ -1394,6 +1422,7 @@ class ThreadImpl {
};
friend class InterpretedFrameImpl;
+ friend class ReferenceStackScope;
CodeMap* codemap_;
Isolate* isolate_;
@@ -1663,9 +1692,15 @@ class ThreadImpl {
template <typename ctype, typename mtype>
bool ExecuteLoad(Decoder* decoder, InterpreterCode* code, pc_t pc,
- int* const len, MachineRepresentation rep) {
- MemoryAccessImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc),
- sizeof(ctype));
+ int* const len, MachineRepresentation rep,
+ int prefix_len = 0) {
+    // Some opcodes have a prefix byte, and MemoryAccessImmediate assumes that
+    // the memarg starts 1 byte after pc. We don't increment pc at the caller
+    // because pc should stay at the start of the operation; otherwise trap
+    // reporting and tracing would point into the middle of an opcode.
+ MemoryAccessImmediate<Decoder::kNoValidate> imm(
+ decoder, code->at(pc + prefix_len), sizeof(ctype));
uint32_t index = Pop().to<uint32_t>();
Address addr = BoundsCheckMem<mtype>(imm.offset, index);
if (!addr) {
@@ -1690,9 +1725,15 @@ class ThreadImpl {
template <typename ctype, typename mtype>
bool ExecuteStore(Decoder* decoder, InterpreterCode* code, pc_t pc,
- int* const len, MachineRepresentation rep) {
- MemoryAccessImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc),
- sizeof(ctype));
+ int* const len, MachineRepresentation rep,
+ int prefix_len = 0) {
+    // Some opcodes have a prefix byte, and MemoryAccessImmediate assumes that
+    // the memarg starts 1 byte after pc. We don't increment pc at the caller
+    // because pc should stay at the start of the operation; otherwise trap
+    // reporting and tracing would point into the middle of an opcode.
+ MemoryAccessImmediate<Decoder::kNoValidate> imm(
+ decoder, code->at(pc + prefix_len), sizeof(ctype));
ctype val = Pop().to<ctype>();
uint32_t index = Pop().to<uint32_t>();
@@ -2223,9 +2264,22 @@ class ThreadImpl {
EXTRACT_LANE_CASE(F32x4, f32x4)
EXTRACT_LANE_CASE(I64x2, i64x2)
EXTRACT_LANE_CASE(I32x4, i32x4)
- EXTRACT_LANE_CASE(I16x8, i16x8)
- EXTRACT_LANE_CASE(I8x16, i8x16)
#undef EXTRACT_LANE_CASE
+#define EXTRACT_LANE_EXTEND_CASE(format, name, sign, type) \
+ case kExpr##format##ExtractLane##sign: { \
+ SimdLaneImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc)); \
+ *len += 1; \
+ WasmValue val = Pop(); \
+ Simd128 s = val.to_s128(); \
+ auto ss = s.to_##name(); \
+ Push(WasmValue(static_cast<type>(ss.val[LANE(imm.lane, ss)]))); \
+ return true; \
+ }
+ EXTRACT_LANE_EXTEND_CASE(I16x8, i16x8, S, int32_t)
+ EXTRACT_LANE_EXTEND_CASE(I16x8, i16x8, U, uint32_t)
+ EXTRACT_LANE_EXTEND_CASE(I8x16, i8x16, S, int32_t)
+ EXTRACT_LANE_EXTEND_CASE(I8x16, i8x16, U, uint32_t)
+#undef EXTRACT_LANE_EXTEND_CASE
#define BINOP_CASE(op, name, stype, count, expr) \
case kExpr##op: { \
WasmValue v2 = Pop(); \
@@ -2317,8 +2371,10 @@ class ThreadImpl {
}
UNOP_CASE(F64x2Abs, f64x2, float2, 2, std::abs(a))
UNOP_CASE(F64x2Neg, f64x2, float2, 2, -a)
+ UNOP_CASE(F64x2Sqrt, f64x2, float2, 2, std::sqrt(a))
UNOP_CASE(F32x4Abs, f32x4, float4, 4, std::abs(a))
UNOP_CASE(F32x4Neg, f32x4, float4, 4, -a)
+ UNOP_CASE(F32x4Sqrt, f32x4, float4, 4, std::sqrt(a))
UNOP_CASE(F32x4RecipApprox, f32x4, float4, 4, base::Recip(a))
UNOP_CASE(F32x4RecipSqrtApprox, f32x4, float4, 4, base::RecipSqrt(a))
UNOP_CASE(I64x2Neg, i64x2, int2, 2, base::NegateWithWraparound(a))
@@ -2431,10 +2487,12 @@ class ThreadImpl {
#undef REPLACE_LANE_CASE
case kExprS128LoadMem:
return ExecuteLoad<Simd128, Simd128>(decoder, code, pc, len,
- MachineRepresentation::kSimd128);
+ MachineRepresentation::kSimd128,
+ /*prefix_len=*/1);
case kExprS128StoreMem:
return ExecuteStore<Simd128, Simd128>(decoder, code, pc, len,
- MachineRepresentation::kSimd128);
+ MachineRepresentation::kSimd128,
+ /*prefix_len=*/1);
#define SHIFT_CASE(op, name, stype, count, expr) \
case kExpr##op: { \
uint32_t shift = Pop().to<uint32_t>(); \
@@ -2448,19 +2506,26 @@ class ThreadImpl {
Push(WasmValue(Simd128(res))); \
return true; \
}
- SHIFT_CASE(I64x2Shl, i64x2, int2, 2, static_cast<uint64_t>(a) << shift)
- SHIFT_CASE(I64x2ShrS, i64x2, int2, 2, a >> shift)
- SHIFT_CASE(I64x2ShrU, i64x2, int2, 2, static_cast<uint64_t>(a) >> shift)
- SHIFT_CASE(I32x4Shl, i32x4, int4, 4, static_cast<uint32_t>(a) << shift)
- SHIFT_CASE(I32x4ShrS, i32x4, int4, 4, a >> shift)
- SHIFT_CASE(I32x4ShrU, i32x4, int4, 4, static_cast<uint32_t>(a) >> shift)
- SHIFT_CASE(I16x8Shl, i16x8, int8, 8, static_cast<uint16_t>(a) << shift)
- SHIFT_CASE(I16x8ShrS, i16x8, int8, 8, a >> shift)
- SHIFT_CASE(I16x8ShrU, i16x8, int8, 8, static_cast<uint16_t>(a) >> shift)
- SHIFT_CASE(I8x16Shl, i8x16, int16, 16, static_cast<uint8_t>(a) << shift)
- SHIFT_CASE(I8x16ShrS, i8x16, int16, 16, a >> shift)
+ SHIFT_CASE(I64x2Shl, i64x2, int2, 2,
+ static_cast<uint64_t>(a) << (shift % 64))
+ SHIFT_CASE(I64x2ShrS, i64x2, int2, 2, a >> (shift % 64))
+ SHIFT_CASE(I64x2ShrU, i64x2, int2, 2,
+ static_cast<uint64_t>(a) >> (shift % 64))
+ SHIFT_CASE(I32x4Shl, i32x4, int4, 4,
+ static_cast<uint32_t>(a) << (shift % 32))
+ SHIFT_CASE(I32x4ShrS, i32x4, int4, 4, a >> (shift % 32))
+ SHIFT_CASE(I32x4ShrU, i32x4, int4, 4,
+ static_cast<uint32_t>(a) >> (shift % 32))
+ SHIFT_CASE(I16x8Shl, i16x8, int8, 8,
+ static_cast<uint16_t>(a) << (shift % 16))
+ SHIFT_CASE(I16x8ShrS, i16x8, int8, 8, a >> (shift % 16))
+ SHIFT_CASE(I16x8ShrU, i16x8, int8, 8,
+ static_cast<uint16_t>(a) >> (shift % 16))
+ SHIFT_CASE(I8x16Shl, i8x16, int16, 16,
+ static_cast<uint8_t>(a) << (shift % 8))
+ SHIFT_CASE(I8x16ShrS, i8x16, int16, 16, a >> (shift % 8))
SHIFT_CASE(I8x16ShrU, i8x16, int16, 16,
- static_cast<uint8_t>(a) >> shift)
+ static_cast<uint8_t>(a) >> (shift % 8))
#undef SHIFT_CASE
#define CONVERT_CASE(op, src_type, name, dst_type, count, start_index, ctype, \
expr) \
@@ -2564,6 +2629,18 @@ class ThreadImpl {
ADD_HORIZ_CASE(F32x4AddHoriz, f32x4, float4, 4)
ADD_HORIZ_CASE(I16x8AddHoriz, i16x8, int8, 8)
#undef ADD_HORIZ_CASE
+ case kExprS8x16Swizzle: {
+ int16 v2 = Pop().to_s128().to_i8x16();
+ int16 v1 = Pop().to_s128().to_i8x16();
+ int16 res;
+ for (size_t i = 0; i < kSimd128Size; ++i) {
+ int lane = v2.val[LANE(i, v1)];
+ res.val[LANE(i, v1)] =
+ lane < kSimd128Size && lane >= 0 ? v1.val[LANE(lane, v1)] : 0;
+ }
+ Push(WasmValue(Simd128(res)));
+ return true;
+ }
case kExprS8x16Shuffle: {
Simd8x16ShuffleImmediate<Decoder::kNoValidate> imm(decoder,
code->at(pc));
@@ -2604,6 +2681,23 @@ class ThreadImpl {
REDUCTION_CASE(S1x8AllTrue, i16x8, int8, 8, &)
REDUCTION_CASE(S1x16AllTrue, i8x16, int16, 16, &)
#undef REDUCTION_CASE
+#define QFM_CASE(op, name, stype, count, operation) \
+ case kExpr##op: { \
+ stype c = Pop().to_s128().to_##name(); \
+ stype b = Pop().to_s128().to_##name(); \
+ stype a = Pop().to_s128().to_##name(); \
+ stype res; \
+ for (size_t i = 0; i < count; i++) { \
+ res.val[i] = a.val[i] operation(b.val[i] * c.val[i]); \
+ } \
+ Push(WasmValue(Simd128(res))); \
+ return true; \
+ }
+ QFM_CASE(F32x4Qfma, f32x4, float4, 4, +)
+ QFM_CASE(F32x4Qfms, f32x4, float4, 4, -)
+ QFM_CASE(F64x2Qfma, f64x2, float2, 2, +)
+ QFM_CASE(F64x2Qfms, f64x2, float2, 2, -)
+#undef QFM_CASE
default:
return false;
}
@@ -2658,7 +2752,7 @@ class ThreadImpl {
WasmExceptionTag::cast(instance_object_->exceptions_table().get(index)),
isolate_);
uint32_t encoded_size = WasmExceptionPackage::GetEncodedSize(exception);
- Handle<Object> exception_object =
+ Handle<WasmExceptionPackage> exception_object =
WasmExceptionPackage::New(isolate_, exception_tag, encoded_size);
Handle<FixedArray> encoded_values = Handle<FixedArray>::cast(
WasmExceptionPackage::GetExceptionValues(isolate_, exception_object));
@@ -2727,8 +2821,9 @@ class ThreadImpl {
// Determines whether the given exception has a tag matching the expected tag
// for the given index within the exception table of the current instance.
bool MatchingExceptionTag(Handle<Object> exception_object, uint32_t index) {
- Handle<Object> caught_tag =
- WasmExceptionPackage::GetExceptionTag(isolate_, exception_object);
+ if (!exception_object->IsWasmExceptionPackage(isolate_)) return false;
+ Handle<Object> caught_tag = WasmExceptionPackage::GetExceptionTag(
+ isolate_, Handle<WasmExceptionPackage>::cast(exception_object));
Handle<Object> expected_tag =
handle(instance_object_->exceptions_table().get(index), isolate_);
DCHECK(expected_tag->IsWasmExceptionTag());
@@ -2755,8 +2850,9 @@ class ThreadImpl {
// the encoded values match the expected signature of the exception.
void DoUnpackException(const WasmException* exception,
Handle<Object> exception_object) {
- Handle<FixedArray> encoded_values = Handle<FixedArray>::cast(
- WasmExceptionPackage::GetExceptionValues(isolate_, exception_object));
+ Handle<FixedArray> encoded_values =
+ Handle<FixedArray>::cast(WasmExceptionPackage::GetExceptionValues(
+ isolate_, Handle<WasmExceptionPackage>::cast(exception_object)));
// Decode the exception values from the given exception package and push
// them onto the operand stack. This encoding has to be in sync with other
// backends so that exceptions can be passed between them.
@@ -3054,14 +3150,14 @@ class ThreadImpl {
len = 1 + imm.length;
break;
}
- case kExprGetLocal: {
+ case kExprLocalGet: {
LocalIndexImmediate<Decoder::kNoValidate> imm(&decoder, code->at(pc));
HandleScope handle_scope(isolate_); // Avoid leaking handles.
Push(GetStackValue(frames_.back().sp + imm.index));
len = 1 + imm.length;
break;
}
- case kExprSetLocal: {
+ case kExprLocalSet: {
LocalIndexImmediate<Decoder::kNoValidate> imm(&decoder, code->at(pc));
HandleScope handle_scope(isolate_); // Avoid leaking handles.
WasmValue val = Pop();
@@ -3069,7 +3165,7 @@ class ThreadImpl {
len = 1 + imm.length;
break;
}
- case kExprTeeLocal: {
+ case kExprLocalTee: {
LocalIndexImmediate<Decoder::kNoValidate> imm(&decoder, code->at(pc));
HandleScope handle_scope(isolate_); // Avoid leaking handles.
WasmValue val = Pop();
@@ -3231,7 +3327,7 @@ class ThreadImpl {
}
} break;
- case kExprGetGlobal: {
+ case kExprGlobalGet: {
GlobalIndexImmediate<Decoder::kNoValidate> imm(&decoder,
code->at(pc));
HandleScope handle_scope(isolate_);
@@ -3239,7 +3335,7 @@ class ThreadImpl {
len = 1 + imm.length;
break;
}
- case kExprSetGlobal: {
+ case kExprGlobalSet: {
GlobalIndexImmediate<Decoder::kNoValidate> imm(&decoder,
code->at(pc));
const WasmGlobal* global = &module()->globals[imm.index];
@@ -3770,7 +3866,8 @@ class ThreadImpl {
static WasmCode* GetTargetCode(Isolate* isolate, Address target) {
WasmCodeManager* code_manager = isolate->wasm_engine()->code_manager();
NativeModule* native_module = code_manager->LookupNativeModule(target);
- if (native_module->is_jump_table_slot(target)) {
+ WasmCode* code = native_module->Lookup(target);
+ if (code->kind() == WasmCode::kJumpTable) {
uint32_t func_index =
native_module->GetFunctionIndexFromJumpTableSlot(target);
@@ -3784,7 +3881,6 @@ class ThreadImpl {
return native_module->GetCode(func_index);
}
- WasmCode* code = native_module->Lookup(target);
DCHECK_EQ(code->instruction_start(), target);
return code;
}
@@ -3888,12 +3984,14 @@ class InterpretedFrameImpl {
}
WasmValue GetLocalValue(int index) const {
+ ThreadImpl::ReferenceStackScope stack_scope(thread_);
DCHECK_LE(0, index);
DCHECK_GT(GetLocalCount(), index);
return thread_->GetStackValue(static_cast<int>(frame()->sp) + index);
}
WasmValue GetStackValue(int index) const {
+ ThreadImpl::ReferenceStackScope stack_scope(thread_);
DCHECK_LE(0, index);
// Index must be within the number of stack values of this frame.
DCHECK_GT(GetStackHeight(), index);
@@ -3941,21 +4039,33 @@ const InterpretedFrameImpl* ToImpl(const InterpretedFrame* frame) {
// translation unit anyway.
//============================================================================
WasmInterpreter::State WasmInterpreter::Thread::state() {
- return ToImpl(this)->state();
+ ThreadImpl* impl = ToImpl(this);
+ ThreadImpl::ReferenceStackScope stack_scope(impl);
+ return impl->state();
}
void WasmInterpreter::Thread::InitFrame(const WasmFunction* function,
WasmValue* args) {
- ToImpl(this)->InitFrame(function, args);
+ ThreadImpl* impl = ToImpl(this);
+ ThreadImpl::ReferenceStackScope stack_scope(impl);
+ impl->InitFrame(function, args);
}
WasmInterpreter::State WasmInterpreter::Thread::Run(int num_steps) {
- return ToImpl(this)->Run(num_steps);
+ ThreadImpl* impl = ToImpl(this);
+ ThreadImpl::ReferenceStackScope stack_scope(impl);
+ return impl->Run(num_steps);
}
void WasmInterpreter::Thread::Pause() { return ToImpl(this)->Pause(); }
-void WasmInterpreter::Thread::Reset() { return ToImpl(this)->Reset(); }
+void WasmInterpreter::Thread::Reset() {
+ ThreadImpl* impl = ToImpl(this);
+ ThreadImpl::ReferenceStackScope stack_scope(impl);
+ return impl->Reset();
+}
WasmInterpreter::Thread::ExceptionHandlingResult
WasmInterpreter::Thread::RaiseException(Isolate* isolate,
Handle<Object> exception) {
- return ToImpl(this)->RaiseException(isolate, exception);
+ ThreadImpl* impl = ToImpl(this);
+ ThreadImpl::ReferenceStackScope stack_scope(impl);
+ return impl->RaiseException(isolate, exception);
}
pc_t WasmInterpreter::Thread::GetBreakpointPc() {
return ToImpl(this)->GetBreakpointPc();
@@ -3969,7 +4079,9 @@ WasmInterpreter::FramePtr WasmInterpreter::Thread::GetFrame(int index) {
return FramePtr(ToFrame(new InterpretedFrameImpl(ToImpl(this), index)));
}
WasmValue WasmInterpreter::Thread::GetReturnValue(int index) {
- return ToImpl(this)->GetReturnValue(index);
+ ThreadImpl* impl = ToImpl(this);
+ ThreadImpl::ReferenceStackScope stack_scope(impl);
+ return impl->GetReturnValue(index);
}
TrapReason WasmInterpreter::Thread::GetTrapReason() {
return ToImpl(this)->GetTrapReason();
@@ -3996,41 +4108,38 @@ uint32_t WasmInterpreter::Thread::NumActivations() {
return ToImpl(this)->NumActivations();
}
uint32_t WasmInterpreter::Thread::StartActivation() {
- return ToImpl(this)->StartActivation();
+ ThreadImpl* impl = ToImpl(this);
+ ThreadImpl::ReferenceStackScope stack_scope(impl);
+ return impl->StartActivation();
}
void WasmInterpreter::Thread::FinishActivation(uint32_t id) {
- ToImpl(this)->FinishActivation(id);
+ ThreadImpl* impl = ToImpl(this);
+ ThreadImpl::ReferenceStackScope stack_scope(impl);
+ impl->FinishActivation(id);
}
uint32_t WasmInterpreter::Thread::ActivationFrameBase(uint32_t id) {
- return ToImpl(this)->ActivationFrameBase(id);
+ ThreadImpl* impl = ToImpl(this);
+ ThreadImpl::ReferenceStackScope stack_scope(impl);
+ return impl->ActivationFrameBase(id);
}
//============================================================================
// The implementation details of the interpreter.
//============================================================================
-class WasmInterpreterInternals : public ZoneObject {
+class WasmInterpreterInternals {
public:
// Create a copy of the module bytes for the interpreter, since the passed
// pointer might be invalidated after constructing the interpreter.
const ZoneVector<uint8_t> module_bytes_;
CodeMap codemap_;
- ZoneVector<ThreadImpl> threads_;
+ std::vector<ThreadImpl> threads_;
WasmInterpreterInternals(Zone* zone, const WasmModule* module,
const ModuleWireBytes& wire_bytes,
Handle<WasmInstanceObject> instance_object)
: module_bytes_(wire_bytes.start(), wire_bytes.end(), zone),
- codemap_(module, module_bytes_.data(), zone),
- threads_(zone) {
- Isolate* isolate = instance_object->GetIsolate();
- Handle<Cell> reference_stack = isolate->global_handles()->Create(
- *isolate->factory()->NewCell(isolate->factory()->empty_fixed_array()));
- threads_.emplace_back(zone, &codemap_, instance_object, reference_stack);
- }
-
- ~WasmInterpreterInternals() {
- DCHECK_EQ(1, threads_.size());
- GlobalHandles::Destroy(threads_[0].reference_stack_cell().location());
+ codemap_(module, module_bytes_.data(), zone) {
+ threads_.emplace_back(zone, &codemap_, instance_object);
}
};
@@ -4059,10 +4168,12 @@ WasmInterpreter::WasmInterpreter(Isolate* isolate, const WasmModule* module,
const ModuleWireBytes& wire_bytes,
Handle<WasmInstanceObject> instance_object)
: zone_(isolate->allocator(), ZONE_NAME),
- internals_(new (&zone_) WasmInterpreterInternals(
+ internals_(new WasmInterpreterInternals(
&zone_, module, wire_bytes, MakeWeak(isolate, instance_object))) {}
-WasmInterpreter::~WasmInterpreter() { internals_->~WasmInterpreterInternals(); }
+// The destructor is defined here so that {WasmInterpreterInternals}, which
+// the {unique_ptr} in the header holds, only needs a forward declaration.
+WasmInterpreter::~WasmInterpreter() {}
void WasmInterpreter::Run() { internals_->threads_[0].Run(); }
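
Two of the interpreter changes above deserve a note: prefixed SIMD load/store opcodes now pass prefix_len so the memarg is decoded from the correct byte, and the SHIFT_CASE rewrite reduces the shift count modulo the lane width, matching the SIMD proposal's semantics for out-of-range shift counts and avoiding undefined C++ shifts. A standalone sketch of the masked shift for the i8x16 case (illustrative helper, not V8 code):

#include <array>
#include <cstdint>

std::array<uint8_t, 16> i8x16_shl(std::array<uint8_t, 16> v, uint32_t shift) {
  shift %= 8;  // Lane width for i8x16; i16x8/i32x4/i64x2 mask by 16/32/64.
  for (uint8_t& lane : v) {
    lane = static_cast<uint8_t>(lane << shift);
  }
  return v;
}
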
diff --git a/deps/v8/src/wasm/wasm-interpreter.h b/deps/v8/src/wasm/wasm-interpreter.h
index da0ce01835..4eb0675aba 100644
--- a/deps/v8/src/wasm/wasm-interpreter.h
+++ b/deps/v8/src/wasm/wasm-interpreter.h
@@ -5,6 +5,8 @@
#ifndef V8_WASM_WASM_INTERPRETER_H_
#define V8_WASM_WASM_INTERPRETER_H_
+#include <memory>
+
#include "src/wasm/wasm-opcodes.h"
#include "src/wasm/wasm-value.h"
#include "src/zone/zone-containers.h"
@@ -131,7 +133,7 @@ class V8_EXPORT_PRIVATE WasmInterpreter {
// Stack inspection and modification.
pc_t GetBreakpointPc();
- // TODO(clemensh): Make this uint32_t.
+ // TODO(clemensb): Make this uint32_t.
int GetFrameCount();
// The InterpretedFrame is only valid as long as the Thread is paused.
FramePtr GetFrame(int index);
@@ -170,9 +172,12 @@ class V8_EXPORT_PRIVATE WasmInterpreter {
uint32_t ActivationFrameBase(uint32_t activation_id);
};
+ MOVE_ONLY_NO_DEFAULT_CONSTRUCTOR(WasmInterpreter);
+
WasmInterpreter(Isolate* isolate, const WasmModule* module,
const ModuleWireBytes& wire_bytes,
Handle<WasmInstanceObject> instance);
+
~WasmInterpreter();
//==========================================================================
@@ -214,7 +219,7 @@ class V8_EXPORT_PRIVATE WasmInterpreter {
private:
Zone zone_;
- WasmInterpreterInternals* internals_;
+ std::unique_ptr<WasmInterpreterInternals> internals_;
};
} // namespace wasm
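
The header now stores WasmInterpreterInternals in a std::unique_ptr while only forward-declaring the type; that is exactly why the destructor stays out of line in the .cc file, where the type is complete and the unique_ptr deleter can be instantiated. A single-translation-unit sketch of the idiom (all names hypothetical):

#include <memory>

class Internals;  // Forward declaration only; definition lives in the .cc.

class Interpreter {
 public:
  Interpreter();
  ~Interpreter();  // Declared here, defined where Internals is complete.

 private:
  std::unique_ptr<Internals> internals_;
};

// ---- ".cc" part ----
class Internals { /* full definition */ };

Interpreter::Interpreter() : internals_(new Internals()) {}
// unique_ptr's deleter needs the complete type, so the destructor cannot
// be defaulted in the header.
Interpreter::~Interpreter() = default;
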
diff --git a/deps/v8/src/wasm/wasm-js.cc b/deps/v8/src/wasm/wasm-js.cc
index f10f5ff2bf..80d2fcb059 100644
--- a/deps/v8/src/wasm/wasm-js.cc
+++ b/deps/v8/src/wasm/wasm-js.cc
@@ -26,7 +26,6 @@
#include "src/wasm/streaming-decoder.h"
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-limits.h"
-#include "src/wasm/wasm-memory.h"
#include "src/wasm/wasm-objects-inl.h"
#include "src/wasm/wasm-serialization.h"
@@ -207,20 +206,20 @@ i::wasm::ModuleWireBytes GetFirstArgumentAsBytes(
if (source->IsArrayBuffer()) {
// A raw array buffer was passed.
Local<ArrayBuffer> buffer = Local<ArrayBuffer>::Cast(source);
- ArrayBuffer::Contents contents = buffer->GetContents();
+ auto backing_store = buffer->GetBackingStore();
- start = reinterpret_cast<const uint8_t*>(contents.Data());
- length = contents.ByteLength();
+ start = reinterpret_cast<const uint8_t*>(backing_store->Data());
+ length = backing_store->ByteLength();
*is_shared = buffer->IsSharedArrayBuffer();
} else if (source->IsTypedArray()) {
// A TypedArray was passed.
Local<TypedArray> array = Local<TypedArray>::Cast(source);
Local<ArrayBuffer> buffer = array->Buffer();
- ArrayBuffer::Contents contents = buffer->GetContents();
+ auto backing_store = buffer->GetBackingStore();
- start =
- reinterpret_cast<const uint8_t*>(contents.Data()) + array->ByteOffset();
+ start = reinterpret_cast<const uint8_t*>(backing_store->Data()) +
+ array->ByteOffset();
length = array->ByteLength();
*is_shared = buffer->IsSharedArrayBuffer();
} else {
@@ -434,8 +433,8 @@ class AsyncInstantiateCompileResultResolver
finished_ = true;
isolate_->wasm_engine()->AsyncInstantiate(
isolate_,
- base::make_unique<InstantiateBytesResultResolver>(isolate_, promise_,
- result),
+ std::make_unique<InstantiateBytesResultResolver>(isolate_, promise_,
+ result),
result, maybe_imports_);
}
@@ -597,7 +596,7 @@ void WebAssemblyCompileStreaming(
i::Handle<i::Managed<WasmStreaming>> data =
i::Managed<WasmStreaming>::Allocate(
i_isolate, 0,
- base::make_unique<WasmStreaming::WasmStreamingImpl>(
+ std::make_unique<WasmStreaming::WasmStreamingImpl>(
isolate, kAPIMethodName, resolver));
DCHECK_NOT_NULL(i_isolate->wasm_streaming_callback());
@@ -876,7 +875,7 @@ void WebAssemblyInstantiateStreaming(
i::Handle<i::Managed<WasmStreaming>> data =
i::Managed<WasmStreaming>::Allocate(
i_isolate, 0,
- base::make_unique<WasmStreaming::WasmStreamingImpl>(
+ std::make_unique<WasmStreaming::WasmStreamingImpl>(
isolate, kAPIMethodName, compilation_resolver));
DCHECK_NOT_NULL(i_isolate->wasm_streaming_callback());
@@ -1156,7 +1155,7 @@ void WebAssemblyMemory(const v8::FunctionCallbackInfo<v8::Value>& args) {
return;
}
- bool is_shared_memory = false;
+ auto shared = i::SharedFlag::kNotShared;
auto enabled_features = i::wasm::WasmFeaturesFromIsolate(i_isolate);
if (enabled_features.threads) {
// Shared property of descriptor
@@ -1165,10 +1164,11 @@ void WebAssemblyMemory(const v8::FunctionCallbackInfo<v8::Value>& args) {
descriptor->Get(context, shared_key);
v8::Local<v8::Value> value;
if (maybe_value.ToLocal(&value)) {
- is_shared_memory = value->BooleanValue(isolate);
+ shared = value->BooleanValue(isolate) ? i::SharedFlag::kShared
+ : i::SharedFlag::kNotShared;
}
// Throw TypeError if shared is true, and the descriptor has no "maximum"
- if (is_shared_memory && maximum == -1) {
+ if (shared == i::SharedFlag::kShared && maximum == -1) {
thrower.TypeError(
"If shared is true, maximum property should be defined.");
return;
@@ -1177,13 +1177,12 @@ void WebAssemblyMemory(const v8::FunctionCallbackInfo<v8::Value>& args) {
i::Handle<i::JSObject> memory_obj;
if (!i::WasmMemoryObject::New(i_isolate, static_cast<uint32_t>(initial),
- static_cast<uint32_t>(maximum),
- is_shared_memory)
+ static_cast<uint32_t>(maximum), shared)
.ToHandle(&memory_obj)) {
thrower.RangeError("could not allocate memory");
return;
}
- if (is_shared_memory) {
+ if (shared == i::SharedFlag::kShared) {
i::Handle<i::JSArrayBuffer> buffer(
i::Handle<i::WasmMemoryObject>::cast(memory_obj)->array_buffer(),
i_isolate);
@@ -2034,8 +2033,8 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
JSFunction::EnsureHasInitialMap(module_constructor);
Handle<JSObject> module_proto(
JSObject::cast(module_constructor->instance_prototype()), isolate);
- Handle<Map> module_map =
- isolate->factory()->NewMap(i::WASM_MODULE_TYPE, WasmModuleObject::kSize);
+ Handle<Map> module_map = isolate->factory()->NewMap(
+ i::WASM_MODULE_OBJECT_TYPE, WasmModuleObject::kSize);
JSFunction::SetInitialMap(module_constructor, module_map, module_proto);
InstallFunc(isolate, module_constructor, "imports", WebAssemblyModuleImports,
1);
@@ -2055,7 +2054,7 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
Handle<JSObject> instance_proto(
JSObject::cast(instance_constructor->instance_prototype()), isolate);
Handle<Map> instance_map = isolate->factory()->NewMap(
- i::WASM_INSTANCE_TYPE, WasmInstanceObject::kSize);
+ i::WASM_INSTANCE_OBJECT_TYPE, WasmInstanceObject::kSize);
JSFunction::SetInitialMap(instance_constructor, instance_map, instance_proto);
InstallGetter(isolate, instance_proto, "exports",
WebAssemblyInstanceGetExports);
@@ -2075,8 +2074,8 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
JSFunction::EnsureHasInitialMap(table_constructor);
Handle<JSObject> table_proto(
JSObject::cast(table_constructor->instance_prototype()), isolate);
- Handle<Map> table_map =
- isolate->factory()->NewMap(i::WASM_TABLE_TYPE, WasmTableObject::kSize);
+ Handle<Map> table_map = isolate->factory()->NewMap(i::WASM_TABLE_OBJECT_TYPE,
+ WasmTableObject::kSize);
JSFunction::SetInitialMap(table_constructor, table_map, table_proto);
InstallGetter(isolate, table_proto, "length", WebAssemblyTableGetLength);
InstallFunc(isolate, table_proto, "grow", WebAssemblyTableGrow, 1);
@@ -2096,8 +2095,8 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
JSFunction::EnsureHasInitialMap(memory_constructor);
Handle<JSObject> memory_proto(
JSObject::cast(memory_constructor->instance_prototype()), isolate);
- Handle<Map> memory_map =
- isolate->factory()->NewMap(i::WASM_MEMORY_TYPE, WasmMemoryObject::kSize);
+ Handle<Map> memory_map = isolate->factory()->NewMap(
+ i::WASM_MEMORY_OBJECT_TYPE, WasmMemoryObject::kSize);
JSFunction::SetInitialMap(memory_constructor, memory_map, memory_proto);
InstallFunc(isolate, memory_proto, "grow", WebAssemblyMemoryGrow, 1);
InstallGetter(isolate, memory_proto, "buffer", WebAssemblyMemoryGetBuffer);
@@ -2115,8 +2114,8 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
JSFunction::EnsureHasInitialMap(global_constructor);
Handle<JSObject> global_proto(
JSObject::cast(global_constructor->instance_prototype()), isolate);
- Handle<Map> global_map =
- isolate->factory()->NewMap(i::WASM_GLOBAL_TYPE, WasmGlobalObject::kSize);
+ Handle<Map> global_map = isolate->factory()->NewMap(
+ i::WASM_GLOBAL_OBJECT_TYPE, WasmGlobalObject::kSize);
JSFunction::SetInitialMap(global_constructor, global_map, global_proto);
InstallFunc(isolate, global_proto, "valueOf", WebAssemblyGlobalValueOf, 0);
InstallGetterSetter(isolate, global_proto, "value", WebAssemblyGlobalGetValue,
@@ -2137,7 +2136,7 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
Handle<JSObject> exception_proto(
JSObject::cast(exception_constructor->instance_prototype()), isolate);
Handle<Map> exception_map = isolate->factory()->NewMap(
- i::WASM_EXCEPTION_TYPE, WasmExceptionObject::kSize);
+ i::WASM_EXCEPTION_OBJECT_TYPE, WasmExceptionObject::kSize);
JSFunction::SetInitialMap(exception_constructor, exception_map,
exception_proto);
}
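
The GetContents() to GetBackingStore() migration above swaps raw pointers for a std::shared_ptr<v8::BackingStore> that keeps the allocation alive for as long as it is held. A minimal sketch against the V8 7.9 public API; CopyBytes is a hypothetical helper:

#include <cstdint>
#include <memory>
#include <vector>

#include "v8.h"

std::vector<uint8_t> CopyBytes(v8::Local<v8::ArrayBuffer> buffer) {
  // Holding the shared_ptr keeps the allocation alive, unlike the raw
  // pointers the deprecated GetContents() handed out.
  std::shared_ptr<v8::BackingStore> store = buffer->GetBackingStore();
  const uint8_t* data = static_cast<const uint8_t*>(store->Data());
  return std::vector<uint8_t>(data, data + store->ByteLength());
}
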
diff --git a/deps/v8/src/wasm/wasm-limits.h b/deps/v8/src/wasm/wasm-limits.h
index c7c95aca26..6dc652aba2 100644
--- a/deps/v8/src/wasm/wasm-limits.h
+++ b/deps/v8/src/wasm/wasm-limits.h
@@ -36,6 +36,7 @@ constexpr size_t kV8MaxWasmFunctionLocals = 50000;
constexpr size_t kV8MaxWasmFunctionParams = 1000;
constexpr size_t kV8MaxWasmFunctionMultiReturns = 1000;
constexpr size_t kV8MaxWasmFunctionReturns = 1;
+constexpr size_t kV8MaxWasmFunctionBrTableSize = 65520;
// Don't use this limit directly, but use the value of FLAG_wasm_max_table_size.
constexpr size_t kV8MaxWasmTableSize = 10000000;
constexpr size_t kV8MaxWasmTableInitEntries = 10000000;
diff --git a/deps/v8/src/wasm/wasm-memory.cc b/deps/v8/src/wasm/wasm-memory.cc
index f203649542..bbb0d67f9c 100644
--- a/deps/v8/src/wasm/wasm-memory.cc
+++ b/deps/v8/src/wasm/wasm-memory.cc
@@ -566,7 +566,7 @@ MaybeHandle<JSArrayBuffer> AllocateAndSetupArrayBuffer(Isolate* isolate,
WasmMemoryTracker* memory_tracker = isolate->wasm_engine()->memory_tracker();
- // Set by TryAllocateBackingStore or GetEmptyBackingStore
+ // Set by TryAllocateBackingStore.
void* allocation_base = nullptr;
size_t allocation_length = 0;
diff --git a/deps/v8/src/wasm/wasm-memory.h b/deps/v8/src/wasm/wasm-memory.h
deleted file mode 100644
index ecb6203ac5..0000000000
--- a/deps/v8/src/wasm/wasm-memory.h
+++ /dev/null
@@ -1,289 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_WASM_WASM_MEMORY_H_
-#define V8_WASM_WASM_MEMORY_H_
-
-#include <atomic>
-#include <unordered_map>
-#include <unordered_set>
-
-#include "src/base/platform/mutex.h"
-#include "src/flags/flags.h"
-#include "src/handles/handles.h"
-#include "src/objects/js-array-buffer.h"
-
-namespace v8 {
-namespace internal {
-namespace wasm {
-
-// The {WasmMemoryTracker} tracks reservations and allocations for wasm memory
-// and wasm code. There is an upper limit on the total reserved memory which is
-// checked by this class. Allocations are stored so we can look them up when an
-// array buffer dies and figure out the reservation and allocation bounds for
-// that buffer.
-class WasmMemoryTracker {
- public:
- WasmMemoryTracker() = default;
- V8_EXPORT_PRIVATE ~WasmMemoryTracker();
-
- // ReserveAddressSpace attempts to increase the reserved address space counter
- // by {num_bytes}. Returns true if successful (meaning it is okay to go ahead
- // and reserve {num_bytes} bytes), false otherwise.
- bool ReserveAddressSpace(size_t num_bytes);
-
- void RegisterAllocation(Isolate* isolate, void* allocation_base,
- size_t allocation_length, void* buffer_start,
- size_t buffer_length);
-
- struct SharedMemoryObjectState {
- Handle<WasmMemoryObject> memory_object;
- Isolate* isolate;
-
- SharedMemoryObjectState() = default;
- SharedMemoryObjectState(Handle<WasmMemoryObject> memory_object,
- Isolate* isolate)
- : memory_object(memory_object), isolate(isolate) {}
- };
-
- struct AllocationData {
- void* allocation_base = nullptr;
- size_t allocation_length = 0;
- void* buffer_start = nullptr;
- size_t buffer_length = 0;
- bool is_shared = false;
- // Wasm memories are growable by default, this will be false only when
- // shared with an asmjs module.
- bool is_growable = true;
-
- // Track Wasm Memory instances across isolates, this is populated on
- // PostMessage using persistent handles for memory objects.
- std::vector<WasmMemoryTracker::SharedMemoryObjectState>
- memory_object_vector;
-
- private:
- AllocationData() = default;
- AllocationData(void* allocation_base, size_t allocation_length,
- void* buffer_start, size_t buffer_length)
- : allocation_base(allocation_base),
- allocation_length(allocation_length),
- buffer_start(buffer_start),
- buffer_length(buffer_length) {
- DCHECK_LE(reinterpret_cast<uintptr_t>(allocation_base),
- reinterpret_cast<uintptr_t>(buffer_start));
- DCHECK_GE(
- reinterpret_cast<uintptr_t>(allocation_base) + allocation_length,
- reinterpret_cast<uintptr_t>(buffer_start));
- DCHECK_GE(
- reinterpret_cast<uintptr_t>(allocation_base) + allocation_length,
- reinterpret_cast<uintptr_t>(buffer_start) + buffer_length);
- }
-
- friend WasmMemoryTracker;
- };
-
- // Allow tests to allocate a backing store the same way as we do it for
- // WebAssembly memory. This is used in unit tests for trap handler to
- // generate the same signals/exceptions for invalid memory accesses as
- // we would get with WebAssembly memory.
- V8_EXPORT_PRIVATE void* TryAllocateBackingStoreForTesting(
- Heap* heap, size_t size, void** allocation_base,
- size_t* allocation_length);
-
- // Free memory allocated with TryAllocateBackingStoreForTesting.
- V8_EXPORT_PRIVATE void FreeBackingStoreForTesting(base::AddressRegion memory,
- void* buffer_start);
-
- // Decreases the amount of reserved address space.
- void ReleaseReservation(size_t num_bytes);
-
- V8_EXPORT_PRIVATE bool IsWasmMemory(const void* buffer_start);
-
- bool IsWasmSharedMemory(const void* buffer_start);
-
- // Returns a pointer to a Wasm buffer's allocation data, or nullptr if the
- // buffer is not tracked.
- V8_EXPORT_PRIVATE const AllocationData* FindAllocationData(
- const void* buffer_start);
-
- // Free Memory allocated by the Wasm memory tracker
- bool FreeWasmMemory(Isolate* isolate, const void* buffer_start);
-
- void MarkWasmMemoryNotGrowable(Handle<JSArrayBuffer> buffer);
-
- bool IsWasmMemoryGrowable(Handle<JSArrayBuffer> buffer);
-
- // When WebAssembly.Memory is transferred over PostMessage, register the
- // allocation as shared and track the memory objects that will need
- // updating if memory is resized.
- void RegisterWasmMemoryAsShared(Handle<WasmMemoryObject> object,
- Isolate* isolate);
-
- // This method is called when the underlying backing store is grown, but
- // instances that share the backing_store have not yet been updated.
- void SetPendingUpdateOnGrow(Handle<JSArrayBuffer> old_buffer,
- size_t new_size);
-
- // Interrupt handler for GROW_SHARED_MEMORY interrupt. Update memory objects
- // and instances that share the memory objects after a Grow call.
- void UpdateSharedMemoryInstances(Isolate* isolate);
-
- // Due to timing of when buffers are garbage collected, vs. when isolate
- // object handles are destroyed, it is possible to leak global handles. To
- // avoid this, cleanup any global handles on isolate destruction if any exist.
- void DeleteSharedMemoryObjectsOnIsolate(Isolate* isolate);
-
- // Allocation results are reported to UMA
- //
- // See wasm_memory_allocation_result in counters.h
- enum class AllocationStatus {
- kSuccess, // Succeeded on the first try
-
- kSuccessAfterRetry, // Succeeded after garbage collection
-
- kAddressSpaceLimitReachedFailure, // Failed because Wasm is at its address
- // space limit
-
- kOtherFailure // Failed for an unknown reason
- };
-
- private:
- // Helper methods to free memory only if not shared by other isolates, memory
- // objects.
- void FreeMemoryIfNotShared_Locked(Isolate* isolate,
- const void* backing_store);
- bool CanFreeSharedMemory_Locked(const void* backing_store);
- void RemoveSharedBufferState_Locked(Isolate* isolate,
- const void* backing_store);
-
- // Registers the allocation as shared, and tracks all the memory objects
- // associates with this allocation across isolates.
- void RegisterSharedWasmMemory_Locked(Handle<WasmMemoryObject> object,
- Isolate* isolate);
-
- // Map the new size after grow to the buffer backing store, so that instances
- // and memory objects that share the WebAssembly.Memory across isolates can
- // be updated..
- void AddBufferToGrowMap_Locked(Handle<JSArrayBuffer> old_buffer,
- size_t new_size);
-
- // Trigger a GROW_SHARED_MEMORY interrupt on all the isolates that have memory
- // objects that share this buffer.
- void TriggerSharedGrowInterruptOnAllIsolates_Locked(
- Handle<JSArrayBuffer> old_buffer);
-
- // When isolates hit a stack check, update the memory objects associated with
- // that isolate.
- void UpdateSharedMemoryStateOnInterrupt_Locked(Isolate* isolate,
- void* backing_store,
- size_t new_size);
-
- // Check if all the isolates that share a backing_store have hit a stack
- // check. If a stack check is hit, and the backing store is pending grow,
- // this isolate will have updated memory objects.
- bool AreAllIsolatesUpdated_Locked(const void* backing_store);
-
- // If a grow call is made to a buffer with a pending grow, and all the
- // isolates that share this buffer have not hit a StackCheck, clear the set of
- // already updated instances so they can be updated with the new size on the
- // most recent grow call.
- void ClearUpdatedInstancesOnPendingGrow_Locked(const void* backing_store);
-
- // Helper functions to update memory objects on grow, and maintain state for
- // which isolates hit a stack check.
- void UpdateMemoryObjectsForIsolate_Locked(Isolate* isolate,
- void* backing_store,
- size_t new_size);
- bool MemoryObjectsNeedUpdate_Locked(Isolate* isolate,
- const void* backing_store);
-
- // Destroy global handles to memory objects, and remove backing store from
- // isolates_per_buffer on Free.
- void DestroyMemoryObjectsAndRemoveIsolateEntry_Locked(
- Isolate* isolate, const void* backing_store);
- void DestroyMemoryObjectsAndRemoveIsolateEntry_Locked(
- const void* backing_store);
-
- void RemoveIsolateFromBackingStore_Locked(Isolate* isolate,
- const void* backing_store);
-
- // Removes an allocation from the tracker.
- AllocationData ReleaseAllocation_Locked(Isolate* isolate,
- const void* buffer_start);
-
- // Clients use a two-part process. First they "reserve" the address space,
- // which signifies an intent to actually allocate it. This determines whether
- // doing the allocation would put us over our limit. Once there is a
- // reservation, clients can do the allocation and register the result.
- //
- // We should always have:
- // allocated_address_space_ <= reserved_address_space_ <= kAddressSpaceLimit
- std::atomic<size_t> reserved_address_space_{0};
-
- // Used to protect access to the allocated address space counter and
- // allocation map. This is needed because Wasm memories can be freed on
- // another thread by the ArrayBufferTracker.
- base::Mutex mutex_;
-
- size_t allocated_address_space_ = 0;
-
- //////////////////////////////////////////////////////////////////////////////
- // Protected by {mutex_}:
-
- // Track Wasm memory allocation information. This is keyed by the start of the
- // buffer, rather than by the start of the allocation.
- std::unordered_map<const void*, AllocationData> allocations_;
-
- // Maps each buffer to the isolates that share the backing store.
- std::unordered_map<const void*, std::unordered_set<Isolate*>>
- isolates_per_buffer_;
-
- // Maps which isolates have had a grow interrupt handled on the buffer. This
- // is maintained to ensure that the instances are updated with the right size
- // on Grow.
- std::unordered_map<const void*, std::unordered_set<Isolate*>>
- isolates_updated_on_grow_;
-
- // Maps backing stores(void*) to the size of the underlying memory in
- // (size_t). An entry to this map is made on a grow call to the corresponding
- // backing store. On consecutive grow calls to the same backing store,
- // the size entry is updated. This entry is made right after the mprotect
- // call to change the protections on a backing_store, so the memory objects
- // have not been updated yet. The backing store entry in this map is erased
- // when all the memory objects, or instances that share this backing store
- // have their bounds updated.
- std::unordered_map<void*, size_t> grow_update_map_;
-
- // End of fields protected by {mutex_}.
- //////////////////////////////////////////////////////////////////////////////
-
- DISALLOW_COPY_AND_ASSIGN(WasmMemoryTracker);
-};
-
-// Attempts to allocate an array buffer with guard regions suitable for trap
-// handling. If address space is not available, it will return a buffer with
-// mini-guards that will require bounds checks.
-V8_EXPORT_PRIVATE MaybeHandle<JSArrayBuffer> NewArrayBuffer(Isolate*,
- size_t size);
-
-// Attempts to allocate a SharedArrayBuffer with guard regions suitable for
-// trap handling. If address space is not available, it will try to reserve
-// up to the maximum for that memory. If all else fails, it will return a
-// buffer with mini-guards of initial size.
-V8_EXPORT_PRIVATE MaybeHandle<JSArrayBuffer> NewSharedArrayBuffer(
- Isolate*, size_t initial_size, size_t max_size);
-
-Handle<JSArrayBuffer> SetupArrayBuffer(
- Isolate*, void* backing_store, size_t size, bool is_external,
- SharedFlag shared = SharedFlag::kNotShared);
-
-V8_EXPORT_PRIVATE void DetachMemoryBuffer(Isolate* isolate,
- Handle<JSArrayBuffer> buffer,
- bool free_memory);
-
-} // namespace wasm
-} // namespace internal
-} // namespace v8
-
-#endif // V8_WASM_WASM_MEMORY_H_
diff --git a/deps/v8/src/wasm/wasm-module-builder.cc b/deps/v8/src/wasm/wasm-module-builder.cc
index d3874e1a34..0bbc104070 100644
--- a/deps/v8/src/wasm/wasm-module-builder.cc
+++ b/deps/v8/src/wasm/wasm-module-builder.cc
@@ -71,15 +71,15 @@ uint32_t WasmFunctionBuilder::AddLocal(ValueType type) {
}
void WasmFunctionBuilder::EmitGetLocal(uint32_t local_index) {
- EmitWithU32V(kExprGetLocal, local_index);
+ EmitWithU32V(kExprLocalGet, local_index);
}
void WasmFunctionBuilder::EmitSetLocal(uint32_t local_index) {
- EmitWithU32V(kExprSetLocal, local_index);
+ EmitWithU32V(kExprLocalSet, local_index);
}
void WasmFunctionBuilder::EmitTeeLocal(uint32_t local_index) {
- EmitWithU32V(kExprTeeLocal, local_index);
+ EmitWithU32V(kExprLocalTee, local_index);
}
void WasmFunctionBuilder::EmitCode(const byte* code, uint32_t code_size) {
@@ -505,7 +505,7 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer* buffer) const {
buffer->write_f64(global.init.val.f64_const);
break;
case WasmInitExpr::kGlobalIndex:
- buffer->write_u8(kExprGetGlobal);
+ buffer->write_u8(kExprGlobalGet);
buffer->write_u32v(global.init.val.global_index);
break;
case WasmInitExpr::kRefNullConst:
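
The kExprGetLocal/kExprSetLocal/kExprTeeLocal/kExprGetGlobal renames track the spec's current instruction names (local.get, local.set, local.tee, global.get); the opcode bytes themselves are unchanged. A minimal sketch of the wire encoding, assuming a hypothetical emitter and single-byte LEB128 indices:

#include <cstdint>
#include <vector>

// local.get (0x20) was formerly spelled get_local; the byte is the same.
void EmitLocalGet(std::vector<uint8_t>* code, uint8_t local_index) {
  code->push_back(0x20);         // local.get opcode
  code->push_back(local_index);  // LEB128 index; one byte suffices for < 128
}
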
diff --git a/deps/v8/src/wasm/wasm-module.cc b/deps/v8/src/wasm/wasm-module.cc
index 5a10368a8b..033f12ae24 100644
--- a/deps/v8/src/wasm/wasm-module.cc
+++ b/deps/v8/src/wasm/wasm-module.cc
@@ -22,6 +22,7 @@
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-objects-inl.h"
#include "src/wasm/wasm-result.h"
+#include "src/wasm/wasm-text.h"
namespace v8 {
namespace internal {
@@ -58,6 +59,57 @@ int GetExportWrapperIndex(const WasmModule* module, const FunctionSig* sig,
return result;
}
+// static
+int GetWasmFunctionOffset(const WasmModule* module, uint32_t func_index) {
+ const std::vector<WasmFunction>& functions = module->functions;
+ if (static_cast<uint32_t>(func_index) >= functions.size()) return -1;
+ DCHECK_GE(kMaxInt, functions[func_index].code.offset());
+ return static_cast<int>(functions[func_index].code.offset());
+}
+
+// static
+int GetContainingWasmFunction(const WasmModule* module, uint32_t byte_offset) {
+ const std::vector<WasmFunction>& functions = module->functions;
+
+ // Binary search for a function containing the given position.
+ int left = 0; // inclusive
+ int right = static_cast<int>(functions.size()); // exclusive
+  if (right == 0) return -1;  // No functions: nothing can contain the offset.
+ while (right - left > 1) {
+ int mid = left + (right - left) / 2;
+ if (functions[mid].code.offset() <= byte_offset) {
+ left = mid;
+ } else {
+ right = mid;
+ }
+ }
+  // If the found function does not contain the given position, return -1.
+ const WasmFunction& func = functions[left];
+ if (byte_offset < func.code.offset() ||
+ byte_offset >= func.code.end_offset()) {
+ return -1;
+ }
+
+ return left;
+}
+
+// static
+v8::debug::WasmDisassembly DisassembleWasmFunction(
+ const WasmModule* module, const ModuleWireBytes& wire_bytes,
+ int func_index) {
+ if (func_index < 0 ||
+ static_cast<uint32_t>(func_index) >= module->functions.size())
+ return {};
+
+ std::ostringstream disassembly_os;
+ v8::debug::WasmDisassembly::OffsetTable offset_table;
+
+ PrintWasmText(module, wire_bytes, static_cast<uint32_t>(func_index),
+ disassembly_os, &offset_table);
+
+ return {disassembly_os.str(), std::move(offset_table)};
+}
+
void WasmModule::AddFunctionNameForTesting(int function_index,
WireBytesRef name) {
if (!function_names) {
@@ -475,21 +527,19 @@ Handle<JSArray> GetCustomSections(Isolate* isolate,
// Make a copy of the payload data in the section.
size_t size = section.payload.length();
- void* memory =
- size == 0 ? nullptr : isolate->array_buffer_allocator()->Allocate(size);
-
- if (size && !memory) {
+ MaybeHandle<JSArrayBuffer> result =
+ isolate->factory()->NewJSArrayBufferAndBackingStore(
+ size, InitializedFlag::kUninitialized);
+ Handle<JSArrayBuffer> array_buffer;
+ if (!result.ToHandle(&array_buffer)) {
thrower->RangeError("out of memory allocating custom section data");
return Handle<JSArray>();
}
- Handle<JSArrayBuffer> buffer =
- isolate->factory()->NewJSArrayBuffer(SharedFlag::kNotShared);
- constexpr bool is_external = false;
- JSArrayBuffer::Setup(buffer, isolate, is_external, memory, size);
- memcpy(memory, wire_bytes.begin() + section.payload.offset(),
+ memcpy(array_buffer->backing_store(),
+ wire_bytes.begin() + section.payload.offset(),
section.payload.length());
- matching_sections.push_back(buffer);
+ matching_sections.push_back(array_buffer);
}
int num_custom_sections = static_cast<int>(matching_sections.size());
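
GetContainingWasmFunction above depends on the module's functions being sorted by code offset: it binary-searches for the last function starting at or before byte_offset, then checks that the offset falls inside that function's body. The same logic over plain ranges, as a standalone sketch:

#include <cstdint>
#include <vector>

struct FuncRange { uint32_t start; uint32_t end; };  // [start, end)

int ContainingFunction(const std::vector<FuncRange>& funcs,
                       uint32_t byte_offset) {
  if (funcs.empty()) return -1;
  int left = 0;                                // inclusive
  int right = static_cast<int>(funcs.size());  // exclusive
  while (right - left > 1) {
    int mid = left + (right - left) / 2;
    if (funcs[mid].start <= byte_offset) left = mid; else right = mid;
  }
  // The found function starts at or before byte_offset; verify containment.
  if (byte_offset < funcs[left].start || byte_offset >= funcs[left].end) {
    return -1;
  }
  return left;
}
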
diff --git a/deps/v8/src/wasm/wasm-module.h b/deps/v8/src/wasm/wasm-module.h
index 69c57725de..79c3b23a33 100644
--- a/deps/v8/src/wasm/wasm-module.h
+++ b/deps/v8/src/wasm/wasm-module.h
@@ -16,9 +16,13 @@
#include "src/wasm/wasm-opcodes.h"
namespace v8 {
+
+namespace debug {
+struct WasmDisassembly;
+}
+
namespace internal {
-class WasmDebugInfo;
class WasmModuleObject;
namespace wasm {
@@ -240,6 +244,25 @@ V8_EXPORT_PRIVATE int MaxNumExportWrappers(const WasmModule* module);
int GetExportWrapperIndex(const WasmModule* module, const FunctionSig* sig,
bool is_import);
+// Return the byte offset of the function identified by the given index.
+// The offset will be relative to the start of the module bytes.
+// Returns -1 if the function index is invalid.
+int GetWasmFunctionOffset(const WasmModule* module, uint32_t func_index);
+
+// Returns the function containing the given byte offset.
+// Returns -1 if the byte offset is not contained in any function of this
+// module.
+int GetContainingWasmFunction(const WasmModule* module, uint32_t byte_offset);
+
+// Compute the disassembly of a wasm function.
+// Returns the disassembly string and a list of <byte_offset, line, column>
+// entries, mapping wasm byte offsets to line and column in the disassembly.
+// The list is guaranteed to be ordered by the byte_offset.
+// Returns an empty string and empty vector if the function index is invalid.
+V8_EXPORT_PRIVATE debug::WasmDisassembly DisassembleWasmFunction(
+ const WasmModule* module, const ModuleWireBytes& wire_bytes,
+ int func_index);
+
// Interface to the storage (wire bytes) of a wasm module.
// It is illegal for anyone receiving a ModuleWireBytes to store pointers based
// on module_bytes, as this storage is only guaranteed to be alive as long as
@@ -290,15 +313,6 @@ struct WasmFunctionName {
std::ostream& operator<<(std::ostream& os, const WasmFunctionName& name);
-// Get the debug info associated with the given wasm object.
-// If no debug info exists yet, it is created automatically.
-Handle<WasmDebugInfo> GetDebugInfo(Handle<JSObject> wasm);
-
-V8_EXPORT_PRIVATE MaybeHandle<WasmModuleObject> CreateModuleObjectFromBytes(
- Isolate* isolate, const byte* start, const byte* end, ErrorThrower* thrower,
- ModuleOrigin origin, Handle<Script> asm_js_script,
- Vector<const byte> asm_offset_table);
-
V8_EXPORT_PRIVATE bool IsWasmCodegenAllowed(Isolate* isolate,
Handle<Context> context);
diff --git a/deps/v8/src/wasm/wasm-objects-inl.h b/deps/v8/src/wasm/wasm-objects-inl.h
index 66d3a2716e..a7f74381ae 100644
--- a/deps/v8/src/wasm/wasm-objects-inl.h
+++ b/deps/v8/src/wasm/wasm-objects-inl.h
@@ -88,12 +88,8 @@ ACCESSORS(WasmModuleObject, managed_native_module, Managed<wasm::NativeModule>,
kNativeModuleOffset)
ACCESSORS(WasmModuleObject, export_wrappers, FixedArray, kExportWrappersOffset)
ACCESSORS(WasmModuleObject, script, Script, kScriptOffset)
-ACCESSORS(WasmModuleObject, weak_instance_list, WeakArrayList,
- kWeakInstanceListOffset)
OPTIONAL_ACCESSORS(WasmModuleObject, asm_js_offset_table, ByteArray,
kAsmJsOffsetTableOffset)
-OPTIONAL_ACCESSORS(WasmModuleObject, breakpoint_infos, FixedArray,
- kBreakPointInfosOffset)
wasm::NativeModule* WasmModuleObject::native_module() const {
return managed_native_module().raw();
}
@@ -102,13 +98,9 @@ WasmModuleObject::shared_native_module() const {
return managed_native_module().get();
}
const wasm::WasmModule* WasmModuleObject::module() const {
- // TODO(clemensh): Remove this helper (inline in callers).
+ // TODO(clemensb): Remove this helper (inline in callers).
return native_module()->module();
}
-void WasmModuleObject::reset_breakpoint_infos() {
- WRITE_FIELD(*this, kBreakPointInfosOffset,
- GetReadOnlyRoots().undefined_value());
-}
bool WasmModuleObject::is_asm_js() {
bool asm_js = is_asmjs_module(module());
DCHECK_EQ(asm_js, script().IsUserJavaScript());
@@ -309,6 +301,10 @@ ACCESSORS(WasmExceptionObject, serialized_signature, PodArray<wasm::ValueType>,
kSerializedSignatureOffset)
ACCESSORS(WasmExceptionObject, exception_tag, HeapObject, kExceptionTagOffset)
+// WasmExceptionPackage
+OBJECT_CONSTRUCTORS_IMPL(WasmExceptionPackage, JSReceiver)
+CAST_ACCESSOR(WasmExceptionPackage)
+
// WasmExportedFunction
WasmExportedFunction::WasmExportedFunction(Address ptr) : JSFunction(ptr) {
SLOW_DCHECK(IsWasmExportedFunction(*this));
@@ -382,6 +378,8 @@ ACCESSORS(WasmIndirectFunctionTable, refs, FixedArray, kRefsOffset)
// WasmDebugInfo
ACCESSORS(WasmDebugInfo, wasm_instance, WasmInstanceObject, kInstanceOffset)
ACCESSORS(WasmDebugInfo, interpreter_handle, Object, kInterpreterHandleOffset)
+ACCESSORS(WasmDebugInfo, interpreter_reference_stack, Cell,
+ kInterpreterReferenceStackOffset)
OPTIONAL_ACCESSORS(WasmDebugInfo, locals_names, FixedArray, kLocalsNamesOffset)
OPTIONAL_ACCESSORS(WasmDebugInfo, c_wasm_entries, FixedArray,
kCWasmEntriesOffset)
diff --git a/deps/v8/src/wasm/wasm-objects.cc b/deps/v8/src/wasm/wasm-objects.cc
index d9417943a8..14e682ce23 100644
--- a/deps/v8/src/wasm/wasm-objects.cc
+++ b/deps/v8/src/wasm/wasm-objects.cc
@@ -25,10 +25,8 @@
#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-limits.h"
-#include "src/wasm/wasm-memory.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-objects-inl.h"
-#include "src/wasm/wasm-text.h"
#define TRACE(...) \
do { \
@@ -244,37 +242,40 @@ Handle<WasmModuleObject> WasmModuleObject::New(
isolate->factory()->NewJSObject(isolate->wasm_module_constructor()));
module_object->set_export_wrappers(*export_wrappers);
if (script->type() == Script::TYPE_WASM) {
- script->set_wasm_module_object(*module_object);
+ script->set_wasm_breakpoint_infos(
+ ReadOnlyRoots(isolate).empty_fixed_array());
+ script->set_wasm_managed_native_module(*managed_native_module);
+ script->set_wasm_weak_instance_list(
+ ReadOnlyRoots(isolate).empty_weak_array_list());
}
module_object->set_script(*script);
- module_object->set_weak_instance_list(
- ReadOnlyRoots(isolate).empty_weak_array_list());
module_object->set_managed_native_module(*managed_native_module);
return module_object;
}
-bool WasmModuleObject::SetBreakPoint(Handle<WasmModuleObject> module_object,
- int* position,
+// static
+bool WasmModuleObject::SetBreakPoint(Handle<Script> script, int* position,
Handle<BreakPoint> break_point) {
- Isolate* isolate = module_object->GetIsolate();
+ Isolate* isolate = script->GetIsolate();
// Find the function for this breakpoint.
- int func_index = module_object->GetContainingFunction(*position);
+ const WasmModule* module = script->wasm_native_module()->module();
+ int func_index = GetContainingWasmFunction(module, *position);
if (func_index < 0) return false;
- const WasmFunction& func = module_object->module()->functions[func_index];
+ const WasmFunction& func = module->functions[func_index];
int offset_in_func = *position - func.code.offset();
// According to the current design, we should only be called with valid
// breakable positions.
- DCHECK(IsBreakablePosition(module_object->native_module(), func_index,
+ DCHECK(IsBreakablePosition(script->wasm_native_module(), func_index,
offset_in_func));
// Insert new break point into break_positions of module object.
- WasmModuleObject::AddBreakpoint(module_object, *position, break_point);
+ WasmModuleObject::AddBreakpointToInfo(script, *position, break_point);
- // Iterate over all instances of this module and tell them to set this new
- // breakpoint. We do this using the weak list of all instances.
- Handle<WeakArrayList> weak_instance_list(module_object->weak_instance_list(),
+ // Iterate over all instances and tell them to set this new breakpoint.
+ // We do this using the weak list of all instances from the script.
+ Handle<WeakArrayList> weak_instance_list(script->wasm_weak_instance_list(),
isolate);
for (int i = 0; i < weak_instance_list->length(); ++i) {
MaybeObject maybe_instance = weak_instance_list->Get(i);
@@ -291,6 +292,42 @@ bool WasmModuleObject::SetBreakPoint(Handle<WasmModuleObject> module_object,
return true;
}
+// static
+bool WasmModuleObject::ClearBreakPoint(Handle<Script> script, int position,
+ Handle<BreakPoint> break_point) {
+ Isolate* isolate = script->GetIsolate();
+
+ // Find the function for this breakpoint.
+ const WasmModule* module = script->wasm_native_module()->module();
+ int func_index = GetContainingWasmFunction(module, position);
+ if (func_index < 0) return false;
+ const WasmFunction& func = module->functions[func_index];
+ int offset_in_func = position - func.code.offset();
+
+ if (!WasmModuleObject::RemoveBreakpointFromInfo(script, position,
+ break_point)) {
+ return false;
+ }
+
+ // Iterate over all instances and tell them to remove this breakpoint.
+ // We do this using the weak list of all instances from the script.
+ Handle<WeakArrayList> weak_instance_list(script->wasm_weak_instance_list(),
+ isolate);
+ for (int i = 0; i < weak_instance_list->length(); ++i) {
+ MaybeObject maybe_instance = weak_instance_list->Get(i);
+ if (maybe_instance->IsWeak()) {
+ Handle<WasmInstanceObject> instance(
+ WasmInstanceObject::cast(maybe_instance->GetHeapObjectAssumeWeak()),
+ isolate);
+ Handle<WasmDebugInfo> debug_info =
+ WasmInstanceObject::GetOrCreateDebugInfo(instance);
+ WasmDebugInfo::ClearBreakpoint(debug_info, func_index, offset_in_func);
+ }
+ }
+
+ return true;
+}
+
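Both SetBreakPoint above and the new ClearBreakPoint broadcast the change to every live instance via the script's weak instance list, skipping entries the GC has already cleared. The traversal pattern in isolation, sketched with standard weak references rather than V8's WeakArrayList (all names here are illustrative):

#include <memory>
#include <vector>

struct Instance { /* stands in for WasmInstanceObject */ };

// Call fn on every instance that is still alive; dead weak entries are
// skipped, mirroring the maybe_instance->IsWeak() check in the loops above.
template <typename Fn>
void ForEachLiveInstance(const std::vector<std::weak_ptr<Instance>>& list,
                         Fn fn) {
  for (const auto& weak : list) {
    if (auto instance = weak.lock()) fn(*instance);
  }
}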
namespace {
int GetBreakpointPos(Isolate* isolate, Object break_point_info_or_undef) {
@@ -323,17 +360,17 @@ int FindBreakpointInfoInsertPos(Isolate* isolate,
} // namespace
-void WasmModuleObject::AddBreakpoint(Handle<WasmModuleObject> module_object,
- int position,
- Handle<BreakPoint> break_point) {
- Isolate* isolate = module_object->GetIsolate();
+// static
+void WasmModuleObject::AddBreakpointToInfo(Handle<Script> script, int position,
+ Handle<BreakPoint> break_point) {
+ Isolate* isolate = script->GetIsolate();
Handle<FixedArray> breakpoint_infos;
- if (module_object->has_breakpoint_infos()) {
- breakpoint_infos = handle(module_object->breakpoint_infos(), isolate);
+ if (script->has_wasm_breakpoint_infos()) {
+ breakpoint_infos = handle(script->wasm_breakpoint_infos(), isolate);
} else {
breakpoint_infos =
isolate->factory()->NewFixedArray(4, AllocationType::kOld);
- module_object->set_breakpoint_infos(*breakpoint_infos);
+ script->set_wasm_breakpoint_infos(*breakpoint_infos);
}
int insert_pos =
@@ -357,7 +394,7 @@ void WasmModuleObject::AddBreakpoint(Handle<WasmModuleObject> module_object,
if (need_realloc) {
new_breakpoint_infos = isolate->factory()->NewFixedArray(
2 * breakpoint_infos->length(), AllocationType::kOld);
- module_object->set_breakpoint_infos(*new_breakpoint_infos);
+ script->set_wasm_breakpoint_infos(*new_breakpoint_infos);
// Copy over the entries [0, insert_pos).
for (int i = 0; i < insert_pos; ++i)
new_breakpoint_infos->set(i, breakpoint_infos->get(i));
@@ -379,16 +416,45 @@ void WasmModuleObject::AddBreakpoint(Handle<WasmModuleObject> module_object,
new_breakpoint_infos->set(insert_pos, *breakpoint_info);
}
+// static
+bool WasmModuleObject::RemoveBreakpointFromInfo(
+ Handle<Script> script, int position, Handle<BreakPoint> break_point) {
+ if (!script->has_wasm_breakpoint_infos()) return false;
+
+ Isolate* isolate = script->GetIsolate();
+ Handle<FixedArray> breakpoint_infos(script->wasm_breakpoint_infos(), isolate);
+
+ int pos = FindBreakpointInfoInsertPos(isolate, breakpoint_infos, position);
+
+ // Does a BreakPointInfo object already exist for this position?
+ if (pos == breakpoint_infos->length()) return false;
+
+ Handle<BreakPointInfo> info(BreakPointInfo::cast(breakpoint_infos->get(pos)),
+ isolate);
+ BreakPointInfo::ClearBreakPoint(isolate, info, break_point);
+
+ // Check if there are no more breakpoints at this location.
+ if (info->GetBreakPointCount(isolate) == 0) {
+ // Update array by moving breakpoints up one position.
+ for (int i = pos; i < breakpoint_infos->length() - 1; i++) {
+ Object entry = breakpoint_infos->get(i + 1);
+ breakpoint_infos->set(i, entry);
+ if (entry.IsUndefined(isolate)) break;
+ }
+ // Make sure last array element is empty as a result.
+ breakpoint_infos->set_undefined(breakpoint_infos->length() - 1);
+ }
+ return true;
+}
+
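RemoveBreakpointFromInfo compacts the FixedArray in place: entries after the removed slot shift left by one and the last slot is reset to undefined, so removal never reallocates. The same idea with std::optional standing in for undefined (a sketch, not the V8 code):

#include <cstddef>
#include <optional>
#include <vector>

// Shift the tail left by one and clear the last slot; stop early once an
// empty slot is reached, since everything after it is already empty.
// Precondition: !infos.empty() and pos < infos.size().
void RemoveAt(std::vector<std::optional<int>>& infos, size_t pos) {
  for (size_t i = pos; i + 1 < infos.size(); ++i) {
    infos[i] = infos[i + 1];
    if (!infos[i]) break;
  }
  infos.back() = std::nullopt;
}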
void WasmModuleObject::SetBreakpointsOnNewInstance(
- Handle<WasmModuleObject> module_object,
- Handle<WasmInstanceObject> instance) {
- if (!module_object->has_breakpoint_infos()) return;
- Isolate* isolate = module_object->GetIsolate();
+ Handle<Script> script, Handle<WasmInstanceObject> instance) {
+ if (!script->has_wasm_breakpoint_infos()) return;
+ Isolate* isolate = script->GetIsolate();
Handle<WasmDebugInfo> debug_info =
WasmInstanceObject::GetOrCreateDebugInfo(instance);
- Handle<FixedArray> breakpoint_infos(module_object->breakpoint_infos(),
- isolate);
+ Handle<FixedArray> breakpoint_infos(script->wasm_breakpoint_infos(), isolate);
// If the array exists, it should not be empty.
DCHECK_LT(0, breakpoint_infos->length());
@@ -404,9 +470,10 @@ void WasmModuleObject::SetBreakpointsOnNewInstance(
int position = breakpoint_info->source_position();
// Find the function for this breakpoint, and set the breakpoint.
- int func_index = module_object->GetContainingFunction(position);
+ const WasmModule* module = script->wasm_native_module()->module();
+ int func_index = GetContainingWasmFunction(module, position);
DCHECK_LE(0, func_index);
- const WasmFunction& func = module_object->module()->functions[func_index];
+ const WasmFunction& func = module->functions[func_index];
int offset_in_func = position - func.code.offset();
WasmDebugInfo::SetBreakpoint(debug_info, func_index, offset_in_func);
}
@@ -497,7 +564,7 @@ int WasmModuleObject::GetSourcePosition(Handle<WasmModuleObject> module_object,
if (module->origin == wasm::kWasmOrigin) {
// for non-asm.js modules, we just add the function's start offset
// to make a module-relative position.
- return byte_offset + module_object->GetFunctionOffset(func_index);
+ return byte_offset + GetWasmFunctionOffset(module, func_index);
}
// asm.js modules have an additional offset table that must be searched.
@@ -529,31 +596,15 @@ int WasmModuleObject::GetSourcePosition(Handle<WasmModuleObject> module_object,
return offset_table->get_int(kOTESize * left + idx);
}
-v8::debug::WasmDisassembly WasmModuleObject::DisassembleFunction(
- int func_index) {
- DisallowHeapAllocation no_gc;
-
- if (func_index < 0 ||
- static_cast<uint32_t>(func_index) >= module()->functions.size())
- return {};
-
- wasm::ModuleWireBytes wire_bytes(native_module()->wire_bytes());
-
- std::ostringstream disassembly_os;
- v8::debug::WasmDisassembly::OffsetTable offset_table;
-
- PrintWasmText(module(), wire_bytes, static_cast<uint32_t>(func_index),
- disassembly_os, &offset_table);
-
- return {disassembly_os.str(), std::move(offset_table)};
-}
-
+// static
bool WasmModuleObject::GetPossibleBreakpoints(
- const v8::debug::Location& start, const v8::debug::Location& end,
+ wasm::NativeModule* native_module, const v8::debug::Location& start,
+ const v8::debug::Location& end,
std::vector<v8::debug::BreakLocation>* locations) {
DisallowHeapAllocation no_gc;
- const std::vector<WasmFunction>& functions = module()->functions;
+ const std::vector<WasmFunction>& functions =
+ native_module->module()->functions;
if (start.GetLineNumber() < 0 || start.GetColumnNumber() < 0 ||
(!end.IsEmpty() &&
(end.GetLineNumber() < 0 || end.GetColumnNumber() < 0)))
@@ -595,7 +646,7 @@ bool WasmModuleObject::GetPossibleBreakpoints(
AccountingAllocator alloc;
Zone tmp(&alloc, ZONE_NAME);
- const byte* module_start = native_module()->wire_bytes().begin();
+ const byte* module_start = native_module->wire_bytes().begin();
for (uint32_t func_idx = start_func_index; func_idx <= end_func_index;
++func_idx) {
@@ -620,12 +671,12 @@ bool WasmModuleObject::GetPossibleBreakpoints(
return true;
}
+// static
MaybeHandle<FixedArray> WasmModuleObject::CheckBreakPoints(
- Isolate* isolate, Handle<WasmModuleObject> module_object, int position) {
- if (!module_object->has_breakpoint_infos()) return {};
+ Isolate* isolate, Handle<Script> script, int position) {
+ if (!script->has_wasm_breakpoint_infos()) return {};
- Handle<FixedArray> breakpoint_infos(module_object->breakpoint_infos(),
- isolate);
+ Handle<FixedArray> breakpoint_infos(script->wasm_breakpoint_infos(), isolate);
int insert_pos =
FindBreakpointInfoInsertPos(isolate, breakpoint_infos, position);
if (insert_pos >= breakpoint_infos->length()) return {};
@@ -709,60 +760,6 @@ Vector<const uint8_t> WasmModuleObject::GetRawFunctionName(
return Vector<const uint8_t>::cast(name);
}
-int WasmModuleObject::GetFunctionOffset(uint32_t func_index) {
- const std::vector<WasmFunction>& functions = module()->functions;
- if (static_cast<uint32_t>(func_index) >= functions.size()) return -1;
- DCHECK_GE(kMaxInt, functions[func_index].code.offset());
- return static_cast<int>(functions[func_index].code.offset());
-}
-
-int WasmModuleObject::GetContainingFunction(uint32_t byte_offset) {
- const std::vector<WasmFunction>& functions = module()->functions;
-
- // Binary search for a function containing the given position.
- int left = 0; // inclusive
- int right = static_cast<int>(functions.size()); // exclusive
- if (right == 0) return false;
- while (right - left > 1) {
- int mid = left + (right - left) / 2;
- if (functions[mid].code.offset() <= byte_offset) {
- left = mid;
- } else {
- right = mid;
- }
- }
- // If the found function does not contain the given position, return -1.
- const WasmFunction& func = functions[left];
- if (byte_offset < func.code.offset() ||
- byte_offset >= func.code.end_offset()) {
- return -1;
- }
-
- return left;
-}
-
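The deleted GetContainingFunction performs a binary search over the functions' sorted [offset, end_offset) ranges; judging by the new call sites above, it survives as the free function GetContainingWasmFunction. One quirk worth noting: for an empty function list the deleted code returned false (i.e. 0), which callers testing for a negative result would accept as a valid index. A self-contained sketch of the search that returns -1 in that case too:

#include <cstdint>
#include <vector>

struct FunctionRange {
  uint32_t offset;      // inclusive start within the module bytes
  uint32_t end_offset;  // exclusive end
};

// Index of the function whose range contains byte_offset, or -1 if none.
// Requires the ranges to be sorted by offset, as module functions are.
int FindContainingFunction(const std::vector<FunctionRange>& functions,
                           uint32_t byte_offset) {
  int left = 0;                                    // inclusive
  int right = static_cast<int>(functions.size());  // exclusive
  if (right == 0) return -1;
  while (right - left > 1) {
    int mid = left + (right - left) / 2;
    if (functions[mid].offset <= byte_offset) {
      left = mid;
    } else {
      right = mid;
    }
  }
  const FunctionRange& func = functions[left];
  if (byte_offset < func.offset || byte_offset >= func.end_offset) return -1;
  return left;
}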
-bool WasmModuleObject::GetPositionInfo(uint32_t position,
- Script::PositionInfo* info) {
- if (script().source_mapping_url().IsString()) {
- if (module()->functions.size() == 0) return false;
- info->line = 0;
- info->column = position;
- info->line_start = module()->functions[0].code.offset();
- info->line_end = module()->functions.back().code.end_offset();
- return true;
- }
- int func_index = GetContainingFunction(position);
- if (func_index < 0) return false;
-
- const WasmFunction& function = module()->functions[func_index];
-
- info->line = func_index;
- info->column = position - function.code.offset();
- info->line_start = function.code.offset();
- info->line_end = function.code.end_offset();
- return true;
-}
-
Handle<WasmTableObject> WasmTableObject::New(Isolate* isolate,
wasm::ValueType type,
uint32_t initial, bool has_maximum,
@@ -1217,66 +1214,17 @@ void WasmIndirectFunctionTable::Resize(Isolate* isolate,
}
namespace {
-bool AdjustBufferPermissions(Isolate* isolate, Handle<JSArrayBuffer> old_buffer,
- size_t new_size) {
- if (new_size > old_buffer->allocation_length()) return false;
- void* old_mem_start = old_buffer->backing_store();
- size_t old_size = old_buffer->byte_length();
- if (old_size != new_size) {
- DCHECK_NOT_NULL(old_mem_start);
- DCHECK_GE(new_size, old_size);
- // If adjusting permissions fails, propagate error back to return
- // failure to grow.
- if (!i::SetPermissions(GetPlatformPageAllocator(), old_mem_start, new_size,
- PageAllocator::kReadWrite)) {
- return false;
- }
- reinterpret_cast<v8::Isolate*>(isolate)
- ->AdjustAmountOfExternalAllocatedMemory(new_size - old_size);
- }
- return true;
-}
-MaybeHandle<JSArrayBuffer> MemoryGrowBuffer(Isolate* isolate,
- Handle<JSArrayBuffer> old_buffer,
- size_t new_size) {
- CHECK_EQ(0, new_size % wasm::kWasmPageSize);
- // Reusing the backing store from externalized buffers causes problems with
- // Blink's array buffers. The connection between the two is lost, which can
- // lead to Blink not knowing about the other reference to the buffer and
- // freeing it too early.
- if (old_buffer->is_external() || new_size > old_buffer->allocation_length()) {
- // We couldn't reuse the old backing store, so create a new one and copy the
- // old contents in.
- Handle<JSArrayBuffer> new_buffer;
- if (!wasm::NewArrayBuffer(isolate, new_size).ToHandle(&new_buffer)) {
- return {};
- }
- void* old_mem_start = old_buffer->backing_store();
- size_t old_size = old_buffer->byte_length();
- if (old_size == 0) return new_buffer;
- memcpy(new_buffer->backing_store(), old_mem_start, old_size);
- DCHECK(old_buffer.is_null() || !old_buffer->is_shared());
- constexpr bool free_memory = true;
- i::wasm::DetachMemoryBuffer(isolate, old_buffer, free_memory);
- return new_buffer;
- } else {
- if (!AdjustBufferPermissions(isolate, old_buffer, new_size)) return {};
- // NOTE: We must allocate a new array buffer here because the spec
- // assumes that ArrayBuffers do not change size.
- void* backing_store = old_buffer->backing_store();
- bool is_external = old_buffer->is_external();
- // Disconnect buffer early so GC won't free it.
- i::wasm::DetachMemoryBuffer(isolate, old_buffer, false);
- Handle<JSArrayBuffer> new_buffer =
- wasm::SetupArrayBuffer(isolate, backing_store, new_size, is_external);
- return new_buffer;
- }
-}
-
-// May GC, because SetSpecializationMemInfoFrom may GC
void SetInstanceMemory(Handle<WasmInstanceObject> instance,
Handle<JSArrayBuffer> buffer) {
+ bool is_wasm_module = instance->module()->origin == wasm::kWasmOrigin;
+ bool use_trap_handler =
+ instance->module_object().native_module()->use_trap_handler();
+ // Wasm modules compiled to use the trap handler don't have bounds checks,
+ // so they must have a memory that has guard regions.
+ CHECK_IMPLIES(is_wasm_module && use_trap_handler,
+ buffer->GetBackingStore()->has_guard_regions());
+
instance->SetRawMemory(reinterpret_cast<byte*>(buffer->backing_store()),
buffer->byte_length());
#if DEBUG
@@ -1294,7 +1242,6 @@ void SetInstanceMemory(Handle<WasmInstanceObject> instance,
}
#endif
}
-
} // namespace
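The new CHECK_IMPLIES in SetInstanceMemory guards the invariant behind trap-handler-based bounds checking: code compiled without explicit bounds checks may only run against memories whose backing store reserves inaccessible guard pages, so an out-of-bounds access faults and the signal handler converts the fault into a wasm trap. The mechanism, sketched with POSIX calls (this is not V8's allocation path, which now lives behind BackingStore::AllocateWasmMemory):

#include <cstddef>
#include <sys/mman.h>

// Reserve accessible + guard bytes, then make only the prefix usable.
// Accesses past `accessible` land in PROT_NONE pages and fault.
void* AllocateWithGuard(size_t accessible, size_t guard) {
  void* base = mmap(nullptr, accessible + guard, PROT_NONE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (base == MAP_FAILED) return nullptr;
  if (mprotect(base, accessible, PROT_READ | PROT_WRITE) != 0) {
    munmap(base, accessible + guard);
    return nullptr;
  }
  return base;
}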
Handle<WasmMemoryObject> WasmMemoryObject::New(
@@ -1302,44 +1249,54 @@ Handle<WasmMemoryObject> WasmMemoryObject::New(
uint32_t maximum) {
Handle<JSArrayBuffer> buffer;
if (!maybe_buffer.ToHandle(&buffer)) {
- // If no buffer was provided, create a 0-length one.
- buffer = wasm::SetupArrayBuffer(isolate, nullptr, 0, false);
+ // If no buffer was provided, create a zero-length one.
+ auto backing_store =
+ BackingStore::AllocateWasmMemory(isolate, 0, 0, SharedFlag::kNotShared);
+ buffer = isolate->factory()->NewJSArrayBuffer(std::move(backing_store));
}
- // TODO(kschimpf): Do we need to add an argument that defines the
- // style of memory the user prefers (with/without trap handling), so
- // that the memory will match the style of the compiled wasm module.
- // See issue v8:7143
Handle<JSFunction> memory_ctor(
isolate->native_context()->wasm_memory_constructor(), isolate);
- auto memory_obj = Handle<WasmMemoryObject>::cast(
+ auto memory_object = Handle<WasmMemoryObject>::cast(
isolate->factory()->NewJSObject(memory_ctor, AllocationType::kOld));
- memory_obj->set_array_buffer(*buffer);
- memory_obj->set_maximum_pages(maximum);
+ memory_object->set_array_buffer(*buffer);
+ memory_object->set_maximum_pages(maximum);
- return memory_obj;
+ if (buffer->is_shared()) {
+ auto backing_store = buffer->GetBackingStore();
+ backing_store->AttachSharedWasmMemoryObject(isolate, memory_object);
+ }
+
+ return memory_object;
}
MaybeHandle<WasmMemoryObject> WasmMemoryObject::New(Isolate* isolate,
uint32_t initial,
uint32_t maximum,
- bool is_shared_memory) {
- Handle<JSArrayBuffer> buffer;
- size_t size = static_cast<size_t>(i::wasm::kWasmPageSize) *
- static_cast<size_t>(initial);
- if (is_shared_memory) {
- size_t max_size = static_cast<size_t>(i::wasm::kWasmPageSize) *
- static_cast<size_t>(maximum);
- if (!i::wasm::NewSharedArrayBuffer(isolate, size, max_size)
- .ToHandle(&buffer)) {
- return {};
- }
- } else {
- if (!i::wasm::NewArrayBuffer(isolate, size).ToHandle(&buffer)) {
- return {};
- }
+ SharedFlag shared) {
+ auto heuristic_maximum = maximum;
+#ifdef V8_TARGET_ARCH_32_BIT
+ // TODO(wasm): use a better heuristic for reserving more than the initial
+ // number of pages on 32-bit systems. Being too greedy in reserving capacity
+ // limits the number of memories that can be allocated, causing OOMs in many
+ // tests. For now, on 32-bit we never reserve more than initial, unless the
+ // memory is shared.
+ if (shared == SharedFlag::kNotShared || !FLAG_wasm_grow_shared_memory) {
+ heuristic_maximum = initial;
}
+#endif
+
+ auto backing_store = BackingStore::AllocateWasmMemory(
+ isolate, initial, heuristic_maximum, shared);
+
+ if (!backing_store) return {};
+
+ Handle<JSArrayBuffer> buffer =
+ (shared == SharedFlag::kShared)
+ ? isolate->factory()->NewJSSharedArrayBuffer(std::move(backing_store))
+ : isolate->factory()->NewJSArrayBuffer(std::move(backing_store));
+
return New(isolate, buffer, maximum);
}
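Both New overloads and Grow below count memory in wasm pages; a page is 64 KiB, so every byte length is derived by a single multiplication. For reference:

#include <cstddef>
#include <cstdint>

constexpr size_t kWasmPageSize = 64 * 1024;  // 65536 bytes, per the wasm spec

constexpr size_t PagesToBytes(uint64_t pages) {
  return static_cast<size_t>(pages * kWasmPageSize);
}
// PagesToBytes(1) == 65536; the 4 GiB wasm32 ceiling is 65536 pages.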
@@ -1383,11 +1340,11 @@ int32_t WasmMemoryObject::Grow(Isolate* isolate,
uint32_t pages) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "GrowMemory");
Handle<JSArrayBuffer> old_buffer(memory_object->array_buffer(), isolate);
- if (old_buffer->is_shared() && !FLAG_wasm_grow_shared_memory) return -1;
- auto* memory_tracker = isolate->wasm_engine()->memory_tracker();
- if (!memory_tracker->IsWasmMemoryGrowable(old_buffer)) return -1;
+ // Any buffer used as an asmjs memory cannot be detached, and
+ // therefore this memory cannot be grown.
+ if (old_buffer->is_asmjs_memory()) return -1;
- // Checks for maximum memory size, compute new size.
+ // Checks for maximum memory size.
uint32_t maximum_pages = wasm::max_mem_pages();
if (memory_object->has_maximum_pages()) {
maximum_pages = std::min(
@@ -1402,47 +1359,49 @@ int32_t WasmMemoryObject::Grow(Isolate* isolate,
(pages > wasm::max_mem_pages() - old_pages)) { // exceeds limit
return -1;
}
- size_t new_size =
- static_cast<size_t>(old_pages + pages) * wasm::kWasmPageSize;
+ std::shared_ptr<BackingStore> backing_store = old_buffer->GetBackingStore();
+ if (!backing_store) return -1;
+
+ // Compute new size.
+ size_t new_pages = old_pages + pages;
+ size_t new_byte_length = new_pages * wasm::kWasmPageSize;
- // Memory is grown, but the memory objects and instances are not yet updated.
- // Handle this in the interrupt handler so that it's safe for all the isolates
- // that share this buffer to be updated safely.
- Handle<JSArrayBuffer> new_buffer;
+ // Try to handle shared memory first.
if (old_buffer->is_shared()) {
- // Adjust protections for the buffer.
- if (!AdjustBufferPermissions(isolate, old_buffer, new_size)) {
- return -1;
- }
- void* backing_store = old_buffer->backing_store();
- if (memory_tracker->IsWasmSharedMemory(backing_store)) {
- // This memory is shared between different isolates.
- DCHECK(old_buffer->is_shared());
- // Update pending grow state, and trigger a grow interrupt on all the
- // isolates that share this buffer.
- memory_tracker->SetPendingUpdateOnGrow(old_buffer, new_size);
- // Handle interrupts for this isolate so that the instances with this
- // isolate are updated.
- isolate->stack_guard()->HandleInterrupts();
- // Failure to allocate or adjust permissions is already handled here, and
- // updates to instances are handled in the interrupt handler; safe to return.
- return static_cast<uint32_t>(old_size / wasm::kWasmPageSize);
+ if (FLAG_wasm_grow_shared_memory) {
+ // Shared memories can only be grown in place; no copying.
+ if (backing_store->GrowWasmMemoryInPlace(isolate, pages, maximum_pages)) {
+ BackingStore::BroadcastSharedWasmMemoryGrow(isolate, backing_store,
+ new_pages);
+ // Broadcasting the update should update this memory object too.
+ CHECK_NE(*old_buffer, memory_object->array_buffer());
+ CHECK_EQ(new_byte_length, memory_object->array_buffer().byte_length());
+ return static_cast<int32_t>(old_pages); // success
+ }
}
- // SharedArrayBuffer, but not shared across isolates. Setup a new buffer
- // with updated permissions and update the instances.
- new_buffer =
- wasm::SetupArrayBuffer(isolate, backing_store, new_size,
- old_buffer->is_external(), SharedFlag::kShared);
+ return -1;
+ }
+
+ // Try to grow non-shared memory in-place.
+ if (backing_store->GrowWasmMemoryInPlace(isolate, pages, maximum_pages)) {
+ // Detach old and create a new one with the grown backing store.
+ old_buffer->Detach(true);
+ Handle<JSArrayBuffer> new_buffer =
+ isolate->factory()->NewJSArrayBuffer(std::move(backing_store));
memory_object->update_instances(isolate, new_buffer);
- } else {
- if (!MemoryGrowBuffer(isolate, old_buffer, new_size)
- .ToHandle(&new_buffer)) {
- return -1;
- }
+ return static_cast<int32_t>(old_pages); // success
}
- // Update instances if any.
+ // Try allocating a new backing store and copying.
+ std::unique_ptr<BackingStore> new_backing_store =
+ backing_store->CopyWasmMemory(isolate, new_pages);
+ if (!new_backing_store) return -1;
+
+ // Detach old and create a new one with the new backing store.
+ old_buffer->Detach(true);
+ Handle<JSArrayBuffer> new_buffer =
+ isolate->factory()->NewJSArrayBuffer(std::move(new_backing_store));
memory_object->update_instances(isolate, new_buffer);
- return static_cast<uint32_t>(old_size / wasm::kWasmPageSize);
+ return static_cast<int32_t>(old_pages); // success
}
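The rewritten Grow reduces to three outcomes: a shared memory may only grow in place (the growth is then broadcast to every isolate sharing the backing store); a non-shared memory first tries to grow in place; and otherwise a fresh backing store is allocated and the contents copied, the old buffer being detached in both non-shared paths. A condensed, self-contained sketch of that control flow (Store and GrowSketch are illustrative, not V8 types):

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <memory>

constexpr size_t kPage = 64 * 1024;

struct Store {
  std::unique_ptr<uint8_t[]> data;
  size_t length = 0;    // currently accessible bytes
  size_t capacity = 0;  // reserved bytes; the in-place growth limit
};

// Returns the old page count on success (the value memory.grow yields),
// or -1 on failure.
int32_t GrowSketch(Store& s, bool is_shared, uint32_t delta_pages) {
  size_t old_pages = s.length / kPage;
  size_t new_length = s.length + size_t{delta_pages} * kPage;
  if (new_length <= s.capacity) {  // grow in place
    s.length = new_length;
    return static_cast<int32_t>(old_pages);
  }
  if (is_shared) return -1;  // shared memories must never move
  auto fresh = std::make_unique<uint8_t[]>(new_length);  // zero-filled
  if (s.length > 0) std::memcpy(fresh.get(), s.data.get(), s.length);
  s.data = std::move(fresh);
  s.length = s.capacity = new_length;
  return static_cast<int32_t>(old_pages);
}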
// static
@@ -1476,18 +1435,15 @@ MaybeHandle<WasmGlobalObject> WasmGlobalObject::New(
global_obj->set_tagged_buffer(*tagged_buffer);
} else {
DCHECK(maybe_tagged_buffer.is_null());
- Handle<JSArrayBuffer> untagged_buffer;
uint32_t type_size = wasm::ValueTypes::ElementSizeInBytes(type);
+
+ Handle<JSArrayBuffer> untagged_buffer;
if (!maybe_untagged_buffer.ToHandle(&untagged_buffer)) {
- // If no buffer was provided, create one long enough for the given type.
- untagged_buffer = isolate->factory()->NewJSArrayBuffer(
- SharedFlag::kNotShared, AllocationType::kOld);
-
- const bool initialize = true;
- if (!JSArrayBuffer::SetupAllocatingData(untagged_buffer, isolate,
- type_size, initialize)) {
- return {};
- }
+ MaybeHandle<JSArrayBuffer> result =
+ isolate->factory()->NewJSArrayBufferAndBackingStore(
+ offset + type_size, InitializedFlag::kZeroInitialized);
+
+ if (!result.ToHandle(&untagged_buffer)) return {};
}
// Check that the offset is in bounds.
@@ -1725,13 +1681,16 @@ Handle<WasmInstanceObject> WasmInstanceObject::New(
instance->set_jump_table_start(
module_object->native_module()->jump_table_start());
- // Insert the new instance into the module's weak list of instances.
+ // Insert the new instance into the script's weak list of instances. This list
+ // is used for breakpoints affecting all instances belonging to the script.
// TODO(mstarzinger): Allow to reuse holes in the {WeakArrayList} below.
- Handle<WeakArrayList> weak_instance_list(module_object->weak_instance_list(),
- isolate);
- weak_instance_list = WeakArrayList::AddToEnd(
- isolate, weak_instance_list, MaybeObjectHandle::Weak(instance));
- module_object->set_weak_instance_list(*weak_instance_list);
+ if (module_object->script().type() == Script::TYPE_WASM) {
+ Handle<WeakArrayList> weak_instance_list(
+ module_object->script().wasm_weak_instance_list(), isolate);
+ weak_instance_list = WeakArrayList::AddToEnd(
+ isolate, weak_instance_list, MaybeObjectHandle::Weak(instance));
+ module_object->script().set_wasm_weak_instance_list(*weak_instance_list);
+ }
InitDataSegmentArrays(instance, module_object);
InitElemSegmentArrays(instance, module_object);
@@ -2040,7 +1999,7 @@ bool WasmCapiFunction::IsSignatureEqual(const wasm::FunctionSig* sig) const {
}
// static
-Handle<JSReceiver> WasmExceptionPackage::New(
+Handle<WasmExceptionPackage> WasmExceptionPackage::New(
Isolate* isolate, Handle<WasmExceptionTag> exception_tag, int size) {
Handle<Object> exception = isolate->factory()->NewWasmRuntimeError(
MessageTemplate::kWasmExceptionError);
@@ -2055,37 +2014,31 @@ Handle<JSReceiver> WasmExceptionPackage::New(
values, StoreOrigin::kMaybeKeyed,
Just(ShouldThrow::kThrowOnError))
.is_null());
- return Handle<JSReceiver>::cast(exception);
+ return Handle<WasmExceptionPackage>::cast(exception);
}
// static
Handle<Object> WasmExceptionPackage::GetExceptionTag(
- Isolate* isolate, Handle<Object> exception_object) {
- if (exception_object->IsJSReceiver()) {
- Handle<JSReceiver> exception = Handle<JSReceiver>::cast(exception_object);
- Handle<Object> tag;
- if (JSReceiver::GetProperty(isolate, exception,
- isolate->factory()->wasm_exception_tag_symbol())
- .ToHandle(&tag)) {
- return tag;
- }
+ Isolate* isolate, Handle<WasmExceptionPackage> exception_package) {
+ Handle<Object> tag;
+ if (JSReceiver::GetProperty(isolate, exception_package,
+ isolate->factory()->wasm_exception_tag_symbol())
+ .ToHandle(&tag)) {
+ return tag;
}
return ReadOnlyRoots(isolate).undefined_value_handle();
}
// static
Handle<Object> WasmExceptionPackage::GetExceptionValues(
- Isolate* isolate, Handle<Object> exception_object) {
- if (exception_object->IsJSReceiver()) {
- Handle<JSReceiver> exception = Handle<JSReceiver>::cast(exception_object);
- Handle<Object> values;
- if (JSReceiver::GetProperty(
- isolate, exception,
- isolate->factory()->wasm_exception_values_symbol())
- .ToHandle(&values)) {
- DCHECK(values->IsFixedArray());
- return values;
- }
+ Isolate* isolate, Handle<WasmExceptionPackage> exception_package) {
+ Handle<Object> values;
+ if (JSReceiver::GetProperty(
+ isolate, exception_package,
+ isolate->factory()->wasm_exception_values_symbol())
+ .ToHandle(&values)) {
+ DCHECK(values->IsFixedArray());
+ return values;
}
return ReadOnlyRoots(isolate).undefined_value_handle();
}
diff --git a/deps/v8/src/wasm/wasm-objects.h b/deps/v8/src/wasm/wasm-objects.h
index c198a9bc63..23c13c4329 100644
--- a/deps/v8/src/wasm/wasm-objects.h
+++ b/deps/v8/src/wasm/wasm-objects.h
@@ -5,13 +5,13 @@
#ifndef V8_WASM_WASM_OBJECTS_H_
#define V8_WASM_WASM_OBJECTS_H_
+#include <memory>
+
#include "src/base/bits.h"
#include "src/codegen/signature.h"
#include "src/debug/debug.h"
-#include "src/debug/interface-types.h"
#include "src/heap/heap.h"
#include "src/objects/objects.h"
-#include "src/objects/script.h"
#include "src/wasm/value-type.h"
// Has to be the last include (doesn't have include guards)
@@ -47,6 +47,8 @@ class WasmJSFunction;
class WasmModuleObject;
class WasmIndirectFunctionTable;
+enum class SharedFlag : uint8_t;
+
template <class CppType>
class Managed;
@@ -124,14 +126,11 @@ class WasmModuleObject : public JSObject {
DECL_ACCESSORS(managed_native_module, Managed<wasm::NativeModule>)
DECL_ACCESSORS(export_wrappers, FixedArray)
DECL_ACCESSORS(script, Script)
- DECL_ACCESSORS(weak_instance_list, WeakArrayList)
DECL_OPTIONAL_ACCESSORS(asm_js_offset_table, ByteArray)
- DECL_OPTIONAL_ACCESSORS(breakpoint_infos, FixedArray)
inline wasm::NativeModule* native_module() const;
inline const std::shared_ptr<wasm::NativeModule>& shared_native_module()
const;
inline const wasm::WasmModule* module() const;
- inline void reset_breakpoint_infos();
// Dispatched behavior.
DECL_PRINTER(WasmModuleObject)
@@ -153,23 +152,28 @@ class WasmModuleObject : public JSObject {
Handle<Script> script, Handle<FixedArray> export_wrappers,
size_t code_size_estimate);
+ // TODO(mstarzinger): The below breakpoint handling methods taking a {Script}
+ // instead of a {WasmModuleObject} as first argument should be moved onto a
+ // separate {WasmScript} class; the implementation should then move to wasm-debug.cc.
+
// Set a breakpoint on the given byte position inside the given module.
// This will affect all live and future instances of the module.
// The passed position might be modified to point to the next breakable
// location inside the same function.
// If it points outside a function, or behind the last breakable location,
// this function returns false and does not set any breakpoint.
- V8_EXPORT_PRIVATE static bool SetBreakPoint(Handle<WasmModuleObject>,
- int* position,
+ V8_EXPORT_PRIVATE static bool SetBreakPoint(Handle<Script>, int* position,
Handle<BreakPoint> break_point);
+ // Remove a previously set breakpoint at the given byte position inside the
+ // given module. If this breakpoint is not found, this function returns false.
+ V8_EXPORT_PRIVATE static bool ClearBreakPoint(Handle<Script>, int position,
+ Handle<BreakPoint> break_point);
+
// Check whether this module was generated from asm.js source.
inline bool is_asm_js();
- static void AddBreakpoint(Handle<WasmModuleObject>, int position,
- Handle<BreakPoint> break_point);
-
- static void SetBreakpointsOnNewInstance(Handle<WasmModuleObject>,
+ static void SetBreakpointsOnNewInstance(Handle<Script>,
Handle<WasmInstanceObject>);
// Get the module name, if set. Returns an empty handle otherwise.
@@ -195,34 +199,12 @@ class WasmModuleObject : public JSObject {
// Does not allocate, hence gc-safe.
Vector<const uint8_t> GetRawFunctionName(uint32_t func_index);
- // Return the byte offset of the function identified by the given index.
- // The offset will be relative to the start of the module bytes.
- // Returns -1 if the function index is invalid.
- int GetFunctionOffset(uint32_t func_index);
-
- // Returns the function containing the given byte offset.
- // Returns -1 if the byte offset is not contained in any function of this
- // module.
- int GetContainingFunction(uint32_t byte_offset);
-
- // Translate from byte offset in the module to function number and byte offset
- // within that function, encoded as line and column in the position info.
- // Returns true if the position is valid inside this module, false otherwise.
- bool GetPositionInfo(uint32_t position, Script::PositionInfo* info);
-
// Get the source position from a given function index and byte offset,
// for either asm.js or pure Wasm modules.
static int GetSourcePosition(Handle<WasmModuleObject>, uint32_t func_index,
uint32_t byte_offset,
bool is_at_number_conversion);
- // Compute the disassembly of a wasm function.
- // Returns the disassembly string and a list of <byte_offset, line, column>
- // entries, mapping wasm byte offsets to line and column in the disassembly.
- // The list is guaranteed to be ordered by the byte_offset.
- // Returns an empty string and empty vector if the function index is invalid.
- V8_EXPORT_PRIVATE debug::WasmDisassembly DisassembleFunction(int func_index);
-
// Extract a portion of the wire bytes as UTF-8 string.
// Returns a null handle if the respective bytes do not form a valid UTF-8
// string.
@@ -233,17 +215,24 @@ class WasmModuleObject : public JSObject {
wasm::WireBytesRef ref);
// Get a list of all possible breakpoints within a given range of this module.
- V8_EXPORT_PRIVATE bool GetPossibleBreakpoints(
- const debug::Location& start, const debug::Location& end,
- std::vector<debug::BreakLocation>* locations);
+ V8_EXPORT_PRIVATE static bool GetPossibleBreakpoints(
+ wasm::NativeModule* native_module, const debug::Location& start,
+ const debug::Location& end, std::vector<debug::BreakLocation>* locations);
// Return an empty handle if no breakpoint is hit at that location, or a
// FixedArray with all hit breakpoint objects.
- static MaybeHandle<FixedArray> CheckBreakPoints(Isolate*,
- Handle<WasmModuleObject>,
+ static MaybeHandle<FixedArray> CheckBreakPoints(Isolate*, Handle<Script>,
int position);
OBJECT_CONSTRUCTORS(WasmModuleObject, JSObject);
+
+ private:
+ // Helper functions that update the breakpoint info list.
+ static void AddBreakpointToInfo(Handle<Script>, int position,
+ Handle<BreakPoint> break_point);
+
+ static bool RemoveBreakpointFromInfo(Handle<Script>, int position,
+ Handle<BreakPoint> break_point);
};
// Representation of a WebAssembly.Table JavaScript-level object.
@@ -354,9 +343,10 @@ class WasmMemoryObject : public JSObject {
V8_EXPORT_PRIVATE static Handle<WasmMemoryObject> New(
Isolate* isolate, MaybeHandle<JSArrayBuffer> buffer, uint32_t maximum);
- V8_EXPORT_PRIVATE static MaybeHandle<WasmMemoryObject> New(
- Isolate* isolate, uint32_t initial, uint32_t maximum,
- bool is_shared_memory);
+ V8_EXPORT_PRIVATE static MaybeHandle<WasmMemoryObject> New(Isolate* isolate,
+ uint32_t initial,
+ uint32_t maximum,
+ SharedFlag shared);
void update_instances(Isolate* isolate, Handle<JSArrayBuffer> buffer);
@@ -645,20 +635,22 @@ class WasmExceptionObject : public JSObject {
// A Wasm exception that has been thrown out of Wasm code.
class WasmExceptionPackage : public JSReceiver {
public:
- // TODO(mstarzinger): Ideally this interface would use {WasmExceptionPackage}
- // instead of {JSReceiver} throughout. For now a type-check implies doing a
- // property lookup however, which would result in casts being handlified.
- static Handle<JSReceiver> New(Isolate* isolate,
- Handle<WasmExceptionTag> exception_tag,
- int encoded_size);
+ static Handle<WasmExceptionPackage> New(
+ Isolate* isolate, Handle<WasmExceptionTag> exception_tag,
+ int encoded_size);
// The below getters return {undefined} in case the given exception package
// does not carry the requested values (i.e. is of a different type).
- static Handle<Object> GetExceptionTag(Isolate*, Handle<Object> exception);
- static Handle<Object> GetExceptionValues(Isolate*, Handle<Object> exception);
+ static Handle<Object> GetExceptionTag(
+ Isolate* isolate, Handle<WasmExceptionPackage> exception_package);
+ static Handle<Object> GetExceptionValues(
+ Isolate* isolate, Handle<WasmExceptionPackage> exception_package);
// Determines the size of the array holding all encoded exception values.
static uint32_t GetEncodedSize(const wasm::WasmException* exception);
+
+ DECL_CAST(WasmExceptionPackage)
+ OBJECT_CONSTRUCTORS(WasmExceptionPackage, JSReceiver);
};
// A Wasm function that is wrapped and exported to JavaScript.
@@ -801,7 +793,7 @@ class WasmExportedFunctionData : public Struct {
DECL_PRINTER(WasmExportedFunctionData)
DECL_VERIFIER(WasmExportedFunctionData)
-// Layout description.
+ // Layout description.
DEFINE_FIELD_OFFSET_CONSTANTS(
HeapObject::kHeaderSize,
TORQUE_GENERATED_WASM_EXPORTED_FUNCTION_DATA_FIELDS)
@@ -828,7 +820,7 @@ class WasmJSFunctionData : public Struct {
// Layout description.
DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
- TORQUE_GENERATED_WASM_JSFUNCTION_DATA_FIELDS)
+ TORQUE_GENERATED_WASM_JS_FUNCTION_DATA_FIELDS)
OBJECT_CONSTRUCTORS(WasmJSFunctionData, Struct);
};
@@ -838,6 +830,7 @@ class WasmDebugInfo : public Struct {
NEVER_READ_ONLY_SPACE
DECL_ACCESSORS(wasm_instance, WasmInstanceObject)
DECL_ACCESSORS(interpreter_handle, Object) // Foreign or undefined
+ DECL_ACCESSORS(interpreter_reference_stack, Cell)
DECL_OPTIONAL_ACCESSORS(locals_names, FixedArray)
DECL_OPTIONAL_ACCESSORS(c_wasm_entries, FixedArray)
DECL_OPTIONAL_ACCESSORS(c_wasm_entry_map, Managed<wasm::SignatureMap>)
@@ -848,7 +841,7 @@ class WasmDebugInfo : public Struct {
DECL_PRINTER(WasmDebugInfo)
DECL_VERIFIER(WasmDebugInfo)
-// Layout description.
+ // Layout description.
DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
TORQUE_GENERATED_WASM_DEBUG_INFO_FIELDS)
@@ -867,6 +860,11 @@ class WasmDebugInfo : public Struct {
V8_EXPORT_PRIVATE static void SetBreakpoint(Handle<WasmDebugInfo>,
int func_index, int offset);
+ // Clear a previously set breakpoint in the given function at the given byte
+ // offset within that function.
+ V8_EXPORT_PRIVATE static void ClearBreakpoint(Handle<WasmDebugInfo>,
+ int func_index, int offset);
+
// Make a set of functions always execute in the interpreter without setting
// breakpoints.
V8_EXPORT_PRIVATE static void RedirectToInterpreter(Handle<WasmDebugInfo>,
diff --git a/deps/v8/src/wasm/wasm-opcodes.cc b/deps/v8/src/wasm/wasm-opcodes.cc
index 879da1445b..3bd76ae43b 100644
--- a/deps/v8/src/wasm/wasm-opcodes.cc
+++ b/deps/v8/src/wasm/wasm-opcodes.cc
@@ -147,11 +147,11 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_OP(Drop, "drop")
CASE_OP(Select, "select")
CASE_OP(SelectWithType, "select")
- CASE_OP(GetLocal, "local.get")
- CASE_OP(SetLocal, "local.set")
- CASE_OP(TeeLocal, "local.tee")
- CASE_OP(GetGlobal, "global.get")
- CASE_OP(SetGlobal, "global.set")
+ CASE_OP(LocalGet, "local.get")
+ CASE_OP(LocalSet, "local.set")
+ CASE_OP(LocalTee, "local.tee")
+ CASE_OP(GlobalGet, "global.get")
+ CASE_OP(GlobalSet, "global.set")
CASE_OP(TableGet, "table.get")
CASE_OP(TableSet, "table.set")
CASE_ALL_OP(Const, "const")
@@ -222,6 +222,8 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_SIMD_OP(Splat, "splat")
CASE_SIMD_OP(Neg, "neg")
CASE_F64x2_OP(Neg, "neg")
+ CASE_F64x2_OP(Sqrt, "sqrt")
+ CASE_F32x4_OP(Sqrt, "sqrt")
CASE_I64x2_OP(Neg, "neg")
CASE_SIMD_OP(Eq, "eq")
CASE_F64x2_OP(Eq, "eq")
@@ -272,7 +274,9 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_F32x4_OP(ReplaceLane, "replace_lane")
CASE_I64x2_OP(ExtractLane, "extract_lane")
CASE_I64x2_OP(ReplaceLane, "replace_lane")
- CASE_SIMDI_OP(ExtractLane, "extract_lane")
+ CASE_I32x4_OP(ExtractLane, "extract_lane")
+ CASE_SIGN_OP(I16x8, ExtractLane, "extract_lane")
+ CASE_SIGN_OP(I8x16, ExtractLane, "extract_lane")
CASE_SIMDI_OP(ReplaceLane, "replace_lane")
CASE_SIGN_OP(SIMDI, Min, "min")
CASE_SIGN_OP(I64x2, Min, "min")
@@ -302,6 +306,7 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_S128_OP(Xor, "xor")
CASE_S128_OP(Not, "not")
CASE_S128_OP(Select, "select")
+ CASE_S8x16_OP(Swizzle, "swizzle")
CASE_S8x16_OP(Shuffle, "shuffle")
CASE_S1x2_OP(AnyTrue, "any_true")
CASE_S1x2_OP(AllTrue, "all_true")
@@ -311,6 +316,10 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_S1x8_OP(AllTrue, "all_true")
CASE_S1x16_OP(AnyTrue, "any_true")
CASE_S1x16_OP(AllTrue, "all_true")
+ CASE_F64x2_OP(Qfma, "qfma")
+ CASE_F64x2_OP(Qfms, "qfms")
+ CASE_F32x4_OP(Qfma, "qfma")
+ CASE_F32x4_OP(Qfms, "qfms")
// Atomic operations.
CASE_OP(AtomicNotify, "atomic.notify")
@@ -489,7 +498,7 @@ constexpr const FunctionSig* kCachedSigs[] = {
// gcc 4.7 - 4.9 has a bug which causes the constexpr attribute to get lost when
// passing functions (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=52892). Hence
// encapsulate these constexpr functions in functors.
-// TODO(clemensh): Remove this once we require gcc >= 5.0.
+// TODO(clemensb): Remove this once we require gcc >= 5.0.
struct GetShortOpcodeSigIndex {
constexpr WasmOpcodeSig operator()(byte opcode) const {
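The GetLocal/SetGlobal-style identifiers are renamed throughout to match the WebAssembly text format's current mnemonics (get_local became local.get, and so on). Only the C++ names change; the printed mnemonics and the binary opcodes, visible in the wasm-opcodes.h hunk below, stay the same:

#include <cstdint>

struct Rename {
  const char* old_name;  // previous V8 identifier
  const char* new_name;  // identifier after this commit
  uint8_t opcode;        // unchanged binary encoding
};

constexpr Rename kLocalGlobalRenames[] = {
    {"GetLocal", "LocalGet", 0x20}, {"SetLocal", "LocalSet", 0x21},
    {"TeeLocal", "LocalTee", 0x22}, {"GetGlobal", "GlobalGet", 0x23},
    {"SetGlobal", "GlobalSet", 0x24},
};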
diff --git a/deps/v8/src/wasm/wasm-opcodes.h b/deps/v8/src/wasm/wasm-opcodes.h
index 0b19d7452c..f37f7f0520 100644
--- a/deps/v8/src/wasm/wasm-opcodes.h
+++ b/deps/v8/src/wasm/wasm-opcodes.h
@@ -48,11 +48,11 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, const WasmFeatures&);
V(Drop, 0x1a, _) \
V(Select, 0x1b, _) \
V(SelectWithType, 0x1c, _) \
- V(GetLocal, 0x20, _) \
- V(SetLocal, 0x21, _) \
- V(TeeLocal, 0x22, _) \
- V(GetGlobal, 0x23, _) \
- V(SetGlobal, 0x24, _) \
+ V(LocalGet, 0x20, _) \
+ V(LocalSet, 0x21, _) \
+ V(LocalTee, 0x22, _) \
+ V(GlobalGet, 0x23, _) \
+ V(GlobalSet, 0x24, _) \
V(TableGet, 0x25, _) \
V(TableSet, 0x26, _) \
V(I32Const, 0x41, _) \
@@ -396,8 +396,9 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, const WasmFeatures&);
V(I64x2MaxU, 0xfd91, s_ss) \
V(F32x4Abs, 0xfd95, s_s) \
V(F32x4Neg, 0xfd96, s_s) \
- V(F32x4RecipApprox, 0xfd98, s_s) \
- V(F32x4RecipSqrtApprox, 0xfd99, s_s) \
+ V(F32x4Sqrt, 0xfd97, s_s) \
+ V(F32x4Qfma, 0xfd98, s_sss) \
+ V(F32x4Qfms, 0xfd99, s_sss) \
V(F32x4Add, 0xfd9a, s_ss) \
V(F32x4Sub, 0xfd9b, s_ss) \
V(F32x4Mul, 0xfd9c, s_ss) \
@@ -406,6 +407,9 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, const WasmFeatures&);
V(F32x4Max, 0xfd9f, s_ss) \
V(F64x2Abs, 0xfda0, s_s) \
V(F64x2Neg, 0xfda1, s_s) \
+ V(F64x2Sqrt, 0xfda2, s_s) \
+ V(F64x2Qfma, 0xfda3, s_sss) \
+ V(F64x2Qfms, 0xfda4, s_sss) \
V(F64x2Add, 0xfda5, s_ss) \
V(F64x2Sub, 0xfda6, s_ss) \
V(F64x2Mul, 0xfda7, s_ss) \
@@ -416,6 +420,7 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, const WasmFeatures&);
V(I32x4UConvertF32x4, 0xfdac, s_s) \
V(F32x4SConvertI32x4, 0xfdaf, s_s) \
V(F32x4UConvertI32x4, 0xfdb0, s_s) \
+ V(S8x16Swizzle, 0xfdc0, s_ss) \
V(I8x16SConvertI16x8, 0xfdc6, s_ss) \
V(I8x16UConvertI16x8, 0xfdc7, s_ss) \
V(I16x8SConvertI32x4, 0xfdc8, s_ss) \
@@ -430,11 +435,15 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, const WasmFeatures&);
V(I32x4UConvertI16x8High, 0xfdd1, s_s) \
V(I16x8AddHoriz, 0xfdbd, s_ss) \
V(I32x4AddHoriz, 0xfdbe, s_ss) \
- V(F32x4AddHoriz, 0xfdbf, s_ss)
+ V(F32x4AddHoriz, 0xfdbf, s_ss) \
+ V(F32x4RecipApprox, 0xfde0, s_s) \
+ V(F32x4RecipSqrtApprox, 0xfde1, s_s)
#define FOREACH_SIMD_1_OPERAND_1_PARAM_OPCODE(V) \
- V(I8x16ExtractLane, 0xfd05, _) \
- V(I16x8ExtractLane, 0xfd09, _) \
+ V(I8x16ExtractLaneS, 0xfd05, _) \
+ V(I8x16ExtractLaneU, 0xfd06, _) \
+ V(I16x8ExtractLaneS, 0xfd09, _) \
+ V(I16x8ExtractLaneU, 0xfd0a, _) \
V(I32x4ExtractLane, 0xfd0d, _) \
V(I64x2ExtractLane, 0xfd10, _) \
V(F32x4ExtractLane, 0xfd13, _) \
diff --git a/deps/v8/src/wasm/wasm-serialization.cc b/deps/v8/src/wasm/wasm-serialization.cc
index 81460b9fe2..f1fa76b98a 100644
--- a/deps/v8/src/wasm/wasm-serialization.cc
+++ b/deps/v8/src/wasm/wasm-serialization.cc
@@ -289,9 +289,6 @@ class V8_EXPORT_PRIVATE NativeModuleSerializer {
Vector<WasmCode* const> code_table_;
bool write_called_;
- // Reverse lookup tables for embedded addresses.
- std::map<Address, uint32_t> wasm_stub_targets_lookup_;
-
DISALLOW_COPY_AND_ASSIGN(NativeModuleSerializer);
};
@@ -301,11 +298,6 @@ NativeModuleSerializer::NativeModuleSerializer(
DCHECK_NOT_NULL(native_module_);
// TODO(mtrofin): persist the export wrappers. Ideally, we'd only persist
// the unique ones, i.e. the cache.
- for (uint32_t i = 0; i < WasmCode::kRuntimeStubCount; ++i) {
- Address addr = native_module_->runtime_stub_entry(
- static_cast<WasmCode::RuntimeStubId>(i));
- wasm_stub_targets_lookup_.insert(std::make_pair(addr, i));
- }
}
size_t NativeModuleSerializer::MeasureCode(const WasmCode* code) const {
@@ -367,7 +359,7 @@ void NativeModuleSerializer::WriteCode(const WasmCode* code, Writer* writer) {
writer->WriteVector(code->source_positions());
writer->WriteVector(Vector<byte>::cast(code->protected_instructions()));
#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_ARM || \
- V8_TARGET_ARCH_PPC
+ V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_S390X
// On platforms that don't support misaligned word stores, copy to an aligned
// buffer if necessary so we can relocate the serialized code.
std::unique_ptr<byte[]> aligned_buffer;
@@ -400,10 +392,9 @@ void NativeModuleSerializer::WriteCode(const WasmCode* code, Writer* writer) {
SetWasmCalleeTag(iter.rinfo(), tag);
} break;
case RelocInfo::WASM_STUB_CALL: {
- Address orig_target = orig_iter.rinfo()->wasm_stub_call_address();
- auto stub_iter = wasm_stub_targets_lookup_.find(orig_target);
- DCHECK(stub_iter != wasm_stub_targets_lookup_.end());
- uint32_t tag = stub_iter->second;
+ Address target = orig_iter.rinfo()->wasm_stub_call_address();
+ uint32_t tag = native_module_->GetRuntimeStubId(target);
+ DCHECK_GT(WasmCode::kRuntimeStubCount, tag);
SetWasmCalleeTag(iter.rinfo(), tag);
} break;
case RelocInfo::EXTERNAL_REFERENCE: {
@@ -550,6 +541,8 @@ bool NativeModuleDeserializer::ReadCode(uint32_t fn_index, Reader* reader) {
RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) |
RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED);
+ auto jump_tables_ref =
+ native_module_->FindJumpTablesForCode(code->instruction_start());
for (RelocIterator iter(code->instructions(), code->reloc_info(),
code->constant_pool(), mask);
!iter.done(); iter.next()) {
@@ -557,15 +550,16 @@ bool NativeModuleDeserializer::ReadCode(uint32_t fn_index, Reader* reader) {
switch (mode) {
case RelocInfo::WASM_CALL: {
uint32_t tag = GetWasmCalleeTag(iter.rinfo());
- Address target = native_module_->GetCallTargetForFunction(tag);
+ Address target =
+ native_module_->GetNearCallTargetForFunction(tag, jump_tables_ref);
iter.rinfo()->set_wasm_call_address(target, SKIP_ICACHE_FLUSH);
break;
}
case RelocInfo::WASM_STUB_CALL: {
uint32_t tag = GetWasmCalleeTag(iter.rinfo());
DCHECK_LT(tag, WasmCode::kRuntimeStubCount);
- Address target = native_module_->runtime_stub_entry(
- static_cast<WasmCode::RuntimeStubId>(tag));
+ Address target = native_module_->GetNearRuntimeStubEntry(
+ static_cast<WasmCode::RuntimeStubId>(tag), jump_tables_ref);
iter.rinfo()->set_wasm_stub_call_address(target, SKIP_ICACHE_FLUSH);
break;
}
@@ -628,7 +622,6 @@ MaybeHandle<WasmModuleObject> DeserializeNativeModule(
auto shared_native_module = isolate->wasm_engine()->NewNativeModule(
isolate, enabled_features, std::move(decode_result.value()));
shared_native_module->SetWireBytes(OwnedVector<uint8_t>::Of(wire_bytes_vec));
- shared_native_module->SetRuntimeStubs(isolate);
Handle<FixedArray> export_wrappers;
CompileJsToWasmWrappers(isolate, shared_native_module->module(),
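The serializer change drops the locally built reverse map: stub-call targets are now translated to small integer tags via NativeModule::GetRuntimeStubId, and the deserializer maps tags back to concrete entries (now near entries resolved through FindJumpTablesForCode). The tag round trip, reduced to its essentials (illustrative code, not the V8 implementation):

#include <cstdint>
#include <vector>

// Serializing: find the stub's index; the tag is position-independent.
uint32_t TargetToTag(const std::vector<uintptr_t>& stub_entries,
                     uintptr_t target) {
  for (uint32_t i = 0; i < stub_entries.size(); ++i) {
    if (stub_entries[i] == target) return i;
  }
  return UINT32_MAX;  // not a runtime stub target
}

// Deserializing: the tag indexes the freshly created stub table.
uintptr_t TagToTarget(const std::vector<uintptr_t>& stub_entries,
                      uint32_t tag) {
  return stub_entries[tag];  // tag < stub_entries.size() by construction
}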
diff --git a/deps/v8/src/wasm/wasm-text.cc b/deps/v8/src/wasm/wasm-text.cc
index 44abd71445..fedd37ccd3 100644
--- a/deps/v8/src/wasm/wasm-text.cc
+++ b/deps/v8/src/wasm/wasm-text.cc
@@ -154,9 +154,9 @@ void PrintWasmText(const WasmModule* module, const ModuleWireBytes& wire_bytes,
os << WasmOpcodes::OpcodeName(opcode) << ' ' << imm.index;
break;
}
- case kExprGetLocal:
- case kExprSetLocal:
- case kExprTeeLocal: {
+ case kExprLocalGet:
+ case kExprLocalSet:
+ case kExprLocalTee: {
LocalIndexImmediate<Decoder::kNoValidate> imm(&i, i.pc());
os << WasmOpcodes::OpcodeName(opcode) << ' ' << imm.index;
break;
@@ -166,8 +166,8 @@ void PrintWasmText(const WasmModule* module, const ModuleWireBytes& wire_bytes,
os << WasmOpcodes::OpcodeName(opcode) << ' ' << imm.index;
break;
}
- case kExprGetGlobal:
- case kExprSetGlobal: {
+ case kExprGlobalGet:
+ case kExprGlobalSet: {
GlobalIndexImmediate<Decoder::kNoValidate> imm(&i, i.pc());
os << WasmOpcodes::OpcodeName(opcode) << ' ' << imm.index;
break;
@@ -304,8 +304,10 @@ void PrintWasmText(const WasmModule* module, const ModuleWireBytes& wire_bytes,
break;
}
- case kExprI8x16ExtractLane:
- case kExprI16x8ExtractLane:
+ case kExprI8x16ExtractLaneS:
+ case kExprI8x16ExtractLaneU:
+ case kExprI16x8ExtractLaneS:
+ case kExprI16x8ExtractLaneU:
case kExprI32x4ExtractLane:
case kExprI64x2ExtractLane:
case kExprF32x4ExtractLane:
diff --git a/deps/v8/src/zone/OWNERS b/deps/v8/src/zone/OWNERS
index 01c515ab90..e4e653da5b 100644
--- a/deps/v8/src/zone/OWNERS
+++ b/deps/v8/src/zone/OWNERS
@@ -1,3 +1,3 @@
-clemensh@chromium.org
+clemensb@chromium.org
sigurds@chromium.org
verwaest@chromium.org
diff --git a/deps/v8/test/benchmarks/benchmarks.status b/deps/v8/test/benchmarks/benchmarks.status
index d176e35312..4941ac2be6 100644
--- a/deps/v8/test/benchmarks/benchmarks.status
+++ b/deps/v8/test/benchmarks/benchmarks.status
@@ -35,6 +35,7 @@
# Slow tests.
'kraken/imaging-gaussian-blur': [PASS, SLOW],
'octane/typescript': [PASS, SLOW],
+ 'octane/box2d': [PASS, SLOW],
}], # ALWAYS
# Slow variants.
@@ -42,7 +43,6 @@
# Slow tests.
'kraken/ai-astar': [PASS, SLOW],
'kraken/imaging-desaturate': [PASS, SLOW],
- 'octane/box2d': [PASS, SLOW],
'octane/code-load': [PASS, SLOW],
'octane/crypto': [PASS, SLOW],
'octane/gbemu-part1': [PASS, SLOW],
@@ -64,6 +64,12 @@
'octane/zlib': [SKIP],
}],
+['variant == stress_incremental_marking', {
+ # Too slow for stress_incremental_marking.
+ 'octane/box2d': [SKIP],
+ 'octane/typescript': [SKIP],
+}],
+
['gc_fuzzer', {
# Too slow for gc fuzzing.
'octane/earley-boyer' : [PASS, SLOW, ['mode == debug', SKIP]],
diff --git a/deps/v8/test/cctest/BUILD.gn b/deps/v8/test/cctest/BUILD.gn
index d0934c9977..6d6bcdcd67 100644
--- a/deps/v8/test/cctest/BUILD.gn
+++ b/deps/v8/test/cctest/BUILD.gn
@@ -161,6 +161,7 @@ v8_source_set("cctest_sources") {
"interpreter/test-source-positions.cc",
"libplatform/test-tracing.cc",
"libsampler/test-sampler.cc",
+ "manually-externalized-buffer.h",
"parsing/test-parse-decision.cc",
"parsing/test-preparser.cc",
"parsing/test-scanner-streams.cc",
@@ -185,6 +186,7 @@ v8_source_set("cctest_sources") {
"test-api.h",
"test-array-list.cc",
"test-atomicops.cc",
+ "test-backing-store.cc",
"test-bignum-dtoa.cc",
"test-bignum.cc",
"test-bit-vector.cc",
@@ -267,6 +269,7 @@ v8_source_set("cctest_sources") {
"unicode-helpers.cc",
"unicode-helpers.h",
"wasm/test-c-wasm-entry.cc",
+ "wasm/test-grow-memory.cc",
"wasm/test-jump-table-assembler.cc",
"wasm/test-run-wasm-64.cc",
"wasm/test-run-wasm-asmjs.cc",
diff --git a/deps/v8/test/cctest/DEPS b/deps/v8/test/cctest/DEPS
index 7373012870..06ae6f87f6 100644
--- a/deps/v8/test/cctest/DEPS
+++ b/deps/v8/test/cctest/DEPS
@@ -1,6 +1,7 @@
include_rules = [
+ "+perfetto",
+ "+protos/perfetto",
"+src",
"+tools",
"+torque-generated",
- "+perfetto",
-]
\ No newline at end of file
+]
diff --git a/deps/v8/test/cctest/cctest.cc b/deps/v8/test/cctest/cctest.cc
index dcfca2b2df..6adf2041cf 100644
--- a/deps/v8/test/cctest/cctest.cc
+++ b/deps/v8/test/cctest/cctest.cc
@@ -327,9 +327,9 @@ int main(int argc, char* argv[]) {
CcTest::set_array_buffer_allocator(
v8::ArrayBuffer::Allocator::NewDefaultAllocator());
- v8::RegisterExtension(v8::base::make_unique<i::PrintExtension>());
- v8::RegisterExtension(v8::base::make_unique<i::ProfilerExtension>());
- v8::RegisterExtension(v8::base::make_unique<i::TraceExtension>());
+ v8::RegisterExtension(std::make_unique<i::PrintExtension>());
+ v8::RegisterExtension(std::make_unique<i::ProfilerExtension>());
+ v8::RegisterExtension(std::make_unique<i::TraceExtension>());
int tests_run = 0;
bool print_run_count = true;
diff --git a/deps/v8/test/cctest/cctest.status b/deps/v8/test/cctest/cctest.status
index b1a7b5c101..430ca647b7 100644
--- a/deps/v8/test/cctest/cctest.status
+++ b/deps/v8/test/cctest/cctest.status
@@ -151,6 +151,8 @@
# Pass but take too long with the simulator.
'test-api/ExternalArrays': [PASS, SLOW],
'test-api/Threading*': [SKIP],
+ 'test-cpu-profiler/MultipleIsolates': [PASS, SLOW],
+ 'test-debug/DebugBreakStackTrace': [PASS, SLOW],
}], # 'arch == arm64 and simulator_run'
##############################################################################
@@ -227,6 +229,7 @@
# operations.
'test-run-wasm-atomics/*': [SKIP],
'test-run-wasm-atomics64/*': [SKIP],
+ 'test-regexp/Peephole*': [SKIP],
}], # 'byteorder == big'
##############################################################################
@@ -259,6 +262,7 @@
['arch == arm and simulator_run', {
# Pass but take too long with the simulator.
'test-api/Threading*': [SKIP],
+ 'test-cpu-profiler/MultipleIsolates': [PASS, SLOW],
}], # 'arch == arm and simulator_run'
##############################################################################
@@ -471,8 +475,10 @@
'test-api/WasmI32AtomicWaitCallback': [SKIP],
'test-api/WasmI64AtomicWaitCallback': [SKIP],
'test-api/WasmStreaming*': [SKIP],
+ 'test-backing-store/Run_WasmModule_Buffer_Externalized_Regression_UseAfterFree': [SKIP],
'test-c-wasm-entry/*': [SKIP],
'test-jump-table-assembler/*': [SKIP],
+ 'test-grow-memory/*': [SKIP],
'test-run-wasm-64/*': [SKIP],
'test-run-wasm-asmjs/*': [SKIP],
'test-run-wasm-atomics64/*': [SKIP],
@@ -610,4 +616,12 @@
'*': [SKIP],
}], # variant == jitless and not embedded_builtins
+##############################################################################
+['variant == turboprop', {
+ # Require inlining.
+ 'test-cpu-profiler/DeoptAtFirstLevelInlinedSource': [SKIP],
+ 'test-cpu-profiler/DeoptAtSecondLevelInlinedSource': [SKIP],
+ 'test-cpu-profiler/DeoptUntrackedFunction': [SKIP],
+}], # variant == turboprop
+
]
diff --git a/deps/v8/test/cctest/compiler/serializer-tester.cc b/deps/v8/test/cctest/compiler/serializer-tester.cc
index 338d1bcbfb..01979a2201 100644
--- a/deps/v8/test/cctest/compiler/serializer-tester.cc
+++ b/deps/v8/test/cctest/compiler/serializer-tester.cc
@@ -52,17 +52,19 @@ SerializerTester::SerializerTester(const char* source)
TEST(SerializeEmptyFunction) {
SerializerTester tester(
"function f() {}; %EnsureFeedbackVectorForFunction(f); return f;");
- CHECK(tester.function().IsSerializedForCompilation());
+ JSFunctionRef function = tester.function();
+ CHECK(
+ function.shared().IsSerializedForCompilation(function.feedback_vector()));
}
-// This helper function allows for testing weather an inlinee candidate
+// This helper function allows for testing whether an inlinee candidate
// was properly serialized. It expects that the top-level function (that is
// run through the SerializerTester) will return its inlinee candidate.
void CheckForSerializedInlinee(const char* source, int argc = 0,
Handle<Object> argv[] = {}) {
SerializerTester tester(source);
JSFunctionRef f = tester.function();
- CHECK(f.IsSerializedForCompilation());
+ CHECK(f.shared().IsSerializedForCompilation(f.feedback_vector()));
MaybeHandle<Object> g_obj = Execution::Call(
tester.isolate(), tester.function().object(),
diff --git a/deps/v8/test/cctest/compiler/serializer-tester.h b/deps/v8/test/cctest/compiler/serializer-tester.h
index 7c8016ef81..fe5f93895f 100644
--- a/deps/v8/test/cctest/compiler/serializer-tester.h
+++ b/deps/v8/test/cctest/compiler/serializer-tester.h
@@ -5,6 +5,8 @@
#ifndef V8_CCTEST_COMPILER_SERIALIZER_TESTER_H_
#define V8_CCTEST_COMPILER_SERIALIZER_TESTER_H_
+#include <memory>
+
#include "src/compiler/js-heap-broker.h"
#include "test/cctest/cctest.h"
diff --git a/deps/v8/test/cctest/compiler/test-code-assembler.cc b/deps/v8/test/cctest/compiler/test-code-assembler.cc
index 9e6318ee88..56628ffde4 100644
--- a/deps/v8/test/cctest/compiler/test-code-assembler.cc
+++ b/deps/v8/test/cctest/compiler/test-code-assembler.cc
@@ -18,38 +18,33 @@ namespace compiler {
namespace {
-using Label = CodeAssemblerLabel;
using Variable = CodeAssemblerVariable;
-Node* SmiTag(CodeAssembler& m, // NOLINT(runtime/references)
- Node* value) {
+Node* SmiTag(CodeAssembler* m, Node* value) {
int32_t constant_value;
- if (m.ToInt32Constant(value, &constant_value) &&
+ if (m->ToInt32Constant(value, &constant_value) &&
Smi::IsValid(constant_value)) {
- return m.SmiConstant(Smi::FromInt(constant_value));
+ return m->SmiConstant(Smi::FromInt(constant_value));
}
- return m.WordShl(value, m.IntPtrConstant(kSmiShiftSize + kSmiTagSize));
+ return m->WordShl(value, m->IntPtrConstant(kSmiShiftSize + kSmiTagSize));
}
-Node* UndefinedConstant(CodeAssembler& m) { // NOLINT(runtime/references)
- return m.LoadRoot(RootIndex::kUndefinedValue);
+Node* UndefinedConstant(CodeAssembler* m) {
+ return m->LoadRoot(RootIndex::kUndefinedValue);
}
-Node* SmiFromInt32(CodeAssembler& m, // NOLINT(runtime/references)
- Node* value) {
- value = m.ChangeInt32ToIntPtr(value);
- return m.BitcastWordToTaggedSigned(
- m.WordShl(value, kSmiShiftSize + kSmiTagSize));
+Node* SmiFromInt32(CodeAssembler* m, Node* value) {
+ value = m->ChangeInt32ToIntPtr(value);
+ return m->BitcastWordToTaggedSigned(
+ m->WordShl(value, kSmiShiftSize + kSmiTagSize));
}
-Node* LoadObjectField(CodeAssembler& m, // NOLINT(runtime/references)
- Node* object, int offset,
+Node* LoadObjectField(CodeAssembler* m, Node* object, int offset,
MachineType type = MachineType::AnyTagged()) {
- return m.Load(type, object, m.IntPtrConstant(offset - kHeapObjectTag));
+ return m->Load(type, object, m->IntPtrConstant(offset - kHeapObjectTag));
}
-Node* LoadMap(CodeAssembler& m, // NOLINT(runtime/references)
- Node* object) {
+Node* LoadMap(CodeAssembler* m, Node* object) {
return LoadObjectField(m, object, JSObject::kMapOffset);
}
@@ -59,7 +54,7 @@ TEST(SimpleSmiReturn) {
Isolate* isolate(CcTest::InitIsolateOnce());
CodeAssemblerTester asm_tester(isolate);
CodeAssembler m(asm_tester.state());
- m.Return(SmiTag(m, m.Int32Constant(37)));
+ m.Return(SmiTag(&m, m.Int32Constant(37)));
FunctionTester ft(asm_tester.GenerateCode());
CHECK_EQ(37, ft.CallChecked<Smi>()->value());
}
@@ -91,7 +86,7 @@ TEST(SimpleCallRuntime1Arg) {
CodeAssembler m(asm_tester.state());
TNode<Context> context =
m.HeapConstant(Handle<Context>(isolate->native_context()));
- Node* b = SmiTag(m, m.Int32Constant(0));
+ Node* b = SmiTag(&m, m.Int32Constant(0));
m.Return(m.CallRuntime(Runtime::kIsSmi, context, b));
FunctionTester ft(asm_tester.GenerateCode());
CHECK(ft.CallChecked<Oddball>().is_identical_to(
@@ -104,7 +99,7 @@ TEST(SimpleTailCallRuntime1Arg) {
CodeAssembler m(asm_tester.state());
TNode<Context> context =
m.HeapConstant(Handle<Context>(isolate->native_context()));
- Node* b = SmiTag(m, m.Int32Constant(0));
+ Node* b = SmiTag(&m, m.Int32Constant(0));
m.TailCallRuntime(Runtime::kIsSmi, context, b);
FunctionTester ft(asm_tester.GenerateCode());
CHECK(ft.CallChecked<Oddball>().is_identical_to(
@@ -117,8 +112,8 @@ TEST(SimpleCallRuntime2Arg) {
CodeAssembler m(asm_tester.state());
TNode<Context> context =
m.HeapConstant(Handle<Context>(isolate->native_context()));
- Node* a = SmiTag(m, m.Int32Constant(2));
- Node* b = SmiTag(m, m.Int32Constant(4));
+ Node* a = SmiTag(&m, m.Int32Constant(2));
+ Node* b = SmiTag(&m, m.Int32Constant(4));
m.Return(m.CallRuntime(Runtime::kAdd, context, a, b));
FunctionTester ft(asm_tester.GenerateCode());
CHECK_EQ(6, ft.CallChecked<Smi>()->value());
@@ -130,8 +125,8 @@ TEST(SimpleTailCallRuntime2Arg) {
CodeAssembler m(asm_tester.state());
TNode<Context> context =
m.HeapConstant(Handle<Context>(isolate->native_context()));
- Node* a = SmiTag(m, m.Int32Constant(2));
- Node* b = SmiTag(m, m.Int32Constant(4));
+ Node* a = SmiTag(&m, m.Int32Constant(2));
+ Node* b = SmiTag(&m, m.Int32Constant(4));
m.TailCallRuntime(Runtime::kAdd, context, a, b);
FunctionTester ft(asm_tester.GenerateCode());
CHECK_EQ(6, ft.CallChecked<Smi>()->value());
@@ -139,8 +134,7 @@ TEST(SimpleTailCallRuntime2Arg) {
namespace {
-Handle<JSFunction> CreateSumAllArgumentsFunction(
- FunctionTester& ft) { // NOLINT(runtime/references)
+Handle<JSFunction> CreateSumAllArgumentsFunction(FunctionTester* ft) {
const char* source =
"(function() {\n"
" var sum = 0 + this;\n"
@@ -149,7 +143,7 @@ Handle<JSFunction> CreateSumAllArgumentsFunction(
" }\n"
" return sum;\n"
"})";
- return ft.NewFunction(source);
+ return ft->NewFunction(source);
}
} // namespace
@@ -163,7 +157,7 @@ TEST(SimpleCallJSFunction0Arg) {
Node* function = m.Parameter(0);
Node* context = m.Parameter(kNumParams + 2);
- Node* receiver = SmiTag(m, m.Int32Constant(42));
+ Node* receiver = SmiTag(&m, m.Int32Constant(42));
Callable callable = CodeFactory::Call(isolate);
Node* result = m.CallJS(callable, context, function, receiver);
@@ -171,7 +165,7 @@ TEST(SimpleCallJSFunction0Arg) {
}
FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
- Handle<JSFunction> sum = CreateSumAllArgumentsFunction(ft);
+ Handle<JSFunction> sum = CreateSumAllArgumentsFunction(&ft);
MaybeHandle<Object> result = ft.Call(sum);
CHECK_EQ(Smi::FromInt(42), *result.ToHandleChecked());
}
@@ -185,8 +179,8 @@ TEST(SimpleCallJSFunction1Arg) {
Node* function = m.Parameter(0);
Node* context = m.Parameter(1);
- Node* receiver = SmiTag(m, m.Int32Constant(42));
- Node* a = SmiTag(m, m.Int32Constant(13));
+ Node* receiver = SmiTag(&m, m.Int32Constant(42));
+ Node* a = SmiTag(&m, m.Int32Constant(13));
Callable callable = CodeFactory::Call(isolate);
Node* result = m.CallJS(callable, context, function, receiver, a);
@@ -194,7 +188,7 @@ TEST(SimpleCallJSFunction1Arg) {
}
FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
- Handle<JSFunction> sum = CreateSumAllArgumentsFunction(ft);
+ Handle<JSFunction> sum = CreateSumAllArgumentsFunction(&ft);
MaybeHandle<Object> result = ft.Call(sum);
CHECK_EQ(Smi::FromInt(55), *result.ToHandleChecked());
}
@@ -208,9 +202,9 @@ TEST(SimpleCallJSFunction2Arg) {
Node* function = m.Parameter(0);
Node* context = m.Parameter(1);
- Node* receiver = SmiTag(m, m.Int32Constant(42));
- Node* a = SmiTag(m, m.Int32Constant(13));
- Node* b = SmiTag(m, m.Int32Constant(153));
+ Node* receiver = SmiTag(&m, m.Int32Constant(42));
+ Node* a = SmiTag(&m, m.Int32Constant(13));
+ Node* b = SmiTag(&m, m.Int32Constant(153));
Callable callable = CodeFactory::Call(isolate);
Node* result = m.CallJS(callable, context, function, receiver, a, b);
@@ -218,7 +212,7 @@ TEST(SimpleCallJSFunction2Arg) {
}
FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
- Handle<JSFunction> sum = CreateSumAllArgumentsFunction(ft);
+ Handle<JSFunction> sum = CreateSumAllArgumentsFunction(&ft);
MaybeHandle<Object> result = ft.Call(sum);
CHECK_EQ(Smi::FromInt(208), *result.ToHandleChecked());
}
@@ -228,7 +222,7 @@ TEST(VariableMerge1) {
CodeAssemblerTester asm_tester(isolate);
CodeAssembler m(asm_tester.state());
Variable var1(&m, MachineRepresentation::kTagged);
- Label l1(&m), l2(&m), merge(&m);
+ CodeAssemblerLabel l1(&m), l2(&m), merge(&m);
TNode<Int32T> temp = m.Int32Constant(0);
var1.Bind(temp);
m.Branch(m.Int32Constant(1), &l1, &l2);
@@ -247,7 +241,7 @@ TEST(VariableMerge2) {
CodeAssemblerTester asm_tester(isolate);
CodeAssembler m(asm_tester.state());
Variable var1(&m, MachineRepresentation::kTagged);
- Label l1(&m), l2(&m), merge(&m);
+ CodeAssemblerLabel l1(&m), l2(&m), merge(&m);
TNode<Int32T> temp = m.Int32Constant(0);
var1.Bind(temp);
m.Branch(m.Int32Constant(1), &l1, &l2);
@@ -269,7 +263,7 @@ TEST(VariableMerge3) {
CodeAssembler m(asm_tester.state());
Variable var1(&m, MachineRepresentation::kTagged);
Variable var2(&m, MachineRepresentation::kTagged);
- Label l1(&m), l2(&m), merge(&m);
+ CodeAssemblerLabel l1(&m), l2(&m), merge(&m);
TNode<Int32T> temp = m.Int32Constant(0);
var1.Bind(temp);
var2.Bind(temp);
@@ -293,7 +287,7 @@ TEST(VariableMergeBindFirst) {
CodeAssemblerTester asm_tester(isolate);
CodeAssembler m(asm_tester.state());
Variable var1(&m, MachineRepresentation::kTagged);
- Label l1(&m), l2(&m), merge(&m, &var1), end(&m);
+ CodeAssemblerLabel l1(&m), l2(&m), merge(&m, &var1), end(&m);
TNode<Int32T> temp = m.Int32Constant(0);
var1.Bind(temp);
m.Branch(m.Int32Constant(1), &l1, &l2);
@@ -319,8 +313,8 @@ TEST(VariableMergeSwitch) {
CodeAssemblerTester asm_tester(isolate);
CodeAssembler m(asm_tester.state());
Variable var1(&m, MachineRepresentation::kTagged);
- Label l1(&m), l2(&m), default_label(&m);
- Label* labels[] = {&l1, &l2};
+ CodeAssemblerLabel l1(&m), l2(&m), default_label(&m);
+ CodeAssemblerLabel* labels[] = {&l1, &l2};
int32_t values[] = {1, 2};
TNode<Smi> temp1 = m.SmiConstant(0);
var1.Bind(temp1);
@@ -345,7 +339,7 @@ TEST(SplitEdgeBranchMerge) {
Isolate* isolate(CcTest::InitIsolateOnce());
CodeAssemblerTester asm_tester(isolate);
CodeAssembler m(asm_tester.state());
- Label l1(&m), merge(&m);
+ CodeAssemblerLabel l1(&m), merge(&m);
m.Branch(m.Int32Constant(1), &l1, &merge);
m.Bind(&l1);
m.Goto(&merge);
@@ -357,8 +351,8 @@ TEST(SplitEdgeSwitchMerge) {
Isolate* isolate(CcTest::InitIsolateOnce());
CodeAssemblerTester asm_tester(isolate);
CodeAssembler m(asm_tester.state());
- Label l1(&m), l2(&m), l3(&m), default_label(&m);
- Label* labels[] = {&l1, &l2};
+ CodeAssemblerLabel l1(&m), l2(&m), l3(&m), default_label(&m);
+ CodeAssemblerLabel* labels[] = {&l1, &l2};
int32_t values[] = {1, 2};
m.Branch(m.Int32Constant(1), &l3, &l1);
m.Bind(&l3);
@@ -389,11 +383,11 @@ TEST(TestToConstant) {
CHECK(m.ToInt32Constant(a, &value32));
CHECK(m.ToInt64Constant(a, &value64));
- a = UndefinedConstant(m);
+ a = UndefinedConstant(&m);
CHECK(!m.ToInt32Constant(a, &value32));
CHECK(!m.ToInt64Constant(a, &value64));
- a = UndefinedConstant(m);
+ a = UndefinedConstant(&m);
CHECK(!m.ToInt32Constant(a, &value32));
CHECK(!m.ToInt64Constant(a, &value64));
}
@@ -402,17 +396,17 @@ TEST(DeferredCodePhiHints) {
Isolate* isolate(CcTest::InitIsolateOnce());
CodeAssemblerTester asm_tester(isolate);
CodeAssembler m(asm_tester.state());
- Label block1(&m, Label::kDeferred);
+ CodeAssemblerLabel block1(&m, CodeAssemblerLabel::kDeferred);
m.Goto(&block1);
m.Bind(&block1);
{
Variable var_object(&m, MachineRepresentation::kTagged);
- Label loop(&m, &var_object);
+ CodeAssemblerLabel loop(&m, &var_object);
var_object.Bind(m.SmiConstant(0));
m.Goto(&loop);
m.Bind(&loop);
{
- Node* map = LoadMap(m, var_object.value());
+ Node* map = LoadMap(&m, var_object.value());
var_object.Bind(map);
m.Goto(&loop);
}
@@ -424,10 +418,10 @@ TEST(TestOutOfScopeVariable) {
Isolate* isolate(CcTest::InitIsolateOnce());
CodeAssemblerTester asm_tester(isolate);
CodeAssembler m(asm_tester.state());
- Label block1(&m);
- Label block2(&m);
- Label block3(&m);
- Label block4(&m);
+ CodeAssemblerLabel block1(&m);
+ CodeAssemblerLabel block2(&m);
+ CodeAssemblerLabel block3(&m);
+ CodeAssemblerLabel block4(&m);
m.Branch(m.WordEqual(m.UncheckedCast<IntPtrT>(m.Parameter(0)),
m.IntPtrConstant(0)),
&block1, &block4);
@@ -463,7 +457,7 @@ TEST(GotoIfException) {
m.HeapConstant(isolate->factory()->to_string_tag_symbol());
Variable exception(&m, MachineRepresentation::kTagged);
- Label exception_handler(&m);
+ CodeAssemblerLabel exception_handler(&m);
Callable to_string = Builtins::CallableFor(isolate, Builtins::kToString);
TNode<Object> string = m.CallStub(to_string, context, to_string_tag);
m.GotoIfException(string, &exception_handler, &exception);
@@ -498,9 +492,9 @@ TEST(GotoIfExceptionMultiple) {
Node* second_value = m.Parameter(1);
Node* third_value = m.Parameter(2);
- Label exception_handler1(&m);
- Label exception_handler2(&m);
- Label exception_handler3(&m);
+ CodeAssemblerLabel exception_handler1(&m);
+ CodeAssemblerLabel exception_handler2(&m);
+ CodeAssemblerLabel exception_handler3(&m);
Variable return_value(&m, MachineRepresentation::kWord32);
Variable error(&m, MachineRepresentation::kTagged);
@@ -515,20 +509,20 @@ TEST(GotoIfExceptionMultiple) {
// try { ToString(param2); return 7 } catch (e) { ... }
m.Bind(&exception_handler1);
return_value.Bind(m.Int32Constant(7));
- error.Bind(UndefinedConstant(m));
+ error.Bind(UndefinedConstant(&m));
string = m.CallStub(to_string, context, second_value);
m.GotoIfException(string, &exception_handler2, &error);
- m.Return(SmiFromInt32(m, return_value.value()));
+ m.Return(SmiFromInt32(&m, return_value.value()));
// try { ToString(param3); return 7 & ~2; } catch (e) { return e; }
m.Bind(&exception_handler2);
// Return returnValue & ~2
- error.Bind(UndefinedConstant(m));
+ error.Bind(UndefinedConstant(&m));
string = m.CallStub(to_string, context, third_value);
m.GotoIfException(string, &exception_handler3, &error);
m.Return(SmiFromInt32(
- m, m.Word32And(return_value.value(),
- m.Word32Xor(m.Int32Constant(2), m.Int32Constant(-1)))));
+ &m, m.Word32And(return_value.value(),
+ m.Word32Xor(m.Int32Constant(2), m.Int32Constant(-1)))));
m.Bind(&exception_handler3);
m.Return(error.value());
@@ -578,7 +572,7 @@ TEST(ExceptionHandler) {
CodeAssembler m(asm_tester.state());
CodeAssembler::TVariable<Object> var(m.SmiConstant(0), &m);
- Label exception(&m, {&var}, Label::kDeferred);
+ CodeAssemblerLabel exception(&m, {&var}, CodeAssemblerLabel::kDeferred);
{
CodeAssemblerScopedExceptionHandler handler(&m, &exception, &var);
TNode<Context> context =
diff --git a/deps/v8/test/cctest/compiler/test-gap-resolver.cc b/deps/v8/test/cctest/compiler/test-gap-resolver.cc
index ca26e0b49f..a7b6514c1f 100644
--- a/deps/v8/test/cctest/compiler/test-gap-resolver.cc
+++ b/deps/v8/test/cctest/compiler/test-gap-resolver.cc
@@ -353,7 +353,7 @@ class ParallelMoveCreator : public HandleAndZoneScope {
};
int index = rng_->NextInt(kMaxIndex);
// destination can't be Constant.
- switch (rng_->NextInt(is_source ? 5 : 4)) {
+ switch (rng_->NextInt(is_source ? 3 : 2)) {
case 0:
return AllocatedOperand(LocationOperand::STACK_SLOT, rep,
GetValidSlotIndex(rep, index));
@@ -361,12 +361,6 @@ class ParallelMoveCreator : public HandleAndZoneScope {
return AllocatedOperand(LocationOperand::REGISTER, rep,
GetValidRegisterCode(rep, index));
case 2:
- return ExplicitOperand(LocationOperand::REGISTER, rep,
- GetValidRegisterCode(rep, 1));
- case 3:
- return ExplicitOperand(LocationOperand::STACK_SLOT, rep,
- GetValidSlotIndex(rep, index));
- case 4:
return ConstantOperand(index);
}
UNREACHABLE();
diff --git a/deps/v8/test/cctest/compiler/test-jump-threading.cc b/deps/v8/test/cctest/compiler/test-jump-threading.cc
index 44bee022b3..7440da7fb0 100644
--- a/deps/v8/test/cctest/compiler/test-jump-threading.cc
+++ b/deps/v8/test/cctest/compiler/test-jump-threading.cc
@@ -109,16 +109,16 @@ class TestCode : public HandleAndZoneScope {
}
};
-void VerifyForwarding(TestCode& code, // NOLINT(runtime/references)
- int count, int* expected) {
+void VerifyForwarding(TestCode* code, int count, int* expected) {
v8::internal::AccountingAllocator allocator;
Zone local_zone(&allocator, ZONE_NAME);
ZoneVector<RpoNumber> result(&local_zone);
- JumpThreading::ComputeForwarding(&local_zone, result, &code.sequence_, true);
+ JumpThreading::ComputeForwarding(&local_zone, &result, &code->sequence_,
+ true);
CHECK(count == static_cast<int>(result.size()));
for (int i = 0; i < count; i++) {
- CHECK(expected[i] == result[i].ToInt());
+ CHECK_EQ(expected[i], result[i].ToInt());
}
}
@@ -133,7 +133,7 @@ TEST(FwEmpty1) {
code.End();
static int expected[] = {2, 2, 2};
- VerifyForwarding(code, 3, expected);
+ VerifyForwarding(&code, 3, expected);
}
@@ -150,7 +150,7 @@ TEST(FwEmptyN) {
code.End();
static int expected[] = {2, 2, 2};
- VerifyForwarding(code, 3, expected);
+ VerifyForwarding(&code, 3, expected);
}
}
@@ -162,7 +162,7 @@ TEST(FwNone1) {
code.End();
static int expected[] = {0};
- VerifyForwarding(code, 1, expected);
+ VerifyForwarding(&code, 1, expected);
}
@@ -174,7 +174,7 @@ TEST(FwMoves1) {
code.End();
static int expected[] = {0};
- VerifyForwarding(code, 1, expected);
+ VerifyForwarding(&code, 1, expected);
}
@@ -188,7 +188,7 @@ TEST(FwMoves2) {
code.End();
static int expected[] = {1, 1};
- VerifyForwarding(code, 2, expected);
+ VerifyForwarding(&code, 2, expected);
}
@@ -202,7 +202,7 @@ TEST(FwMoves2b) {
code.End();
static int expected[] = {0, 1};
- VerifyForwarding(code, 2, expected);
+ VerifyForwarding(&code, 2, expected);
}
@@ -216,7 +216,7 @@ TEST(FwOther2) {
code.End();
static int expected[] = {0, 1};
- VerifyForwarding(code, 2, expected);
+ VerifyForwarding(&code, 2, expected);
}
@@ -229,7 +229,7 @@ TEST(FwNone2a) {
code.End();
static int expected[] = {1, 1};
- VerifyForwarding(code, 2, expected);
+ VerifyForwarding(&code, 2, expected);
}
@@ -242,7 +242,7 @@ TEST(FwNone2b) {
code.End();
static int expected[] = {1, 1};
- VerifyForwarding(code, 2, expected);
+ VerifyForwarding(&code, 2, expected);
}
@@ -253,7 +253,7 @@ TEST(FwLoop1) {
code.Jump(0);
static int expected[] = {0};
- VerifyForwarding(code, 1, expected);
+ VerifyForwarding(&code, 1, expected);
}
@@ -266,7 +266,7 @@ TEST(FwLoop2) {
code.Jump(0);
static int expected[] = {0, 0};
- VerifyForwarding(code, 2, expected);
+ VerifyForwarding(&code, 2, expected);
}
@@ -281,7 +281,7 @@ TEST(FwLoop3) {
code.Jump(0);
static int expected[] = {0, 0, 0};
- VerifyForwarding(code, 3, expected);
+ VerifyForwarding(&code, 3, expected);
}
@@ -294,7 +294,7 @@ TEST(FwLoop1b) {
code.Jump(1);
static int expected[] = {1, 1};
- VerifyForwarding(code, 2, expected);
+ VerifyForwarding(&code, 2, expected);
}
@@ -309,7 +309,7 @@ TEST(FwLoop2b) {
code.Jump(1);
static int expected[] = {1, 1, 1};
- VerifyForwarding(code, 3, expected);
+ VerifyForwarding(&code, 3, expected);
}
@@ -326,7 +326,7 @@ TEST(FwLoop3b) {
code.Jump(1);
static int expected[] = {1, 1, 1, 1};
- VerifyForwarding(code, 4, expected);
+ VerifyForwarding(&code, 4, expected);
}
@@ -345,7 +345,7 @@ TEST(FwLoop2_1a) {
code.Jump(2);
static int expected[] = {1, 1, 1, 1, 1};
- VerifyForwarding(code, 5, expected);
+ VerifyForwarding(&code, 5, expected);
}
@@ -364,7 +364,7 @@ TEST(FwLoop2_1b) {
code.Jump(2);
static int expected[] = {2, 2, 2, 2, 2};
- VerifyForwarding(code, 5, expected);
+ VerifyForwarding(&code, 5, expected);
}
@@ -383,7 +383,7 @@ TEST(FwLoop2_1c) {
code.Jump(1);
static int expected[] = {1, 1, 1, 1, 1};
- VerifyForwarding(code, 5, expected);
+ VerifyForwarding(&code, 5, expected);
}
@@ -402,7 +402,7 @@ TEST(FwLoop2_1d) {
code.Jump(1);
static int expected[] = {1, 1, 1, 1, 1};
- VerifyForwarding(code, 5, expected);
+ VerifyForwarding(&code, 5, expected);
}
@@ -423,7 +423,7 @@ TEST(FwLoop3_1a) {
code.Jump(0);
static int expected[] = {2, 2, 2, 2, 2, 2};
- VerifyForwarding(code, 6, expected);
+ VerifyForwarding(&code, 6, expected);
}
@@ -443,7 +443,7 @@ TEST(FwDiamonds) {
code.End();
int expected[] = {0, i ? 1 : 3, j ? 2 : 3, 3};
- VerifyForwarding(code, 4, expected);
+ VerifyForwarding(&code, 4, expected);
}
}
}
@@ -470,7 +470,7 @@ TEST(FwDiamonds2) {
int merge = k ? 3 : 4;
int expected[] = {0, i ? 1 : merge, j ? 2 : merge, merge, 4};
- VerifyForwarding(code, 5, expected);
+ VerifyForwarding(&code, 5, expected);
}
}
}
@@ -504,7 +504,7 @@ TEST(FwDoubleDiamonds) {
int expected[] = {0, i ? 1 : 3, j ? 2 : 3, 3,
x ? 4 : 6, y ? 5 : 6, 6};
- VerifyForwarding(code, 7, expected);
+ VerifyForwarding(&code, 7, expected);
}
}
}
@@ -568,7 +568,7 @@ void RunPermutedChain(int* permutation, int size) {
int expected[] = {size + 1, size + 1, size + 1, size + 1,
size + 1, size + 1, size + 1};
- VerifyForwarding(code, size + 2, expected);
+ VerifyForwarding(&code, size + 2, expected);
}
@@ -604,55 +604,50 @@ void RunPermutedDiamond(int* permutation, int size) {
int expected[] = {br, 5, 5, 5, 5, 5};
expected[br] = br;
- VerifyForwarding(code, 6, expected);
+ VerifyForwarding(&code, 6, expected);
}
TEST(FwPermuted_diamond) { RunAllPermutations<4>(RunPermutedDiamond); }
-void ApplyForwarding(TestCode& code, // NOLINT(runtime/references)
- int size, int* forward) {
- code.sequence_.RecomputeAssemblyOrderForTesting();
- ZoneVector<RpoNumber> vector(code.main_zone());
+void ApplyForwarding(TestCode* code, int size, int* forward) {
+ code->sequence_.RecomputeAssemblyOrderForTesting();
+ ZoneVector<RpoNumber> vector(code->main_zone());
for (int i = 0; i < size; i++) {
vector.push_back(RpoNumber::FromInt(forward[i]));
}
- JumpThreading::ApplyForwarding(code.main_zone(), vector, &code.sequence_);
+ JumpThreading::ApplyForwarding(code->main_zone(), vector, &code->sequence_);
}
-void CheckJump(TestCode& code, // NOLINT(runtime/references)
- int pos, int target) {
- Instruction* instr = code.sequence_.InstructionAt(pos);
+void CheckJump(TestCode* code, int pos, int target) {
+ Instruction* instr = code->sequence_.InstructionAt(pos);
CHECK_EQ(kArchJmp, instr->arch_opcode());
CHECK_EQ(1, static_cast<int>(instr->InputCount()));
CHECK_EQ(0, static_cast<int>(instr->OutputCount()));
CHECK_EQ(0, static_cast<int>(instr->TempCount()));
- CHECK_EQ(target, code.sequence_.InputRpo(instr, 0).ToInt());
+ CHECK_EQ(target, code->sequence_.InputRpo(instr, 0).ToInt());
}
-void CheckNop(TestCode& code, // NOLINT(runtime/references)
- int pos) {
- Instruction* instr = code.sequence_.InstructionAt(pos);
+void CheckNop(TestCode* code, int pos) {
+ Instruction* instr = code->sequence_.InstructionAt(pos);
CHECK_EQ(kArchNop, instr->arch_opcode());
CHECK_EQ(0, static_cast<int>(instr->InputCount()));
CHECK_EQ(0, static_cast<int>(instr->OutputCount()));
CHECK_EQ(0, static_cast<int>(instr->TempCount()));
}
-void CheckBranch(TestCode& code, // NOLINT(runtime/references)
- int pos, int t1, int t2) {
- Instruction* instr = code.sequence_.InstructionAt(pos);
+void CheckBranch(TestCode* code, int pos, int t1, int t2) {
+ Instruction* instr = code->sequence_.InstructionAt(pos);
CHECK_EQ(2, static_cast<int>(instr->InputCount()));
CHECK_EQ(0, static_cast<int>(instr->OutputCount()));
CHECK_EQ(0, static_cast<int>(instr->TempCount()));
- CHECK_EQ(t1, code.sequence_.InputRpo(instr, 0).ToInt());
- CHECK_EQ(t2, code.sequence_.InputRpo(instr, 1).ToInt());
+ CHECK_EQ(t1, code->sequence_.InputRpo(instr, 0).ToInt());
+ CHECK_EQ(t2, code->sequence_.InputRpo(instr, 1).ToInt());
}
-void CheckAssemblyOrder(TestCode& code, // NOLINT(runtime/references)
- int size, int* expected) {
+void CheckAssemblyOrder(TestCode* code, int size, int* expected) {
int i = 0;
- for (auto const block : code.sequence_.instruction_blocks()) {
+ for (auto const block : code->sequence_.instruction_blocks()) {
CHECK_EQ(expected[i++], block->ao_number().ToInt());
}
}
@@ -668,12 +663,12 @@ TEST(Rewire1) {
code.End();
static int forward[] = {2, 2, 2};
- ApplyForwarding(code, 3, forward);
- CheckJump(code, j1, 2);
- CheckNop(code, j2);
+ ApplyForwarding(&code, 3, forward);
+ CheckJump(&code, j1, 2);
+ CheckNop(&code, j2);
static int assembly[] = {0, 1, 1};
- CheckAssemblyOrder(code, 3, assembly);
+ CheckAssemblyOrder(&code, 3, assembly);
}
@@ -691,13 +686,13 @@ TEST(Rewire1_deferred) {
code.End();
static int forward[] = {3, 3, 3, 3};
- ApplyForwarding(code, 4, forward);
- CheckJump(code, j1, 3);
- CheckNop(code, j2);
- CheckNop(code, j3);
+ ApplyForwarding(&code, 4, forward);
+ CheckJump(&code, j1, 3);
+ CheckNop(&code, j2);
+ CheckNop(&code, j3);
static int assembly[] = {0, 1, 2, 1};
- CheckAssemblyOrder(code, 4, assembly);
+ CheckAssemblyOrder(&code, 4, assembly);
}
@@ -717,12 +712,12 @@ TEST(Rewire2_deferred) {
code.End();
static int forward[] = {0, 1, 2, 3};
- ApplyForwarding(code, 4, forward);
- CheckJump(code, j1, 1);
- CheckJump(code, j2, 3);
+ ApplyForwarding(&code, 4, forward);
+ CheckJump(&code, j1, 1);
+ CheckJump(&code, j2, 3);
static int assembly[] = {0, 2, 3, 1};
- CheckAssemblyOrder(code, 4, assembly);
+ CheckAssemblyOrder(&code, 4, assembly);
}
@@ -742,18 +737,18 @@ TEST(Rewire_diamond) {
code.End();
int forward[] = {0, 1, i ? 4 : 2, j ? 4 : 3, 4};
- ApplyForwarding(code, 5, forward);
- CheckJump(code, j1, 1);
- CheckBranch(code, b1, i ? 4 : 2, j ? 4 : 3);
+ ApplyForwarding(&code, 5, forward);
+ CheckJump(&code, j1, 1);
+ CheckBranch(&code, b1, i ? 4 : 2, j ? 4 : 3);
if (i) {
- CheckNop(code, j2);
+ CheckNop(&code, j2);
} else {
- CheckJump(code, j2, 4);
+ CheckJump(&code, j2, 4);
}
if (j) {
- CheckNop(code, j3);
+ CheckNop(&code, j3);
} else {
- CheckJump(code, j3, 4);
+ CheckJump(&code, j3, 4);
}
int assembly[] = {0, 1, 2, 3, 4};
@@ -763,7 +758,7 @@ TEST(Rewire_diamond) {
if (j) {
for (int k = 4; k < 5; k++) assembly[k]--;
}
- CheckAssemblyOrder(code, 5, assembly);
+ CheckAssemblyOrder(&code, 5, assembly);
}
}
}
diff --git a/deps/v8/test/cctest/compiler/test-loop-analysis.cc b/deps/v8/test/cctest/compiler/test-loop-analysis.cc
index 231a3ada5a..38ce2f3463 100644
--- a/deps/v8/test/cctest/compiler/test-loop-analysis.cc
+++ b/deps/v8/test/cctest/compiler/test-loop-analysis.cc
@@ -201,9 +201,9 @@ struct While {
}
void chain(Node* control) { loop->ReplaceInput(0, control); }
- void nest(While& that) { // NOLINT(runtime/references)
- that.loop->ReplaceInput(1, exit);
- this->loop->ReplaceInput(0, that.if_true);
+ void nest(While* that) {
+ that->loop->ReplaceInput(1, exit);
+ this->loop->ReplaceInput(0, that->if_true);
}
};
@@ -214,17 +214,17 @@ struct Counter {
Node* phi;
Node* add;
- Counter(While& w, // NOLINT(runtime/references)
- int32_t b, int32_t k)
- : base(w.t.jsgraph.Int32Constant(b)), inc(w.t.jsgraph.Int32Constant(k)) {
+ Counter(While* w, int32_t b, int32_t k)
+ : base(w->t.jsgraph.Int32Constant(b)),
+ inc(w->t.jsgraph.Int32Constant(k)) {
Build(w);
}
- Counter(While& w, Node* b, Node* k) : base(b), inc(k) { Build(w); }
+ Counter(While* w, Node* b, Node* k) : base(b), inc(k) { Build(w); }
- void Build(While& w) {
- phi = w.t.graph.NewNode(w.t.op(2, false), base, base, w.loop);
- add = w.t.graph.NewNode(&kIntAdd, phi, inc);
+ void Build(While* w) {
+ phi = w->t.graph.NewNode(w->t.op(2, false), base, base, w->loop);
+ add = w->t.graph.NewNode(&kIntAdd, phi, inc);
phi->ReplaceInput(1, add);
}
};
@@ -236,16 +236,16 @@ struct StoreLoop {
Node* phi;
Node* store;
- explicit StoreLoop(While& w) // NOLINT(runtime/references)
- : base(w.t.graph.start()), val(w.t.jsgraph.Int32Constant(13)) {
+ explicit StoreLoop(While* w)
+ : base(w->t.graph.start()), val(w->t.jsgraph.Int32Constant(13)) {
Build(w);
}
- StoreLoop(While& w, Node* b, Node* v) : base(b), val(v) { Build(w); }
+ StoreLoop(While* w, Node* b, Node* v) : base(b), val(v) { Build(w); }
- void Build(While& w) {
- phi = w.t.graph.NewNode(w.t.op(2, true), base, base, w.loop);
- store = w.t.graph.NewNode(&kStore, val, phi, w.loop);
+ void Build(While* w) {
+ phi = w->t.graph.NewNode(w->t.op(2, true), base, base, w->loop);
+ store = w->t.graph.NewNode(&kStore, val, phi, w->loop);
phi->ReplaceInput(1, store);
}
};
@@ -287,7 +287,7 @@ TEST(LaLoop1c) {
// One loop with a counter.
LoopFinderTester t;
While w(t, t.p0);
- Counter c(w, 0, 1);
+ Counter c(&w, 0, 1);
t.Return(c.phi, t.start, w.exit);
Node* chain[] = {w.loop};
@@ -303,7 +303,7 @@ TEST(LaLoop1e) {
// One loop with an effect phi.
LoopFinderTester t;
While w(t, t.p0);
- StoreLoop c(w);
+ StoreLoop c(&w);
t.Return(t.p0, c.phi, w.exit);
Node* chain[] = {w.loop};
@@ -319,8 +319,8 @@ TEST(LaLoop1d) {
// One loop with two counters.
LoopFinderTester t;
While w(t, t.p0);
- Counter c1(w, 0, 1);
- Counter c2(w, 1, 1);
+ Counter c1(&w, 0, 1);
+ Counter c2(&w, 1, 1);
t.Return(t.graph.NewNode(&kIntAdd, c1.phi, c2.phi), t.start, w.exit);
Node* chain[] = {w.loop};
@@ -365,8 +365,8 @@ TEST(LaLoop2c) {
LoopFinderTester t;
While w1(t, t.p0);
While w2(t, t.p0);
- Counter c1(w1, 0, 1);
- Counter c2(w2, 0, 1);
+ Counter c1(&w1, 0, 1);
+ Counter c2(&w2, 0, 1);
w2.chain(w1.exit);
t.Return(t.graph.NewNode(&kIntAdd, c1.phi, c2.phi), t.start, w2.exit);
@@ -396,10 +396,10 @@ TEST(LaLoop2cc) {
LoopFinderTester t;
While w1(t, t.p0);
While w2(t, t.p0);
- Counter c1(w1, 0, 1);
+ Counter c1(&w1, 0, 1);
// various usage scenarios for the second loop.
- Counter c2(w2, i & 1 ? t.p0 : c1.phi, i & 2 ? t.p0 : c1.phi);
+ Counter c2(&w2, i & 1 ? t.p0 : c1.phi, i & 2 ? t.p0 : c1.phi);
if (i & 3) w2.branch->ReplaceInput(0, c1.phi);
w2.chain(w1.exit);
@@ -431,7 +431,7 @@ TEST(LaNestedLoop1) {
LoopFinderTester t;
While w1(t, t.p0);
While w2(t, t.p0);
- w2.nest(w1);
+ w2.nest(&w1);
t.Return(t.p0, t.start, w1.exit);
Node* chain[] = {w1.loop, w2.loop};
@@ -452,10 +452,10 @@ TEST(LaNestedLoop1c) {
LoopFinderTester t;
While w1(t, t.p0);
While w2(t, t.p0);
- Counter c1(w1, 0, 1);
- Counter c2(w2, 0, 1);
+ Counter c1(&w1, 0, 1);
+ Counter c2(&w2, 0, 1);
w2.branch->ReplaceInput(0, c2.phi);
- w2.nest(w1);
+ w2.nest(&w1);
t.Return(c1.phi, t.start, w1.exit);
Node* chain[] = {w1.loop, w2.loop};
@@ -477,7 +477,7 @@ TEST(LaNestedLoop1x) {
LoopFinderTester t;
While w1(t, t.p0);
While w2(t, t.p0);
- w2.nest(w1);
+ w2.nest(&w1);
const Operator* op = t.common.Phi(MachineRepresentation::kWord32, 2);
Node* p1a = t.graph.NewNode(op, t.p0, t.p0, w1.loop);
@@ -513,8 +513,8 @@ TEST(LaNestedLoop2) {
While w1(t, t.p0);
While w2(t, t.p0);
While w3(t, t.p0);
- w2.nest(w1);
- w3.nest(w1);
+ w2.nest(&w1);
+ w3.nest(&w1);
w3.chain(w2.exit);
t.Return(t.p0, t.start, w1.exit);
@@ -573,11 +573,11 @@ TEST(LaNestedLoop3c) {
// Three nested loops with counters.
LoopFinderTester t;
While w1(t, t.p0);
- Counter c1(w1, 0, 1);
+ Counter c1(&w1, 0, 1);
While w2(t, t.p0);
- Counter c2(w2, 0, 1);
+ Counter c2(&w2, 0, 1);
While w3(t, t.p0);
- Counter c3(w3, 0, 1);
+ Counter c3(&w3, 0, 1);
w2.loop->ReplaceInput(0, w1.if_true);
w3.loop->ReplaceInput(0, w2.if_true);
w2.loop->ReplaceInput(1, w3.exit);
diff --git a/deps/v8/test/cctest/compiler/test-multiple-return.cc b/deps/v8/test/cctest/compiler/test-multiple-return.cc
index c054e7654a..ad1c7efbd7 100644
--- a/deps/v8/test/cctest/compiler/test-multiple-return.cc
+++ b/deps/v8/test/cctest/compiler/test-multiple-return.cc
@@ -43,81 +43,76 @@ CallDescriptor* CreateCallDescriptor(Zone* zone, int return_count,
return compiler::GetWasmCallDescriptor(zone, builder.Build());
}
-Node* MakeConstant(RawMachineAssembler& m, // NOLINT(runtime/references)
- MachineType type, int value) {
+Node* MakeConstant(RawMachineAssembler* m, MachineType type, int value) {
switch (type.representation()) {
case MachineRepresentation::kWord32:
- return m.Int32Constant(static_cast<int32_t>(value));
+ return m->Int32Constant(static_cast<int32_t>(value));
case MachineRepresentation::kWord64:
- return m.Int64Constant(static_cast<int64_t>(value));
+ return m->Int64Constant(static_cast<int64_t>(value));
case MachineRepresentation::kFloat32:
- return m.Float32Constant(static_cast<float>(value));
+ return m->Float32Constant(static_cast<float>(value));
case MachineRepresentation::kFloat64:
- return m.Float64Constant(static_cast<double>(value));
+ return m->Float64Constant(static_cast<double>(value));
default:
UNREACHABLE();
}
}
-Node* Add(RawMachineAssembler& m, // NOLINT(runtime/references)
- MachineType type, Node* a, Node* b) {
+Node* Add(RawMachineAssembler* m, MachineType type, Node* a, Node* b) {
switch (type.representation()) {
case MachineRepresentation::kWord32:
- return m.Int32Add(a, b);
+ return m->Int32Add(a, b);
case MachineRepresentation::kWord64:
- return m.Int64Add(a, b);
+ return m->Int64Add(a, b);
case MachineRepresentation::kFloat32:
- return m.Float32Add(a, b);
+ return m->Float32Add(a, b);
case MachineRepresentation::kFloat64:
- return m.Float64Add(a, b);
+ return m->Float64Add(a, b);
default:
UNREACHABLE();
}
}
-Node* Sub(RawMachineAssembler& m, // NOLINT(runtime/references)
- MachineType type, Node* a, Node* b) {
+Node* Sub(RawMachineAssembler* m, MachineType type, Node* a, Node* b) {
switch (type.representation()) {
case MachineRepresentation::kWord32:
- return m.Int32Sub(a, b);
+ return m->Int32Sub(a, b);
case MachineRepresentation::kWord64:
- return m.Int64Sub(a, b);
+ return m->Int64Sub(a, b);
case MachineRepresentation::kFloat32:
- return m.Float32Sub(a, b);
+ return m->Float32Sub(a, b);
case MachineRepresentation::kFloat64:
- return m.Float64Sub(a, b);
+ return m->Float64Sub(a, b);
default:
UNREACHABLE();
}
}
-Node* Mul(RawMachineAssembler& m, // NOLINT(runtime/references)
- MachineType type, Node* a, Node* b) {
+Node* Mul(RawMachineAssembler* m, MachineType type, Node* a, Node* b) {
switch (type.representation()) {
case MachineRepresentation::kWord32:
- return m.Int32Mul(a, b);
+ return m->Int32Mul(a, b);
case MachineRepresentation::kWord64:
- return m.Int64Mul(a, b);
+ return m->Int64Mul(a, b);
case MachineRepresentation::kFloat32:
- return m.Float32Mul(a, b);
+ return m->Float32Mul(a, b);
case MachineRepresentation::kFloat64:
- return m.Float64Mul(a, b);
+ return m->Float64Mul(a, b);
default:
UNREACHABLE();
}
}
-Node* ToInt32(RawMachineAssembler& m, // NOLINT(runtime/references)
- MachineType type, Node* a) {
+Node* ToInt32(RawMachineAssembler* m, MachineType type, Node* a) {
switch (type.representation()) {
case MachineRepresentation::kWord32:
return a;
case MachineRepresentation::kWord64:
- return m.TruncateInt64ToInt32(a);
+ return m->TruncateInt64ToInt32(a);
case MachineRepresentation::kFloat32:
- return m.TruncateFloat32ToInt32(a);
+ return m->TruncateFloat32ToInt32(a);
case MachineRepresentation::kFloat64:
- return m.RoundFloat64ToInt32(a);
+ return m->RoundFloat64ToInt32(a);
default:
UNREACHABLE();
}
@@ -159,9 +154,9 @@ void TestReturnMultipleValues(MachineType type) {
using Node_ptr = Node*;
std::unique_ptr<Node_ptr[]> returns(new Node_ptr[count]);
for (int i = 0; i < count; ++i) {
- if (i % 3 == 0) returns[i] = Add(m, type, p0, p1);
- if (i % 3 == 1) returns[i] = Sub(m, type, p0, p1);
- if (i % 3 == 2) returns[i] = Mul(m, type, p0, p1);
+ if (i % 3 == 0) returns[i] = Add(&m, type, p0, p1);
+ if (i % 3 == 1) returns[i] = Sub(&m, type, p0, p1);
+ if (i % 3 == 2) returns[i] = Mul(&m, type, p0, p1);
}
m.Return(count, returns.get());
@@ -175,7 +170,7 @@ void TestReturnMultipleValues(MachineType type) {
#ifdef ENABLE_DISASSEMBLER
if (FLAG_print_code) {
StdoutStream os;
- code->Disassemble("multi_value", os);
+ code->Disassemble("multi_value", os, handles.main_isolate());
}
#endif
@@ -201,29 +196,29 @@ void TestReturnMultipleValues(MachineType type) {
// WasmContext dummy
call_inputs[1] = mt.PointerConstant(nullptr);
// Special inputs for the test.
- call_inputs[2] = MakeConstant(mt, type, a);
- call_inputs[3] = MakeConstant(mt, type, b);
+ call_inputs[2] = MakeConstant(&mt, type, a);
+ call_inputs[3] = MakeConstant(&mt, type, b);
for (int i = 2; i < param_count; i++) {
- call_inputs[2 + i] = MakeConstant(mt, type, i);
+ call_inputs[2 + i] = MakeConstant(&mt, type, i);
}
Node* ret_multi = mt.AddNode(mt.common()->Call(desc),
input_count, call_inputs);
- Node* ret = MakeConstant(mt, type, 0);
+ Node* ret = MakeConstant(&mt, type, 0);
bool sign = false;
for (int i = 0; i < count; ++i) {
Node* x = (count == 1)
? ret_multi
: mt.AddNode(mt.common()->Projection(i), ret_multi);
- ret = sign ? Sub(mt, type, ret, x) : Add(mt, type, ret, x);
+ ret = sign ? Sub(&mt, type, ret, x) : Add(&mt, type, ret, x);
if (i % 4 == 0) sign = !sign;
}
- mt.Return(ToInt32(mt, type, ret));
+ mt.Return(ToInt32(&mt, type, ret));
#ifdef ENABLE_DISASSEMBLER
Handle<Code> code2 = mt.GetCode();
if (FLAG_print_code) {
StdoutStream os;
- code2->Disassemble("multi_value_call", os);
+ code2->Disassemble("multi_value_call", os, handles.main_isolate());
}
#endif
CHECK_EQ(expect, mt.Call());
@@ -265,7 +260,7 @@ void ReturnLastValue(MachineType type) {
std::unique_ptr<Node* []> returns(new Node*[return_count]);
for (int i = 0; i < return_count; ++i) {
- returns[i] = MakeConstant(m, type, i);
+ returns[i] = MakeConstant(&m, type, i);
}
m.Return(return_count, returns.get());
@@ -292,8 +287,9 @@ void ReturnLastValue(MachineType type) {
Node* call = mt.AddNode(mt.common()->Call(desc), 2, inputs);
- mt.Return(ToInt32(
- mt, type, mt.AddNode(mt.common()->Projection(return_count - 1), call)));
+ mt.Return(
+ ToInt32(&mt, type,
+ mt.AddNode(mt.common()->Projection(return_count - 1), call)));
CHECK_EQ(expect, mt.Call());
}
@@ -327,7 +323,7 @@ void ReturnSumOfReturns(MachineType type) {
std::unique_ptr<Node* []> returns(new Node*[return_count]);
for (int i = 0; i < return_count; ++i) {
- returns[i] = MakeConstant(m, type, i);
+ returns[i] = MakeConstant(&m, type, i);
}
m.Return(return_count, returns.get());
@@ -360,7 +356,7 @@ void ReturnSumOfReturns(MachineType type) {
expect += i;
result = mt.Int32Add(
result,
- ToInt32(mt, type, mt.AddNode(mt.common()->Projection(i), call)));
+ ToInt32(&mt, type, mt.AddNode(mt.common()->Projection(i), call)));
}
mt.Return(result);
diff --git a/deps/v8/test/cctest/compiler/test-run-load-store.cc b/deps/v8/test/cctest/compiler/test-run-load-store.cc
index 3a8e9d61d4..6f52f339f3 100644
--- a/deps/v8/test/cctest/compiler/test-run-load-store.cc
+++ b/deps/v8/test/cctest/compiler/test-run-load-store.cc
@@ -37,6 +37,11 @@ enum TestAlignment {
#define A_GIG (1024ULL * 1024ULL * 1024ULL)
namespace {
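+// Computes {real_address} - {offset} on integral Address values, presumably
+// to avoid undefined out-of-bounds pointer arithmetic on byte*.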
+byte* ComputeOffset(void* real_address, int32_t offset) {
+ return reinterpret_cast<byte*>(reinterpret_cast<Address>(real_address) -
+ offset);
+}
+
void RunLoadInt32(const TestAlignment t) {
RawMachineAssemblerTester<int32_t> m;
@@ -65,7 +70,7 @@ void RunLoadInt32Offset(TestAlignment t) {
for (size_t i = 0; i < arraysize(offsets); i++) {
RawMachineAssemblerTester<int32_t> m;
int32_t offset = offsets[i];
- byte* pointer = reinterpret_cast<byte*>(&p1) - offset;
+ byte* pointer = ComputeOffset(&p1, offset);
// generate load [#base + #index]
if (t == TestAlignment::kAligned) {
@@ -93,8 +98,8 @@ void RunLoadStoreFloat32Offset(TestAlignment t) {
base::AddWithWraparound(0x2342AABB, base::MulWithWraparound(i, 3));
RawMachineAssemblerTester<int32_t> m;
int32_t offset = i;
- byte* from = reinterpret_cast<byte*>(&p1) - offset;
- byte* to = reinterpret_cast<byte*>(&p2) - offset;
+ byte* from = ComputeOffset(&p1, offset);
+ byte* to = ComputeOffset(&p2, offset);
// generate load [#base + #index]
if (t == TestAlignment::kAligned) {
Node* load = m.Load(MachineType::Float32(), m.PointerConstant(from),
@@ -131,8 +136,8 @@ void RunLoadStoreFloat64Offset(TestAlignment t) {
base::AddWithWraparound(0x2342AABB, base::MulWithWraparound(i, 3));
RawMachineAssemblerTester<int32_t> m;
int32_t offset = i;
- byte* from = reinterpret_cast<byte*>(&p1) - offset;
- byte* to = reinterpret_cast<byte*>(&p2) - offset;
+ byte* from = ComputeOffset(&p1, offset);
+ byte* to = ComputeOffset(&p2, offset);
// generate load [#base + #index]
if (t == TestAlignment::kAligned) {
Node* load = m.Load(MachineType::Float64(), m.PointerConstant(from),
@@ -259,7 +264,7 @@ void RunLoadImmIndex(MachineType type, TestAlignment t) {
for (int offset = -1; offset <= 200000; offset *= -5) {
for (int i = 0; i < kNumElems; i++) {
BufferedRawMachineAssemblerTester<CType> m;
- void* base_pointer = &buffer[0] - offset;
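+      // ComputeOffset takes a byte offset, so scale the element offset by
+      // sizeof(CType).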
+ void* base_pointer = ComputeOffset(&buffer[0], offset * sizeof(CType));
#ifdef V8_COMPRESS_POINTERS
if (type.IsTagged()) {
// When pointer compression is enabled then we need to access only
diff --git a/deps/v8/test/cctest/compiler/test-run-machops.cc b/deps/v8/test/cctest/compiler/test-run-machops.cc
index 1e5a73389e..ccc05ce11b 100644
--- a/deps/v8/test/cctest/compiler/test-run-machops.cc
+++ b/deps/v8/test/cctest/compiler/test-run-machops.cc
@@ -10,9 +10,10 @@
#include "src/base/ieee754.h"
#include "src/base/overflowing-math.h"
#include "src/base/utils/random-number-generator.h"
+#include "src/common/ptr-compr-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/utils/boxed-float.h"
#include "src/utils/utils.h"
-#include "src/objects/objects-inl.h"
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/codegen-tester.h"
#include "test/cctest/compiler/value-helper.h"
@@ -410,12 +411,15 @@ TEST(CompressDecompressTaggedAnyPointer) {
}
TEST(CompressDecompressTaggedAnySigned) {
- RawMachineAssemblerTester<int64_t> m;
+ RawMachineAssemblerTester<void*> m;
Smi smi = Smi::FromInt(123);
- int64_t smiPointer = static_cast<int64_t>(smi.ptr());
- Node* node = m.Int64Constant(smiPointer);
+ Node* node = m.Int64Constant(static_cast<int64_t>(smi.ptr()));
m.Return(m.ChangeCompressedToTagged(m.ChangeTaggedToCompressed(node)));
- CHECK_EQ(smiPointer, m.Call());
+
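+  // Compare against a value run through the same compress/decompress
+  // round-trip, since decompression need not reproduce the original upper
+  // bits (assumption).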
+ Object result = Object(reinterpret_cast<Address>(m.Call()));
+ Address smiPointer =
+ DecompressTaggedAny(m.isolate(), CompressTagged(smi.ptr()));
+ CHECK_EQ(smiPointer, result.ptr());
}
TEST(CompressDecompressTaggedPointer) {
@@ -432,13 +436,15 @@ TEST(CompressDecompressTaggedPointer) {
}
TEST(CompressDecompressTaggedSigned) {
- RawMachineAssemblerTester<int64_t> m;
+ RawMachineAssemblerTester<void*> m;
Smi smi = Smi::FromInt(123);
- int64_t smiPointer = static_cast<int64_t>(smi.ptr());
- Node* node = m.Int64Constant(smiPointer);
+ Address smiPointer = smi.ptr();
+ Node* node = m.Int64Constant(static_cast<int64_t>(smiPointer));
m.Return(m.ChangeCompressedSignedToTaggedSigned(
m.ChangeTaggedSignedToCompressedSigned(node)));
- CHECK_EQ(smiPointer, m.Call());
+
+ Object result = Object(reinterpret_cast<Address>(m.Call()));
+ CHECK_EQ(smiPointer, result.ptr());
}
#endif // V8_COMPRESS_POINTERS
diff --git a/deps/v8/test/cctest/compiler/test-run-native-calls.cc b/deps/v8/test/cctest/compiler/test-run-native-calls.cc
index 026e8307ae..6ab480743b 100644
--- a/deps/v8/test/cctest/compiler/test-run-native-calls.cc
+++ b/deps/v8/test/cctest/compiler/test-run-native-calls.cc
@@ -254,7 +254,7 @@ Handle<Code> CompileGraph(const char* name, CallDescriptor* call_descriptor,
#ifdef ENABLE_DISASSEMBLER
if (FLAG_print_opt_code) {
StdoutStream os;
- code->Disassemble(name, os);
+ code->Disassemble(name, os, isolate);
}
#endif
return code;
@@ -327,38 +327,32 @@ class ArgsBuffer {
return kTypes;
}
- Node* MakeConstant(RawMachineAssembler& raw, // NOLINT(runtime/references)
- int32_t value) {
- return raw.Int32Constant(value);
+ Node* MakeConstant(RawMachineAssembler* raw, int32_t value) {
+ return raw->Int32Constant(value);
}
- Node* MakeConstant(RawMachineAssembler& raw, // NOLINT(runtime/references)
- int64_t value) {
- return raw.Int64Constant(value);
+ Node* MakeConstant(RawMachineAssembler* raw, int64_t value) {
+ return raw->Int64Constant(value);
}
- Node* MakeConstant(RawMachineAssembler& raw, // NOLINT(runtime/references)
- float32 value) {
- return raw.Float32Constant(value);
+ Node* MakeConstant(RawMachineAssembler* raw, float32 value) {
+ return raw->Float32Constant(value);
}
- Node* MakeConstant(RawMachineAssembler& raw, // NOLINT(runtime/references)
- float64 value) {
- return raw.Float64Constant(value);
+ Node* MakeConstant(RawMachineAssembler* raw, float64 value) {
+ return raw->Float64Constant(value);
}
- Node* LoadInput(RawMachineAssembler& raw, // NOLINT(runtime/references)
- Node* base, int index) {
- Node* offset = raw.Int32Constant(index * sizeof(CType));
- return raw.Load(MachineTypeForC<CType>(), base, offset);
+ Node* LoadInput(RawMachineAssembler* raw, Node* base, int index) {
+ Node* offset = raw->Int32Constant(index * sizeof(CType));
+ return raw->Load(MachineTypeForC<CType>(), base, offset);
}
- Node* StoreOutput(RawMachineAssembler& raw, // NOLINT(runtime/references)
- Node* value) {
- Node* base = raw.PointerConstant(&output);
- Node* offset = raw.Int32Constant(0);
- return raw.Store(MachineTypeForC<CType>().representation(), base, offset,
- value, kNoWriteBarrier);
+ Node* StoreOutput(RawMachineAssembler* raw, Node* value) {
+ Node* base = raw->PointerConstant(&output);
+ Node* offset = raw->Int32Constant(0);
+ return raw->Store(MachineTypeForC<CType>().representation(), base, offset,
+ value, kNoWriteBarrier);
}
// Computes the next set of inputs by updating the {input} array.
@@ -425,7 +419,7 @@ template <typename CType>
class Computer {
public:
static void Run(CallDescriptor* desc,
- void (*build)(CallDescriptor*, RawMachineAssembler&),
+ void (*build)(CallDescriptor*, RawMachineAssembler*),
CType (*compute)(CallDescriptor*, CType* inputs),
int seed = 1) {
int num_params = ParamCount(desc);
@@ -438,7 +432,7 @@ class Computer {
Zone zone(isolate->allocator(), ZONE_NAME);
Graph graph(&zone);
RawMachineAssembler raw(isolate, &graph, desc);
- build(desc, raw);
+ build(desc, &raw);
inner = CompileGraph("Compute", desc, &graph, raw.ExportForTest());
}
@@ -459,11 +453,11 @@ class Computer {
int input_count = 0;
inputs[input_count++] = target;
for (int i = 0; i < num_params; i++) {
- inputs[input_count++] = io.MakeConstant(raw, io.input[i]);
+ inputs[input_count++] = io.MakeConstant(&raw, io.input[i]);
}
Node* call = raw.CallN(desc, input_count, inputs);
- Node* store = io.StoreOutput(raw, call);
+ Node* store = io.StoreOutput(&raw, call);
USE(store);
raw.Return(raw.Int32Constant(seed));
wrapper = CompileGraph("Compute-wrapper-const", cdesc, &graph,
@@ -494,11 +488,11 @@ class Computer {
int input_count = 0;
inputs[input_count++] = target;
for (int i = 0; i < num_params; i++) {
- inputs[input_count++] = io.LoadInput(raw, base, i);
+ inputs[input_count++] = io.LoadInput(&raw, base, i);
}
Node* call = raw.CallN(desc, input_count, inputs);
- Node* store = io.StoreOutput(raw, call);
+ Node* store = io.StoreOutput(&raw, call);
USE(store);
raw.Return(raw.Int32Constant(seed));
wrapper =
@@ -704,28 +698,25 @@ TEST(Run_CopyTwentyInt32_all_allocatable_pairs) {
}
}
-
template <typename CType>
static void Run_Computation(
- CallDescriptor* desc, void (*build)(CallDescriptor*, RawMachineAssembler&),
+ CallDescriptor* desc, void (*build)(CallDescriptor*, RawMachineAssembler*),
CType (*compute)(CallDescriptor*, CType* inputs), int seed = 1) {
Computer<CType>::Run(desc, build, compute, seed);
}
-
static uint32_t coeff[] = {1, 2, 3, 5, 7, 11, 13, 17, 19, 23, 29,
31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73,
79, 83, 89, 97, 101, 103, 107, 109, 113};
-static void Build_Int32_WeightedSum(
- CallDescriptor* desc,
- RawMachineAssembler& raw) { // NOLINT(runtime/references)
- Node* result = raw.Int32Constant(0);
+static void Build_Int32_WeightedSum(CallDescriptor* desc,
+ RawMachineAssembler* raw) {
+ Node* result = raw->Int32Constant(0);
for (int i = 0; i < ParamCount(desc); i++) {
- Node* term = raw.Int32Mul(raw.Parameter(i), raw.Int32Constant(coeff[i]));
- result = raw.Int32Add(result, term);
+ Node* term = raw->Int32Mul(raw->Parameter(i), raw->Int32Constant(coeff[i]));
+ result = raw->Int32Add(result, term);
}
- raw.Return(result);
+ raw->Return(result);
}
static int32_t Compute_Int32_WeightedSum(CallDescriptor* desc, int32_t* input) {
@@ -774,10 +765,8 @@ TEST_INT32_WEIGHTEDSUM(17)
TEST_INT32_WEIGHTEDSUM(19)
template <int which>
-static void Build_Select(
- CallDescriptor* desc,
- RawMachineAssembler& raw) { // NOLINT(runtime/references)
- raw.Return(raw.Parameter(which));
+static void Build_Select(CallDescriptor* desc, RawMachineAssembler* raw) {
+ raw->Return(raw->Parameter(which));
}
template <typename CType, int which>
@@ -950,9 +939,8 @@ TEST(Float64Select_stack_params_return_reg) {
}
template <typename CType, int which>
-static void Build_Select_With_Call(
- CallDescriptor* desc,
- RawMachineAssembler& raw) { // NOLINT(runtime/references)
+static void Build_Select_With_Call(CallDescriptor* desc,
+ RawMachineAssembler* raw) {
Handle<Code> inner = Handle<Code>::null();
int num_params = ParamCount(desc);
CHECK_LE(num_params, kMaxParamCount);
@@ -971,16 +959,16 @@ static void Build_Select_With_Call(
{
// Build a call to the function that does the select.
- Node* target = raw.HeapConstant(inner);
- Node** inputs = raw.zone()->NewArray<Node*>(num_params + 1);
+ Node* target = raw->HeapConstant(inner);
+ Node** inputs = raw->zone()->NewArray<Node*>(num_params + 1);
int input_count = 0;
inputs[input_count++] = target;
for (int i = 0; i < num_params; i++) {
- inputs[input_count++] = raw.Parameter(i);
+ inputs[input_count++] = raw->Parameter(i);
}
- Node* call = raw.CallN(desc, input_count, inputs);
- raw.Return(call);
+ Node* call = raw->CallN(desc, input_count, inputs);
+ raw->Return(call);
}
}
diff --git a/deps/v8/test/cctest/heap/heap-tester.h b/deps/v8/test/cctest/heap/heap-tester.h
index 6f6cfb46b5..0b47665a78 100644
--- a/deps/v8/test/cctest/heap/heap-tester.h
+++ b/deps/v8/test/cctest/heap/heap-tester.h
@@ -14,6 +14,7 @@
V(CompactionFullAbortedPage) \
V(CompactionPartiallyAbortedPage) \
V(CompactionPartiallyAbortedPageIntraAbortedPointers) \
+ V(CompactionPartiallyAbortedPageWithInvalidatedSlots) \
V(CompactionPartiallyAbortedPageWithStoreBufferEntries) \
V(CompactionSpaceDivideMultiplePages) \
V(CompactionSpaceDivideSinglePage) \
@@ -102,6 +103,7 @@ class HeapTester {
// test-heap.cc
static AllocationResult AllocateByteArrayForTest(Heap* heap, int length,
AllocationType allocation);
+ static bool CodeEnsureLinearAllocationArea(Heap* heap, int size_in_bytes);
// test-mark-compact.cc
static AllocationResult AllocateMapForTest(v8::internal::Isolate* isolate);
diff --git a/deps/v8/test/cctest/heap/heap-utils.cc b/deps/v8/test/cctest/heap/heap-utils.cc
index 8b53dab9c5..3fa2714a61 100644
--- a/deps/v8/test/cctest/heap/heap-utils.cc
+++ b/deps/v8/test/cctest/heap/heap-utils.cc
@@ -98,11 +98,15 @@ std::vector<Handle<FixedArray>> CreatePadding(Heap* heap, int padding_size,
allocate_memory = free_memory;
length = FixedArrayLenFromSize(allocate_memory);
if (length <= 0) {
- // Not enough room to create another fixed array. Let's create a filler.
- if (free_memory > (2 * kTaggedSize)) {
+ // Not enough room to create another FixedArray, so create a filler.
+ if (allocation == i::AllocationType::kOld) {
heap->CreateFillerObjectAt(
*heap->old_space()->allocation_top_address(), free_memory,
ClearRecordedSlots::kNo);
+ } else {
+ heap->CreateFillerObjectAt(
+ *heap->new_space()->allocation_top_address(), free_memory,
+ ClearRecordedSlots::kNo);
}
break;
}
@@ -127,8 +131,9 @@ void AllocateAllButNBytes(v8::internal::NewSpace* space, int extra_bytes,
if (new_linear_size == 0) return;
std::vector<Handle<FixedArray>> handles = heap::CreatePadding(
space->heap(), new_linear_size, i::AllocationType::kYoung);
- if (out_handles != nullptr)
+ if (out_handles != nullptr) {
out_handles->insert(out_handles->end(), handles.begin(), handles.end());
+ }
}
void FillCurrentPage(v8::internal::NewSpace* space,
@@ -144,8 +149,9 @@ bool FillUpOnePage(v8::internal::NewSpace* space,
if (space_remaining == 0) return false;
std::vector<Handle<FixedArray>> handles = heap::CreatePadding(
space->heap(), space_remaining, i::AllocationType::kYoung);
- if (out_handles != nullptr)
+ if (out_handles != nullptr) {
out_handles->insert(out_handles->end(), handles.begin(), handles.end());
+ }
return true;
}
diff --git a/deps/v8/test/cctest/heap/test-array-buffer-tracker.cc b/deps/v8/test/cctest/heap/test-array-buffer-tracker.cc
index b4122c9619..66354cab7f 100644
--- a/deps/v8/test/cctest/heap/test-array-buffer-tracker.cc
+++ b/deps/v8/test/cctest/heap/test-array-buffer-tracker.cc
@@ -193,8 +193,8 @@ TEST(ArrayBuffer_UnregisterDuringSweep) {
// barriers and proper synchronization this will trigger a data race on
// TSAN.
v8::ArrayBuffer::Contents contents = ab->Externalize();
- heap->isolate()->array_buffer_allocator()->Free(contents.Data(),
- contents.ByteLength());
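+    // Free through the deleter recorded in the Contents instead of assuming
+    // the isolate's array buffer allocator.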
+ contents.Deleter()(contents.Data(), contents.ByteLength(),
+ contents.DeleterData());
}
}
diff --git a/deps/v8/test/cctest/heap/test-compaction.cc b/deps/v8/test/cctest/heap/test-compaction.cc
index 35bd9225ea..96eca0f5ae 100644
--- a/deps/v8/test/cctest/heap/test-compaction.cc
+++ b/deps/v8/test/cctest/heap/test-compaction.cc
@@ -6,6 +6,7 @@
#include "src/heap/factory.h"
#include "src/heap/heap-inl.h"
#include "src/heap/mark-compact.h"
+#include "src/heap/remembered-set.h"
#include "src/objects/objects-inl.h"
#include "test/cctest/cctest.h"
#include "test/cctest/heap/heap-tester.h"
@@ -31,9 +32,8 @@ void CheckInvariantsOfAbortedPage(Page* page) {
CHECK(!page->IsFlagSet(Page::COMPACTION_WAS_ABORTED));
}
-void CheckAllObjectsOnPage(
- std::vector<Handle<FixedArray>>& handles, // NOLINT(runtime/references)
- Page* page) {
+void CheckAllObjectsOnPage(const std::vector<Handle<FixedArray>>& handles,
+ Page* page) {
for (Handle<FixedArray> fixed_array : handles) {
CHECK(Page::FromHeapObject(*fixed_array) == page);
}
@@ -85,6 +85,18 @@ HEAP_TEST(CompactionFullAbortedPage) {
}
}
+namespace {
+
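+// Returns an object size such that {objects_per_page} objects of that size
+// fill a page's allocatable area, rounded down to a multiple of kTaggedSize
+// and capped at kMaxRegularHeapObjectSize.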
+int GetObjectSize(int objects_per_page) {
+ int allocatable =
+ static_cast<int>(MemoryChunkLayout::AllocatableMemoryInDataPage());
+ // Make sure that object_size is a multiple of kTaggedSize.
+ int object_size =
+ ((allocatable / kTaggedSize) / objects_per_page) * kTaggedSize;
+ return Min(kMaxRegularHeapObjectSize, object_size);
+}
+
+} // namespace
HEAP_TEST(CompactionPartiallyAbortedPage) {
if (FLAG_never_compact) return;
@@ -97,10 +109,7 @@ HEAP_TEST(CompactionPartiallyAbortedPage) {
FLAG_manual_evacuation_candidates_selection = true;
const int objects_per_page = 10;
- const int object_size =
- Min(kMaxRegularHeapObjectSize,
- static_cast<int>(MemoryChunkLayout::AllocatableMemoryInDataPage()) /
- objects_per_page);
+ const int object_size = GetObjectSize(objects_per_page);
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
@@ -163,6 +172,81 @@ HEAP_TEST(CompactionPartiallyAbortedPage) {
}
}
+HEAP_TEST(CompactionPartiallyAbortedPageWithInvalidatedSlots) {
+ if (FLAG_never_compact) return;
+  // Test partially evacuating a page that contains recorded slots and
+  // invalidated objects.
+
+ // Disable concurrent sweeping to ensure memory is in an expected state, i.e.,
+  // we can reach the state of a half-aborted page.
+ ManualGCScope manual_gc_scope;
+ FLAG_manual_evacuation_candidates_selection = true;
+
+ const int objects_per_page = 10;
+ const int object_size = GetObjectSize(objects_per_page);
+
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ Heap* heap = isolate->heap();
+ {
+ HandleScope scope1(isolate);
+
+ heap::SealCurrentObjects(heap);
+
+ {
+ HandleScope scope2(isolate);
+ // Fill another page with objects of size {object_size} (last one is
+ // properly adjusted).
+ CHECK(heap->old_space()->Expand());
+ auto compaction_page_handles = heap::CreatePadding(
+ heap,
+ static_cast<int>(MemoryChunkLayout::AllocatableMemoryInDataPage()),
+ AllocationType::kOld, object_size);
+ Page* to_be_aborted_page =
+ Page::FromHeapObject(*compaction_page_handles.front());
+ for (Handle<FixedArray> object : compaction_page_handles) {
+ CHECK_EQ(Page::FromHeapObject(*object), to_be_aborted_page);
+
+ for (int i = 0; i < object->length(); i++) {
+ RememberedSet<OLD_TO_NEW>::Insert<AccessMode::ATOMIC>(
+ to_be_aborted_page, object->RawFieldOfElementAt(i).address());
+ }
+ }
+ // First object is going to be evacuated.
+ to_be_aborted_page->RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(
+ *compaction_page_handles.front());
+ // Last object is NOT going to be evacuated.
+    // This happens because not all objects fit on the only other page in the
+    // old space and the GC isn't allowed to allocate another page.
+ to_be_aborted_page->RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(
+ *compaction_page_handles.back());
+ to_be_aborted_page->SetFlag(
+ MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
+
+ {
+ // Add another page that is filled with {num_objects} objects of size
+ // {object_size}.
+ HandleScope scope3(isolate);
+ CHECK(heap->old_space()->Expand());
+ const int num_objects = 3;
+ std::vector<Handle<FixedArray>> page_to_fill_handles =
+ heap::CreatePadding(heap, object_size * num_objects,
+ AllocationType::kOld, object_size);
+ Page* page_to_fill =
+ Page::FromAddress(page_to_fill_handles.front()->address());
+
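+      // Force OOM so the evacuation of {to_be_aborted_page} aborts partway:
+      // the first object moves to {page_to_fill} while the last one stays
+      // behind (verified by the CHECK_EQs below).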
+ heap->set_force_oom(true);
+ CcTest::CollectAllGarbage();
+ heap->mark_compact_collector()->EnsureSweepingCompleted();
+
+ CHECK_EQ(Page::FromHeapObject(*compaction_page_handles.front()),
+ page_to_fill);
+ CHECK_EQ(Page::FromHeapObject(*compaction_page_handles.back()),
+ to_be_aborted_page);
+ }
+ }
+ }
+}
HEAP_TEST(CompactionPartiallyAbortedPageIntraAbortedPointers) {
if (FLAG_never_compact) return;
@@ -177,10 +261,7 @@ HEAP_TEST(CompactionPartiallyAbortedPageIntraAbortedPointers) {
FLAG_manual_evacuation_candidates_selection = true;
const int objects_per_page = 10;
- const int object_size =
- Min(kMaxRegularHeapObjectSize,
- static_cast<int>(MemoryChunkLayout::AllocatableMemoryInDataPage()) /
- objects_per_page);
+ const int object_size = GetObjectSize(objects_per_page);
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
@@ -271,10 +352,7 @@ HEAP_TEST(CompactionPartiallyAbortedPageWithStoreBufferEntries) {
FLAG_manual_evacuation_candidates_selection = true;
const int objects_per_page = 10;
- const int object_size =
- Min(kMaxRegularHeapObjectSize,
- static_cast<int>(MemoryChunkLayout::AllocatableMemoryInDataPage()) /
- objects_per_page);
+ const int object_size = GetObjectSize(objects_per_page);
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
diff --git a/deps/v8/test/cctest/heap/test-embedder-tracing.cc b/deps/v8/test/cctest/heap/test-embedder-tracing.cc
index 28553266ff..8ff4acc05b 100644
--- a/deps/v8/test/cctest/heap/test-embedder-tracing.cc
+++ b/deps/v8/test/cctest/heap/test-embedder-tracing.cc
@@ -17,12 +17,6 @@
namespace v8 {
-// See test below: TracedGlobalNoDestructor.
-template <>
-struct TracedGlobalTrait<v8::TracedGlobal<v8::Value>> {
- static constexpr bool kRequiresExplicitDestruction = false;
-};
-
namespace internal {
namespace heap {
@@ -68,7 +62,7 @@ class TestEmbedderHeapTracer final : public v8::EmbedderHeapTracer {
bool AdvanceTracing(double deadline_in_ms) final {
for (auto global : to_register_with_v8_) {
- RegisterEmbedderReference(global->As<v8::Value>());
+ RegisterEmbedderReference(global->As<v8::Data>());
}
to_register_with_v8_.clear();
return true;
@@ -293,13 +287,14 @@ void ConstructJSObject(v8::Isolate* isolate, v8::Local<v8::Context> context,
CHECK(!global->IsEmpty());
}
+template <typename T>
void ConstructJSApiObject(v8::Isolate* isolate, v8::Local<v8::Context> context,
- v8::TracedGlobal<v8::Object>* global) {
+ T* global) {
v8::HandleScope scope(isolate);
v8::Local<v8::Object> object(
ConstructTraceableJSApiObject(context, nullptr, nullptr));
CHECK(!object.IsEmpty());
- *global = v8::TracedGlobal<v8::Object>(isolate, object);
+ *global = T(isolate, object);
CHECK(!global->IsEmpty());
}
@@ -360,10 +355,6 @@ TEST(TracedGlobalCopyWithDestructor) {
v8::HandleScope scope(isolate);
i::GlobalHandles* global_handles = CcTest::i_isolate()->global_handles();
- static_assert(TracedGlobalTrait<
- v8::TracedGlobal<v8::Object>>::kRequiresExplicitDestruction,
- "destructor expected");
-
const size_t initial_count = global_handles->handles_count();
v8::TracedGlobal<v8::Object> global1;
{
@@ -401,18 +392,14 @@ TEST(TracedGlobalCopyNoDestructor) {
v8::HandleScope scope(isolate);
i::GlobalHandles* global_handles = CcTest::i_isolate()->global_handles();
- static_assert(!TracedGlobalTrait<
- v8::TracedGlobal<v8::Value>>::kRequiresExplicitDestruction,
- "no destructor expected");
-
const size_t initial_count = global_handles->handles_count();
- v8::TracedGlobal<v8::Value> global1;
+ v8::TracedReference<v8::Value> global1;
{
v8::HandleScope scope(isolate);
global1.Reset(isolate, v8::Object::New(isolate));
}
- v8::TracedGlobal<v8::Value> global2(global1);
- v8::TracedGlobal<v8::Value> global3;
+ v8::TracedReference<v8::Value> global2(global1);
+ v8::TracedReference<v8::Value> global3;
global3 = global2;
CHECK_EQ(initial_count + 3, global_handles->handles_count());
CHECK(!global1.IsEmpty());
@@ -500,7 +487,7 @@ TEST(TracedGlobalToUnmodifiedJSApiObjectSurvivesScavengePerDefault) {
heap::TemporaryEmbedderHeapTracerScope tracer_scope(isolate, &tracer);
tracer.ConsiderTracedGlobalAsRoot(true);
TracedGlobalTest(
- CcTest::isolate(), ConstructJSApiObject,
+ CcTest::isolate(), ConstructJSApiObject<TracedGlobal<v8::Object>>,
[](const TracedGlobal<v8::Object>& global) {}, InvokeScavenge,
SurvivalMode::kSurvives);
}
@@ -513,7 +500,7 @@ TEST(TracedGlobalToUnmodifiedJSApiObjectDiesOnScavengeWhenExcludedFromRoots) {
heap::TemporaryEmbedderHeapTracerScope tracer_scope(isolate, &tracer);
tracer.ConsiderTracedGlobalAsRoot(false);
TracedGlobalTest(
- CcTest::isolate(), ConstructJSApiObject,
+ CcTest::isolate(), ConstructJSApiObject<TracedGlobal<v8::Object>>,
[](const TracedGlobal<v8::Object>& global) {}, InvokeScavenge,
SurvivalMode::kDies);
}
@@ -671,9 +658,6 @@ TEST(TracedGlobalWithDestructor) {
CHECK(!traced->IsEmpty());
CHECK_EQ(initial_count + 1, global_handles->handles_count());
}
- static_assert(TracedGlobalTrait<
- v8::TracedGlobal<v8::Object>>::kRequiresExplicitDestruction,
- "destructor expected");
delete traced;
CHECK_EQ(initial_count, global_handles->handles_count());
// GC should not need to clear the handle.
@@ -691,21 +675,18 @@ TEST(TracedGlobalNoDestructor) {
i::GlobalHandles* global_handles = CcTest::i_isolate()->global_handles();
const size_t initial_count = global_handles->handles_count();
- char* memory = new char[sizeof(v8::TracedGlobal<v8::Value>)];
- auto* traced = new (memory) v8::TracedGlobal<v8::Value>();
+ char* memory = new char[sizeof(v8::TracedReference<v8::Value>)];
+ auto* traced = new (memory) v8::TracedReference<v8::Value>();
{
v8::HandleScope scope(isolate);
v8::Local<v8::Value> object(ConstructTraceableJSApiObject(
isolate->GetCurrentContext(), nullptr, nullptr));
CHECK(traced->IsEmpty());
- *traced = v8::TracedGlobal<v8::Value>(isolate, object);
+ *traced = v8::TracedReference<v8::Value>(isolate, object);
CHECK(!traced->IsEmpty());
CHECK_EQ(initial_count + 1, global_handles->handles_count());
}
- static_assert(!TracedGlobalTrait<
- v8::TracedGlobal<v8::Value>>::kRequiresExplicitDestruction,
- "no destructor expected");
- traced->~TracedGlobal<v8::Value>();
+ traced->~TracedReference<v8::Value>();
CHECK_EQ(initial_count + 1, global_handles->handles_count());
// GC should clear the handle.
heap::InvokeMarkSweep();
@@ -759,18 +740,19 @@ class EmbedderHeapTracerNoDestructorNonTracingClearing final
uint16_t class_id_to_optimize)
: class_id_to_optimize_(class_id_to_optimize) {}
- bool IsRootForNonTracingGC(const v8::TracedGlobal<v8::Value>& handle) final {
+ bool IsRootForNonTracingGC(
+ const v8::TracedReference<v8::Value>& handle) final {
return handle.WrapperClassId() != class_id_to_optimize_;
}
void ResetHandleInNonTracingGC(
- const v8::TracedGlobal<v8::Value>& handle) final {
+ const v8::TracedReference<v8::Value>& handle) final {
if (handle.WrapperClassId() != class_id_to_optimize_) return;
// Convention (for test): Objects that are optimized have their first field
// set as a back pointer.
- TracedGlobal<v8::Value>* original_handle =
- reinterpret_cast<TracedGlobal<v8::Value>*>(
+ TracedReferenceBase<v8::Value>* original_handle =
+ reinterpret_cast<TracedReferenceBase<v8::Value>*>(
v8::Object::GetAlignedPointerFromInternalField(
handle.As<v8::Object>(), 0));
original_handle->Reset();
@@ -781,23 +763,23 @@ class EmbedderHeapTracerNoDestructorNonTracingClearing final
};
template <typename T>
-void SetupOptimizedAndNonOptimizedHandle(
- v8::Isolate* isolate, uint16_t optimized_class_id,
- v8::TracedGlobal<T>* optimized_handle,
- v8::TracedGlobal<T>* non_optimized_handle) {
+void SetupOptimizedAndNonOptimizedHandle(v8::Isolate* isolate,
+ uint16_t optimized_class_id,
+ T* optimized_handle,
+ T* non_optimized_handle) {
v8::HandleScope scope(isolate);
v8::Local<v8::Object> optimized_object(ConstructTraceableJSApiObject(
isolate->GetCurrentContext(), optimized_handle, nullptr));
CHECK(optimized_handle->IsEmpty());
- *optimized_handle = v8::TracedGlobal<T>(isolate, optimized_object);
+ *optimized_handle = T(isolate, optimized_object);
CHECK(!optimized_handle->IsEmpty());
optimized_handle->SetWrapperClassId(optimized_class_id);
v8::Local<v8::Object> non_optimized_object(ConstructTraceableJSApiObject(
isolate->GetCurrentContext(), nullptr, nullptr));
CHECK(non_optimized_handle->IsEmpty());
- *non_optimized_handle = v8::TracedGlobal<T>(isolate, non_optimized_object);
+ *non_optimized_handle = T(isolate, non_optimized_object);
CHECK(!non_optimized_handle->IsEmpty());
}
@@ -813,9 +795,6 @@ TEST(TracedGlobalDestructorReclaimedOnScavenge) {
heap::TemporaryEmbedderHeapTracerScope tracer_scope(isolate, &tracer);
i::GlobalHandles* global_handles = CcTest::i_isolate()->global_handles();
- static_assert(TracedGlobalTrait<
- v8::TracedGlobal<v8::Object>>::kRequiresExplicitDestruction,
- "destructor expected");
const size_t initial_count = global_handles->handles_count();
auto* optimized_handle = new v8::TracedGlobal<v8::Object>();
auto* non_optimized_handle = new v8::TracedGlobal<v8::Object>();
@@ -841,12 +820,9 @@ TEST(TracedGlobalNoDestructorReclaimedOnScavenge) {
heap::TemporaryEmbedderHeapTracerScope tracer_scope(isolate, &tracer);
i::GlobalHandles* global_handles = CcTest::i_isolate()->global_handles();
- static_assert(!TracedGlobalTrait<
- v8::TracedGlobal<v8::Value>>::kRequiresExplicitDestruction,
- "no destructor expected");
const size_t initial_count = global_handles->handles_count();
- auto* optimized_handle = new v8::TracedGlobal<v8::Value>();
- auto* non_optimized_handle = new v8::TracedGlobal<v8::Value>();
+ auto* optimized_handle = new v8::TracedReference<v8::Value>();
+ auto* non_optimized_handle = new v8::TracedReference<v8::Value>();
SetupOptimizedAndNonOptimizedHandle(isolate, kClassIdToOptimize,
optimized_handle, non_optimized_handle);
CHECK_EQ(initial_count + 2, global_handles->handles_count());
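The substitution running through this file is mechanical: v8::TracedGlobal<T>,
whose C++ destructor releases the underlying global handle, becomes
v8::TracedReference<T>, which requires no destructor and may instead be
reclaimed by a (non-tracing) GC. A minimal sketch of the two flavors as this
diff uses them, assuming a live isolate and a local object:

    void TracedHandleSketch(v8::Isolate* isolate,
                            v8::Local<v8::Object> object) {
      // Released when the C++ object is destroyed.
      v8::TracedGlobal<v8::Object> with_destructor(isolate, object);
      // May be dropped without running a destructor; the GC is allowed to
      // reset it, which is what the *NoDestructor tests above exercise.
      v8::TracedReference<v8::Object> without_destructor(isolate, object);
    }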
diff --git a/deps/v8/test/cctest/heap/test-heap.cc b/deps/v8/test/cctest/heap/test-heap.cc
index fd17c0f063..03f98c6453 100644
--- a/deps/v8/test/cctest/heap/test-heap.cc
+++ b/deps/v8/test/cctest/heap/test-heap.cc
@@ -1791,7 +1791,7 @@ TEST(HeapNumberAlignment) {
AlignOldSpace(required_alignment, offset);
Handle<Object> number_old =
- factory->NewNumber(1.000321, AllocationType::kOld);
+ factory->NewNumber<AllocationType::kOld>(1.000321);
CHECK(number_old->IsHeapNumber());
CHECK(heap->InOldSpace(*number_old));
CHECK_EQ(0, Heap::GetFillToAlign(HeapObject::cast(*number_old).address(),
@@ -3663,9 +3663,58 @@ TEST(DeferredHandles) {
DeferredHandleScope deferred(isolate);
DummyVisitor visitor;
isolate->handle_scope_implementer()->Iterate(&visitor);
- delete deferred.Detach();
+ deferred.Detach();
}
+static void TestFillersFromDeferredHandles(bool promote) {
+ // We assume that the fillers can only arise when left-trimming arrays.
+ Isolate* isolate = CcTest::i_isolate();
+ Heap* heap = isolate->heap();
+ v8::HandleScope scope(reinterpret_cast<v8::Isolate*>(isolate));
+
+ const size_t n = 10;
+ Handle<FixedArray> array = isolate->factory()->NewFixedArray(n);
+
+ if (promote) {
+ // Age the array so it's ready for promotion on next GC.
+ CcTest::CollectGarbage(NEW_SPACE);
+ }
+ CHECK(Heap::InYoungGeneration(*array));
+
+ DeferredHandleScope deferred_scope(isolate);
+
+ // Trim the array three times to different sizes so all kinds of fillers are
+ // created and tracked by the deferred handles.
+ Handle<FixedArrayBase> filler_1 = Handle<FixedArrayBase>(*array, isolate);
+ Handle<FixedArrayBase> filler_2 =
+ Handle<FixedArrayBase>(heap->LeftTrimFixedArray(*filler_1, 1), isolate);
+ Handle<FixedArrayBase> filler_3 =
+ Handle<FixedArrayBase>(heap->LeftTrimFixedArray(*filler_2, 2), isolate);
+ Handle<FixedArrayBase> tail =
+ Handle<FixedArrayBase>(heap->LeftTrimFixedArray(*filler_3, 3), isolate);
+
+ std::unique_ptr<DeferredHandles> deferred_handles(deferred_scope.Detach());
+
+ // GC should retain the trimmed array but drop all three fillers.
+ CcTest::CollectGarbage(NEW_SPACE);
+ if (promote) {
+ CHECK(heap->InOldSpace(*tail));
+ } else {
+ CHECK(Heap::InYoungGeneration(*tail));
+ }
+ CHECK_EQ(n - 6, (*tail).length());
+ CHECK(!filler_1->IsHeapObject());
+ CHECK(!filler_2->IsHeapObject());
+ CHECK(!filler_3->IsHeapObject());
+}
+
+TEST(DoNotEvacuateFillersFromDeferredHandles) {
+ TestFillersFromDeferredHandles(false /*promote*/);
+}
+
+TEST(DoNotPromoteFillersFromDeferredHandles) {
+ TestFillersFromDeferredHandles(true /*promote*/);
+}
TEST(IncrementalMarkingStepMakesBigProgressWithLargeObjects) {
if (!FLAG_incremental_marking) return;
@@ -5269,34 +5318,6 @@ TEST(ScriptIterator) {
CHECK_EQ(0, script_count);
}
-
-TEST(SharedFunctionInfoIterator) {
- CcTest::InitializeVM();
- v8::HandleScope scope(CcTest::isolate());
- Isolate* isolate = CcTest::i_isolate();
- Heap* heap = CcTest::heap();
- LocalContext context;
-
- CcTest::CollectAllGarbage();
- CcTest::CollectAllGarbage();
-
- int sfi_count = 0;
- {
- HeapObjectIterator it(heap);
- for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
- if (!obj.IsSharedFunctionInfo()) continue;
- sfi_count++;
- }
- }
-
- {
- SharedFunctionInfo::GlobalIterator iterator(isolate);
- while (!iterator.Next().is_null()) sfi_count--;
- }
-
- CHECK_EQ(0, sfi_count);
-}
-
// This is the same as Factory::NewByteArray, except it doesn't retry on
// allocation failure.
AllocationResult HeapTester::AllocateByteArrayForTest(
@@ -5316,6 +5337,11 @@ AllocationResult HeapTester::AllocateByteArrayForTest(
return result;
}
+bool HeapTester::CodeEnsureLinearAllocationArea(Heap* heap, int size_in_bytes) {
+ return heap->code_space()->EnsureLinearAllocationArea(
+ size_in_bytes, AllocationOrigin::kRuntime);
+}
+
HEAP_TEST(Regress587004) {
ManualGCScope manual_gc_scope;
#ifdef VERIFY_HEAP
@@ -5995,6 +6021,173 @@ TEST(UncommitUnusedLargeObjectMemory) {
CHECK_EQ(shrinked_size, chunk->CommittedPhysicalMemory());
}
+template <RememberedSetType direction>
+static size_t GetRememberedSetSize(HeapObject obj) {
+ size_t count = 0;
+ auto chunk = MemoryChunk::FromHeapObject(obj);
+ RememberedSet<direction>::Iterate(
+ chunk,
+ [&count](MaybeObjectSlot slot) {
+ count++;
+ return KEEP_SLOT;
+ },
+ SlotSet::KEEP_EMPTY_BUCKETS);
+ return count;
+}
+
+TEST(RememberedSet_InsertOnWriteBarrier) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ Factory* factory = isolate->factory();
+ Heap* heap = isolate->heap();
+ heap::SealCurrentObjects(heap);
+ HandleScope scope(isolate);
+
+ // Allocate an object in old space.
+ Handle<FixedArray> arr = factory->NewFixedArray(3, AllocationType::kOld);
+
+ // Add into 'arr' references to young objects.
+ {
+ HandleScope scope_inner(isolate);
+ Handle<Object> number = factory->NewHeapNumber(42);
+ arr->set(0, *number);
+ arr->set(1, *number);
+ arr->set(2, *number);
+ Handle<Object> number_other = factory->NewHeapNumber(24);
+ arr->set(2, *number_other);
+ }
+ // Remembered sets track *slots* of pages with cross-generational pointers,
+ // so the three slots above must each have been recorded exactly once (slot
+ // 2 was stored to twice but is still a single slot).
+ CHECK_EQ(3, GetRememberedSetSize<OLD_TO_NEW>(*arr));
+}
+
+TEST(RememberedSet_InsertInLargePage) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ Factory* factory = isolate->factory();
+ Heap* heap = isolate->heap();
+ heap::SealCurrentObjects(heap);
+ HandleScope scope(isolate);
+
+ // Allocate an object in the large object space.
+ const int count = Max(FixedArray::kMaxRegularLength + 1, 128 * KB);
+ Handle<FixedArray> arr = factory->NewFixedArray(count, AllocationType::kOld);
+ CHECK(heap->lo_space()->Contains(*arr));
+ CHECK_EQ(0, GetRememberedSetSize<OLD_TO_NEW>(*arr));
+
+ // Create OLD_TO_NEW references from the large object so that the
+ // corresponding slots end up in different SlotSets.
+ {
+ HandleScope short_lived(isolate);
+ Handle<Object> number = factory->NewHeapNumber(42);
+ arr->set(0, *number);
+ arr->set(count - 1, *number);
+ }
+ CHECK_EQ(2, GetRememberedSetSize<OLD_TO_NEW>(*arr));
+}
+
+TEST(RememberedSet_InsertOnPromotingObjectToOld) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ Factory* factory = isolate->factory();
+ Heap* heap = isolate->heap();
+ heap::SealCurrentObjects(heap);
+ HandleScope scope(isolate);
+
+ // Create a young object and age it one generation inside the new space.
+ Handle<FixedArray> arr = factory->NewFixedArray(1);
+ CcTest::CollectGarbage(i::NEW_SPACE);
+ CHECK(Heap::InYoungGeneration(*arr));
+
+ // Add into 'arr' a reference to an object one generation younger.
+ {
+ HandleScope scope_inner(isolate);
+ Handle<Object> number = factory->NewHeapNumber(42);
+ arr->set(0, *number);
+ }
+
+ // Promote 'arr' into old space. Its element is still in new space, so the
+ // old-to-new reference is inserted into the remembered set during GC.
+ CcTest::CollectGarbage(i::NEW_SPACE);
+
+ CHECK(heap->InOldSpace(*arr));
+ CHECK_EQ(1, GetRememberedSetSize<OLD_TO_NEW>(*arr));
+}
+
+TEST(RememberedSet_RemoveStaleOnScavenge) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ Factory* factory = isolate->factory();
+ Heap* heap = isolate->heap();
+ heap::SealCurrentObjects(heap);
+ HandleScope scope(isolate);
+
+ // Allocate an object in old space and add into it references to young.
+ Handle<FixedArray> arr = factory->NewFixedArray(3, AllocationType::kOld);
+ {
+ HandleScope scope_inner(isolate);
+ Handle<Object> number = factory->NewHeapNumber(42);
+ arr->set(0, *number); // will be trimmed away
+ arr->set(1, *number); // will be replaced with #undefined
+ arr->set(2, *number); // will be promoted into old
+ }
+ CHECK_EQ(3, GetRememberedSetSize<OLD_TO_NEW>(*arr));
+
+ // Run the scavenger once so the young object is ready for promotion on the
+ // next pass.
+ CcTest::CollectGarbage(i::NEW_SPACE);
+ arr->set(1, ReadOnlyRoots(CcTest::heap()).undefined_value());
+ Handle<FixedArrayBase> tail =
+ Handle<FixedArrayBase>(heap->LeftTrimFixedArray(*arr, 1), isolate);
+
+ // None of the actions above should have updated the remembered set.
+ CHECK_EQ(3, GetRememberedSetSize<OLD_TO_NEW>(*tail));
+
+ // Run GC to promote the remaining young object and fix up the stale
+ // entries in the remembered set.
+ CcTest::CollectGarbage(i::NEW_SPACE);
+ CHECK_EQ(0, GetRememberedSetSize<OLD_TO_NEW>(*tail));
+}
+
+// The OLD_TO_OLD remembered set is created temporarily by the GC and is
+// cleared at the end of the pass. There is no way to observe it, so the test
+// only checks that compaction happened and otherwise relies on the code's
+// self-validation.
+TEST(RememberedSet_OldToOld) {
+ if (FLAG_stress_incremental_marking) return;
+
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ Factory* factory = isolate->factory();
+ Heap* heap = isolate->heap();
+ heap::SealCurrentObjects(heap);
+ HandleScope scope(isolate);
+
+ Handle<FixedArray> arr = factory->NewFixedArray(10, AllocationType::kOld);
+ {
+ HandleScope short_lived(isolate);
+ factory->NewFixedArray(100, AllocationType::kOld);
+ }
+ Handle<Object> ref = factory->NewFixedArray(100, AllocationType::kOld);
+ arr->set(0, *ref);
+
+ // To force compaction of the old space, fill it with garbage and start a new
+ // page (so that the page with 'arr' becomes subject to compaction).
+ {
+ HandleScope short_lived(isolate);
+ heap::SimulateFullSpace(heap->old_space());
+ factory->NewFixedArray(100, AllocationType::kOld);
+ }
+
+ FLAG_manual_evacuation_candidates_selection = true;
+ heap::ForceEvacuationCandidate(Page::FromHeapObject(*arr));
+ const auto prev_location = *arr;
+
+ // This GC pass will evacuate the page with 'arr'/'ref', so it will have to
+ // create an OLD_TO_OLD remembered set to track the reference.
+ CcTest::CollectAllGarbage();
+ CHECK_NE(prev_location, *arr);
+}
+
TEST(RememberedSetRemoveRange) {
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
@@ -6016,59 +6209,64 @@ TEST(RememberedSetRemoveRange) {
slots[chunk->area_end() - kTaggedSize] = true;
for (auto x : slots) {
- RememberedSet<OLD_TO_NEW>::Insert(chunk, x.first);
+ RememberedSet<OLD_TO_NEW>::Insert<AccessMode::ATOMIC>(chunk, x.first);
}
- RememberedSet<OLD_TO_NEW>::Iterate(chunk,
- [&slots](MaybeObjectSlot slot) {
- CHECK(slots[slot.address()]);
- return KEEP_SLOT;
- },
- SlotSet::PREFREE_EMPTY_BUCKETS);
+ RememberedSet<OLD_TO_NEW>::Iterate(
+ chunk,
+ [&slots](MaybeObjectSlot slot) {
+ CHECK(slots[slot.address()]);
+ return KEEP_SLOT;
+ },
+ SlotSet::FREE_EMPTY_BUCKETS);
RememberedSet<OLD_TO_NEW>::RemoveRange(chunk, start, start + kTaggedSize,
SlotSet::FREE_EMPTY_BUCKETS);
slots[start] = false;
- RememberedSet<OLD_TO_NEW>::Iterate(chunk,
- [&slots](MaybeObjectSlot slot) {
- CHECK(slots[slot.address()]);
- return KEEP_SLOT;
- },
- SlotSet::PREFREE_EMPTY_BUCKETS);
+ RememberedSet<OLD_TO_NEW>::Iterate(
+ chunk,
+ [&slots](MaybeObjectSlot slot) {
+ CHECK(slots[slot.address()]);
+ return KEEP_SLOT;
+ },
+ SlotSet::FREE_EMPTY_BUCKETS);
RememberedSet<OLD_TO_NEW>::RemoveRange(chunk, start + kTaggedSize,
start + Page::kPageSize,
SlotSet::FREE_EMPTY_BUCKETS);
slots[start + kTaggedSize] = false;
slots[start + Page::kPageSize - kTaggedSize] = false;
- RememberedSet<OLD_TO_NEW>::Iterate(chunk,
- [&slots](MaybeObjectSlot slot) {
- CHECK(slots[slot.address()]);
- return KEEP_SLOT;
- },
- SlotSet::PREFREE_EMPTY_BUCKETS);
+ RememberedSet<OLD_TO_NEW>::Iterate(
+ chunk,
+ [&slots](MaybeObjectSlot slot) {
+ CHECK(slots[slot.address()]);
+ return KEEP_SLOT;
+ },
+ SlotSet::FREE_EMPTY_BUCKETS);
RememberedSet<OLD_TO_NEW>::RemoveRange(chunk, start,
start + Page::kPageSize + kTaggedSize,
SlotSet::FREE_EMPTY_BUCKETS);
slots[start + Page::kPageSize] = false;
- RememberedSet<OLD_TO_NEW>::Iterate(chunk,
- [&slots](MaybeObjectSlot slot) {
- CHECK(slots[slot.address()]);
- return KEEP_SLOT;
- },
- SlotSet::PREFREE_EMPTY_BUCKETS);
+ RememberedSet<OLD_TO_NEW>::Iterate(
+ chunk,
+ [&slots](MaybeObjectSlot slot) {
+ CHECK(slots[slot.address()]);
+ return KEEP_SLOT;
+ },
+ SlotSet::FREE_EMPTY_BUCKETS);
RememberedSet<OLD_TO_NEW>::RemoveRange(chunk, chunk->area_end() - kTaggedSize,
chunk->area_end(),
SlotSet::FREE_EMPTY_BUCKETS);
slots[chunk->area_end() - kTaggedSize] = false;
- RememberedSet<OLD_TO_NEW>::Iterate(chunk,
- [&slots](MaybeObjectSlot slot) {
- CHECK(slots[slot.address()]);
- return KEEP_SLOT;
- },
- SlotSet::PREFREE_EMPTY_BUCKETS);
+ RememberedSet<OLD_TO_NEW>::Iterate(
+ chunk,
+ [&slots](MaybeObjectSlot slot) {
+ CHECK(slots[slot.address()]);
+ return KEEP_SLOT;
+ },
+ SlotSet::FREE_EMPTY_BUCKETS);
}
HEAP_TEST(Regress670675) {
@@ -6164,53 +6362,6 @@ HEAP_TEST(Regress5831) {
CHECK(chunk->NeverEvacuate());
}
-TEST(Regress6800) {
- CcTest::InitializeVM();
- Isolate* isolate = CcTest::i_isolate();
- HandleScope handle_scope(isolate);
-
- const int kRootLength = 1000;
- Handle<FixedArray> root =
- isolate->factory()->NewFixedArray(kRootLength, AllocationType::kOld);
- {
- HandleScope inner_scope(isolate);
- Handle<FixedArray> new_space_array = isolate->factory()->NewFixedArray(1);
- for (int i = 0; i < kRootLength; i++) {
- root->set(i, *new_space_array);
- }
- for (int i = 0; i < kRootLength; i++) {
- root->set(i, ReadOnlyRoots(CcTest::heap()).undefined_value());
- }
- }
- CcTest::CollectGarbage(NEW_SPACE);
- CHECK_EQ(0, RememberedSet<OLD_TO_NEW>::NumberOfPreFreedEmptyBuckets(
- MemoryChunk::FromHeapObject(*root)));
-}
-
-TEST(Regress6800LargeObject) {
- CcTest::InitializeVM();
- Isolate* isolate = CcTest::i_isolate();
- HandleScope handle_scope(isolate);
-
- const int kRootLength = i::kMaxRegularHeapObjectSize / kTaggedSize;
- Handle<FixedArray> root =
- isolate->factory()->NewFixedArray(kRootLength, AllocationType::kOld);
- CcTest::heap()->lo_space()->Contains(*root);
- {
- HandleScope inner_scope(isolate);
- Handle<FixedArray> new_space_array = isolate->factory()->NewFixedArray(1);
- for (int i = 0; i < kRootLength; i++) {
- root->set(i, *new_space_array);
- }
- for (int i = 0; i < kRootLength; i++) {
- root->set(i, ReadOnlyRoots(CcTest::heap()).undefined_value());
- }
- }
- CcTest::CollectGarbage(OLD_SPACE);
- CHECK_EQ(0, RememberedSet<OLD_TO_NEW>::NumberOfPreFreedEmptyBuckets(
- MemoryChunk::FromHeapObject(*root)));
-}
-
HEAP_TEST(RegressMissingWriteBarrierInAllocate) {
if (!FLAG_incremental_marking) return;
ManualGCScope manual_gc_scope;
@@ -6631,6 +6782,19 @@ HEAP_TEST(MemoryReducerActivationForSmallHeaps) {
CHECK_EQ(heap->memory_reducer()->state_.action, MemoryReducer::Action::kWait);
}
+TEST(AllocateExternalBackingStore) {
+ ManualGCScope manual_gc_scope;
+ LocalContext env;
+ Isolate* isolate = CcTest::i_isolate();
+ Heap* heap = isolate->heap();
+ int initial_ms_count = heap->ms_count();
+ void* result =
+ heap->AllocateExternalBackingStore([](size_t) { return nullptr; }, 10);
+ CHECK_NULL(result);
+ // At least two GCs should happen.
+ CHECK_LE(2, heap->ms_count() - initial_ms_count);
+}
+
TEST(CodeObjectRegistry) {
// We turn off compaction to ensure that code does not move.
FLAG_never_compact = true;
@@ -6642,11 +6806,13 @@ TEST(CodeObjectRegistry) {
HandleScope outer_scope(heap->isolate());
Address code2_address;
{
+ // Ensure that both code objects end up on the same page.
+ CHECK(HeapTester::CodeEnsureLinearAllocationArea(
+ heap, kMaxRegularHeapObjectSize));
code1 = DummyOptimizedCode(isolate);
Handle<Code> code2 = DummyOptimizedCode(isolate);
code2_address = code2->address();
- // If this check breaks, change the allocation to ensure that both code
- // objects are on the same page.
+
CHECK_EQ(MemoryChunk::FromHeapObject(*code1),
MemoryChunk::FromHeapObject(*code2));
CHECK(MemoryChunk::FromHeapObject(*code1)->Contains(code1->address()));
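The new RememberedSet_* tests added above all build on the same two internal
entry points that also appear in the RememberedSetRemoveRange hunks: a
templated Insert and a callback-based Iterate. Condensed from those hunks
(internal V8 heap API, not embedder-facing; `obj` and `slot_address` are
placeholders):

    MemoryChunk* chunk = MemoryChunk::FromHeapObject(obj);
    RememberedSet<OLD_TO_NEW>::Insert<AccessMode::ATOMIC>(chunk, slot_address);
    size_t count = 0;
    RememberedSet<OLD_TO_NEW>::Iterate(
        chunk,
        [&count](MaybeObjectSlot slot) {
          count++;
          return KEEP_SLOT;  // leave the entry in the set
        },
        SlotSet::KEEP_EMPTY_BUCKETS);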
diff --git a/deps/v8/test/cctest/heap/test-invalidated-slots.cc b/deps/v8/test/cctest/heap/test-invalidated-slots.cc
index af42503f86..861c48d69d 100644
--- a/deps/v8/test/cctest/heap/test-invalidated-slots.cc
+++ b/deps/v8/test/cctest/heap/test-invalidated-slots.cc
@@ -70,8 +70,7 @@ HEAP_TEST(InvalidatedSlotsSomeInvalidatedRanges) {
Page* page = AllocateByteArraysOnPage(heap, &byte_arrays);
// Register every second byte array as invalidated.
for (size_t i = 0; i < byte_arrays.size(); i += 2) {
- page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(byte_arrays[i],
- byte_arrays[i].Size());
+ page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(byte_arrays[i]);
}
InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToOld(page);
for (size_t i = 0; i < byte_arrays.size(); i++) {
@@ -95,8 +94,7 @@ HEAP_TEST(InvalidatedSlotsAllInvalidatedRanges) {
Page* page = AllocateByteArraysOnPage(heap, &byte_arrays);
// Register all the byte arrays as invalidated.
for (size_t i = 0; i < byte_arrays.size(); i++) {
- page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(byte_arrays[i],
- byte_arrays[i].Size());
+ page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(byte_arrays[i]);
}
InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToOld(page);
for (size_t i = 0; i < byte_arrays.size(); i++) {
@@ -117,8 +115,7 @@ HEAP_TEST(InvalidatedSlotsAfterTrimming) {
Page* page = AllocateByteArraysOnPage(heap, &byte_arrays);
// Register all the byte arrays as invalidated.
for (size_t i = 0; i < byte_arrays.size(); i++) {
- page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(byte_arrays[i],
- byte_arrays[i].Size());
+ page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(byte_arrays[i]);
}
// Trim byte arrays and check that the slots outside the byte arrays are
// considered invalid if the old space page was swept.
@@ -145,8 +142,7 @@ HEAP_TEST(InvalidatedSlotsEvacuationCandidate) {
// This should be a no-op because the page is marked as an evacuation
// candidate.
for (size_t i = 0; i < byte_arrays.size(); i++) {
- page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(byte_arrays[i],
- byte_arrays[i].Size());
+ page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(byte_arrays[i]);
}
// All slots must still be valid.
InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToOld(page);
@@ -169,8 +165,7 @@ HEAP_TEST(InvalidatedSlotsResetObjectRegression) {
heap->RightTrimFixedArray(byte_arrays[0], byte_arrays[0].length() - 8);
// Register all the byte arrays as invalidated.
for (size_t i = 0; i < byte_arrays.size(); i++) {
- page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(byte_arrays[i],
- byte_arrays[i].Size());
+ page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(byte_arrays[i]);
}
// All slots must still be invalid.
InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToOld(page);
@@ -359,8 +354,7 @@ HEAP_TEST(InvalidatedSlotsCleanupFull) {
Page* page = AllocateByteArraysOnPage(heap, &byte_arrays);
// Register all byte arrays as invalidated.
for (size_t i = 0; i < byte_arrays.size(); i++) {
- page->RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(byte_arrays[i],
- byte_arrays[i].Size());
+ page->RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(byte_arrays[i]);
}
// Mark full page as free
@@ -379,8 +373,7 @@ HEAP_TEST(InvalidatedSlotsCleanupEachObject) {
Page* page = AllocateByteArraysOnPage(heap, &byte_arrays);
// Register all byte arrays as invalidated.
for (size_t i = 0; i < byte_arrays.size(); i++) {
- page->RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(byte_arrays[i],
- byte_arrays[i].Size());
+ page->RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(byte_arrays[i]);
}
// Mark each object as free on page
@@ -405,11 +398,9 @@ HEAP_TEST(InvalidatedSlotsCleanupRightTrim) {
CHECK_GT(byte_arrays.size(), 1);
ByteArray& invalidated = byte_arrays[1];
- int invalidated_size = invalidated.Size();
heap->RightTrimFixedArray(invalidated, invalidated.length() - 8);
- page->RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(invalidated,
- invalidated_size);
+ page->RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(invalidated);
// Free memory at end of invalidated object
InvalidatedSlotsCleanup cleanup = InvalidatedSlotsCleanup::OldToNew(page);
@@ -418,8 +409,6 @@ HEAP_TEST(InvalidatedSlotsCleanupRightTrim) {
// After cleanup the invalidated object should be smaller
InvalidatedSlots* invalidated_slots = page->invalidated_slots<OLD_TO_NEW>();
- CHECK_GE((*invalidated_slots)[HeapObject::FromAddress(invalidated.address())],
- invalidated.Size());
CHECK_EQ(invalidated_slots->size(), 1);
}
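Every hunk in this file applies the same signature change:
RegisterObjectWithInvalidatedSlots loses its explicit size argument, since the
page can now derive the invalidated range from the object itself. In
miniature:

    // Before: the caller passed the object's size alongside the object.
    page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(array, array.Size());
    // After: the size is taken from the object, so the argument is gone.
    page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(array);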
diff --git a/deps/v8/test/cctest/heap/test-page-promotion.cc b/deps/v8/test/cctest/heap/test-page-promotion.cc
index df6211826e..c31fc39c2e 100644
--- a/deps/v8/test/cctest/heap/test-page-promotion.cc
+++ b/deps/v8/test/cctest/heap/test-page-promotion.cc
@@ -43,8 +43,7 @@ v8::Isolate* NewIsolateForPagePromotion(int min_semi_space_size = 8,
return isolate;
}
-Page* FindLastPageInNewSpace(
- std::vector<Handle<FixedArray>>& handles) { // NOLINT(runtime/references)
+Page* FindLastPageInNewSpace(const std::vector<Handle<FixedArray>>& handles) {
for (auto rit = handles.rbegin(); rit != handles.rend(); ++rit) {
// One deref gets the Handle, the second deref gets the FixedArray.
Page* candidate = Page::FromHeapObject(**rit);
@@ -146,8 +145,10 @@ UNINITIALIZED_TEST(PagePromotion_NewToNewJSArrayBuffer) {
heap::FillCurrentPage(heap->new_space());
// Allocate a buffer we would like to check against.
Handle<JSArrayBuffer> buffer =
- i_isolate->factory()->NewJSArrayBuffer(SharedFlag::kNotShared);
- CHECK(JSArrayBuffer::SetupAllocatingData(buffer, i_isolate, 100));
+ i_isolate->factory()
+ ->NewJSArrayBufferAndBackingStore(100,
+ InitializedFlag::kZeroInitialized)
+ .ToHandleChecked();
std::vector<Handle<FixedArray>> handles;
// Simulate a full space, filling the interesting page with live objects.
heap::SimulateFullSpace(heap->new_space(), &handles);
@@ -188,8 +189,10 @@ UNINITIALIZED_TEST(PagePromotion_NewToOldJSArrayBuffer) {
heap::FillCurrentPage(heap->new_space());
// Allocate a buffer we would like to check against.
Handle<JSArrayBuffer> buffer =
- i_isolate->factory()->NewJSArrayBuffer(SharedFlag::kNotShared);
- CHECK(JSArrayBuffer::SetupAllocatingData(buffer, i_isolate, 100));
+ i_isolate->factory()
+ ->NewJSArrayBufferAndBackingStore(100,
+ InitializedFlag::kZeroInitialized)
+ .ToHandleChecked();
std::vector<Handle<FixedArray>> handles;
// Simulate a full space, filling the interesting page with live objects.
heap::SimulateFullSpace(heap->new_space(), &handles);
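Both hunks in this file replace the two-step buffer setup (NewJSArrayBuffer
followed by JSArrayBuffer::SetupAllocatingData) with a single factory call.
The new shape, as used above; the call returns a MaybeHandle because
allocating the backing store can fail:

    Handle<JSArrayBuffer> buffer =
        i_isolate->factory()
            ->NewJSArrayBufferAndBackingStore(100,
                                              InitializedFlag::kZeroInitialized)
            .ToHandleChecked();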
diff --git a/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.cc b/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.cc
index 370c5d8131..a271df4f67 100644
--- a/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.cc
+++ b/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.cc
@@ -132,24 +132,24 @@ BytecodeExpectationsPrinter::GetBytecodeArrayOfCallee(
}
void BytecodeExpectationsPrinter::PrintEscapedString(
- std::ostream& stream, const std::string& string) const {
+ std::ostream* stream, const std::string& string) const {
for (char c : string) {
switch (c) {
case '"':
- stream << "\\\"";
+ *stream << "\\\"";
break;
case '\\':
- stream << "\\\\";
+ *stream << "\\\\";
break;
default:
- stream << c;
+ *stream << c;
break;
}
}
}
void BytecodeExpectationsPrinter::PrintBytecodeOperand(
- std::ostream& stream, const BytecodeArrayIterator& bytecode_iterator,
+ std::ostream* stream, const BytecodeArrayIterator& bytecode_iterator,
const Bytecode& bytecode, int op_index, int parameter_count) const {
OperandType op_type = Bytecodes::GetOperandType(bytecode, op_index);
OperandSize op_size = Bytecodes::GetOperandSize(
@@ -172,207 +172,207 @@ void BytecodeExpectationsPrinter::PrintBytecodeOperand(
if (Bytecodes::IsRegisterOperandType(op_type)) {
Register register_value = bytecode_iterator.GetRegisterOperand(op_index);
- stream << 'R';
- if (op_size != OperandSize::kByte) stream << size_tag;
+ *stream << 'R';
+ if (op_size != OperandSize::kByte) *stream << size_tag;
if (register_value.is_current_context()) {
- stream << "(context)";
+ *stream << "(context)";
} else if (register_value.is_function_closure()) {
- stream << "(closure)";
+ *stream << "(closure)";
} else if (register_value.is_parameter()) {
int parameter_index = register_value.ToParameterIndex(parameter_count);
if (parameter_index == 0) {
- stream << "(this)";
+ *stream << "(this)";
} else {
- stream << "(arg" << (parameter_index - 1) << ')';
+ *stream << "(arg" << (parameter_index - 1) << ')';
}
} else {
- stream << '(' << register_value.index() << ')';
+ *stream << '(' << register_value.index() << ')';
}
} else {
switch (op_type) {
case OperandType::kFlag8:
- stream << 'U' << size_tag << '(';
- stream << bytecode_iterator.GetFlagOperand(op_index);
+ *stream << 'U' << size_tag << '(';
+ *stream << bytecode_iterator.GetFlagOperand(op_index);
break;
case OperandType::kIdx: {
- stream << 'U' << size_tag << '(';
- stream << bytecode_iterator.GetIndexOperand(op_index);
+ *stream << 'U' << size_tag << '(';
+ *stream << bytecode_iterator.GetIndexOperand(op_index);
break;
}
case OperandType::kUImm:
- stream << 'U' << size_tag << '(';
- stream << bytecode_iterator.GetUnsignedImmediateOperand(op_index);
+ *stream << 'U' << size_tag << '(';
+ *stream << bytecode_iterator.GetUnsignedImmediateOperand(op_index);
break;
case OperandType::kImm:
- stream << 'I' << size_tag << '(';
- stream << bytecode_iterator.GetImmediateOperand(op_index);
+ *stream << 'I' << size_tag << '(';
+ *stream << bytecode_iterator.GetImmediateOperand(op_index);
break;
case OperandType::kRegCount:
- stream << 'U' << size_tag << '(';
- stream << bytecode_iterator.GetRegisterCountOperand(op_index);
+ *stream << 'U' << size_tag << '(';
+ *stream << bytecode_iterator.GetRegisterCountOperand(op_index);
break;
case OperandType::kRuntimeId: {
- stream << 'U' << size_tag << '(';
+ *stream << 'U' << size_tag << '(';
Runtime::FunctionId id =
bytecode_iterator.GetRuntimeIdOperand(op_index);
- stream << "Runtime::k" << i::Runtime::FunctionForId(id)->name;
+ *stream << "Runtime::k" << i::Runtime::FunctionForId(id)->name;
break;
}
case OperandType::kIntrinsicId: {
- stream << 'U' << size_tag << '(';
+ *stream << 'U' << size_tag << '(';
Runtime::FunctionId id =
bytecode_iterator.GetIntrinsicIdOperand(op_index);
- stream << "Runtime::k" << i::Runtime::FunctionForId(id)->name;
+ *stream << "Runtime::k" << i::Runtime::FunctionForId(id)->name;
break;
}
case OperandType::kNativeContextIndex: {
- stream << 'U' << size_tag << '(';
+ *stream << 'U' << size_tag << '(';
uint32_t idx = bytecode_iterator.GetNativeContextIndexOperand(op_index);
- stream << "%" << NameForNativeContextIntrinsicIndex(idx);
+ *stream << "%" << NameForNativeContextIntrinsicIndex(idx);
break;
}
default:
UNREACHABLE();
}
- stream << ')';
+ *stream << ')';
}
}
void BytecodeExpectationsPrinter::PrintBytecode(
- std::ostream& stream, const BytecodeArrayIterator& bytecode_iterator,
+ std::ostream* stream, const BytecodeArrayIterator& bytecode_iterator,
int parameter_count) const {
Bytecode bytecode = bytecode_iterator.current_bytecode();
OperandScale operand_scale = bytecode_iterator.current_operand_scale();
if (Bytecodes::OperandScaleRequiresPrefixBytecode(operand_scale)) {
Bytecode prefix = Bytecodes::OperandScaleToPrefixBytecode(operand_scale);
- stream << "B(" << Bytecodes::ToString(prefix) << "), ";
+ *stream << "B(" << Bytecodes::ToString(prefix) << "), ";
}
- stream << "B(" << Bytecodes::ToString(bytecode) << ')';
+ *stream << "B(" << Bytecodes::ToString(bytecode) << ')';
int operands_count = Bytecodes::NumberOfOperands(bytecode);
for (int op_index = 0; op_index < operands_count; ++op_index) {
- stream << ", ";
+ *stream << ", ";
PrintBytecodeOperand(stream, bytecode_iterator, bytecode, op_index,
parameter_count);
}
}
void BytecodeExpectationsPrinter::PrintSourcePosition(
- std::ostream& stream, SourcePositionTableIterator& source_iterator,
+ std::ostream* stream, SourcePositionTableIterator* source_iterator,
int bytecode_offset) const {
static const size_t kPositionWidth = 4;
- if (!source_iterator.done() &&
- source_iterator.code_offset() == bytecode_offset) {
- stream << "/* " << std::setw(kPositionWidth)
- << source_iterator.source_position().ScriptOffset();
- if (source_iterator.is_statement()) {
- stream << " S> */ ";
+ if (!source_iterator->done() &&
+ source_iterator->code_offset() == bytecode_offset) {
+ *stream << "/* " << std::setw(kPositionWidth)
+ << source_iterator->source_position().ScriptOffset();
+ if (source_iterator->is_statement()) {
+ *stream << " S> */ ";
} else {
- stream << " E> */ ";
+ *stream << " E> */ ";
}
- source_iterator.Advance();
+ source_iterator->Advance();
} else {
- stream << " " << std::setw(kPositionWidth) << ' ' << " ";
+ *stream << " " << std::setw(kPositionWidth) << ' ' << " ";
}
}
-void BytecodeExpectationsPrinter::PrintV8String(std::ostream& stream,
+void BytecodeExpectationsPrinter::PrintV8String(std::ostream* stream,
i::String string) const {
- stream << '"';
+ *stream << '"';
for (int i = 0, length = string.length(); i < length; ++i) {
- stream << i::AsEscapedUC16ForJSON(string.Get(i));
+ *stream << i::AsEscapedUC16ForJSON(string.Get(i));
}
- stream << '"';
+ *stream << '"';
}
void BytecodeExpectationsPrinter::PrintConstant(
- std::ostream& stream, i::Handle<i::Object> constant) const {
+ std::ostream* stream, i::Handle<i::Object> constant) const {
if (constant->IsSmi()) {
- stream << "Smi [";
- i::Smi::cast(*constant).SmiPrint(stream);
- stream << "]";
+ *stream << "Smi [";
+ i::Smi::cast(*constant).SmiPrint(*stream);
+ *stream << "]";
} else {
- stream << i::HeapObject::cast(*constant).map().instance_type();
+ *stream << i::HeapObject::cast(*constant).map().instance_type();
if (constant->IsHeapNumber()) {
- stream << " [";
- i::HeapNumber::cast(*constant).HeapNumberPrint(stream);
- stream << "]";
+ *stream << " [";
+ i::HeapNumber::cast(*constant).HeapNumberPrint(*stream);
+ *stream << "]";
} else if (constant->IsString()) {
- stream << " [";
+ *stream << " [";
PrintV8String(stream, i::String::cast(*constant));
- stream << "]";
+ *stream << "]";
}
}
}
void BytecodeExpectationsPrinter::PrintFrameSize(
- std::ostream& stream, i::Handle<i::BytecodeArray> bytecode_array) const {
+ std::ostream* stream, i::Handle<i::BytecodeArray> bytecode_array) const {
int32_t frame_size = bytecode_array->frame_size();
DCHECK(IsAligned(frame_size, kSystemPointerSize));
- stream << "frame size: " << frame_size / kSystemPointerSize
- << "\nparameter count: " << bytecode_array->parameter_count() << '\n';
+ *stream << "frame size: " << frame_size / kSystemPointerSize
+ << "\nparameter count: " << bytecode_array->parameter_count() << '\n';
}
void BytecodeExpectationsPrinter::PrintBytecodeSequence(
- std::ostream& stream, i::Handle<i::BytecodeArray> bytecode_array) const {
- stream << "bytecode array length: " << bytecode_array->length()
- << "\nbytecodes: [\n";
+ std::ostream* stream, i::Handle<i::BytecodeArray> bytecode_array) const {
+ *stream << "bytecode array length: " << bytecode_array->length()
+ << "\nbytecodes: [\n";
SourcePositionTableIterator source_iterator(
bytecode_array->SourcePositionTable());
BytecodeArrayIterator bytecode_iterator(bytecode_array);
for (; !bytecode_iterator.done(); bytecode_iterator.Advance()) {
- stream << kIndent;
- PrintSourcePosition(stream, source_iterator,
+ *stream << kIndent;
+ PrintSourcePosition(stream, &source_iterator,
bytecode_iterator.current_offset());
PrintBytecode(stream, bytecode_iterator, bytecode_array->parameter_count());
- stream << ",\n";
+ *stream << ",\n";
}
- stream << "]\n";
+ *stream << "]\n";
}
void BytecodeExpectationsPrinter::PrintConstantPool(
- std::ostream& stream, i::FixedArray constant_pool) const {
- stream << "constant pool: [\n";
+ std::ostream* stream, i::FixedArray constant_pool) const {
+ *stream << "constant pool: [\n";
int num_constants = constant_pool.length();
if (num_constants > 0) {
for (int i = 0; i < num_constants; ++i) {
- stream << kIndent;
+ *stream << kIndent;
PrintConstant(stream, i::FixedArray::get(constant_pool, i, i_isolate()));
- stream << ",\n";
+ *stream << ",\n";
}
}
- stream << "]\n";
+ *stream << "]\n";
}
void BytecodeExpectationsPrinter::PrintCodeSnippet(
- std::ostream& stream, const std::string& body) const {
- stream << "snippet: \"\n";
+ std::ostream* stream, const std::string& body) const {
+ *stream << "snippet: \"\n";
std::stringstream body_stream(body);
std::string body_line;
while (std::getline(body_stream, body_line)) {
- stream << kIndent;
+ *stream << kIndent;
PrintEscapedString(stream, body_line);
- stream << '\n';
+ *stream << '\n';
}
- stream << "\"\n";
+ *stream << "\"\n";
}
void BytecodeExpectationsPrinter::PrintHandlers(
- std::ostream& stream, i::Handle<i::BytecodeArray> bytecode_array) const {
- stream << "handlers: [\n";
+ std::ostream* stream, i::Handle<i::BytecodeArray> bytecode_array) const {
+ *stream << "handlers: [\n";
HandlerTable table(*bytecode_array);
for (int i = 0, num_entries = table.NumberOfRangeEntries(); i < num_entries;
++i) {
- stream << " [" << table.GetRangeStart(i) << ", " << table.GetRangeEnd(i)
- << ", " << table.GetRangeHandler(i) << "],\n";
+ *stream << " [" << table.GetRangeStart(i) << ", " << table.GetRangeEnd(i)
+ << ", " << table.GetRangeHandler(i) << "],\n";
}
- stream << "]\n";
+ *stream << "]\n";
}
void BytecodeExpectationsPrinter::PrintBytecodeArray(
- std::ostream& stream, i::Handle<i::BytecodeArray> bytecode_array) const {
+ std::ostream* stream, i::Handle<i::BytecodeArray> bytecode_array) const {
PrintFrameSize(stream, bytecode_array);
PrintBytecodeSequence(stream, bytecode_array);
PrintConstantPool(stream, bytecode_array->constant_pool());
@@ -380,7 +380,7 @@ void BytecodeExpectationsPrinter::PrintBytecodeArray(
}
void BytecodeExpectationsPrinter::PrintExpectation(
- std::ostream& stream, const std::string& snippet) const {
+ std::ostream* stream, const std::string& snippet) const {
std::string source_code =
wrap_ ? WrapCodeInFunction(test_function_name_.c_str(), snippet)
: snippet;
@@ -404,10 +404,10 @@ void BytecodeExpectationsPrinter::PrintExpectation(
}
}
- stream << "---\n";
+ *stream << "---\n";
PrintCodeSnippet(stream, snippet);
PrintBytecodeArray(stream, bytecode_array);
- stream << '\n';
+ *stream << '\n';
}
} // namespace interpreter
diff --git a/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.h b/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.h
index dc51e5fb7a..6a469461d5 100644
--- a/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.h
+++ b/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.h
@@ -36,8 +36,7 @@ class BytecodeExpectationsPrinter final {
oneshot_opt_(false),
test_function_name_(kDefaultTopFunctionName) {}
- void PrintExpectation(std::ostream& stream, // NOLINT
- const std::string& snippet) const;
+ void PrintExpectation(std::ostream* stream, const std::string& snippet) const;
void set_module(bool module) { module_ = module; }
bool module() const { return module_; }
@@ -60,34 +59,30 @@ class BytecodeExpectationsPrinter final {
std::string test_function_name() const { return test_function_name_; }
private:
- void PrintEscapedString(std::ostream& stream, // NOLINT
+ void PrintEscapedString(std::ostream* stream,
const std::string& string) const;
- void PrintBytecodeOperand(std::ostream& stream, // NOLINT
+ void PrintBytecodeOperand(std::ostream* stream,
const BytecodeArrayIterator& bytecode_iterator,
const Bytecode& bytecode, int op_index,
int parameter_count) const;
- void PrintBytecode(std::ostream& stream, // NOLINT
+ void PrintBytecode(std::ostream* stream,
const BytecodeArrayIterator& bytecode_iterator,
int parameter_count) const;
- void PrintSourcePosition(std::ostream& stream, // NOLINT
- SourcePositionTableIterator&
- source_iterator, // NOLINT(runtime/references)
+ void PrintSourcePosition(std::ostream* stream,
+ SourcePositionTableIterator* source_iterator,
int bytecode_offset) const;
- void PrintV8String(std::ostream& stream, // NOLINT
- i::String string) const;
- void PrintConstant(std::ostream& stream, // NOLINT
- i::Handle<i::Object> constant) const;
- void PrintFrameSize(std::ostream& stream, // NOLINT
+ void PrintV8String(std::ostream* stream, i::String string) const;
+ void PrintConstant(std::ostream* stream, i::Handle<i::Object> constant) const;
+ void PrintFrameSize(std::ostream* stream,
i::Handle<i::BytecodeArray> bytecode_array) const;
- void PrintBytecodeSequence(std::ostream& stream, // NOLINT
+ void PrintBytecodeSequence(std::ostream* stream,
i::Handle<i::BytecodeArray> bytecode_array) const;
- void PrintConstantPool(std::ostream& stream, // NOLINT
+ void PrintConstantPool(std::ostream* stream,
i::FixedArray constant_pool) const;
- void PrintCodeSnippet(std::ostream& stream, // NOLINT
- const std::string& body) const;
- void PrintBytecodeArray(std::ostream& stream, // NOLINT
+ void PrintCodeSnippet(std::ostream* stream, const std::string& body) const;
+ void PrintBytecodeArray(std::ostream* stream,
i::Handle<i::BytecodeArray> bytecode_array) const;
- void PrintHandlers(std::ostream& stream, // NOLINT
+ void PrintHandlers(std::ostream* stream,
i::Handle<i::BytecodeArray> bytecode_array) const;
v8::Local<v8::String> V8StringFromUTF8(const char* data) const;
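The whole printer refactor exists to satisfy cpplint's runtime/references rule
that the removed NOLINT comments were suppressing: non-const parameters are
passed by pointer so that mutation is visible at the call site. In miniature
(illustrative names only):

    // Old style, needed a NOLINT: void Print(std::ostream& stream);
    void Print(std::ostream* stream) {  // new style: out-parameter is explicit
      *stream << "value\n";
    }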
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ArrayLiterals.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ArrayLiterals.golden
index d6097e938d..7b1de53911 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ArrayLiterals.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ArrayLiterals.golden
@@ -141,9 +141,9 @@ handlers: [
snippet: "
var a = [ 1, 2 ]; return [ 0, ...a ];
"
-frame size: 7
+frame size: 6
parameter count: 1
-bytecode array length: 80
+bytecode array length: 68
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(CreateArrayLiteral), U8(0), U8(0), U8(37),
@@ -152,11 +152,7 @@ bytecodes: [
B(Star), R(2),
B(LdaConstant), U8(2),
/* 67 S> */ B(Star), R(1),
- B(GetIterator), R(0), U8(2),
- B(Star), R(6),
- B(CallProperty0), R(6), R(0), U8(4),
- B(JumpIfJSReceiver), U8(7),
- B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
+ B(GetIterator), R(0), U8(2), U8(4),
B(Star), R(4),
B(LdaNamedProperty), R(4), U8(3), U8(6),
B(Star), R(3),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncGenerators.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncGenerators.golden
index 1dbb999371..0e7cac1ad9 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncGenerators.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncGenerators.golden
@@ -214,7 +214,7 @@ snippet: "
"
frame size: 19
parameter count: 1
-bytecode array length: 369
+bytecode array length: 357
bytecodes: [
B(SwitchOnGeneratorState), R(0), U8(0), U8(2),
B(Mov), R(closure), R(4),
@@ -238,11 +238,7 @@ bytecodes: [
B(JumpConstant), U8(15),
/* 36 S> */ B(CreateArrayLiteral), U8(4), U8(0), U8(37),
B(Star), R(10),
- B(GetIterator), R(10), U8(1),
- B(Star), R(11),
- B(CallProperty0), R(11), R(10), U8(3),
- B(JumpIfJSReceiver), U8(7),
- B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
+ B(GetIterator), R(10), U8(1), U8(3),
B(Star), R(9),
B(LdaNamedProperty), R(9), U8(5), U8(5),
B(Star), R(8),
@@ -376,7 +372,7 @@ bytecodes: [
]
constant pool: [
Smi [30],
- Smi [148],
+ Smi [136],
Smi [16],
Smi [7],
ARRAY_BOILERPLATE_DESCRIPTION_TYPE,
@@ -390,16 +386,16 @@ constant pool: [
Smi [6],
Smi [9],
SCOPE_INFO_TYPE,
- Smi [274],
+ Smi [262],
Smi [6],
Smi [9],
Smi [23],
]
handlers: [
- [20, 315, 323],
- [23, 279, 281],
- [92, 179, 187],
- [211, 244, 246],
+ [20, 303, 311],
+ [23, 267, 269],
+ [80, 167, 175],
+ [199, 232, 234],
]
---
@@ -410,7 +406,7 @@ snippet: "
"
frame size: 17
parameter count: 1
-bytecode array length: 466
+bytecode array length: 467
bytecodes: [
B(SwitchOnGeneratorState), R(0), U8(0), U8(5),
B(Mov), R(closure), R(1),
@@ -431,7 +427,7 @@ bytecodes: [
B(LdaSmi), I8(1),
B(Star), R(1),
B(Mov), R(5), R(2),
- B(JumpConstant), U8(17),
+ B(JumpConstant), U8(18),
/* 49 S> */ B(LdaGlobal), U8(7), U8(0),
B(Star), R(9),
/* 56 E> */ B(CallUndefinedReceiver0), R(9), U8(2),
@@ -440,25 +436,25 @@ bytecodes: [
B(JumpIfUndefinedOrNull), U8(15),
B(Star), R(11),
B(CallProperty0), R(11), R(10), U8(6),
- B(JumpIfJSReceiver), U8(22),
+ B(JumpIfJSReceiver), U8(23),
B(CallRuntime), U16(Runtime::kThrowSymbolAsyncIteratorInvalid), R(0), U8(0),
- B(GetIterator), R(10), U8(8),
+ B(LdaNamedProperty), R(10), U8(9), U8(8),
B(Star), R(11),
B(CallProperty0), R(11), R(10), U8(10),
B(Star), R(11),
B(InvokeIntrinsic), U8(Runtime::k_CreateAsyncFromSyncIterator), R(11), U8(1),
B(Star), R(7),
- B(LdaNamedProperty), R(7), U8(9), U8(12),
+ B(LdaNamedProperty), R(7), U8(10), U8(12),
B(Star), R(9),
B(LdaUndefined),
B(Star), R(8),
B(LdaZero),
B(Star), R(6),
B(Ldar), R(6),
- B(SwitchOnSmiNoFeedback), U8(10), U8(2), I8(1),
+ B(SwitchOnSmiNoFeedback), U8(11), U8(2), I8(1),
B(CallProperty1), R(9), R(7), R(8), U8(14),
B(Jump), U8(140),
- B(LdaNamedProperty), R(7), U8(12), U8(16),
+ B(LdaNamedProperty), R(7), U8(13), U8(16),
B(JumpIfUndefinedOrNull), U8(11),
B(Star), R(10),
B(CallProperty1), R(10), R(7), R(8), U8(18),
@@ -480,12 +476,12 @@ bytecodes: [
B(Star), R(1),
B(Mov), R(10), R(2),
B(Jump), U8(241),
- B(LdaNamedProperty), R(7), U8(13), U8(20),
+ B(LdaNamedProperty), R(7), U8(14), U8(20),
B(JumpIfUndefinedOrNull), U8(11),
B(Star), R(12),
B(CallProperty1), R(12), R(7), R(8), U8(22),
B(Jump), U8(66),
- B(LdaNamedProperty), R(7), U8(12), U8(24),
+ B(LdaNamedProperty), R(7), U8(13), U8(24),
B(JumpIfUndefinedOrNull), U8(55),
B(Star), R(12),
B(CallProperty0), R(12), R(7), U8(26),
@@ -525,9 +521,9 @@ bytecodes: [
B(Mov), R(12), R(5),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(5), U8(1),
- B(LdaNamedProperty), R(5), U8(14), U8(28),
+ B(LdaNamedProperty), R(5), U8(15), U8(28),
B(JumpIfToBooleanTrue), U8(38),
- B(LdaNamedProperty), R(5), U8(15), U8(30),
+ B(LdaNamedProperty), R(5), U8(16), U8(30),
B(Star), R(15),
B(LdaFalse),
B(Star), R(16),
@@ -539,7 +535,7 @@ bytecodes: [
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(Star), R(6),
B(JumpLoop), U8(236), I8(0),
- B(LdaNamedProperty), R(5), U8(15), U8(32),
+ B(LdaNamedProperty), R(5), U8(16), U8(32),
B(Star), R(7),
B(LdaSmi), I8(1),
B(TestReferenceEqual), R(6),
@@ -551,7 +547,7 @@ bytecodes: [
B(Ldar), R(7),
B(Jump), U8(36),
B(Star), R(5),
- B(CreateCatchContext), R(5), U8(16),
+ B(CreateCatchContext), R(5), U8(17),
B(Star), R(4),
B(LdaTheHole),
B(SetPendingMessage),
@@ -580,7 +576,7 @@ bytecodes: [
B(Ldar), R(3),
B(SetPendingMessage),
B(Ldar), R(1),
- B(SwitchOnSmiNoFeedback), U8(18), U8(3), I8(0),
+ B(SwitchOnSmiNoFeedback), U8(19), U8(3), I8(0),
B(Jump), U8(22),
B(Ldar), R(2),
B(ReThrow),
@@ -597,14 +593,15 @@ bytecodes: [
]
constant pool: [
Smi [30],
- Smi [157],
- Smi [229],
- Smi [279],
- Smi [338],
+ Smi [158],
+ Smi [230],
+ Smi [280],
+ Smi [339],
Smi [16],
Smi [7],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["g"],
SYMBOL_TYPE,
+ SYMBOL_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
Smi [11],
Smi [70],
@@ -613,13 +610,13 @@ constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
SCOPE_INFO_TYPE,
- Smi [371],
+ Smi [372],
Smi [6],
Smi [9],
Smi [23],
]
handlers: [
- [20, 412, 420],
- [23, 374, 378],
+ [20, 413, 421],
+ [23, 375, 379],
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncModules.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncModules.golden
new file mode 100644
index 0000000000..7cbe661b94
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncModules.golden
@@ -0,0 +1,349 @@
+#
+# Autogenerated by generate-bytecode-expectations.
+#
+
+---
+wrap: no
+module: yes
+top level: yes
+top level await: yes
+
+---
+snippet: "
+ await 42;
+"
+frame size: 8
+parameter count: 2
+bytecode array length: 142
+bytecodes: [
+ B(SwitchOnGeneratorState), R(0), U8(0), U8(2),
+ B(LdaConstant), U8(2),
+ B(Star), R(3),
+ B(Mov), R(arg0), R(2),
+ B(CallRuntime), U16(Runtime::kPushModuleContext), R(2), U8(2),
+ B(PushContext), R(2),
+ B(Mov), R(closure), R(3),
+ B(Mov), R(this), R(4),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionEnter), R(3), U8(2),
+ B(Star), R(0),
+ /* 0 E> */ B(StackCheck),
+ /* 0 E> */ B(SuspendGenerator), R(0), R(0), U8(3), U8(0),
+ B(ResumeGenerator), R(0), R(0), U8(3),
+ B(Star), R(3),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
+ B(SwitchOnSmiNoFeedback), U8(3), U8(2), I8(0),
+ B(Ldar), R(3),
+ /* 0 E> */ B(Throw),
+ B(Ldar), R(3),
+ /* 10 S> */ B(Return),
+ B(Mov), R(3), R(1),
+ B(Ldar), R(1),
+ B(Mov), R(context), R(3),
+ /* 0 S> */ B(LdaSmi), I8(42),
+ B(Star), R(5),
+ B(Mov), R(0), R(4),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionAwaitUncaught), R(4), U8(2),
+ /* 0 E> */ B(SuspendGenerator), R(0), R(0), U8(4), U8(1),
+ B(ResumeGenerator), R(0), R(0), U8(4),
+ B(Star), R(4),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
+ B(Star), R(5),
+ B(LdaZero),
+ B(TestReferenceEqual), R(5),
+ B(JumpIfTrue), U8(5),
+ B(Ldar), R(4),
+ B(ReThrow),
+ B(LdaUndefined),
+ B(Star), R(5),
+ B(LdaTrue),
+ B(Star), R(6),
+ B(Mov), R(0), R(4),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionResolve), R(4), U8(3),
+ /* 10 S> */ B(Return),
+ B(Star), R(4),
+ B(CreateCatchContext), R(4), U8(5),
+ B(Star), R(3),
+ B(LdaTheHole),
+ B(SetPendingMessage),
+ B(Ldar), R(3),
+ B(PushContext), R(4),
+ B(LdaImmutableCurrentContextSlot), U8(4),
+ B(Star), R(6),
+ B(LdaTrue),
+ B(Star), R(7),
+ B(Mov), R(0), R(5),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionReject), R(5), U8(3),
+ /* 10 S> */ B(Return),
+]
+constant pool: [
+ Smi [36],
+ Smi [80],
+ SCOPE_INFO_TYPE,
+ Smi [10],
+ Smi [7],
+ SCOPE_INFO_TYPE,
+]
+handlers: [
+ [64, 114, 114],
+]
+
+---
+snippet: "
+ await import(\"foo\");
+"
+frame size: 8
+parameter count: 2
+bytecode array length: 152
+bytecodes: [
+ B(SwitchOnGeneratorState), R(0), U8(0), U8(2),
+ B(LdaConstant), U8(2),
+ B(Star), R(3),
+ B(Mov), R(arg0), R(2),
+ B(CallRuntime), U16(Runtime::kPushModuleContext), R(2), U8(2),
+ B(PushContext), R(2),
+ B(Mov), R(closure), R(3),
+ B(Mov), R(this), R(4),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionEnter), R(3), U8(2),
+ B(Star), R(0),
+ /* 0 E> */ B(StackCheck),
+ /* 0 E> */ B(SuspendGenerator), R(0), R(0), U8(3), U8(0),
+ B(ResumeGenerator), R(0), R(0), U8(3),
+ B(Star), R(3),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
+ B(SwitchOnSmiNoFeedback), U8(3), U8(2), I8(0),
+ B(Ldar), R(3),
+ /* 0 E> */ B(Throw),
+ B(Ldar), R(3),
+ /* 21 S> */ B(Return),
+ B(Mov), R(3), R(1),
+ B(Ldar), R(1),
+ B(Mov), R(context), R(3),
+ /* 0 S> */ B(LdaConstant), U8(5),
+ B(Star), R(5),
+ B(Mov), R(closure), R(4),
+ B(CallRuntime), U16(Runtime::kDynamicImportCall), R(4), U8(2),
+ B(Star), R(5),
+ B(Mov), R(0), R(4),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionAwaitUncaught), R(4), U8(2),
+ /* 0 E> */ B(SuspendGenerator), R(0), R(0), U8(4), U8(1),
+ B(ResumeGenerator), R(0), R(0), U8(4),
+ B(Star), R(4),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
+ B(Star), R(5),
+ B(LdaZero),
+ B(TestReferenceEqual), R(5),
+ B(JumpIfTrue), U8(5),
+ B(Ldar), R(4),
+ B(ReThrow),
+ B(LdaUndefined),
+ B(Star), R(5),
+ B(LdaTrue),
+ B(Star), R(6),
+ B(Mov), R(0), R(4),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionResolve), R(4), U8(3),
+ /* 21 S> */ B(Return),
+ B(Star), R(4),
+ B(CreateCatchContext), R(4), U8(6),
+ B(Star), R(3),
+ B(LdaTheHole),
+ B(SetPendingMessage),
+ B(Ldar), R(3),
+ B(PushContext), R(4),
+ B(LdaImmutableCurrentContextSlot), U8(4),
+ B(Star), R(6),
+ B(LdaTrue),
+ B(Star), R(7),
+ B(Mov), R(0), R(5),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionReject), R(5), U8(3),
+ /* 21 S> */ B(Return),
+]
+constant pool: [
+ Smi [36],
+ Smi [90],
+ SCOPE_INFO_TYPE,
+ Smi [10],
+ Smi [7],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["foo"],
+ SCOPE_INFO_TYPE,
+]
+handlers: [
+ [64, 124, 124],
+]
+
+---
+snippet: "
+ await 42;
+ async function foo() {
+ await 42;
+ }
+ foo();
+"
+frame size: 9
+parameter count: 2
+bytecode array length: 153
+bytecodes: [
+ B(SwitchOnGeneratorState), R(0), U8(0), U8(2),
+ B(LdaConstant), U8(2),
+ B(Star), R(4),
+ B(Mov), R(arg0), R(3),
+ B(CallRuntime), U16(Runtime::kPushModuleContext), R(3), U8(2),
+ B(PushContext), R(3),
+ B(Mov), R(closure), R(4),
+ B(Mov), R(this), R(5),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionEnter), R(4), U8(2),
+ B(Star), R(0),
+ B(CreateClosure), U8(3), U8(0), U8(0),
+ B(Star), R(1),
+ /* 0 E> */ B(StackCheck),
+ B(Ldar), R(0),
+ /* 0 E> */ B(SuspendGenerator), R(0), R(0), U8(4), U8(0),
+ B(ResumeGenerator), R(0), R(0), U8(4),
+ B(Star), R(4),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
+ B(SwitchOnSmiNoFeedback), U8(4), U8(2), I8(0),
+ B(Ldar), R(4),
+ /* 0 E> */ B(Throw),
+ B(Ldar), R(4),
+ /* 54 S> */ B(Return),
+ B(Mov), R(4), R(2),
+ B(Ldar), R(2),
+ B(Mov), R(context), R(4),
+ /* 0 S> */ B(LdaSmi), I8(42),
+ B(Star), R(6),
+ B(Mov), R(0), R(5),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionAwaitUncaught), R(5), U8(2),
+ /* 0 E> */ B(SuspendGenerator), R(0), R(0), U8(5), U8(1),
+ B(ResumeGenerator), R(0), R(0), U8(5),
+ B(Star), R(5),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
+ B(Star), R(6),
+ B(LdaZero),
+ B(TestReferenceEqual), R(6),
+ B(JumpIfTrue), U8(5),
+ B(Ldar), R(5),
+ B(ReThrow),
+ /* 47 S> */ B(CallUndefinedReceiver0), R(1), U8(0),
+ B(LdaUndefined),
+ B(Star), R(6),
+ B(LdaTrue),
+ B(Star), R(7),
+ B(Mov), R(0), R(5),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionResolve), R(5), U8(3),
+ /* 54 S> */ B(Return),
+ B(Star), R(5),
+ B(CreateCatchContext), R(5), U8(6),
+ B(Star), R(4),
+ B(LdaTheHole),
+ B(SetPendingMessage),
+ B(Ldar), R(4),
+ B(PushContext), R(5),
+ B(LdaImmutableCurrentContextSlot), U8(4),
+ B(Star), R(7),
+ B(LdaTrue),
+ B(Star), R(8),
+ B(Mov), R(0), R(6),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionReject), R(6), U8(3),
+ /* 54 S> */ B(Return),
+]
+constant pool: [
+ Smi [44],
+ Smi [88],
+ SCOPE_INFO_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
+ Smi [10],
+ Smi [7],
+ SCOPE_INFO_TYPE,
+]
+handlers: [
+ [72, 125, 125],
+]
+
+---
+snippet: "
+ import * as foo from \"bar\";
+ await import(\"goo\");
+"
+frame size: 9
+parameter count: 2
+bytecode array length: 164
+bytecodes: [
+ B(SwitchOnGeneratorState), R(0), U8(0), U8(2),
+ B(LdaConstant), U8(2),
+ B(Star), R(4),
+ B(Mov), R(arg0), R(3),
+ B(CallRuntime), U16(Runtime::kPushModuleContext), R(3), U8(2),
+ B(PushContext), R(3),
+ B(Mov), R(closure), R(4),
+ B(Mov), R(this), R(5),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionEnter), R(4), U8(2),
+ B(Star), R(0),
+ B(LdaZero),
+ B(Star), R(4),
+ B(CallRuntime), U16(Runtime::kGetModuleNamespace), R(4), U8(1),
+ B(Star), R(1),
+ /* 0 E> */ B(StackCheck),
+ B(Ldar), R(0),
+ /* 0 E> */ B(SuspendGenerator), R(0), R(0), U8(4), U8(0),
+ B(ResumeGenerator), R(0), R(0), U8(4),
+ B(Star), R(4),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
+ B(SwitchOnSmiNoFeedback), U8(3), U8(2), I8(0),
+ B(Ldar), R(4),
+ /* 0 E> */ B(Throw),
+ B(Ldar), R(4),
+ /* 49 S> */ B(Return),
+ B(Mov), R(4), R(2),
+ B(Ldar), R(2),
+ B(Mov), R(context), R(4),
+ /* 28 S> */ B(LdaConstant), U8(5),
+ B(Star), R(6),
+ B(Mov), R(closure), R(5),
+ B(CallRuntime), U16(Runtime::kDynamicImportCall), R(5), U8(2),
+ B(Star), R(6),
+ B(Mov), R(0), R(5),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionAwaitUncaught), R(5), U8(2),
+ /* 28 E> */ B(SuspendGenerator), R(0), R(0), U8(5), U8(1),
+ B(ResumeGenerator), R(0), R(0), U8(5),
+ B(Star), R(5),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
+ B(Star), R(6),
+ B(LdaZero),
+ B(TestReferenceEqual), R(6),
+ B(JumpIfTrue), U8(5),
+ B(Ldar), R(5),
+ B(ReThrow),
+ B(LdaUndefined),
+ B(Star), R(6),
+ B(LdaTrue),
+ B(Star), R(7),
+ B(Mov), R(0), R(5),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionResolve), R(5), U8(3),
+ /* 49 S> */ B(Return),
+ B(Star), R(5),
+ B(CreateCatchContext), R(5), U8(6),
+ B(Star), R(4),
+ B(LdaTheHole),
+ B(SetPendingMessage),
+ B(Ldar), R(4),
+ B(PushContext), R(5),
+ B(LdaImmutableCurrentContextSlot), U8(4),
+ B(Star), R(7),
+ B(LdaTrue),
+ B(Star), R(8),
+ B(Mov), R(0), R(6),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionReject), R(6), U8(3),
+ /* 49 S> */ B(Return),
+]
+constant pool: [
+ Smi [48],
+ Smi [102],
+ SCOPE_INFO_TYPE,
+ Smi [10],
+ Smi [7],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["goo"],
+ SCOPE_INFO_TYPE,
+]
+handlers: [
+ [76, 136, 136],
+]
+
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/CallAndSpread.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/CallAndSpread.golden
index 963cbee018..b86d4e61b1 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/CallAndSpread.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/CallAndSpread.golden
@@ -65,9 +65,9 @@ handlers: [
snippet: "
Math.max(0, ...[1, 2, 3], 4);
"
-frame size: 9
+frame size: 8
parameter count: 1
-bytecode array length: 106
+bytecode array length: 94
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 34 S> */ B(LdaGlobal), U8(0), U8(0),
@@ -80,14 +80,10 @@ bytecodes: [
B(Star), R(3),
/* 49 S> */ B(CreateArrayLiteral), U8(4), U8(5), U8(37),
B(Star), R(7),
- B(GetIterator), R(7), U8(6),
- B(Star), R(8),
- B(CallProperty0), R(8), R(7), U8(8),
- B(Mov), R(0), R(2),
- B(JumpIfJSReceiver), U8(7),
- B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
+ B(GetIterator), R(7), U8(6), U8(8),
B(Star), R(6),
B(LdaNamedProperty), R(6), U8(5), U8(10),
+ B(Mov), R(0), R(2),
B(Star), R(5),
B(CallProperty0), R(5), R(6), U8(19),
B(Star), R(7),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassDeclarations.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassDeclarations.golden
index be635a2ed0..cd439d5d14 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassDeclarations.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassDeclarations.golden
@@ -12,27 +12,26 @@ snippet: "
speak() { console.log(this.name + ' is speaking.'); }
}
"
-frame size: 8
+frame size: 7
parameter count: 1
-bytecode array length: 44
+bytecode array length: 41
bytecodes: [
/* 30 E> */ B(StackCheck),
B(CreateBlockContext), U8(0),
- B(PushContext), R(2),
+ B(PushContext), R(1),
B(LdaTheHole),
- B(Star), R(6),
+ B(Star), R(5),
B(CreateClosure), U8(2), U8(0), U8(2),
- B(Star), R(3),
+ B(Star), R(2),
B(LdaConstant), U8(1),
- B(Star), R(4),
+ B(Star), R(3),
B(CreateClosure), U8(3), U8(1), U8(2),
- B(Star), R(7),
- B(Mov), R(3), R(5),
- B(CallRuntime), U16(Runtime::kDefineClass), R(4), U8(4),
- B(Star), R(4),
- B(Mov), R(5), R(1),
- B(PopContext), R(2),
- B(Mov), R(1), R(0),
+ B(Star), R(6),
+ B(Mov), R(2), R(4),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(3), U8(4),
+ B(Star), R(3),
+ B(PopContext), R(1),
+ B(Mov), R(4), R(0),
B(LdaUndefined),
/* 149 S> */ B(Return),
]
@@ -52,27 +51,26 @@ snippet: "
speak() { console.log(this.name + ' is speaking.'); }
}
"
-frame size: 8
+frame size: 7
parameter count: 1
-bytecode array length: 44
+bytecode array length: 41
bytecodes: [
/* 30 E> */ B(StackCheck),
B(CreateBlockContext), U8(0),
- B(PushContext), R(2),
+ B(PushContext), R(1),
B(LdaTheHole),
- B(Star), R(6),
+ B(Star), R(5),
B(CreateClosure), U8(2), U8(0), U8(2),
- B(Star), R(3),
+ B(Star), R(2),
B(LdaConstant), U8(1),
- B(Star), R(4),
+ B(Star), R(3),
B(CreateClosure), U8(3), U8(1), U8(2),
- B(Star), R(7),
- B(Mov), R(3), R(5),
- B(CallRuntime), U16(Runtime::kDefineClass), R(4), U8(4),
- B(Star), R(4),
- B(Mov), R(5), R(1),
- B(PopContext), R(2),
- B(Mov), R(1), R(0),
+ B(Star), R(6),
+ B(Mov), R(2), R(4),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(3), U8(4),
+ B(Star), R(3),
+ B(PopContext), R(1),
+ B(Mov), R(4), R(0),
B(LdaUndefined),
/* 149 S> */ B(Return),
]
@@ -94,43 +92,42 @@ snippet: "
static [n1]() { return n1; }
}
"
-frame size: 12
+frame size: 11
parameter count: 1
-bytecode array length: 87
+bytecode array length: 84
bytecodes: [
B(CreateFunctionContext), U8(0), U8(2),
- B(PushContext), R(2),
+ B(PushContext), R(1),
/* 30 E> */ B(StackCheck),
/* 43 S> */ B(LdaConstant), U8(1),
/* 43 E> */ B(StaCurrentContextSlot), U8(4),
/* 57 S> */ B(LdaConstant), U8(2),
/* 57 E> */ B(StaCurrentContextSlot), U8(5),
B(CreateBlockContext), U8(3),
- B(PushContext), R(3),
+ B(PushContext), R(2),
B(LdaTheHole),
- B(Star), R(7),
+ B(Star), R(6),
B(CreateClosure), U8(5), U8(0), U8(2),
- B(Star), R(4),
+ B(Star), R(3),
B(LdaConstant), U8(4),
- B(Star), R(5),
- /* 75 S> */ B(LdaImmutableContextSlot), R(3), U8(4), U8(0),
- B(ToName), R(8),
+ B(Star), R(4),
+ /* 75 S> */ B(LdaImmutableContextSlot), R(2), U8(4), U8(0),
+ B(ToName), R(7),
B(CreateClosure), U8(6), U8(1), U8(2),
- B(Star), R(9),
- /* 106 S> */ B(LdaImmutableContextSlot), R(3), U8(5), U8(0),
- B(ToName), R(10),
+ B(Star), R(8),
+ /* 106 S> */ B(LdaImmutableContextSlot), R(2), U8(5), U8(0),
+ B(ToName), R(9),
B(LdaConstant), U8(7),
- B(TestEqualStrict), R(10), U8(0),
- B(Mov), R(4), R(6),
+ B(TestEqualStrict), R(9), U8(0),
+ B(Mov), R(3), R(5),
B(JumpIfFalse), U8(7),
B(CallRuntime), U16(Runtime::kThrowStaticPrototypeError), R(0), U8(0),
B(CreateClosure), U8(8), U8(2), U8(2),
- B(Star), R(11),
- B(CallRuntime), U16(Runtime::kDefineClass), R(5), U8(7),
- B(Star), R(5),
- B(Mov), R(4), R(1),
- B(PopContext), R(3),
- B(Mov), R(1), R(0),
+ B(Star), R(10),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(4), U8(7),
+ B(Star), R(4),
+ B(PopContext), R(2),
+ B(Mov), R(3), R(0),
B(LdaUndefined),
/* 129 S> */ B(Return),
]
@@ -154,29 +151,28 @@ snippet: "
class C { constructor() { count++; }}
return new C();
"
-frame size: 8
+frame size: 7
parameter count: 1
-bytecode array length: 52
+bytecode array length: 49
bytecodes: [
B(CreateFunctionContext), U8(0), U8(1),
- B(PushContext), R(2),
+ B(PushContext), R(1),
/* 30 E> */ B(StackCheck),
/* 46 S> */ B(LdaZero),
/* 46 E> */ B(StaCurrentContextSlot), U8(4),
B(CreateBlockContext), U8(1),
- B(PushContext), R(3),
+ B(PushContext), R(2),
B(LdaTheHole),
- B(Star), R(7),
+ B(Star), R(6),
B(CreateClosure), U8(3), U8(0), U8(2),
- B(Star), R(4),
+ B(Star), R(3),
B(LdaConstant), U8(2),
- B(Star), R(5),
- B(Mov), R(4), R(6),
- B(CallRuntime), U16(Runtime::kDefineClass), R(5), U8(3),
- B(Star), R(5),
- B(Mov), R(6), R(1),
- B(PopContext), R(3),
- B(Mov), R(1), R(0),
+ B(Star), R(4),
+ B(Mov), R(3), R(5),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(4), U8(3),
+ B(Star), R(4),
+ B(PopContext), R(2),
+ B(Mov), R(5), R(0),
/* 87 S> */ B(Ldar), R(0),
/* 94 E> */ B(Construct), R(0), R(0), U8(0), U8(0),
/* 102 S> */ B(Return),
@@ -195,39 +191,38 @@ snippet: "
(class {})
class E { static name () {}}
"
-frame size: 8
+frame size: 7
parameter count: 1
-bytecode array length: 73
+bytecode array length: 70
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 34 S> */ B(CreateBlockContext), U8(0),
- B(PushContext), R(2),
+ B(PushContext), R(1),
B(LdaTheHole),
- B(Star), R(6),
+ B(Star), R(5),
B(CreateClosure), U8(2), U8(0), U8(2),
- B(Star), R(3),
+ B(Star), R(2),
B(LdaConstant), U8(1),
- B(Star), R(4),
- B(Mov), R(3), R(5),
- B(CallRuntime), U16(Runtime::kDefineClass), R(4), U8(3),
- B(Star), R(4),
- B(PopContext), R(2),
+ B(Star), R(3),
+ B(Mov), R(2), R(4),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(3), U8(3),
+ B(Star), R(3),
+ B(PopContext), R(1),
B(CreateBlockContext), U8(3),
- B(PushContext), R(2),
+ B(PushContext), R(1),
B(LdaTheHole),
- B(Star), R(6),
+ B(Star), R(5),
B(CreateClosure), U8(5), U8(1), U8(2),
- B(Star), R(3),
+ B(Star), R(2),
B(LdaConstant), U8(4),
- B(Star), R(4),
+ B(Star), R(3),
B(CreateClosure), U8(6), U8(2), U8(2),
- B(Star), R(7),
- B(Mov), R(3), R(5),
- B(CallRuntime), U16(Runtime::kDefineClass), R(4), U8(4),
- B(Star), R(4),
- B(Mov), R(5), R(1),
- B(PopContext), R(2),
- B(Mov), R(1), R(0),
+ B(Star), R(6),
+ B(Mov), R(2), R(4),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(3), U8(4),
+ B(Star), R(3),
+ B(PopContext), R(1),
+ B(Mov), R(4), R(0),
B(LdaUndefined),
/* 74 S> */ B(Return),
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/DestructuringAssignment.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/DestructuringAssignment.golden
index e26b79a9fb..b4c9a75ef1 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/DestructuringAssignment.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/DestructuringAssignment.golden
@@ -12,21 +12,17 @@ snippet: "
"
frame size: 14
parameter count: 1
-bytecode array length: 172
+bytecode array length: 160
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 45 S> */ B(CreateArrayLiteral), U8(0), U8(0), U8(37),
B(Star), R(1),
- /* 60 S> */ B(GetIterator), R(1), U8(1),
- B(Star), R(6),
- B(CallProperty0), R(6), R(1), U8(3),
- B(Mov), R(1), R(2),
- B(JumpIfJSReceiver), U8(7),
- B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
+ /* 60 S> */ B(GetIterator), R(1), U8(1), U8(3),
B(Star), R(4),
B(LdaNamedProperty), R(4), U8(1), U8(5),
B(Star), R(3),
B(LdaFalse),
+ B(Mov), R(1), R(2),
B(Star), R(5),
B(Mov), R(context), R(8),
/* 57 S> */ B(Ldar), R(5),
@@ -101,8 +97,8 @@ constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
]
handlers: [
- [40, 82, 90],
- [114, 147, 149],
+ [28, 70, 78],
+ [102, 135, 137],
]
---
@@ -112,21 +108,17 @@ snippet: "
"
frame size: 15
parameter count: 1
-bytecode array length: 258
+bytecode array length: 246
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 48 S> */ B(CreateArrayLiteral), U8(0), U8(0), U8(37),
B(Star), R(2),
- /* 69 S> */ B(GetIterator), R(2), U8(1),
- B(Star), R(7),
- B(CallProperty0), R(7), R(2), U8(3),
- B(Mov), R(2), R(3),
- B(JumpIfJSReceiver), U8(7),
- B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
+ /* 69 S> */ B(GetIterator), R(2), U8(1), U8(3),
B(Star), R(5),
B(LdaNamedProperty), R(5), U8(1), U8(5),
B(Star), R(4),
B(LdaFalse),
+ B(Mov), R(2), R(3),
B(Star), R(6),
B(Mov), R(context), R(9),
B(Ldar), R(6),
@@ -235,8 +227,8 @@ constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
]
handlers: [
- [40, 168, 176],
- [200, 233, 235],
+ [28, 156, 164],
+ [188, 221, 223],
]
---
@@ -246,23 +238,19 @@ snippet: "
"
frame size: 16
parameter count: 1
-bytecode array length: 223
+bytecode array length: 211
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 40 S> */ B(CreateEmptyObjectLiteral),
B(Star), R(0),
/* 51 S> */ B(CreateArrayLiteral), U8(0), U8(0), U8(37),
B(Star), R(2),
- /* 68 S> */ B(GetIterator), R(2), U8(1),
- B(Star), R(7),
- B(CallProperty0), R(7), R(2), U8(3),
- B(Mov), R(2), R(3),
- B(JumpIfJSReceiver), U8(7),
- B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
+ /* 68 S> */ B(GetIterator), R(2), U8(1), U8(3),
B(Star), R(5),
B(LdaNamedProperty), R(5), U8(1), U8(5),
B(Star), R(4),
B(LdaFalse),
+ B(Mov), R(2), R(3),
B(Star), R(6),
B(Mov), R(context), R(9),
/* 59 S> */ B(Ldar), R(6),
@@ -357,8 +345,8 @@ constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
]
handlers: [
- [43, 133, 141],
- [165, 198, 200],
+ [31, 121, 129],
+ [153, 186, 188],
]
---
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForAwaitOf.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForAwaitOf.golden
index f60e591040..43b6c0ed22 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForAwaitOf.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForAwaitOf.golden
@@ -16,7 +16,7 @@ snippet: "
"
frame size: 19
parameter count: 1
-bytecode array length: 320
+bytecode array length: 321
bytecodes: [
B(SwitchOnGeneratorState), R(0), U8(0), U8(2),
B(Mov), R(closure), R(4),
@@ -31,15 +31,15 @@ bytecodes: [
B(JumpIfUndefinedOrNull), U8(15),
B(Star), R(8),
B(CallProperty0), R(8), R(7), U8(3),
- B(JumpIfJSReceiver), U8(22),
+ B(JumpIfJSReceiver), U8(23),
B(CallRuntime), U16(Runtime::kThrowSymbolAsyncIteratorInvalid), R(0), U8(0),
- B(GetIterator), R(7), U8(5),
+ B(LdaNamedProperty), R(7), U8(4), U8(5),
B(Star), R(8),
B(CallProperty0), R(8), R(7), U8(7),
B(Star), R(8),
B(InvokeIntrinsic), U8(Runtime::k_CreateAsyncFromSyncIterator), R(8), U8(1),
B(Star), R(6),
- B(LdaNamedProperty), R(6), U8(4), U8(9),
+ B(LdaNamedProperty), R(6), U8(5), U8(9),
B(Star), R(5),
B(LdaFalse),
B(Star), R(7),
@@ -64,9 +64,9 @@ bytecodes: [
B(Mov), R(12), R(11),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(11), U8(1),
- B(LdaNamedProperty), R(11), U8(5), U8(13),
+ B(LdaNamedProperty), R(11), U8(6), U8(13),
B(JumpIfToBooleanTrue), U8(23),
- B(LdaNamedProperty), R(11), U8(6), U8(15),
+ B(LdaNamedProperty), R(11), U8(7), U8(15),
B(Star), R(11),
B(LdaFalse),
B(Star), R(7),
@@ -87,7 +87,7 @@ bytecodes: [
B(Star), R(10),
B(Ldar), R(7),
B(JumpIfToBooleanTrue), U8(94),
- B(LdaNamedProperty), R(6), U8(7), U8(17),
+ B(LdaNamedProperty), R(6), U8(8), U8(17),
B(Star), R(14),
B(JumpIfUndefinedOrNull), U8(86),
B(Mov), R(context), R(15),
@@ -95,7 +95,7 @@ bytecodes: [
B(JumpIfTrue), U8(18),
B(Wide), B(LdaSmi), I16(159),
B(Star), R(16),
- B(LdaConstant), U8(8),
+ B(LdaConstant), U8(9),
B(Star), R(17),
B(CallRuntime), U16(Runtime::kNewTypeError), R(16), U8(2),
B(Throw),
@@ -139,7 +139,7 @@ bytecodes: [
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionResolve), R(5), U8(3),
/* 57 S> */ B(Return),
B(Star), R(5),
- B(CreateCatchContext), R(5), U8(9),
+ B(CreateCatchContext), R(5), U8(10),
B(Star), R(4),
B(LdaTheHole),
B(SetPendingMessage),
@@ -154,10 +154,11 @@ bytecodes: [
/* 57 S> */ B(Return),
]
constant pool: [
- Smi [95],
- Smi [224],
+ Smi [96],
+ Smi [225],
ARRAY_BOILERPLATE_DESCRIPTION_TYPE,
SYMBOL_TYPE,
+ SYMBOL_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
@@ -166,9 +167,9 @@ constant pool: [
SCOPE_INFO_TYPE,
]
handlers: [
- [20, 292, 292],
- [74, 154, 162],
- [186, 255, 257],
+ [20, 293, 293],
+ [75, 155, 163],
+ [187, 256, 258],
]
---
@@ -180,7 +181,7 @@ snippet: "
"
frame size: 19
parameter count: 1
-bytecode array length: 341
+bytecode array length: 342
bytecodes: [
B(SwitchOnGeneratorState), R(0), U8(0), U8(2),
B(Mov), R(closure), R(4),
@@ -195,15 +196,15 @@ bytecodes: [
B(JumpIfUndefinedOrNull), U8(15),
B(Star), R(8),
B(CallProperty0), R(8), R(7), U8(3),
- B(JumpIfJSReceiver), U8(22),
+ B(JumpIfJSReceiver), U8(23),
B(CallRuntime), U16(Runtime::kThrowSymbolAsyncIteratorInvalid), R(0), U8(0),
- B(GetIterator), R(7), U8(5),
+ B(LdaNamedProperty), R(7), U8(4), U8(5),
B(Star), R(8),
B(CallProperty0), R(8), R(7), U8(7),
B(Star), R(8),
B(InvokeIntrinsic), U8(Runtime::k_CreateAsyncFromSyncIterator), R(8), U8(1),
B(Star), R(6),
- B(LdaNamedProperty), R(6), U8(4), U8(9),
+ B(LdaNamedProperty), R(6), U8(5), U8(9),
B(Star), R(5),
B(LdaFalse),
B(Star), R(7),
@@ -228,9 +229,9 @@ bytecodes: [
B(Mov), R(12), R(11),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(11), U8(1),
- B(LdaNamedProperty), R(11), U8(5), U8(13),
+ B(LdaNamedProperty), R(11), U8(6), U8(13),
B(JumpIfToBooleanTrue), U8(27),
- B(LdaNamedProperty), R(11), U8(6), U8(15),
+ B(LdaNamedProperty), R(11), U8(7), U8(15),
B(Star), R(11),
B(LdaFalse),
B(Star), R(7),
@@ -253,7 +254,7 @@ bytecodes: [
B(Star), R(10),
B(Ldar), R(7),
B(JumpIfToBooleanTrue), U8(94),
- B(LdaNamedProperty), R(6), U8(7), U8(17),
+ B(LdaNamedProperty), R(6), U8(8), U8(17),
B(Star), R(14),
B(JumpIfUndefinedOrNull), U8(86),
B(Mov), R(context), R(15),
@@ -261,7 +262,7 @@ bytecodes: [
B(JumpIfTrue), U8(18),
B(Wide), B(LdaSmi), I16(159),
B(Star), R(16),
- B(LdaConstant), U8(8),
+ B(LdaConstant), U8(9),
B(Star), R(17),
B(CallRuntime), U16(Runtime::kNewTypeError), R(16), U8(2),
B(Throw),
@@ -293,7 +294,7 @@ bytecodes: [
B(Ldar), R(10),
B(SetPendingMessage),
B(Ldar), R(8),
- B(SwitchOnSmiNoFeedback), U8(9), U8(2), I8(0),
+ B(SwitchOnSmiNoFeedback), U8(10), U8(2), I8(0),
B(Jump), U8(19),
B(Ldar), R(9),
B(ReThrow),
@@ -311,7 +312,7 @@ bytecodes: [
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionResolve), R(5), U8(3),
/* 68 S> */ B(Return),
B(Star), R(5),
- B(CreateCatchContext), R(5), U8(11),
+ B(CreateCatchContext), R(5), U8(12),
B(Star), R(4),
B(LdaTheHole),
B(SetPendingMessage),
@@ -326,10 +327,11 @@ bytecodes: [
/* 68 S> */ B(Return),
]
constant pool: [
- Smi [95],
- Smi [228],
+ Smi [96],
+ Smi [229],
ARRAY_BOILERPLATE_DESCRIPTION_TYPE,
SYMBOL_TYPE,
+ SYMBOL_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
@@ -340,9 +342,9 @@ constant pool: [
SCOPE_INFO_TYPE,
]
handlers: [
- [20, 313, 313],
- [74, 158, 166],
- [190, 259, 261],
+ [20, 314, 314],
+ [75, 159, 167],
+ [191, 260, 262],
]
---
@@ -357,7 +359,7 @@ snippet: "
"
frame size: 19
parameter count: 1
-bytecode array length: 336
+bytecode array length: 337
bytecodes: [
B(SwitchOnGeneratorState), R(0), U8(0), U8(2),
B(Mov), R(closure), R(4),
@@ -372,15 +374,15 @@ bytecodes: [
B(JumpIfUndefinedOrNull), U8(15),
B(Star), R(8),
B(CallProperty0), R(8), R(7), U8(3),
- B(JumpIfJSReceiver), U8(22),
+ B(JumpIfJSReceiver), U8(23),
B(CallRuntime), U16(Runtime::kThrowSymbolAsyncIteratorInvalid), R(0), U8(0),
- B(GetIterator), R(7), U8(5),
+ B(LdaNamedProperty), R(7), U8(4), U8(5),
B(Star), R(8),
B(CallProperty0), R(8), R(7), U8(7),
B(Star), R(8),
B(InvokeIntrinsic), U8(Runtime::k_CreateAsyncFromSyncIterator), R(8), U8(1),
B(Star), R(6),
- B(LdaNamedProperty), R(6), U8(4), U8(9),
+ B(LdaNamedProperty), R(6), U8(5), U8(9),
B(Star), R(5),
B(LdaFalse),
B(Star), R(7),
@@ -405,9 +407,9 @@ bytecodes: [
B(Mov), R(12), R(11),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(11), U8(1),
- B(LdaNamedProperty), R(11), U8(5), U8(13),
+ B(LdaNamedProperty), R(11), U8(6), U8(13),
B(JumpIfToBooleanTrue), U8(39),
- B(LdaNamedProperty), R(11), U8(6), U8(15),
+ B(LdaNamedProperty), R(11), U8(7), U8(15),
B(Star), R(11),
B(LdaFalse),
B(Star), R(7),
@@ -435,7 +437,7 @@ bytecodes: [
B(Star), R(10),
B(Ldar), R(7),
B(JumpIfToBooleanTrue), U8(94),
- B(LdaNamedProperty), R(6), U8(7), U8(19),
+ B(LdaNamedProperty), R(6), U8(8), U8(19),
B(Star), R(14),
B(JumpIfUndefinedOrNull), U8(86),
B(Mov), R(context), R(15),
@@ -443,7 +445,7 @@ bytecodes: [
B(JumpIfTrue), U8(18),
B(Wide), B(LdaSmi), I16(159),
B(Star), R(16),
- B(LdaConstant), U8(8),
+ B(LdaConstant), U8(9),
B(Star), R(17),
B(CallRuntime), U16(Runtime::kNewTypeError), R(16), U8(2),
B(Throw),
@@ -487,7 +489,7 @@ bytecodes: [
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionResolve), R(5), U8(3),
/* 114 S> */ B(Return),
B(Star), R(5),
- B(CreateCatchContext), R(5), U8(9),
+ B(CreateCatchContext), R(5), U8(10),
B(Star), R(4),
B(LdaTheHole),
B(SetPendingMessage),
@@ -502,10 +504,11 @@ bytecodes: [
/* 114 S> */ B(Return),
]
constant pool: [
- Smi [95],
- Smi [240],
+ Smi [96],
+ Smi [241],
ARRAY_BOILERPLATE_DESCRIPTION_TYPE,
SYMBOL_TYPE,
+ SYMBOL_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
@@ -514,9 +517,9 @@ constant pool: [
SCOPE_INFO_TYPE,
]
handlers: [
- [20, 308, 308],
- [74, 170, 178],
- [202, 271, 273],
+ [20, 309, 309],
+ [75, 171, 179],
+ [203, 272, 274],
]
---
@@ -529,7 +532,7 @@ snippet: "
"
frame size: 15
parameter count: 1
-bytecode array length: 258
+bytecode array length: 246
bytecodes: [
B(Mov), R(closure), R(2),
B(Mov), R(this), R(3),
@@ -541,11 +544,7 @@ bytecodes: [
B(Star), R(1),
/* 68 S> */ B(CreateArrayLiteral), U8(1), U8(1), U8(37),
B(Star), R(5),
- B(GetIterator), R(5), U8(2),
- B(Star), R(6),
- B(CallProperty0), R(6), R(5), U8(4),
- B(JumpIfJSReceiver), U8(7),
- B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
+ B(GetIterator), R(5), U8(2), U8(4),
B(Star), R(4),
B(LdaNamedProperty), R(4), U8(2), U8(6),
B(Star), R(3),
@@ -657,8 +656,8 @@ constant pool: [
SCOPE_INFO_TYPE,
]
handlers: [
- [16, 230, 230],
- [58, 111, 119],
- [143, 176, 178],
+ [16, 218, 218],
+ [46, 99, 107],
+ [131, 164, 166],
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden
index 1557e8d2a8..6c599df00c 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden
@@ -11,16 +11,12 @@ snippet: "
"
frame size: 13
parameter count: 1
-bytecode array length: 170
+bytecode array length: 158
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 48 S> */ B(CreateArrayLiteral), U8(0), U8(0), U8(37),
B(Star), R(4),
- B(GetIterator), R(4), U8(1),
- B(Star), R(5),
- B(CallProperty0), R(5), R(4), U8(3),
- B(JumpIfJSReceiver), U8(7),
- B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
+ B(GetIterator), R(4), U8(1), U8(3),
B(Star), R(3),
B(LdaNamedProperty), R(3), U8(1), U8(5),
B(Star), R(2),
@@ -98,8 +94,8 @@ constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
]
handlers: [
- [37, 80, 88],
- [112, 145, 147],
+ [25, 68, 76],
+ [100, 133, 135],
]
---
@@ -109,16 +105,12 @@ snippet: "
"
frame size: 14
parameter count: 1
-bytecode array length: 178
+bytecode array length: 166
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaConstant), U8(0),
B(Star), R(0),
- /* 68 S> */ B(GetIterator), R(0), U8(0),
- B(Star), R(6),
- B(CallProperty0), R(6), R(0), U8(2),
- B(JumpIfJSReceiver), U8(7),
- B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
+ /* 68 S> */ B(GetIterator), R(0), U8(0), U8(2),
B(Star), R(4),
B(LdaNamedProperty), R(4), U8(1), U8(4),
B(Star), R(3),
@@ -202,8 +194,8 @@ constant pool: [
Smi [9],
]
handlers: [
- [35, 82, 90],
- [114, 147, 149],
+ [23, 70, 78],
+ [102, 135, 137],
]
---
@@ -215,16 +207,12 @@ snippet: "
"
frame size: 13
parameter count: 1
-bytecode array length: 186
+bytecode array length: 174
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 48 S> */ B(CreateArrayLiteral), U8(0), U8(0), U8(37),
B(Star), R(4),
- B(GetIterator), R(4), U8(1),
- B(Star), R(5),
- B(CallProperty0), R(5), R(4), U8(3),
- B(JumpIfJSReceiver), U8(7),
- B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
+ B(GetIterator), R(4), U8(1), U8(3),
B(Star), R(3),
B(LdaNamedProperty), R(3), U8(1), U8(5),
B(Star), R(2),
@@ -309,8 +297,8 @@ constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
]
handlers: [
- [37, 96, 104],
- [128, 161, 163],
+ [25, 84, 92],
+ [116, 149, 151],
]
---
@@ -320,18 +308,14 @@ snippet: "
"
frame size: 13
parameter count: 1
-bytecode array length: 192
+bytecode array length: 180
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
B(Star), R(0),
/* 77 S> */ B(CreateArrayLiteral), U8(1), U8(1), U8(37),
B(Star), R(3),
- B(GetIterator), R(3), U8(2),
- B(Star), R(4),
- B(CallProperty0), R(4), R(3), U8(4),
- B(JumpIfJSReceiver), U8(7),
- B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
+ B(GetIterator), R(3), U8(2), U8(4),
B(Star), R(2),
B(LdaNamedProperty), R(2), U8(2), U8(6),
B(Star), R(1),
@@ -419,7 +403,7 @@ constant pool: [
Smi [9],
]
handlers: [
- [43, 96, 104],
- [128, 161, 163],
+ [31, 84, 92],
+ [116, 149, 151],
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOfLoop.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOfLoop.golden
index f50891172e..c643232d4b 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOfLoop.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOfLoop.golden
@@ -15,14 +15,10 @@ snippet: "
"
frame size: 15
parameter count: 2
-bytecode array length: 167
+bytecode array length: 155
bytecodes: [
/* 10 E> */ B(StackCheck),
- /* 34 S> */ B(GetIterator), R(arg0), U8(0),
- B(Star), R(7),
- B(CallProperty0), R(7), R(arg0), U8(2),
- B(JumpIfJSReceiver), U8(7),
- B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
+ /* 34 S> */ B(GetIterator), R(arg0), U8(0), U8(2),
B(Star), R(5),
B(LdaNamedProperty), R(5), U8(0), U8(4),
B(Star), R(4),
@@ -100,8 +96,8 @@ constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
]
handlers: [
- [31, 77, 85],
- [109, 142, 144],
+ [19, 65, 73],
+ [97, 130, 132],
]
---
@@ -113,7 +109,7 @@ snippet: "
"
frame size: 20
parameter count: 2
-bytecode array length: 251
+bytecode array length: 239
bytecodes: [
B(CreateFunctionContext), U8(0), U8(4),
B(PushContext), R(2),
@@ -132,11 +128,7 @@ bytecodes: [
B(StaCurrentContextSlot), U8(4),
/* 34 S> */ B(LdaContextSlot), R(3), U8(4), U8(0),
B(Star), R(6),
- B(GetIterator), R(6), U8(0),
- B(Star), R(7),
- B(CallProperty0), R(7), R(6), U8(2),
- B(JumpIfJSReceiver), U8(7),
- B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
+ B(GetIterator), R(6), U8(0), U8(2),
B(Star), R(5),
B(LdaNamedProperty), R(5), U8(2), U8(4),
B(Star), R(4),
@@ -241,8 +233,8 @@ constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
]
handlers: [
- [64, 159, 167],
- [191, 224, 226],
+ [52, 147, 155],
+ [179, 212, 214],
]
---
@@ -254,14 +246,10 @@ snippet: "
"
frame size: 14
parameter count: 2
-bytecode array length: 184
+bytecode array length: 172
bytecodes: [
/* 10 E> */ B(StackCheck),
- /* 34 S> */ B(GetIterator), R(arg0), U8(0),
- B(Star), R(5),
- B(CallProperty0), R(5), R(arg0), U8(2),
- B(JumpIfJSReceiver), U8(7),
- B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
+ /* 34 S> */ B(GetIterator), R(arg0), U8(0), U8(2),
B(Star), R(3),
B(LdaNamedProperty), R(3), U8(0), U8(4),
B(Star), R(2),
@@ -349,8 +337,8 @@ constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
]
handlers: [
- [31, 94, 102],
- [126, 159, 161],
+ [19, 82, 90],
+ [114, 147, 149],
]
---
@@ -362,14 +350,10 @@ snippet: "
"
frame size: 17
parameter count: 2
-bytecode array length: 178
+bytecode array length: 166
bytecodes: [
/* 10 E> */ B(StackCheck),
- /* 41 S> */ B(GetIterator), R(arg0), U8(0),
- B(Star), R(9),
- B(CallProperty0), R(9), R(arg0), U8(2),
- B(JumpIfJSReceiver), U8(7),
- B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
+ /* 41 S> */ B(GetIterator), R(arg0), U8(0), U8(2),
B(Star), R(7),
B(LdaNamedProperty), R(7), U8(0), U8(4),
B(Star), R(6),
@@ -453,8 +437,8 @@ constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
]
handlers: [
- [31, 88, 96],
- [120, 153, 155],
+ [19, 76, 84],
+ [108, 141, 143],
]
---
@@ -466,7 +450,7 @@ snippet: "
"
frame size: 16
parameter count: 2
-bytecode array length: 208
+bytecode array length: 196
bytecodes: [
B(SwitchOnGeneratorState), R(0), U8(0), U8(1),
B(Mov), R(closure), R(5),
@@ -483,11 +467,7 @@ bytecodes: [
/* 11 E> */ B(Throw),
B(Ldar), R(5),
/* 55 S> */ B(Return),
- /* 35 S> */ B(GetIterator), R(arg0), U8(0),
- B(Star), R(8),
- B(CallProperty0), R(8), R(arg0), U8(2),
- B(JumpIfJSReceiver), U8(7),
- B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
+ /* 35 S> */ B(GetIterator), R(arg0), U8(0), U8(2),
B(Star), R(6),
B(LdaNamedProperty), R(6), U8(3), U8(4),
B(Star), R(5),
@@ -568,8 +548,8 @@ constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
]
handlers: [
- [72, 118, 126],
- [150, 183, 185],
+ [60, 106, 114],
+ [138, 171, 173],
]
---
@@ -581,7 +561,7 @@ snippet: "
"
frame size: 15
parameter count: 2
-bytecode array length: 252
+bytecode array length: 240
bytecodes: [
B(SwitchOnGeneratorState), R(0), U8(0), U8(2),
B(Mov), R(closure), R(4),
@@ -598,11 +578,7 @@ bytecodes: [
/* 11 E> */ B(Throw),
B(Ldar), R(4),
/* 49 S> */ B(Return),
- /* 35 S> */ B(GetIterator), R(arg0), U8(0),
- B(Star), R(7),
- B(CallProperty0), R(7), R(arg0), U8(2),
- B(JumpIfJSReceiver), U8(7),
- B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
+ /* 35 S> */ B(GetIterator), R(arg0), U8(0), U8(2),
B(Star), R(5),
B(LdaNamedProperty), R(5), U8(4), U8(4),
B(Star), R(4),
@@ -690,7 +666,7 @@ bytecodes: [
]
constant pool: [
Smi [22],
- Smi [125],
+ Smi [113],
Smi [10],
Smi [7],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
@@ -704,8 +680,8 @@ constant pool: [
Smi [9],
]
handlers: [
- [72, 156, 164],
- [188, 221, 223],
+ [60, 144, 152],
+ [176, 209, 211],
]
---
@@ -717,7 +693,7 @@ snippet: "
"
frame size: 17
parameter count: 2
-bytecode array length: 222
+bytecode array length: 210
bytecodes: [
B(Mov), R(closure), R(5),
B(Mov), R(this), R(6),
@@ -725,11 +701,7 @@ bytecodes: [
B(Star), R(0),
/* 16 E> */ B(StackCheck),
B(Mov), R(context), R(5),
- /* 40 S> */ B(GetIterator), R(arg0), U8(0),
- B(Star), R(9),
- B(CallProperty0), R(9), R(arg0), U8(2),
- B(JumpIfJSReceiver), U8(7),
- B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
+ /* 40 S> */ B(GetIterator), R(arg0), U8(0), U8(2),
B(Star), R(7),
B(LdaNamedProperty), R(7), U8(0), U8(4),
B(Star), R(6),
@@ -827,9 +799,9 @@ constant pool: [
SCOPE_INFO_TYPE,
]
handlers: [
- [16, 194, 194],
- [46, 92, 100],
- [124, 157, 159],
+ [16, 182, 182],
+ [34, 80, 88],
+ [112, 145, 147],
]
---
@@ -841,7 +813,7 @@ snippet: "
"
frame size: 16
parameter count: 2
-bytecode array length: 258
+bytecode array length: 246
bytecodes: [
B(SwitchOnGeneratorState), R(0), U8(0), U8(1),
B(Mov), R(closure), R(4),
@@ -850,11 +822,7 @@ bytecodes: [
B(Star), R(0),
/* 16 E> */ B(StackCheck),
B(Mov), R(context), R(4),
- /* 40 S> */ B(GetIterator), R(arg0), U8(0),
- B(Star), R(8),
- B(CallProperty0), R(8), R(arg0), U8(2),
- B(JumpIfJSReceiver), U8(7),
- B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
+ /* 40 S> */ B(GetIterator), R(arg0), U8(0), U8(2),
B(Star), R(6),
B(LdaNamedProperty), R(6), U8(1), U8(4),
B(Star), R(5),
@@ -956,7 +924,7 @@ bytecodes: [
/* 54 S> */ B(Return),
]
constant pool: [
- Smi [103],
+ Smi [91],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
@@ -965,8 +933,8 @@ constant pool: [
SCOPE_INFO_TYPE,
]
handlers: [
- [20, 230, 230],
- [50, 128, 136],
- [160, 193, 195],
+ [20, 218, 218],
+ [38, 116, 124],
+ [148, 181, 183],
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden
index 157b58d81d..ca3ef0bef3 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden
@@ -100,7 +100,7 @@ snippet: "
"
frame size: 15
parameter count: 1
-bytecode array length: 258
+bytecode array length: 246
bytecodes: [
B(SwitchOnGeneratorState), R(0), U8(0), U8(2),
B(Mov), R(closure), R(4),
@@ -119,11 +119,7 @@ bytecodes: [
/* 44 S> */ B(Return),
/* 30 S> */ B(CreateArrayLiteral), U8(4), U8(0), U8(37),
B(Star), R(6),
- B(GetIterator), R(6), U8(1),
- B(Star), R(7),
- B(CallProperty0), R(7), R(6), U8(3),
- B(JumpIfJSReceiver), U8(7),
- B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
+ B(GetIterator), R(6), U8(1), U8(3),
B(Star), R(5),
B(LdaNamedProperty), R(5), U8(5), U8(5),
B(Star), R(4),
@@ -211,7 +207,7 @@ bytecodes: [
]
constant pool: [
Smi [22],
- Smi [131],
+ Smi [119],
Smi [10],
Smi [7],
ARRAY_BOILERPLATE_DESCRIPTION_TYPE,
@@ -226,8 +222,8 @@ constant pool: [
Smi [9],
]
handlers: [
- [78, 162, 170],
- [194, 227, 229],
+ [66, 150, 158],
+ [182, 215, 217],
]
---
@@ -236,9 +232,9 @@ snippet: "
function* f() { yield* g() }
f();
"
-frame size: 8
+frame size: 7
parameter count: 1
-bytecode array length: 210
+bytecode array length: 198
bytecodes: [
B(SwitchOnGeneratorState), R(0), U8(0), U8(2),
B(Mov), R(closure), R(1),
@@ -259,11 +255,7 @@ bytecodes: [
B(Star), R(5),
/* 50 E> */ B(CallUndefinedReceiver0), R(5), U8(2),
B(Star), R(6),
- B(GetIterator), R(6), U8(4),
- B(Star), R(7),
- B(CallProperty0), R(7), R(6), U8(6),
- B(JumpIfJSReceiver), U8(7),
- B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
+ B(GetIterator), R(6), U8(4), U8(6),
B(Star), R(3),
B(LdaNamedProperty), R(3), U8(5), U8(8),
B(Star), R(5),
@@ -320,7 +312,7 @@ bytecodes: [
]
constant pool: [
Smi [22],
- Smi [178],
+ Smi [166],
Smi [10],
Smi [7],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["g"],
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/NewAndSpread.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/NewAndSpread.golden
index dce8d7ac8c..c29b74c0e2 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/NewAndSpread.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/NewAndSpread.golden
@@ -10,29 +10,28 @@ snippet: "
class A { constructor(...args) { this.args = args; } }
new A(...[1, 2, 3]);
"
-frame size: 7
+frame size: 6
parameter count: 1
-bytecode array length: 51
+bytecode array length: 48
bytecodes: [
/* 30 E> */ B(StackCheck),
B(CreateBlockContext), U8(0),
- B(PushContext), R(2),
+ B(PushContext), R(1),
B(LdaTheHole),
- B(Star), R(6),
+ B(Star), R(5),
B(CreateClosure), U8(2), U8(0), U8(2),
- B(Star), R(3),
+ B(Star), R(2),
B(LdaConstant), U8(1),
- B(Star), R(4),
- B(Mov), R(3), R(5),
- B(CallRuntime), U16(Runtime::kDefineClass), R(4), U8(3),
- B(Star), R(4),
- B(Mov), R(5), R(1),
- B(PopContext), R(2),
- B(Mov), R(1), R(0),
- /* 89 S> */ B(CreateArrayLiteral), U8(3), U8(0), U8(37),
B(Star), R(3),
+ B(Mov), R(2), R(4),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(3), U8(3),
+ B(Star), R(3),
+ B(PopContext), R(1),
+ B(Mov), R(4), R(0),
+ /* 89 S> */ B(CreateArrayLiteral), U8(3), U8(0), U8(37),
+ B(Star), R(2),
B(Ldar), R(0),
- /* 89 E> */ B(ConstructWithSpread), R(0), R(3), U8(1), U8(1),
+ /* 89 E> */ B(ConstructWithSpread), R(0), R(2), U8(1), U8(1),
B(LdaUndefined),
/* 110 S> */ B(Return),
]
@@ -50,31 +49,30 @@ snippet: "
class A { constructor(...args) { this.args = args; } }
new A(0, ...[1, 2, 3]);
"
-frame size: 7
+frame size: 6
parameter count: 1
-bytecode array length: 54
+bytecode array length: 51
bytecodes: [
/* 30 E> */ B(StackCheck),
B(CreateBlockContext), U8(0),
- B(PushContext), R(2),
+ B(PushContext), R(1),
B(LdaTheHole),
- B(Star), R(6),
+ B(Star), R(5),
B(CreateClosure), U8(2), U8(0), U8(2),
- B(Star), R(3),
+ B(Star), R(2),
B(LdaConstant), U8(1),
- B(Star), R(4),
- B(Mov), R(3), R(5),
- B(CallRuntime), U16(Runtime::kDefineClass), R(4), U8(3),
- B(Star), R(4),
- B(Mov), R(5), R(1),
- B(PopContext), R(2),
- B(Mov), R(1), R(0),
- /* 89 S> */ B(LdaZero),
B(Star), R(3),
+ B(Mov), R(2), R(4),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(3), U8(3),
+ B(Star), R(3),
+ B(PopContext), R(1),
+ B(Mov), R(4), R(0),
+ /* 89 S> */ B(LdaZero),
+ B(Star), R(2),
B(CreateArrayLiteral), U8(3), U8(0), U8(37),
- B(Star), R(4),
+ B(Star), R(3),
B(Ldar), R(0),
- /* 89 E> */ B(ConstructWithSpread), R(0), R(3), U8(2), U8(1),
+ /* 89 E> */ B(ConstructWithSpread), R(0), R(2), U8(2), U8(1),
B(LdaUndefined),
/* 113 S> */ B(Return),
]
@@ -92,56 +90,51 @@ snippet: "
class A { constructor(...args) { this.args = args; } }
new A(0, ...[1, 2, 3], 4);
"
-frame size: 9
+frame size: 7
parameter count: 1
-bytecode array length: 130
+bytecode array length: 115
bytecodes: [
/* 30 E> */ B(StackCheck),
B(CreateBlockContext), U8(0),
- B(PushContext), R(2),
+ B(PushContext), R(1),
B(LdaTheHole),
- B(Star), R(6),
+ B(Star), R(5),
B(CreateClosure), U8(2), U8(0), U8(2),
- B(Star), R(3),
+ B(Star), R(2),
B(LdaConstant), U8(1),
- B(Star), R(4),
- B(Mov), R(3), R(5),
- B(CallRuntime), U16(Runtime::kDefineClass), R(4), U8(3),
- B(Star), R(4),
- B(Mov), R(5), R(1),
- B(PopContext), R(2),
- B(Mov), R(1), R(0),
+ B(Star), R(3),
+ B(Mov), R(2), R(4),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(3), U8(3),
+ B(Star), R(3),
+ B(PopContext), R(1),
+ B(Mov), R(4), R(0),
/* 89 S> */ B(CreateArrayLiteral), U8(3), U8(0), U8(37),
- B(Star), R(4),
- B(LdaConstant), U8(4),
B(Star), R(3),
+ B(LdaConstant), U8(4),
+ B(Star), R(2),
/* 101 S> */ B(CreateArrayLiteral), U8(5), U8(1), U8(37),
- B(Star), R(7),
- B(GetIterator), R(7), U8(2),
- B(Star), R(8),
- B(CallProperty0), R(8), R(7), U8(4),
- B(Mov), R(5), R(2),
- B(JumpIfJSReceiver), U8(7),
- B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
B(Star), R(6),
- B(LdaNamedProperty), R(6), U8(6), U8(6),
+ B(GetIterator), R(6), U8(2), U8(4),
B(Star), R(5),
- B(CallProperty0), R(5), R(6), U8(15),
- B(Star), R(7),
+ B(LdaNamedProperty), R(5), U8(6), U8(6),
+ B(Star), R(4),
+ B(Mov), R(0), R(1),
+ B(CallProperty0), R(4), R(5), U8(15),
+ B(Star), R(6),
B(JumpIfJSReceiver), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(7), U8(1),
- B(LdaNamedProperty), R(7), U8(7), U8(17),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(6), U8(1),
+ B(LdaNamedProperty), R(6), U8(7), U8(17),
B(JumpIfToBooleanTrue), U8(19),
- B(LdaNamedProperty), R(7), U8(8), U8(8),
- B(StaInArrayLiteral), R(4), R(3), U8(13),
- B(Ldar), R(3),
+ B(LdaNamedProperty), R(6), U8(8), U8(8),
+ B(StaInArrayLiteral), R(3), R(2), U8(13),
+ B(Ldar), R(2),
B(Inc), U8(12),
- B(Star), R(3),
+ B(Star), R(2),
B(JumpLoop), U8(33), I8(0),
B(LdaSmi), I8(4),
- B(StaInArrayLiteral), R(4), R(3), U8(13),
- B(Mov), R(4), R(3),
- B(CallJSRuntime), U8(%reflect_construct), R(2), U8(2),
+ B(StaInArrayLiteral), R(3), R(2), U8(13),
+ B(Mov), R(3), R(2),
+ B(CallJSRuntime), U8(%reflect_construct), R(1), U8(2),
B(LdaUndefined),
/* 116 S> */ B(Return),
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateAccessorDeclaration.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateAccessorDeclaration.golden
index aceee552b5..2c0af93787 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateAccessorDeclaration.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateAccessorDeclaration.golden
@@ -15,45 +15,44 @@ snippet: "
}
}
"
-frame size: 8
+frame size: 7
parameter count: 1
-bytecode array length: 68
+bytecode array length: 65
bytecodes: [
/* 30 E> */ B(StackCheck),
B(CreateBlockContext), U8(0),
- B(PushContext), R(2),
+ B(PushContext), R(1),
+ B(LdaConstant), U8(2),
+ B(Star), R(3),
+ B(CallRuntime), U16(Runtime::kCreatePrivateNameSymbol), R(3), U8(1),
+ B(StaCurrentContextSlot), U8(5),
B(LdaTheHole),
B(Star), R(6),
- B(CreateClosure), U8(2), U8(0), U8(2),
- B(Star), R(3),
+ B(CreateClosure), U8(3), U8(0), U8(2),
+ B(Star), R(2),
B(LdaConstant), U8(1),
B(Star), R(4),
- B(Mov), R(3), R(5),
+ B(Mov), R(2), R(5),
B(CallRuntime), U16(Runtime::kDefineClass), R(4), U8(3),
B(Star), R(4),
- B(Mov), R(5), R(1),
- B(LdaConstant), U8(3),
- B(Star), R(5),
- B(CallRuntime), U16(Runtime::kCreatePrivateNameSymbol), R(5), U8(1),
- B(StaCurrentContextSlot), U8(5),
B(CreateClosure), U8(4), U8(1), U8(2),
- B(Star), R(6),
+ B(Star), R(5),
B(CreateClosure), U8(5), U8(2), U8(2),
- B(Star), R(7),
- B(CallRuntime), U16(Runtime::kCreatePrivateAccessors), R(6), U8(2),
+ B(Star), R(6),
+ B(CallRuntime), U16(Runtime::kCreatePrivateAccessors), R(5), U8(2),
B(StaCurrentContextSlot), U8(4),
- B(PopContext), R(2),
- B(Mov), R(1), R(0),
+ B(PopContext), R(1),
+ B(Mov), R(2), R(0),
B(LdaUndefined),
/* 101 S> */ B(Return),
]
constant pool: [
SCOPE_INFO_TYPE,
FIXED_ARRAY_TYPE,
- SHARED_FUNCTION_INFO_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["A"],
SHARED_FUNCTION_INFO_TYPE,
SHARED_FUNCTION_INFO_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
]
handlers: [
]
@@ -66,44 +65,43 @@ snippet: "
}
}
"
-frame size: 8
+frame size: 7
parameter count: 1
-bytecode array length: 65
+bytecode array length: 62
bytecodes: [
/* 30 E> */ B(StackCheck),
B(CreateBlockContext), U8(0),
- B(PushContext), R(2),
+ B(PushContext), R(1),
+ B(LdaConstant), U8(2),
+ B(Star), R(3),
+ B(CallRuntime), U16(Runtime::kCreatePrivateNameSymbol), R(3), U8(1),
+ B(StaCurrentContextSlot), U8(5),
B(LdaTheHole),
B(Star), R(6),
- B(CreateClosure), U8(2), U8(0), U8(2),
- B(Star), R(3),
+ B(CreateClosure), U8(3), U8(0), U8(2),
+ B(Star), R(2),
B(LdaConstant), U8(1),
B(Star), R(4),
- B(Mov), R(3), R(5),
+ B(Mov), R(2), R(5),
B(CallRuntime), U16(Runtime::kDefineClass), R(4), U8(3),
B(Star), R(4),
- B(Mov), R(5), R(1),
- B(LdaConstant), U8(3),
- B(Star), R(5),
- B(CallRuntime), U16(Runtime::kCreatePrivateNameSymbol), R(5), U8(1),
- B(StaCurrentContextSlot), U8(5),
B(CreateClosure), U8(4), U8(1), U8(2),
- B(Star), R(6),
+ B(Star), R(5),
B(LdaNull),
- B(Star), R(7),
- B(CallRuntime), U16(Runtime::kCreatePrivateAccessors), R(6), U8(2),
+ B(Star), R(6),
+ B(CallRuntime), U16(Runtime::kCreatePrivateAccessors), R(5), U8(2),
B(StaCurrentContextSlot), U8(4),
- B(PopContext), R(2),
- B(Mov), R(1), R(0),
+ B(PopContext), R(1),
+ B(Mov), R(2), R(0),
B(LdaUndefined),
/* 81 S> */ B(Return),
]
constant pool: [
SCOPE_INFO_TYPE,
FIXED_ARRAY_TYPE,
- SHARED_FUNCTION_INFO_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["B"],
SHARED_FUNCTION_INFO_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
]
handlers: [
]
@@ -116,44 +114,43 @@ snippet: "
}
}
"
-frame size: 8
+frame size: 7
parameter count: 1
-bytecode array length: 65
+bytecode array length: 62
bytecodes: [
/* 30 E> */ B(StackCheck),
B(CreateBlockContext), U8(0),
- B(PushContext), R(2),
+ B(PushContext), R(1),
+ B(LdaConstant), U8(2),
+ B(Star), R(3),
+ B(CallRuntime), U16(Runtime::kCreatePrivateNameSymbol), R(3), U8(1),
+ B(StaCurrentContextSlot), U8(5),
B(LdaTheHole),
B(Star), R(6),
- B(CreateClosure), U8(2), U8(0), U8(2),
- B(Star), R(3),
+ B(CreateClosure), U8(3), U8(0), U8(2),
+ B(Star), R(2),
B(LdaConstant), U8(1),
B(Star), R(4),
- B(Mov), R(3), R(5),
+ B(Mov), R(2), R(5),
B(CallRuntime), U16(Runtime::kDefineClass), R(4), U8(3),
B(Star), R(4),
- B(Mov), R(5), R(1),
- B(LdaConstant), U8(3),
- B(Star), R(5),
- B(CallRuntime), U16(Runtime::kCreatePrivateNameSymbol), R(5), U8(1),
- B(StaCurrentContextSlot), U8(5),
B(LdaNull),
- B(Star), R(6),
+ B(Star), R(5),
B(CreateClosure), U8(4), U8(1), U8(2),
- B(Star), R(7),
- B(CallRuntime), U16(Runtime::kCreatePrivateAccessors), R(6), U8(2),
+ B(Star), R(6),
+ B(CallRuntime), U16(Runtime::kCreatePrivateAccessors), R(5), U8(2),
B(StaCurrentContextSlot), U8(4),
- B(PopContext), R(2),
- B(Mov), R(1), R(0),
+ B(PopContext), R(1),
+ B(Mov), R(2), R(0),
B(LdaUndefined),
/* 74 S> */ B(Return),
]
constant pool: [
SCOPE_INFO_TYPE,
FIXED_ARRAY_TYPE,
- SHARED_FUNCTION_INFO_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["C"],
SHARED_FUNCTION_INFO_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
]
handlers: [
]
@@ -172,74 +169,72 @@ snippet: "
}
}
"
-frame size: 10
+frame size: 8
parameter count: 1
-bytecode array length: 133
+bytecode array length: 127
bytecodes: [
/* 30 E> */ B(StackCheck),
B(CreateBlockContext), U8(0),
- B(PushContext), R(4),
+ B(PushContext), R(2),
+ B(LdaConstant), U8(2),
+ B(Star), R(4),
+ B(CallRuntime), U16(Runtime::kCreatePrivateNameSymbol), R(4), U8(1),
+ B(StaCurrentContextSlot), U8(5),
B(LdaTheHole),
- B(Star), R(8),
- B(CreateClosure), U8(2), U8(0), U8(2),
- B(Star), R(5),
- B(LdaConstant), U8(1),
- B(Star), R(6),
- B(Mov), R(5), R(7),
- B(CallRuntime), U16(Runtime::kDefineClass), R(6), U8(3),
- B(Star), R(6),
- B(Mov), R(7), R(3),
- B(LdaConstant), U8(3),
B(Star), R(7),
- B(CallRuntime), U16(Runtime::kCreatePrivateNameSymbol), R(7), U8(1),
- B(StaCurrentContextSlot), U8(5),
+ B(CreateClosure), U8(3), U8(0), U8(2),
+ B(Star), R(3),
+ B(LdaConstant), U8(1),
+ B(Star), R(5),
+ B(Mov), R(3), R(6),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(5), U8(3),
+ B(Star), R(5),
B(CreateClosure), U8(4), U8(1), U8(2),
- B(Star), R(8),
+ B(Star), R(6),
B(CreateClosure), U8(5), U8(2), U8(2),
- B(Star), R(9),
- B(CallRuntime), U16(Runtime::kCreatePrivateAccessors), R(8), U8(2),
+ B(Star), R(7),
+ B(CallRuntime), U16(Runtime::kCreatePrivateAccessors), R(6), U8(2),
B(StaCurrentContextSlot), U8(4),
- B(PopContext), R(4),
+ B(PopContext), R(2),
B(Mov), R(3), R(0),
/* 38 E> */ B(CreateBlockContext), U8(6),
- B(PushContext), R(4),
- /* 118 E> */ B(CreateClosure), U8(8), U8(3), U8(2),
- B(Star), R(5),
- B(LdaConstant), U8(7),
- B(Star), R(6),
- B(Mov), R(5), R(7),
- B(Mov), R(3), R(8),
- B(CallRuntime), U16(Runtime::kDefineClass), R(6), U8(3),
- B(Star), R(6),
- B(Mov), R(7), R(2),
- B(LdaConstant), U8(9),
- B(Star), R(7),
- B(CallRuntime), U16(Runtime::kCreatePrivateNameSymbol), R(7), U8(1),
+ B(PushContext), R(2),
+ B(LdaConstant), U8(8),
+ B(Star), R(4),
+ B(CallRuntime), U16(Runtime::kCreatePrivateNameSymbol), R(4), U8(1),
B(StaCurrentContextSlot), U8(5),
+ /* 118 E> */ B(CreateClosure), U8(9), U8(3), U8(2),
+ B(Star), R(3),
+ B(LdaConstant), U8(7),
+ B(Star), R(5),
+ B(Mov), R(3), R(6),
+ B(Mov), R(0), R(7),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(5), U8(3),
+ B(Star), R(5),
B(CreateClosure), U8(10), U8(4), U8(2),
- B(Star), R(8),
+ B(Star), R(6),
B(CreateClosure), U8(11), U8(5), U8(2),
- B(Star), R(9),
- B(CallRuntime), U16(Runtime::kCreatePrivateAccessors), R(8), U8(2),
+ B(Star), R(7),
+ B(CallRuntime), U16(Runtime::kCreatePrivateAccessors), R(6), U8(2),
B(StaCurrentContextSlot), U8(4),
- B(PopContext), R(4),
- B(Mov), R(2), R(1),
+ B(PopContext), R(2),
+ B(Mov), R(3), R(1),
B(LdaUndefined),
/* 175 S> */ B(Return),
]
constant pool: [
SCOPE_INFO_TYPE,
FIXED_ARRAY_TYPE,
- SHARED_FUNCTION_INFO_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["D"],
SHARED_FUNCTION_INFO_TYPE,
SHARED_FUNCTION_INFO_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
SCOPE_INFO_TYPE,
FIXED_ARRAY_TYPE,
- SHARED_FUNCTION_INFO_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["E"],
SHARED_FUNCTION_INFO_TYPE,
SHARED_FUNCTION_INFO_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
]
handlers: [
]
@@ -254,52 +249,50 @@ snippet: "
new C();
}
"
-frame size: 10
+frame size: 8
parameter count: 1
-bytecode array length: 119
+bytecode array length: 113
bytecodes: [
/* 30 E> */ B(StackCheck),
B(CreateBlockContext), U8(0),
- B(PushContext), R(4),
+ B(PushContext), R(2),
B(LdaTheHole),
- B(Star), R(8),
+ B(Star), R(6),
B(CreateClosure), U8(2), U8(0), U8(2),
- B(Star), R(5),
+ B(Star), R(3),
B(LdaConstant), U8(1),
- B(Star), R(6),
+ B(Star), R(4),
B(CreateClosure), U8(3), U8(1), U8(2),
- B(Star), R(9),
- B(Mov), R(5), R(7),
- B(CallRuntime), U16(Runtime::kDefineClass), R(6), U8(4),
- B(Star), R(6),
- B(Mov), R(7), R(3),
- B(PopContext), R(4),
- B(Mov), R(3), R(0),
- /* 38 E> */ B(CreateBlockContext), U8(4),
- B(PushContext), R(4),
- /* 77 E> */ B(CreateClosure), U8(6), U8(2), U8(2),
- B(Star), R(5),
- B(LdaConstant), U8(5),
- B(Star), R(6),
- B(Mov), R(5), R(7),
- B(Mov), R(3), R(8),
- B(CallRuntime), U16(Runtime::kDefineClass), R(6), U8(3),
- B(Star), R(6),
- B(Mov), R(7), R(2),
- B(LdaConstant), U8(7),
B(Star), R(7),
- B(CallRuntime), U16(Runtime::kCreatePrivateNameSymbol), R(7), U8(1),
+ B(Mov), R(3), R(5),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(4), U8(4),
+ B(Star), R(4),
+ B(PopContext), R(2),
+ B(Mov), R(5), R(0),
+ /* 38 E> */ B(CreateBlockContext), U8(4),
+ B(PushContext), R(2),
+ B(LdaConstant), U8(6),
+ B(Star), R(4),
+ B(CallRuntime), U16(Runtime::kCreatePrivateNameSymbol), R(4), U8(1),
B(StaCurrentContextSlot), U8(5),
+ /* 77 E> */ B(CreateClosure), U8(7), U8(2), U8(2),
+ B(Star), R(3),
+ B(LdaConstant), U8(5),
+ B(Star), R(5),
+ B(Mov), R(3), R(6),
+ B(Mov), R(0), R(7),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(5), U8(3),
+ B(Star), R(5),
B(CreateClosure), U8(8), U8(3), U8(2),
- B(Star), R(8),
- B(Ldar), R(6),
- B(StaNamedProperty), R(8), U8(9), U8(0),
+ B(Star), R(6),
+ B(Ldar), R(5),
+ B(StaNamedProperty), R(6), U8(9), U8(0),
B(LdaNull),
- B(Star), R(9),
- B(CallRuntime), U16(Runtime::kCreatePrivateAccessors), R(8), U8(2),
+ B(Star), R(7),
+ B(CallRuntime), U16(Runtime::kCreatePrivateAccessors), R(6), U8(2),
B(StaCurrentContextSlot), U8(4),
- B(PopContext), R(4),
- B(Mov), R(2), R(1),
+ B(PopContext), R(2),
+ B(Mov), R(3), R(1),
/* 122 S> */ B(Ldar), R(1),
/* 122 E> */ B(Construct), R(1), R(0), U8(0), U8(2),
B(LdaUndefined),
@@ -312,9 +305,9 @@ constant pool: [
SHARED_FUNCTION_INFO_TYPE,
SCOPE_INFO_TYPE,
FIXED_ARRAY_TYPE,
- SHARED_FUNCTION_INFO_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["C"],
SHARED_FUNCTION_INFO_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
SYMBOL_TYPE,
]
handlers: [
@@ -330,52 +323,50 @@ snippet: "
new C();
}
"
-frame size: 10
+frame size: 8
parameter count: 1
-bytecode array length: 119
+bytecode array length: 113
bytecodes: [
/* 30 E> */ B(StackCheck),
B(CreateBlockContext), U8(0),
- B(PushContext), R(4),
+ B(PushContext), R(2),
B(LdaTheHole),
- B(Star), R(8),
+ B(Star), R(6),
B(CreateClosure), U8(2), U8(0), U8(2),
- B(Star), R(5),
+ B(Star), R(3),
B(LdaConstant), U8(1),
- B(Star), R(6),
+ B(Star), R(4),
B(CreateClosure), U8(3), U8(1), U8(2),
- B(Star), R(9),
- B(Mov), R(5), R(7),
- B(CallRuntime), U16(Runtime::kDefineClass), R(6), U8(4),
- B(Star), R(6),
- B(Mov), R(7), R(3),
- B(PopContext), R(4),
- B(Mov), R(3), R(0),
- /* 38 E> */ B(CreateBlockContext), U8(4),
- B(PushContext), R(4),
- /* 80 E> */ B(CreateClosure), U8(6), U8(2), U8(2),
- B(Star), R(5),
- B(LdaConstant), U8(5),
- B(Star), R(6),
- B(Mov), R(5), R(7),
- B(Mov), R(3), R(8),
- B(CallRuntime), U16(Runtime::kDefineClass), R(6), U8(3),
- B(Star), R(6),
- B(Mov), R(7), R(2),
- B(LdaConstant), U8(7),
B(Star), R(7),
- B(CallRuntime), U16(Runtime::kCreatePrivateNameSymbol), R(7), U8(1),
+ B(Mov), R(3), R(5),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(4), U8(4),
+ B(Star), R(4),
+ B(PopContext), R(2),
+ B(Mov), R(5), R(0),
+ /* 38 E> */ B(CreateBlockContext), U8(4),
+ B(PushContext), R(2),
+ B(LdaConstant), U8(6),
+ B(Star), R(4),
+ B(CallRuntime), U16(Runtime::kCreatePrivateNameSymbol), R(4), U8(1),
B(StaCurrentContextSlot), U8(5),
+ /* 80 E> */ B(CreateClosure), U8(7), U8(2), U8(2),
+ B(Star), R(3),
+ B(LdaConstant), U8(5),
+ B(Star), R(5),
+ B(Mov), R(3), R(6),
+ B(Mov), R(0), R(7),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(5), U8(3),
+ B(Star), R(5),
B(LdaNull),
- B(Star), R(8),
+ B(Star), R(6),
B(CreateClosure), U8(8), U8(3), U8(2),
- B(Star), R(9),
- B(Ldar), R(6),
- B(StaNamedProperty), R(9), U8(9), U8(0),
- B(CallRuntime), U16(Runtime::kCreatePrivateAccessors), R(8), U8(2),
+ B(Star), R(7),
+ B(Ldar), R(5),
+ B(StaNamedProperty), R(7), U8(9), U8(0),
+ B(CallRuntime), U16(Runtime::kCreatePrivateAccessors), R(6), U8(2),
B(StaCurrentContextSlot), U8(4),
- B(PopContext), R(4),
- B(Mov), R(2), R(1),
+ B(PopContext), R(2),
+ B(Mov), R(3), R(1),
/* 126 S> */ B(Ldar), R(1),
/* 126 E> */ B(Construct), R(1), R(0), U8(0), U8(2),
B(LdaUndefined),
@@ -388,9 +379,9 @@ constant pool: [
SHARED_FUNCTION_INFO_TYPE,
SCOPE_INFO_TYPE,
FIXED_ARRAY_TYPE,
- SHARED_FUNCTION_INFO_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["C"],
SHARED_FUNCTION_INFO_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
SYMBOL_TYPE,
]
handlers: [
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateClassFields.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateClassFields.golden
index dbe688f814..62603a93f8 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateClassFields.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateClassFields.golden
@@ -22,70 +22,68 @@ snippet: "
new B;
}
"
-frame size: 10
+frame size: 7
parameter count: 1
-bytecode array length: 137
+bytecode array length: 131
bytecodes: [
/* 30 E> */ B(StackCheck),
B(CreateBlockContext), U8(0),
- B(PushContext), R(4),
- B(LdaTheHole),
- B(Star), R(8),
- B(CreateClosure), U8(2), U8(0), U8(2),
- B(Star), R(5),
- B(LdaConstant), U8(1),
- B(Star), R(6),
- B(LdaConstant), U8(3),
- B(Star), R(9),
- B(LdaConstant), U8(3),
- B(Star), R(9),
- B(CallRuntime), U16(Runtime::kCreatePrivateNameSymbol), R(9), U8(1),
+ B(PushContext), R(2),
+ B(LdaConstant), U8(2),
+ B(Star), R(4),
+ B(LdaConstant), U8(2),
+ B(Star), R(4),
+ B(CallRuntime), U16(Runtime::kCreatePrivateNameSymbol), R(4), U8(1),
B(StaCurrentContextSlot), U8(4),
- B(Mov), R(5), R(7),
- B(CallRuntime), U16(Runtime::kDefineClass), R(6), U8(3),
+ B(LdaTheHole),
B(Star), R(6),
- B(Mov), R(7), R(3),
+ B(CreateClosure), U8(3), U8(0), U8(2),
+ B(Star), R(3),
+ B(LdaConstant), U8(1),
+ B(Star), R(4),
+ B(Mov), R(3), R(5),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(4), U8(3),
+ B(Star), R(4),
B(CreateClosure), U8(4), U8(1), U8(2),
- B(Star), R(7),
- B(StaNamedProperty), R(5), U8(5), U8(0),
- B(PopContext), R(4),
+ B(Star), R(5),
+ B(StaNamedProperty), R(3), U8(5), U8(0),
+ B(PopContext), R(2),
B(Mov), R(3), R(0),
/* 38 E> */ B(CreateBlockContext), U8(6),
- B(PushContext), R(4),
+ B(PushContext), R(2),
+ B(LdaConstant), U8(2),
+ B(Star), R(4),
+ B(LdaConstant), U8(2),
+ B(Star), R(4),
+ B(CallRuntime), U16(Runtime::kCreatePrivateNameSymbol), R(4), U8(1),
+ B(StaCurrentContextSlot), U8(4),
B(LdaTheHole),
- B(Star), R(8),
+ B(Star), R(6),
B(CreateClosure), U8(8), U8(2), U8(2),
- B(Star), R(5),
+ B(Star), R(3),
B(LdaConstant), U8(7),
- B(Star), R(6),
- B(LdaConstant), U8(3),
- B(Star), R(9),
- B(LdaConstant), U8(3),
- B(Star), R(9),
- B(CallRuntime), U16(Runtime::kCreatePrivateNameSymbol), R(9), U8(1),
- B(StaCurrentContextSlot), U8(4),
- B(Mov), R(5), R(7),
- B(CallRuntime), U16(Runtime::kDefineClass), R(6), U8(3),
- B(Star), R(6),
- B(Mov), R(7), R(2),
+ B(Star), R(4),
+ B(Mov), R(3), R(5),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(4), U8(3),
+ B(Star), R(4),
B(CreateClosure), U8(9), U8(3), U8(2),
- B(Star), R(7),
- B(StaNamedProperty), R(5), U8(5), U8(2),
- B(PopContext), R(4),
- B(Mov), R(2), R(1),
- /* 136 S> */ B(Ldar), R(3),
- /* 136 E> */ B(Construct), R(3), R(0), U8(0), U8(4),
- /* 145 S> */ B(Ldar), R(2),
- /* 145 E> */ B(Construct), R(2), R(0), U8(0), U8(6),
+ B(Star), R(5),
+ B(StaNamedProperty), R(3), U8(5), U8(2),
+ B(PopContext), R(2),
+ B(Mov), R(3), R(1),
+ /* 136 S> */ B(Ldar), R(0),
+ /* 136 E> */ B(Construct), R(0), R(0), U8(0), U8(4),
+ /* 145 S> */ B(Ldar), R(1),
+ /* 145 E> */ B(Construct), R(1), R(0), U8(0), U8(6),
B(LdaUndefined),
/* 154 S> */ B(Return),
]
constant pool: [
SCOPE_INFO_TYPE,
FIXED_ARRAY_TYPE,
- SHARED_FUNCTION_INFO_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["#a"],
SHARED_FUNCTION_INFO_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
SYMBOL_TYPE,
SCOPE_INFO_TYPE,
FIXED_ARRAY_TYPE,
@@ -130,129 +128,126 @@ snippet: "
new C;
};
"
-frame size: 15
+frame size: 12
parameter count: 1
-bytecode array length: 277
+bytecode array length: 268
bytecodes: [
/* 30 E> */ B(StackCheck),
B(CreateBlockContext), U8(0),
- B(PushContext), R(6),
- B(LdaTheHole),
- B(Star), R(14),
- B(CreateClosure), U8(3), U8(0), U8(2),
- B(Star), R(11),
+ B(PushContext), R(3),
B(LdaConstant), U8(2),
- B(Star), R(12),
- B(Mov), R(11), R(13),
- B(CallRuntime), U16(Runtime::kDefineClass), R(12), U8(3),
- B(Star), R(12),
- B(CreateClosure), U8(4), U8(1), U8(2),
- B(Star), R(7),
- B(LdaConstant), U8(1),
- B(Star), R(8),
- B(LdaConstant), U8(5),
- B(Star), R(11),
- B(LdaConstant), U8(5),
- B(Star), R(11),
- B(CallRuntime), U16(Runtime::kCreatePrivateNameSymbol), R(11), U8(1),
+ B(Star), R(5),
+ B(LdaConstant), U8(2),
+ B(Star), R(5),
+ B(CallRuntime), U16(Runtime::kCreatePrivateNameSymbol), R(5), U8(1),
B(StaCurrentContextSlot), U8(4),
- B(Mov), R(7), R(9),
- B(Mov), R(13), R(10),
- B(CallRuntime), U16(Runtime::kDefineClass), R(8), U8(3),
+ B(LdaTheHole),
+ B(Star), R(11),
+ B(CreateClosure), U8(4), U8(0), U8(2),
B(Star), R(8),
- B(Mov), R(9), R(5),
- B(CreateClosure), U8(6), U8(2), U8(2),
+ B(LdaConstant), U8(3),
+ B(Star), R(9),
+ B(Mov), R(8), R(10),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(9), U8(3),
B(Star), R(9),
- B(StaNamedProperty), R(7), U8(7), U8(0),
- B(PopContext), R(6),
- B(Mov), R(5), R(0),
+ B(CreateClosure), U8(5), U8(1), U8(2),
+ B(Star), R(4),
+ B(LdaConstant), U8(1),
+ B(Star), R(5),
+ B(Mov), R(4), R(6),
+ B(Mov), R(10), R(7),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(5), U8(3),
+ B(Star), R(5),
+ B(CreateClosure), U8(6), U8(2), U8(2),
+ B(Star), R(6),
+ B(StaNamedProperty), R(4), U8(7), U8(0),
+ B(PopContext), R(3),
+ B(Mov), R(4), R(0),
/* 38 E> */ B(CreateBlockContext), U8(8),
- B(PushContext), R(6),
+ B(PushContext), R(3),
+ B(LdaConstant), U8(2),
+ B(Star), R(5),
+ B(LdaConstant), U8(2),
+ B(Star), R(5),
+ B(CallRuntime), U16(Runtime::kCreatePrivateNameSymbol), R(5), U8(1),
+ B(StaCurrentContextSlot), U8(4),
+ B(LdaConstant), U8(10),
+ B(Star), R(5),
+ B(LdaConstant), U8(10),
+ B(Star), R(5),
+ B(CallRuntime), U16(Runtime::kCreatePrivateNameSymbol), R(5), U8(1),
+ B(StaCurrentContextSlot), U8(5),
B(LdaTheHole),
- B(Star), R(14),
- B(CreateClosure), U8(11), U8(3), U8(2),
B(Star), R(11),
- B(LdaConstant), U8(10),
- B(Star), R(12),
- B(Mov), R(11), R(13),
- B(CallRuntime), U16(Runtime::kDefineClass), R(12), U8(3),
- B(Star), R(12),
- B(CreateClosure), U8(12), U8(4), U8(2),
- B(Star), R(7),
- B(LdaConstant), U8(9),
+ B(CreateClosure), U8(12), U8(3), U8(2),
B(Star), R(8),
- B(LdaConstant), U8(5),
- B(Star), R(11),
- B(LdaConstant), U8(5),
- B(Star), R(11),
- B(CallRuntime), U16(Runtime::kCreatePrivateNameSymbol), R(11), U8(1),
- B(StaCurrentContextSlot), U8(4),
- B(LdaConstant), U8(13),
- B(Star), R(11),
- B(LdaConstant), U8(13),
- B(Star), R(11),
- B(CallRuntime), U16(Runtime::kCreatePrivateNameSymbol), R(11), U8(1),
- B(StaCurrentContextSlot), U8(5),
+ B(LdaConstant), U8(11),
+ B(Star), R(9),
+ B(Mov), R(8), R(10),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(9), U8(3),
+ B(Star), R(9),
+ B(CreateClosure), U8(13), U8(4), U8(2),
+ B(Star), R(4),
+ B(LdaConstant), U8(9),
+ B(Star), R(5),
B(CreateClosure), U8(14), U8(5), U8(2),
- B(Star), R(11),
- B(CreateClosure), U8(15), U8(6), U8(2),
- B(Star), R(12),
- B(Mov), R(7), R(9),
- B(Mov), R(13), R(10),
- B(CallRuntime), U16(Runtime::kDefineClass), R(8), U8(5),
B(Star), R(8),
- B(Mov), R(9), R(4),
- B(CreateClosure), U8(16), U8(7), U8(2),
+ B(CreateClosure), U8(15), U8(6), U8(2),
B(Star), R(9),
- B(StaNamedProperty), R(7), U8(7), U8(2),
- B(PopContext), R(6),
+ B(Mov), R(4), R(6),
+ B(Mov), R(10), R(7),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(5), U8(5),
+ B(Star), R(5),
+ B(CreateClosure), U8(16), U8(7), U8(2),
+ B(Star), R(6),
+ B(StaNamedProperty), R(4), U8(7), U8(2),
+ B(PopContext), R(3),
B(Mov), R(4), R(1),
/* 140 E> */ B(CreateBlockContext), U8(17),
- B(PushContext), R(6),
+ B(PushContext), R(3),
+ B(LdaConstant), U8(2),
+ B(Star), R(5),
+ B(LdaConstant), U8(2),
+ B(Star), R(5),
+ B(CallRuntime), U16(Runtime::kCreatePrivateNameSymbol), R(5), U8(1),
+ B(StaCurrentContextSlot), U8(4),
/* 356 E> */ B(CreateClosure), U8(19), U8(8), U8(2),
- B(Star), R(7),
+ B(Star), R(4),
B(LdaConstant), U8(18),
- B(Star), R(8),
- B(LdaConstant), U8(5),
- B(Star), R(11),
- B(LdaConstant), U8(5),
- B(Star), R(11),
- B(CallRuntime), U16(Runtime::kCreatePrivateNameSymbol), R(11), U8(1),
- B(StaCurrentContextSlot), U8(4),
- B(Mov), R(7), R(9),
- B(Mov), R(4), R(10),
- B(CallRuntime), U16(Runtime::kDefineClass), R(8), U8(3),
- B(Star), R(8),
- B(Mov), R(9), R(3),
+ B(Star), R(5),
+ B(Mov), R(4), R(6),
+ B(Mov), R(1), R(7),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(5), U8(3),
+ B(Star), R(5),
B(CreateClosure), U8(20), U8(9), U8(2),
- B(Star), R(9),
- B(StaNamedProperty), R(7), U8(7), U8(4),
- B(PopContext), R(6),
- B(Mov), R(3), R(2),
- /* 430 S> */ B(Ldar), R(5),
- /* 430 E> */ B(Construct), R(5), R(0), U8(0), U8(6),
- /* 439 S> */ B(Ldar), R(4),
- /* 439 E> */ B(Construct), R(4), R(0), U8(0), U8(8),
- /* 448 S> */ B(Ldar), R(3),
- /* 448 E> */ B(Construct), R(3), R(0), U8(0), U8(10),
+ B(Star), R(6),
+ B(StaNamedProperty), R(4), U8(7), U8(4),
+ B(PopContext), R(3),
+ B(Mov), R(4), R(2),
+ /* 430 S> */ B(Ldar), R(0),
+ /* 430 E> */ B(Construct), R(0), R(0), U8(0), U8(6),
+ /* 439 S> */ B(Ldar), R(1),
+ /* 439 E> */ B(Construct), R(1), R(0), U8(0), U8(8),
+ /* 448 S> */ B(Ldar), R(2),
+ /* 448 E> */ B(Construct), R(2), R(0), U8(0), U8(10),
B(LdaUndefined),
/* 458 S> */ B(Return),
]
constant pool: [
SCOPE_INFO_TYPE,
FIXED_ARRAY_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["#a"],
FIXED_ARRAY_TYPE,
SHARED_FUNCTION_INFO_TYPE,
SHARED_FUNCTION_INFO_TYPE,
- ONE_BYTE_INTERNALIZED_STRING_TYPE ["#a"],
SHARED_FUNCTION_INFO_TYPE,
SYMBOL_TYPE,
SCOPE_INFO_TYPE,
FIXED_ARRAY_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["#b"],
FIXED_ARRAY_TYPE,
SHARED_FUNCTION_INFO_TYPE,
SHARED_FUNCTION_INFO_TYPE,
- ONE_BYTE_INTERNALIZED_STRING_TYPE ["#b"],
SHARED_FUNCTION_INFO_TYPE,
SHARED_FUNCTION_INFO_TYPE,
SHARED_FUNCTION_INFO_TYPE,
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateMethodDeclaration.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateMethodDeclaration.golden
index d1aab34fda..6456245741 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateMethodDeclaration.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateMethodDeclaration.golden
@@ -16,38 +16,37 @@ snippet: "
"
frame size: 7
parameter count: 1
-bytecode array length: 55
+bytecode array length: 52
bytecodes: [
/* 30 E> */ B(StackCheck),
B(CreateBlockContext), U8(0),
- B(PushContext), R(2),
+ B(PushContext), R(1),
+ B(LdaConstant), U8(2),
+ B(Star), R(3),
+ B(CallRuntime), U16(Runtime::kCreatePrivateNameSymbol), R(3), U8(1),
+ B(StaCurrentContextSlot), U8(5),
B(LdaTheHole),
B(Star), R(6),
- B(CreateClosure), U8(2), U8(0), U8(2),
- B(Star), R(3),
+ B(CreateClosure), U8(3), U8(0), U8(2),
+ B(Star), R(2),
B(LdaConstant), U8(1),
B(Star), R(4),
- B(CreateClosure), U8(3), U8(1), U8(2),
- B(StaCurrentContextSlot), U8(4),
- B(Mov), R(3), R(5),
+ B(Mov), R(2), R(5),
B(CallRuntime), U16(Runtime::kDefineClass), R(4), U8(3),
B(Star), R(4),
- B(Mov), R(5), R(1),
- B(LdaConstant), U8(4),
- B(Star), R(5),
- B(CallRuntime), U16(Runtime::kCreatePrivateNameSymbol), R(5), U8(1),
- B(StaCurrentContextSlot), U8(5),
- B(PopContext), R(2),
- B(Mov), R(1), R(0),
+ B(CreateClosure), U8(4), U8(1), U8(2),
+ B(StaCurrentContextSlot), U8(4),
+ B(PopContext), R(1),
+ B(Mov), R(5), R(0),
B(LdaUndefined),
/* 77 S> */ B(Return),
]
constant pool: [
SCOPE_INFO_TYPE,
FIXED_ARRAY_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["A"],
SHARED_FUNCTION_INFO_TYPE,
SHARED_FUNCTION_INFO_TYPE,
- ONE_BYTE_INTERNALIZED_STRING_TYPE ["A"],
]
handlers: [
]
@@ -63,64 +62,62 @@ snippet: "
}
}
"
-frame size: 9
+frame size: 8
parameter count: 1
-bytecode array length: 107
+bytecode array length: 101
bytecodes: [
/* 30 E> */ B(StackCheck),
B(CreateBlockContext), U8(0),
- B(PushContext), R(4),
+ B(PushContext), R(2),
+ B(LdaConstant), U8(2),
+ B(Star), R(4),
+ B(CallRuntime), U16(Runtime::kCreatePrivateNameSymbol), R(4), U8(1),
+ B(StaCurrentContextSlot), U8(5),
B(LdaTheHole),
- B(Star), R(8),
- B(CreateClosure), U8(2), U8(0), U8(2),
- B(Star), R(5),
+ B(Star), R(7),
+ B(CreateClosure), U8(3), U8(0), U8(2),
+ B(Star), R(3),
B(LdaConstant), U8(1),
- B(Star), R(6),
- B(CreateClosure), U8(3), U8(1), U8(2),
+ B(Star), R(5),
+ B(Mov), R(3), R(6),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(5), U8(3),
+ B(Star), R(5),
+ B(CreateClosure), U8(4), U8(1), U8(2),
B(StaCurrentContextSlot), U8(4),
- B(Mov), R(5), R(7),
- B(CallRuntime), U16(Runtime::kDefineClass), R(6), U8(3),
- B(Star), R(6),
- B(Mov), R(7), R(3),
- B(LdaConstant), U8(4),
- B(Star), R(7),
- B(CallRuntime), U16(Runtime::kCreatePrivateNameSymbol), R(7), U8(1),
- B(StaCurrentContextSlot), U8(5),
- B(PopContext), R(4),
- B(Mov), R(3), R(0),
+ B(PopContext), R(2),
+ B(Mov), R(6), R(0),
/* 38 E> */ B(CreateBlockContext), U8(5),
- B(PushContext), R(4),
- /* 93 E> */ B(CreateClosure), U8(7), U8(2), U8(2),
- B(Star), R(5),
+ B(PushContext), R(2),
+ B(LdaConstant), U8(7),
+ B(Star), R(4),
+ B(CallRuntime), U16(Runtime::kCreatePrivateNameSymbol), R(4), U8(1),
+ B(StaCurrentContextSlot), U8(5),
+ /* 93 E> */ B(CreateClosure), U8(8), U8(2), U8(2),
+ B(Star), R(3),
B(LdaConstant), U8(6),
- B(Star), R(6),
- B(CreateClosure), U8(8), U8(3), U8(2),
+ B(Star), R(5),
+ B(Mov), R(3), R(6),
+ B(Mov), R(0), R(7),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(5), U8(3),
+ B(Star), R(5),
+ B(CreateClosure), U8(9), U8(3), U8(2),
B(StaCurrentContextSlot), U8(4),
- B(Mov), R(5), R(7),
- B(Mov), R(3), R(8),
- B(CallRuntime), U16(Runtime::kDefineClass), R(6), U8(3),
- B(Star), R(6),
- B(Mov), R(7), R(2),
- B(LdaConstant), U8(9),
- B(Star), R(7),
- B(CallRuntime), U16(Runtime::kCreatePrivateNameSymbol), R(7), U8(1),
- B(StaCurrentContextSlot), U8(5),
- B(PopContext), R(4),
- B(Mov), R(2), R(1),
+ B(PopContext), R(2),
+ B(Mov), R(6), R(1),
B(LdaUndefined),
/* 126 S> */ B(Return),
]
constant pool: [
SCOPE_INFO_TYPE,
FIXED_ARRAY_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["D"],
SHARED_FUNCTION_INFO_TYPE,
SHARED_FUNCTION_INFO_TYPE,
- ONE_BYTE_INTERNALIZED_STRING_TYPE ["D"],
SCOPE_INFO_TYPE,
FIXED_ARRAY_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["E"],
SHARED_FUNCTION_INFO_TYPE,
SHARED_FUNCTION_INFO_TYPE,
- ONE_BYTE_INTERNALIZED_STRING_TYPE ["E"],
]
handlers: [
]
@@ -134,50 +131,47 @@ snippet: "
}
}
"
-frame size: 10
+frame size: 8
parameter count: 1
-bytecode array length: 106
+bytecode array length: 98
bytecodes: [
/* 30 E> */ B(StackCheck),
B(CreateBlockContext), U8(0),
- B(PushContext), R(4),
+ B(PushContext), R(2),
B(LdaTheHole),
- B(Star), R(8),
+ B(Star), R(6),
B(CreateClosure), U8(2), U8(0), U8(2),
- B(Star), R(5),
+ B(Star), R(3),
B(LdaConstant), U8(1),
- B(Star), R(6),
+ B(Star), R(4),
B(CreateClosure), U8(3), U8(1), U8(2),
- B(Star), R(9),
- B(Mov), R(5), R(7),
- B(CallRuntime), U16(Runtime::kDefineClass), R(6), U8(4),
- B(Star), R(6),
- B(Mov), R(7), R(3),
- B(PopContext), R(4),
- B(Mov), R(3), R(0),
+ B(Star), R(7),
+ B(Mov), R(3), R(5),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(4), U8(4),
+ B(Star), R(4),
+ B(PopContext), R(2),
+ B(Mov), R(5), R(0),
/* 38 E> */ B(CreateBlockContext), U8(4),
- B(PushContext), R(4),
- /* 77 E> */ B(CreateClosure), U8(6), U8(2), U8(2),
- B(Star), R(5),
+ B(PushContext), R(2),
+ B(LdaConstant), U8(6),
+ B(Star), R(4),
+ B(CallRuntime), U16(Runtime::kCreatePrivateNameSymbol), R(4), U8(1),
+ B(StaCurrentContextSlot), U8(5),
+ /* 77 E> */ B(CreateClosure), U8(7), U8(2), U8(2),
+ B(Star), R(3),
B(LdaConstant), U8(5),
- B(Star), R(6),
- B(CreateClosure), U8(7), U8(3), U8(2),
+ B(Star), R(5),
+ B(Mov), R(3), R(6),
+ B(Mov), R(0), R(7),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(5), U8(3),
+ B(Star), R(5),
+ B(CreateClosure), U8(8), U8(3), U8(2),
B(StaCurrentContextSlot), U8(4),
- B(Mov), R(5), R(7),
- B(Mov), R(3), R(8),
- B(CallRuntime), U16(Runtime::kDefineClass), R(6), U8(3),
B(Star), R(6),
- B(Mov), R(7), R(2),
- B(LdaConstant), U8(8),
- B(Star), R(7),
- B(CallRuntime), U16(Runtime::kCreatePrivateNameSymbol), R(7), U8(1),
- B(StaCurrentContextSlot), U8(5),
- B(LdaCurrentContextSlot), U8(4),
- B(Star), R(8),
- B(Ldar), R(6),
- B(StaNamedProperty), R(8), U8(9), U8(0),
- B(PopContext), R(4),
- B(Mov), R(2), R(1),
+ B(Ldar), R(5),
+ B(StaNamedProperty), R(6), U8(9), U8(0),
+ B(PopContext), R(2),
+ B(Mov), R(3), R(1),
B(LdaUndefined),
/* 118 S> */ B(Return),
]
@@ -188,9 +182,9 @@ constant pool: [
SHARED_FUNCTION_INFO_TYPE,
SCOPE_INFO_TYPE,
FIXED_ARRAY_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["C"],
SHARED_FUNCTION_INFO_TYPE,
SHARED_FUNCTION_INFO_TYPE,
- ONE_BYTE_INTERNALIZED_STRING_TYPE ["C"],
SYMBOL_TYPE,
]
handlers: [
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/PublicClassFields.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/PublicClassFields.golden
index c91e7b06aa..4b893861bf 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/PublicClassFields.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/PublicClassFields.golden
@@ -21,59 +21,57 @@ snippet: "
new B;
}
"
-frame size: 10
+frame size: 8
parameter count: 1
-bytecode array length: 125
+bytecode array length: 119
bytecodes: [
/* 30 E> */ B(StackCheck),
B(CreateBlockContext), U8(0),
- B(PushContext), R(4),
+ B(PushContext), R(2),
B(LdaTheHole),
B(StaCurrentContextSlot), U8(4),
B(LdaTheHole),
- B(Star), R(8),
+ B(Star), R(6),
B(CreateClosure), U8(2), U8(0), U8(2),
- B(Star), R(5),
+ B(Star), R(3),
B(LdaConstant), U8(1),
- B(Star), R(6),
+ B(Star), R(4),
/* 60 S> */ B(LdaConstant), U8(3),
B(StaCurrentContextSlot), U8(4),
- B(Star), R(9),
- B(Mov), R(5), R(7),
- B(CallRuntime), U16(Runtime::kDefineClass), R(6), U8(4),
- B(Star), R(6),
- B(Mov), R(7), R(3),
- B(CreateClosure), U8(4), U8(1), U8(2),
B(Star), R(7),
- B(StaNamedProperty), R(5), U8(5), U8(0),
- B(PopContext), R(4),
+ B(Mov), R(3), R(5),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(4), U8(4),
+ B(Star), R(4),
+ B(CreateClosure), U8(4), U8(1), U8(2),
+ B(Star), R(5),
+ B(StaNamedProperty), R(3), U8(5), U8(0),
+ B(PopContext), R(2),
B(Mov), R(3), R(0),
/* 38 E> */ B(CreateBlockContext), U8(6),
- B(PushContext), R(4),
+ B(PushContext), R(2),
B(LdaTheHole),
B(StaCurrentContextSlot), U8(4),
B(LdaTheHole),
- B(Star), R(8),
+ B(Star), R(6),
B(CreateClosure), U8(8), U8(2), U8(2),
- B(Star), R(5),
+ B(Star), R(3),
B(LdaConstant), U8(7),
- B(Star), R(6),
+ B(Star), R(4),
/* 99 S> */ B(LdaConstant), U8(3),
B(StaCurrentContextSlot), U8(4),
- B(Star), R(9),
- B(Mov), R(5), R(7),
- B(CallRuntime), U16(Runtime::kDefineClass), R(6), U8(4),
- B(Star), R(6),
- B(Mov), R(7), R(2),
- B(CreateClosure), U8(9), U8(3), U8(2),
B(Star), R(7),
- B(StaNamedProperty), R(5), U8(5), U8(2),
- B(PopContext), R(4),
- B(Mov), R(2), R(1),
- /* 120 S> */ B(Ldar), R(3),
- /* 120 E> */ B(Construct), R(3), R(0), U8(0), U8(4),
- /* 129 S> */ B(Ldar), R(2),
- /* 129 E> */ B(Construct), R(2), R(0), U8(0), U8(6),
+ B(Mov), R(3), R(5),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(4), U8(4),
+ B(Star), R(4),
+ B(CreateClosure), U8(9), U8(3), U8(2),
+ B(Star), R(5),
+ B(StaNamedProperty), R(3), U8(5), U8(2),
+ B(PopContext), R(2),
+ B(Mov), R(3), R(1),
+ /* 120 S> */ B(Ldar), R(0),
+ /* 120 E> */ B(Construct), R(0), R(0), U8(0), U8(4),
+ /* 129 S> */ B(Ldar), R(1),
+ /* 129 E> */ B(Construct), R(1), R(0), U8(0), U8(6),
B(LdaUndefined),
/* 138 S> */ B(Return),
]
@@ -122,100 +120,97 @@ snippet: "
new C;
}
"
-frame size: 15
+frame size: 12
parameter count: 1
-bytecode array length: 238
+bytecode array length: 229
bytecodes: [
/* 30 E> */ B(StackCheck),
B(CreateBlockContext), U8(0),
- B(PushContext), R(6),
+ B(PushContext), R(3),
B(LdaTheHole),
B(StaCurrentContextSlot), U8(4),
B(LdaTheHole),
- B(Star), R(14),
- B(CreateClosure), U8(3), U8(0), U8(2),
B(Star), R(11),
+ B(CreateClosure), U8(3), U8(0), U8(2),
+ B(Star), R(8),
B(LdaConstant), U8(2),
- B(Star), R(12),
- B(Mov), R(11), R(13),
- B(CallRuntime), U16(Runtime::kDefineClass), R(12), U8(3),
- B(Star), R(12),
+ B(Star), R(9),
+ B(Mov), R(8), R(10),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(9), U8(3),
+ B(Star), R(9),
B(CreateClosure), U8(4), U8(1), U8(2),
- B(Star), R(7),
+ B(Star), R(4),
B(LdaConstant), U8(1),
- B(Star), R(8),
+ B(Star), R(5),
/* 77 S> */ B(LdaConstant), U8(5),
B(StaCurrentContextSlot), U8(4),
- B(Star), R(11),
- B(Mov), R(7), R(9),
- B(Mov), R(13), R(10),
- B(CallRuntime), U16(Runtime::kDefineClass), R(8), U8(4),
B(Star), R(8),
- B(Mov), R(9), R(5),
+ B(Mov), R(4), R(6),
+ B(Mov), R(10), R(7),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(5), U8(4),
+ B(Star), R(5),
B(CreateClosure), U8(6), U8(2), U8(2),
- B(Star), R(9),
- B(StaNamedProperty), R(7), U8(7), U8(0),
- B(PopContext), R(6),
- B(Mov), R(5), R(0),
+ B(Star), R(6),
+ B(StaNamedProperty), R(4), U8(7), U8(0),
+ B(PopContext), R(3),
+ B(Mov), R(4), R(0),
/* 38 E> */ B(CreateBlockContext), U8(8),
- B(PushContext), R(6),
+ B(PushContext), R(3),
B(LdaTheHole),
B(StaCurrentContextSlot), U8(4),
B(LdaTheHole),
- B(Star), R(14),
- B(CreateClosure), U8(11), U8(3), U8(2),
B(Star), R(11),
+ B(CreateClosure), U8(11), U8(3), U8(2),
+ B(Star), R(8),
B(LdaConstant), U8(10),
- B(Star), R(12),
- B(Mov), R(11), R(13),
- B(CallRuntime), U16(Runtime::kDefineClass), R(12), U8(3),
- B(Star), R(12),
+ B(Star), R(9),
+ B(Mov), R(8), R(10),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(9), U8(3),
+ B(Star), R(9),
B(CreateClosure), U8(12), U8(4), U8(2),
- B(Star), R(7),
+ B(Star), R(4),
B(LdaConstant), U8(9),
- B(Star), R(8),
+ B(Star), R(5),
/* 133 S> */ B(LdaConstant), U8(5),
B(StaCurrentContextSlot), U8(4),
- B(Star), R(11),
- B(CreateClosure), U8(13), U8(5), U8(2),
- B(Star), R(12),
- B(Mov), R(7), R(9),
- B(Mov), R(13), R(10),
- B(CallRuntime), U16(Runtime::kDefineClass), R(8), U8(5),
B(Star), R(8),
- B(Mov), R(9), R(4),
- B(CreateClosure), U8(14), U8(6), U8(2),
+ B(CreateClosure), U8(13), U8(5), U8(2),
B(Star), R(9),
- B(StaNamedProperty), R(7), U8(7), U8(2),
- B(PopContext), R(6),
+ B(Mov), R(4), R(6),
+ B(Mov), R(10), R(7),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(5), U8(5),
+ B(Star), R(5),
+ B(CreateClosure), U8(14), U8(6), U8(2),
+ B(Star), R(6),
+ B(StaNamedProperty), R(4), U8(7), U8(2),
+ B(PopContext), R(3),
B(Mov), R(4), R(1),
/* 90 E> */ B(CreateBlockContext), U8(15),
- B(PushContext), R(6),
+ B(PushContext), R(3),
B(LdaTheHole),
B(StaCurrentContextSlot), U8(4),
/* 236 E> */ B(CreateClosure), U8(17), U8(7), U8(2),
- B(Star), R(7),
+ B(Star), R(4),
B(LdaConstant), U8(16),
- B(Star), R(8),
+ B(Star), R(5),
/* 256 S> */ B(LdaConstant), U8(5),
B(StaCurrentContextSlot), U8(4),
- B(Star), R(11),
- B(Mov), R(7), R(9),
- B(Mov), R(4), R(10),
- B(CallRuntime), U16(Runtime::kDefineClass), R(8), U8(4),
B(Star), R(8),
- B(Mov), R(9), R(3),
+ B(Mov), R(4), R(6),
+ B(Mov), R(1), R(7),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(5), U8(4),
+ B(Star), R(5),
B(CreateClosure), U8(18), U8(8), U8(2),
- B(Star), R(9),
- B(StaNamedProperty), R(7), U8(7), U8(4),
- B(PopContext), R(6),
- B(Mov), R(3), R(2),
- /* 329 S> */ B(Ldar), R(5),
- /* 329 E> */ B(Construct), R(5), R(0), U8(0), U8(6),
- /* 338 S> */ B(Ldar), R(4),
- /* 338 E> */ B(Construct), R(4), R(0), U8(0), U8(8),
- /* 347 S> */ B(Ldar), R(3),
- /* 347 E> */ B(Construct), R(3), R(0), U8(0), U8(10),
+ B(Star), R(6),
+ B(StaNamedProperty), R(4), U8(7), U8(4),
+ B(PopContext), R(3),
+ B(Mov), R(4), R(2),
+ /* 329 S> */ B(Ldar), R(0),
+ /* 329 E> */ B(Construct), R(0), R(0), U8(0), U8(6),
+ /* 338 S> */ B(Ldar), R(1),
+ /* 338 E> */ B(Construct), R(1), R(0), U8(0), U8(8),
+ /* 347 S> */ B(Ldar), R(2),
+ /* 347 E> */ B(Construct), R(2), R(0), U8(0), U8(10),
B(LdaUndefined),
/* 356 S> */ B(Return),
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/StaticClassFields.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/StaticClassFields.golden
index f03337e4aa..f47a701358 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/StaticClassFields.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/StaticClassFields.golden
@@ -25,85 +25,83 @@ snippet: "
new B;
}
"
-frame size: 11
+frame size: 9
parameter count: 1
-bytecode array length: 191
+bytecode array length: 185
bytecodes: [
/* 30 E> */ B(StackCheck),
B(CreateBlockContext), U8(0),
- B(PushContext), R(4),
+ B(PushContext), R(2),
B(LdaTheHole),
B(StaCurrentContextSlot), U8(4),
B(LdaTheHole),
B(StaCurrentContextSlot), U8(5),
B(LdaTheHole),
- B(Star), R(8),
+ B(Star), R(6),
B(CreateClosure), U8(2), U8(0), U8(2),
- B(Star), R(5),
+ B(Star), R(3),
B(LdaConstant), U8(1),
- B(Star), R(6),
+ B(Star), R(4),
/* 60 S> */ B(LdaConstant), U8(3),
B(StaCurrentContextSlot), U8(4),
- B(Star), R(9),
+ B(Star), R(7),
/* 92 S> */ B(LdaConstant), U8(4),
- B(Star), R(10),
+ B(Star), R(8),
B(LdaConstant), U8(5),
- B(TestEqualStrict), R(10), U8(0),
- B(Mov), R(5), R(7),
+ B(TestEqualStrict), R(8), U8(0),
+ B(Mov), R(3), R(5),
B(JumpIfFalse), U8(7),
B(CallRuntime), U16(Runtime::kThrowStaticPrototypeError), R(0), U8(0),
- B(Ldar), R(10),
+ B(Ldar), R(8),
B(StaCurrentContextSlot), U8(5),
- B(CallRuntime), U16(Runtime::kDefineClass), R(6), U8(5),
- B(Star), R(6),
- B(Mov), R(5), R(3),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(4), U8(5),
+ B(Star), R(4),
B(CreateClosure), U8(6), U8(1), U8(2),
- B(Star), R(7),
- B(StaNamedProperty), R(5), U8(7), U8(1),
+ B(Star), R(5),
+ B(StaNamedProperty), R(3), U8(7), U8(1),
B(CreateClosure), U8(8), U8(2), U8(2),
- B(Star), R(9),
- B(CallProperty0), R(9), R(3), U8(3),
- B(PopContext), R(4),
+ B(Star), R(7),
+ B(CallProperty0), R(7), R(3), U8(3),
+ B(PopContext), R(2),
B(Mov), R(3), R(0),
/* 38 E> */ B(CreateBlockContext), U8(9),
- B(PushContext), R(4),
+ B(PushContext), R(2),
B(LdaTheHole),
B(StaCurrentContextSlot), U8(4),
B(LdaTheHole),
B(StaCurrentContextSlot), U8(5),
B(LdaTheHole),
- B(Star), R(8),
+ B(Star), R(6),
B(CreateClosure), U8(11), U8(3), U8(2),
- B(Star), R(5),
+ B(Star), R(3),
B(LdaConstant), U8(10),
- B(Star), R(6),
+ B(Star), R(4),
/* 131 S> */ B(LdaConstant), U8(3),
B(StaCurrentContextSlot), U8(4),
- B(Star), R(9),
+ B(Star), R(7),
/* 176 S> */ B(LdaConstant), U8(4),
- B(Star), R(10),
+ B(Star), R(8),
B(LdaConstant), U8(5),
- B(TestEqualStrict), R(10), U8(0),
- B(Mov), R(5), R(7),
+ B(TestEqualStrict), R(8), U8(0),
+ B(Mov), R(3), R(5),
B(JumpIfFalse), U8(7),
B(CallRuntime), U16(Runtime::kThrowStaticPrototypeError), R(0), U8(0),
- B(Ldar), R(10),
+ B(Ldar), R(8),
B(StaCurrentContextSlot), U8(5),
- B(CallRuntime), U16(Runtime::kDefineClass), R(6), U8(5),
- B(Star), R(6),
- B(Mov), R(5), R(2),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(4), U8(5),
+ B(Star), R(4),
B(CreateClosure), U8(12), U8(4), U8(2),
- B(Star), R(7),
- B(StaNamedProperty), R(5), U8(7), U8(5),
+ B(Star), R(5),
+ B(StaNamedProperty), R(3), U8(7), U8(5),
B(CreateClosure), U8(13), U8(5), U8(2),
- B(Star), R(9),
- B(CallProperty0), R(9), R(2), U8(7),
- B(PopContext), R(4),
- B(Mov), R(2), R(1),
+ B(Star), R(7),
+ B(CallProperty0), R(7), R(3), U8(7),
+ B(PopContext), R(2),
+ B(Mov), R(3), R(1),
/* 197 S> */ B(Ldar), R(0),
/* 197 E> */ B(Construct), R(0), R(0), U8(0), U8(9),
- /* 206 S> */ B(Ldar), R(2),
- /* 206 E> */ B(Construct), R(2), R(0), U8(0), U8(11),
+ /* 206 S> */ B(Ldar), R(1),
+ /* 206 E> */ B(Construct), R(1), R(0), U8(0), U8(11),
B(LdaUndefined),
/* 215 S> */ B(Return),
]
@@ -162,141 +160,138 @@ snippet: "
new C;
}
"
-frame size: 15
+frame size: 12
parameter count: 1
-bytecode array length: 343
+bytecode array length: 334
bytecodes: [
/* 30 E> */ B(StackCheck),
B(CreateBlockContext), U8(0),
- B(PushContext), R(6),
+ B(PushContext), R(3),
B(LdaTheHole),
B(StaCurrentContextSlot), U8(4),
B(LdaTheHole),
B(StaCurrentContextSlot), U8(5),
B(LdaTheHole),
- B(Star), R(14),
- B(CreateClosure), U8(3), U8(0), U8(2),
B(Star), R(11),
+ B(CreateClosure), U8(3), U8(0), U8(2),
+ B(Star), R(8),
B(LdaConstant), U8(2),
- B(Star), R(12),
- B(Mov), R(11), R(13),
- B(CallRuntime), U16(Runtime::kDefineClass), R(12), U8(3),
- B(Star), R(12),
+ B(Star), R(9),
+ B(Mov), R(8), R(10),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(9), U8(3),
+ B(Star), R(9),
B(CreateClosure), U8(4), U8(1), U8(2),
- B(Star), R(7),
+ B(Star), R(4),
B(LdaConstant), U8(1),
- B(Star), R(8),
+ B(Star), R(5),
/* 77 S> */ B(LdaConstant), U8(5),
B(StaCurrentContextSlot), U8(4),
- B(Star), R(11),
+ B(Star), R(8),
/* 109 S> */ B(LdaConstant), U8(6),
- B(Star), R(12),
+ B(Star), R(9),
B(LdaConstant), U8(7),
- B(TestEqualStrict), R(12), U8(0),
- B(Mov), R(13), R(10),
- B(Mov), R(7), R(9),
+ B(TestEqualStrict), R(9), U8(0),
+ B(Mov), R(10), R(7),
+ B(Mov), R(4), R(6),
B(JumpIfFalse), U8(7),
B(CallRuntime), U16(Runtime::kThrowStaticPrototypeError), R(0), U8(0),
- B(Ldar), R(12),
+ B(Ldar), R(9),
B(StaCurrentContextSlot), U8(5),
- B(CallRuntime), U16(Runtime::kDefineClass), R(8), U8(5),
- B(Star), R(8),
- B(Mov), R(7), R(5),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(5), U8(5),
+ B(Star), R(5),
B(CreateClosure), U8(8), U8(2), U8(2),
- B(Star), R(9),
- B(StaNamedProperty), R(7), U8(9), U8(1),
+ B(Star), R(6),
+ B(StaNamedProperty), R(4), U8(9), U8(1),
B(CreateClosure), U8(10), U8(3), U8(2),
- B(Star), R(11),
- B(CallProperty0), R(11), R(5), U8(3),
- B(PopContext), R(6),
- B(Mov), R(5), R(0),
+ B(Star), R(8),
+ B(CallProperty0), R(8), R(4), U8(3),
+ B(PopContext), R(3),
+ B(Mov), R(4), R(0),
/* 38 E> */ B(CreateBlockContext), U8(11),
- B(PushContext), R(6),
+ B(PushContext), R(3),
B(LdaTheHole),
B(StaCurrentContextSlot), U8(4),
B(LdaTheHole),
B(StaCurrentContextSlot), U8(5),
B(LdaTheHole),
- B(Star), R(14),
- B(CreateClosure), U8(14), U8(4), U8(2),
B(Star), R(11),
+ B(CreateClosure), U8(14), U8(4), U8(2),
+ B(Star), R(8),
B(LdaConstant), U8(13),
- B(Star), R(12),
- B(Mov), R(11), R(13),
- B(CallRuntime), U16(Runtime::kDefineClass), R(12), U8(3),
- B(Star), R(12),
+ B(Star), R(9),
+ B(Mov), R(8), R(10),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(9), U8(3),
+ B(Star), R(9),
B(CreateClosure), U8(15), U8(5), U8(2),
- B(Star), R(7),
+ B(Star), R(4),
B(LdaConstant), U8(12),
- B(Star), R(8),
+ B(Star), R(5),
/* 165 S> */ B(LdaConstant), U8(5),
B(StaCurrentContextSlot), U8(4),
- B(Star), R(11),
+ B(Star), R(8),
/* 210 S> */ B(LdaConstant), U8(6),
- B(Star), R(12),
+ B(Star), R(9),
B(LdaConstant), U8(7),
- B(TestEqualStrict), R(12), U8(0),
- B(Mov), R(7), R(9),
- B(Mov), R(13), R(10),
+ B(TestEqualStrict), R(9), U8(0),
+ B(Mov), R(4), R(6),
+ B(Mov), R(10), R(7),
B(JumpIfFalse), U8(7),
B(CallRuntime), U16(Runtime::kThrowStaticPrototypeError), R(0), U8(0),
- B(Ldar), R(12),
+ B(Ldar), R(9),
B(StaCurrentContextSlot), U8(5),
B(CreateClosure), U8(16), U8(6), U8(2),
- B(Star), R(13),
- B(CallRuntime), U16(Runtime::kDefineClass), R(8), U8(6),
- B(Star), R(8),
- B(Mov), R(7), R(4),
+ B(Star), R(10),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(5), U8(6),
+ B(Star), R(5),
B(CreateClosure), U8(17), U8(7), U8(2),
- B(Star), R(9),
- B(StaNamedProperty), R(7), U8(9), U8(5),
+ B(Star), R(6),
+ B(StaNamedProperty), R(4), U8(9), U8(5),
B(CreateClosure), U8(18), U8(8), U8(2),
- B(Star), R(11),
- B(CallProperty0), R(11), R(4), U8(7),
- B(PopContext), R(6),
+ B(Star), R(8),
+ B(CallProperty0), R(8), R(4), U8(7),
+ B(PopContext), R(3),
B(Mov), R(4), R(1),
/* 122 E> */ B(CreateBlockContext), U8(19),
- B(PushContext), R(6),
+ B(PushContext), R(3),
B(LdaTheHole),
B(StaCurrentContextSlot), U8(4),
B(LdaTheHole),
B(StaCurrentContextSlot), U8(5),
/* 313 E> */ B(CreateClosure), U8(21), U8(9), U8(2),
- B(Star), R(7),
+ B(Star), R(4),
B(LdaConstant), U8(20),
- B(Star), R(8),
+ B(Star), R(5),
/* 333 S> */ B(LdaConstant), U8(5),
B(StaCurrentContextSlot), U8(4),
- B(Star), R(11),
+ B(Star), R(8),
/* 378 S> */ B(LdaConstant), U8(6),
- B(Star), R(12),
+ B(Star), R(9),
B(LdaConstant), U8(7),
- B(TestEqualStrict), R(12), U8(0),
- B(Mov), R(4), R(10),
- B(Mov), R(7), R(9),
+ B(TestEqualStrict), R(9), U8(0),
+ B(Mov), R(4), R(6),
+ B(Mov), R(1), R(7),
B(JumpIfFalse), U8(7),
B(CallRuntime), U16(Runtime::kThrowStaticPrototypeError), R(0), U8(0),
- B(Ldar), R(12),
+ B(Ldar), R(9),
B(StaCurrentContextSlot), U8(5),
- B(CallRuntime), U16(Runtime::kDefineClass), R(8), U8(5),
- B(Star), R(8),
- B(Mov), R(7), R(3),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(5), U8(5),
+ B(Star), R(5),
B(CreateClosure), U8(22), U8(10), U8(2),
- B(Star), R(9),
- B(StaNamedProperty), R(7), U8(9), U8(9),
+ B(Star), R(6),
+ B(StaNamedProperty), R(4), U8(9), U8(9),
B(CreateClosure), U8(23), U8(11), U8(2),
- B(Star), R(11),
- B(Ldar), R(3),
- B(StaNamedProperty), R(11), U8(24), U8(11),
- B(CallProperty0), R(11), R(3), U8(13),
- B(PopContext), R(6),
- B(Mov), R(3), R(2),
+ B(Star), R(8),
+ B(Ldar), R(4),
+ B(StaNamedProperty), R(8), U8(24), U8(11),
+ B(CallProperty0), R(8), R(4), U8(13),
+ B(PopContext), R(3),
+ B(Mov), R(4), R(2),
/* 456 S> */ B(Ldar), R(0),
/* 456 E> */ B(Construct), R(0), R(0), U8(0), U8(15),
/* 465 S> */ B(Ldar), R(1),
/* 465 E> */ B(Construct), R(1), R(0), U8(0), U8(17),
- /* 474 S> */ B(Ldar), R(3),
- /* 474 E> */ B(Construct), R(3), R(0), U8(0), U8(19),
+ /* 474 S> */ B(Ldar), R(2),
+ /* 474 E> */ B(Construct), R(2), R(0), U8(0), U8(19),
B(LdaUndefined),
/* 483 S> */ B(Return),
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/StaticPrivateMethodAccess.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/StaticPrivateMethodAccess.golden
new file mode 100644
index 0000000000..888fca1cf8
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/StaticPrivateMethodAccess.golden
@@ -0,0 +1,290 @@
+#
+# Autogenerated by generate-bytecode-expectations.
+#
+
+---
+wrap: no
+test function name: test
+private methods: yes
+
+---
+snippet: "
+ class A {
+ static #a() { return 1; }
+ static test() { return this.#a(); }
+ }
+
+ var test = A.test;
+ test();
+"
+frame size: 4
+parameter count: 1
+bytecode array length: 36
+bytecodes: [
+ /* 51 E> */ B(StackCheck),
+ /* 56 S> */ B(LdaCurrentContextSlot), U8(5),
+ B(TestReferenceEqual), R(this),
+ B(Mov), R(this), R(1),
+ B(JumpIfTrue), U8(18),
+ B(Wide), B(LdaSmi), I16(259),
+ B(Star), R(2),
+ B(LdaConstant), U8(0),
+ B(Star), R(3),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(2), U8(2),
+ B(Throw),
+ B(LdaCurrentContextSlot), U8(4),
+ B(Star), R(0),
+ /* 70 E> */ B(CallAnyReceiver), R(0), R(1), U8(1), U8(0),
+ /* 73 S> */ B(Return),
+]
+constant pool: [
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["#a"],
+]
+handlers: [
+]
+
+---
+snippet: "
+ class B {
+ static #b() { return 1; }
+ static test() { this.#b = 1; }
+ }
+
+ var test = B.test;
+ test();
+"
+frame size: 2
+parameter count: 1
+bytecode array length: 17
+bytecodes: [
+ /* 51 E> */ B(StackCheck),
+ /* 56 S> */ B(Wide), B(LdaSmi), I16(261),
+ B(Star), R(0),
+ B(LdaConstant), U8(0),
+ B(Star), R(1),
+ /* 64 E> */ B(CallRuntime), U16(Runtime::kNewTypeError), R(0), U8(2),
+ B(Throw),
+]
+constant pool: [
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["#b"],
+]
+handlers: [
+]
+
+---
+snippet: "
+ class C {
+ static #c() { return 1; }
+ static test() { this.#c++; }
+ }
+
+ var test = C.test;
+ test();
+"
+frame size: 2
+parameter count: 1
+bytecode array length: 17
+bytecodes: [
+ /* 51 E> */ B(StackCheck),
+ /* 56 S> */ B(Wide), B(LdaSmi), I16(261),
+ B(Star), R(0),
+ B(LdaConstant), U8(0),
+ B(Star), R(1),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(0), U8(2),
+ B(Throw),
+]
+constant pool: [
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["#c"],
+]
+handlers: [
+]
+
+---
+snippet: "
+ class D {
+ static get #d() { return 1; }
+ static set #d(val) { }
+
+ static test() {
+ this.#d++;
+ this.#d = 1;
+ return this.#d;
+ }
+ }
+
+ var test = D.test;
+ test();
+"
+frame size: 5
+parameter count: 1
+bytecode array length: 143
+bytecodes: [
+ /* 81 E> */ B(StackCheck),
+ /* 90 S> */ B(LdaCurrentContextSlot), U8(4),
+ B(Star), R(1),
+ B(LdaCurrentContextSlot), U8(5),
+ /* 94 E> */ B(TestReferenceEqual), R(this),
+ B(Mov), R(this), R(0),
+ B(JumpIfTrue), U8(18),
+ B(Wide), B(LdaSmi), I16(259),
+ B(Star), R(2),
+ B(LdaConstant), U8(0),
+ B(Star), R(3),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(2), U8(2),
+ B(Throw),
+ B(CallRuntime), U16(Runtime::kLoadPrivateGetter), R(1), U8(1),
+ B(Star), R(2),
+ B(CallProperty0), R(2), R(0), U8(0),
+ B(Inc), U8(2),
+ B(Star), R(2),
+ /* 97 E> */ B(CallRuntime), U16(Runtime::kLoadPrivateSetter), R(1), U8(1),
+ B(Star), R(3),
+ B(CallProperty1), R(3), R(0), R(2), U8(3),
+ /* 105 S> */ B(LdaSmi), I8(1),
+ B(Star), R(0),
+ B(LdaCurrentContextSlot), U8(4),
+ B(Star), R(2),
+ B(LdaCurrentContextSlot), U8(5),
+ /* 109 E> */ B(TestReferenceEqual), R(this),
+ B(Mov), R(this), R(1),
+ B(JumpIfTrue), U8(18),
+ B(Wide), B(LdaSmi), I16(260),
+ B(Star), R(3),
+ B(LdaConstant), U8(0),
+ B(Star), R(4),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(3), U8(2),
+ B(Throw),
+ B(CallRuntime), U16(Runtime::kLoadPrivateSetter), R(2), U8(1),
+ B(Star), R(3),
+ B(CallProperty1), R(3), R(1), R(0), U8(5),
+ /* 122 S> */ B(LdaCurrentContextSlot), U8(4),
+ B(Star), R(1),
+ B(LdaCurrentContextSlot), U8(5),
+ /* 133 E> */ B(TestReferenceEqual), R(this),
+ B(Mov), R(this), R(0),
+ B(JumpIfTrue), U8(18),
+ B(Wide), B(LdaSmi), I16(259),
+ B(Star), R(2),
+ B(LdaConstant), U8(0),
+ B(Star), R(3),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(2), U8(2),
+ B(Throw),
+ B(CallRuntime), U16(Runtime::kLoadPrivateGetter), R(1), U8(1),
+ B(Star), R(2),
+ B(CallProperty0), R(2), R(0), U8(7),
+ /* 137 S> */ B(Return),
+]
+constant pool: [
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["#d"],
+]
+handlers: [
+]
+
+---
+snippet: "
+ class E {
+ static get #e() { return 1; }
+ static test() { this.#e++; }
+ }
+ var test = E.test;
+ test();
+"
+frame size: 2
+parameter count: 1
+bytecode array length: 17
+bytecodes: [
+ /* 55 E> */ B(StackCheck),
+ /* 60 S> */ B(Wide), B(LdaSmi), I16(263),
+ B(Star), R(0),
+ B(LdaConstant), U8(0),
+ B(Star), R(1),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(0), U8(2),
+ B(Throw),
+]
+constant pool: [
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["#e"],
+]
+handlers: [
+]
+
+---
+snippet: "
+ class F {
+ static set #f(val) { }
+ static test() { this.#f++; }
+ }
+ var test = F.test;
+ test();
+"
+frame size: 2
+parameter count: 1
+bytecode array length: 17
+bytecodes: [
+ /* 48 E> */ B(StackCheck),
+ /* 53 S> */ B(Wide), B(LdaSmi), I16(262),
+ B(Star), R(0),
+ B(LdaConstant), U8(0),
+ B(Star), R(1),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(0), U8(2),
+ B(Throw),
+]
+constant pool: [
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["#f"],
+]
+handlers: [
+]
+
+---
+snippet: "
+ class G {
+ static get #d() { return 1; }
+ static test() { this.#d = 1; }
+ }
+ var test = G.test;
+ test();
+"
+frame size: 2
+parameter count: 1
+bytecode array length: 17
+bytecodes: [
+ /* 55 E> */ B(StackCheck),
+ /* 60 S> */ B(Wide), B(LdaSmi), I16(263),
+ B(Star), R(0),
+ B(LdaConstant), U8(0),
+ B(Star), R(1),
+ /* 68 E> */ B(CallRuntime), U16(Runtime::kNewTypeError), R(0), U8(2),
+ B(Throw),
+]
+constant pool: [
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["#d"],
+]
+handlers: [
+]
+
+---
+snippet: "
+ class H {
+ set #h(val) { }
+ static test() { this.#h; }
+ }
+ var test = H.test;
+ test();
+"
+frame size: 3
+parameter count: 1
+bytecode array length: 17
+bytecodes: [
+ /* 41 E> */ B(StackCheck),
+ /* 46 S> */ B(Wide), B(LdaSmi), I16(262),
+ B(Star), R(1),
+ B(LdaConstant), U8(0),
+ B(Star), R(2),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(1), U8(2),
+ B(Throw),
+]
+constant pool: [
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["#h"],
+]
+handlers: [
+]
+
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/StaticPrivateMethodDeclaration.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/StaticPrivateMethodDeclaration.golden
new file mode 100644
index 0000000000..fda73f08d2
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/StaticPrivateMethodDeclaration.golden
@@ -0,0 +1,231 @@
+#
+# Autogenerated by generate-bytecode-expectations.
+#
+
+---
+wrap: yes
+private methods: yes
+
+---
+snippet: "
+ {
+ class A {
+ static #a() { return 1; }
+ }
+ }
+"
+frame size: 6
+parameter count: 1
+bytecode array length: 41
+bytecodes: [
+ /* 30 E> */ B(StackCheck),
+ B(CreateBlockContext), U8(0),
+ B(PushContext), R(1),
+ B(LdaTheHole),
+ B(Star), R(5),
+ B(CreateClosure), U8(2), U8(0), U8(2),
+ B(Star), R(2),
+ B(LdaConstant), U8(1),
+ B(Star), R(3),
+ B(Mov), R(2), R(4),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(3), U8(3),
+ B(Star), R(3),
+ B(CreateClosure), U8(3), U8(1), U8(2),
+ B(StaCurrentContextSlot), U8(4),
+ B(PopContext), R(1),
+ B(Mov), R(4), R(0),
+ B(LdaUndefined),
+ /* 84 S> */ B(Return),
+]
+constant pool: [
+ SCOPE_INFO_TYPE,
+ FIXED_ARRAY_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
+]
+handlers: [
+]
+
+---
+snippet: "
+ {
+ class A {
+ static get #a() { return 1; }
+ }
+ }
+"
+frame size: 6
+parameter count: 1
+bytecode array length: 51
+bytecodes: [
+ /* 30 E> */ B(StackCheck),
+ B(CreateBlockContext), U8(0),
+ B(PushContext), R(1),
+ B(LdaTheHole),
+ B(Star), R(5),
+ B(CreateClosure), U8(2), U8(0), U8(2),
+ B(Star), R(2),
+ B(LdaConstant), U8(1),
+ B(Star), R(3),
+ B(Mov), R(2), R(4),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(3), U8(3),
+ B(Star), R(3),
+ B(CreateClosure), U8(3), U8(1), U8(2),
+ B(Star), R(4),
+ B(LdaNull),
+ B(Star), R(5),
+ B(CallRuntime), U16(Runtime::kCreatePrivateAccessors), R(4), U8(2),
+ B(StaCurrentContextSlot), U8(4),
+ B(PopContext), R(1),
+ B(Mov), R(2), R(0),
+ B(LdaUndefined),
+ /* 88 S> */ B(Return),
+]
+constant pool: [
+ SCOPE_INFO_TYPE,
+ FIXED_ARRAY_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
+]
+handlers: [
+]
+
+---
+snippet: "
+ {
+ class A {
+ static set #a(val) { }
+ }
+ }
+"
+frame size: 6
+parameter count: 1
+bytecode array length: 51
+bytecodes: [
+ /* 30 E> */ B(StackCheck),
+ B(CreateBlockContext), U8(0),
+ B(PushContext), R(1),
+ B(LdaTheHole),
+ B(Star), R(5),
+ B(CreateClosure), U8(2), U8(0), U8(2),
+ B(Star), R(2),
+ B(LdaConstant), U8(1),
+ B(Star), R(3),
+ B(Mov), R(2), R(4),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(3), U8(3),
+ B(Star), R(3),
+ B(LdaNull),
+ B(Star), R(4),
+ B(CreateClosure), U8(3), U8(1), U8(2),
+ B(Star), R(5),
+ B(CallRuntime), U16(Runtime::kCreatePrivateAccessors), R(4), U8(2),
+ B(StaCurrentContextSlot), U8(4),
+ B(PopContext), R(1),
+ B(Mov), R(2), R(0),
+ B(LdaUndefined),
+ /* 81 S> */ B(Return),
+]
+constant pool: [
+ SCOPE_INFO_TYPE,
+ FIXED_ARRAY_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
+]
+handlers: [
+]
+
+---
+snippet: "
+ {
+ class A {
+ static get #a() { return 1; }
+ static set #a(val) { }
+ }
+ }
+"
+frame size: 6
+parameter count: 1
+bytecode array length: 54
+bytecodes: [
+ /* 30 E> */ B(StackCheck),
+ B(CreateBlockContext), U8(0),
+ B(PushContext), R(1),
+ B(LdaTheHole),
+ B(Star), R(5),
+ B(CreateClosure), U8(2), U8(0), U8(2),
+ B(Star), R(2),
+ B(LdaConstant), U8(1),
+ B(Star), R(3),
+ B(Mov), R(2), R(4),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(3), U8(3),
+ B(Star), R(3),
+ B(CreateClosure), U8(3), U8(1), U8(2),
+ B(Star), R(4),
+ B(CreateClosure), U8(4), U8(2), U8(2),
+ B(Star), R(5),
+ B(CallRuntime), U16(Runtime::kCreatePrivateAccessors), R(4), U8(2),
+ B(StaCurrentContextSlot), U8(4),
+ B(PopContext), R(1),
+ B(Mov), R(2), R(0),
+ B(LdaUndefined),
+ /* 115 S> */ B(Return),
+]
+constant pool: [
+ SCOPE_INFO_TYPE,
+ FIXED_ARRAY_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
+]
+handlers: [
+]
+
+---
+snippet: "
+ {
+ class A {
+ static #a() { }
+ #b() { }
+ }
+ }
+"
+frame size: 7
+parameter count: 1
+bytecode array length: 58
+bytecodes: [
+ /* 30 E> */ B(StackCheck),
+ B(CreateBlockContext), U8(0),
+ B(PushContext), R(1),
+ B(LdaConstant), U8(2),
+ B(Star), R(3),
+ B(CallRuntime), U16(Runtime::kCreatePrivateNameSymbol), R(3), U8(1),
+ B(StaCurrentContextSlot), U8(6),
+ B(LdaTheHole),
+ B(Star), R(6),
+ B(CreateClosure), U8(3), U8(0), U8(2),
+ B(Star), R(2),
+ B(LdaConstant), U8(1),
+ B(Star), R(4),
+ B(Mov), R(2), R(5),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(4), U8(3),
+ B(Star), R(4),
+ B(CreateClosure), U8(4), U8(1), U8(2),
+ B(StaCurrentContextSlot), U8(4),
+ B(CreateClosure), U8(5), U8(2), U8(2),
+ B(StaCurrentContextSlot), U8(5),
+ B(PopContext), R(1),
+ B(Mov), R(5), R(0),
+ B(LdaUndefined),
+ /* 87 S> */ B(Return),
+]
+constant pool: [
+ SCOPE_INFO_TYPE,
+ FIXED_ARRAY_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["A"],
+ SHARED_FUNCTION_INFO_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
+]
+handlers: [
+]
+
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/SuperCallAndSpread.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/SuperCallAndSpread.golden
index e3eed68138..2ec0a8baa5 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/SuperCallAndSpread.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/SuperCallAndSpread.golden
@@ -91,9 +91,9 @@ snippet: "
test = new B(1, 2, 3).constructor;
})();
"
-frame size: 12
+frame size: 11
parameter count: 1
-bytecode array length: 124
+bytecode array length: 112
bytecodes: [
B(CreateRestParameter),
B(Star), R(3),
@@ -111,15 +111,11 @@ bytecodes: [
B(Ldar), R(6),
B(Inc), U8(3),
/* 152 S> */ B(Star), R(6),
- B(GetIterator), R(3), U8(4),
- B(Star), R(11),
- B(CallProperty0), R(11), R(3), U8(6),
- B(Mov), R(1), R(4),
- B(JumpIfJSReceiver), U8(7),
- B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
+ B(GetIterator), R(3), U8(4), U8(6),
B(Star), R(9),
B(LdaNamedProperty), R(9), U8(0), U8(8),
B(Star), R(8),
+ B(Mov), R(1), R(4),
B(CallProperty0), R(8), R(9), U8(14),
B(Star), R(10),
B(JumpIfJSReceiver), U8(7),
diff --git a/deps/v8/test/cctest/interpreter/generate-bytecode-expectations.cc b/deps/v8/test/cctest/interpreter/generate-bytecode-expectations.cc
index 746c554087..2fe3658813 100644
--- a/deps/v8/test/cctest/interpreter/generate-bytecode-expectations.cc
+++ b/deps/v8/test/cctest/interpreter/generate-bytecode-expectations.cc
@@ -47,11 +47,12 @@ class ProgramOptions final {
oneshot_opt_(false),
async_iteration_(false),
private_methods_(false),
+ top_level_await_(false),
verbose_(false) {}
bool Validate() const;
- void UpdateFromHeader(std::istream& stream); // NOLINT
- void PrintHeader(std::ostream& stream) const; // NOLINT
+ void UpdateFromHeader(std::istream* stream);
+ void PrintHeader(std::ostream* stream) const;
bool parsing_failed() const { return parsing_failed_; }
bool print_help() const { return print_help_; }
@@ -70,6 +71,7 @@ class ProgramOptions final {
bool oneshot_opt() const { return oneshot_opt_; }
bool async_iteration() const { return async_iteration_; }
bool private_methods() const { return private_methods_; }
+ bool top_level_await() const { return top_level_await_; }
bool verbose() const { return verbose_; }
bool suppress_runtime_errors() const { return baseline() && !verbose_; }
std::vector<std::string> input_filenames() const { return input_filenames_; }
@@ -90,6 +92,7 @@ class ProgramOptions final {
bool oneshot_opt_;
bool async_iteration_;
bool private_methods_;
+ bool top_level_await_;
bool verbose_;
std::vector<std::string> input_filenames_;
std::string output_filename_;
@@ -196,6 +199,8 @@ ProgramOptions ProgramOptions::FromCommandLine(int argc, char** argv) {
options.async_iteration_ = true;
} else if (strcmp(argv[i], "--private-methods") == 0) {
options.private_methods_ = true;
+ } else if (strcmp(argv[i], "--harmony-top-level-await") == 0) {
+ options.top_level_await_ = true;
} else if (strcmp(argv[i], "--verbose") == 0) {
options.verbose_ = true;
} else if (strncmp(argv[i], "--output=", 9) == 0) {
@@ -291,17 +296,17 @@ bool ProgramOptions::Validate() const {
return true;
}
-void ProgramOptions::UpdateFromHeader(std::istream& stream) {
+void ProgramOptions::UpdateFromHeader(std::istream* stream) {
std::string line;
const char* kPrintCallee = "print callee: ";
const char* kOneshotOpt = "oneshot opt: ";
// Skip to the beginning of the options header
- while (std::getline(stream, line)) {
+ while (std::getline(*stream, line)) {
if (line == "---") break;
}
- while (std::getline(stream, line)) {
+ while (std::getline(*stream, line)) {
if (line.compare(0, 8, "module: ") == 0) {
module_ = ParseBoolean(line.c_str() + 8);
} else if (line.compare(0, 6, "wrap: ") == 0) {
@@ -318,6 +323,8 @@ void ProgramOptions::UpdateFromHeader(std::istream& stream) {
async_iteration_ = ParseBoolean(line.c_str() + 17);
} else if (line.compare(0, 17, "private methods: ") == 0) {
private_methods_ = ParseBoolean(line.c_str() + 17);
+ } else if (line.compare(0, 17, "top level await: ") == 0) {
+ top_level_await_ = ParseBoolean(line.c_str() + 17);
} else if (line == "---") {
break;
} else if (line.empty()) {
@@ -328,22 +335,23 @@ void ProgramOptions::UpdateFromHeader(std::istream& stream) {
}
}
-void ProgramOptions::PrintHeader(std::ostream& stream) const { // NOLINT
- stream << "---"
- << "\nwrap: " << BooleanToString(wrap_);
+void ProgramOptions::PrintHeader(std::ostream* stream) const {
+ *stream << "---"
+ << "\nwrap: " << BooleanToString(wrap_);
if (!test_function_name_.empty()) {
- stream << "\ntest function name: " << test_function_name_;
+ *stream << "\ntest function name: " << test_function_name_;
}
- if (module_) stream << "\nmodule: yes";
- if (top_level_) stream << "\ntop level: yes";
- if (print_callee_) stream << "\nprint callee: yes";
- if (oneshot_opt_) stream << "\noneshot opt: yes";
- if (async_iteration_) stream << "\nasync iteration: yes";
- if (private_methods_) stream << "\nprivate methods: yes";
+ if (module_) *stream << "\nmodule: yes";
+ if (top_level_) *stream << "\ntop level: yes";
+ if (print_callee_) *stream << "\nprint callee: yes";
+ if (oneshot_opt_) *stream << "\noneshot opt: yes";
+ if (async_iteration_) *stream << "\nasync iteration: yes";
+ if (private_methods_) *stream << "\nprivate methods: yes";
+ if (top_level_await_) *stream << "\ntop level await: yes";
- stream << "\n\n";
+ *stream << "\n\n";
}
V8InitializationScope::V8InitializationScope(const char* exec_path)
@@ -370,17 +378,17 @@ V8InitializationScope::~V8InitializationScope() {
v8::V8::ShutdownPlatform();
}
-std::string ReadRawJSSnippet(std::istream& stream) { // NOLINT
+std::string ReadRawJSSnippet(std::istream* stream) {
std::stringstream body_buffer;
- CHECK(body_buffer << stream.rdbuf());
+ CHECK(body_buffer << stream->rdbuf());
return body_buffer.str();
}
-bool ReadNextSnippet(std::istream& stream, std::string* string_out) { // NOLINT
+bool ReadNextSnippet(std::istream* stream, std::string* string_out) {
std::string line;
bool found_begin_snippet = false;
string_out->clear();
- while (std::getline(stream, line)) {
+ while (std::getline(*stream, line)) {
if (line == "snippet: \"") {
found_begin_snippet = true;
continue;
@@ -420,8 +428,7 @@ std::string UnescapeString(const std::string& escaped_string) {
}
void ExtractSnippets(std::vector<std::string>* snippet_list,
- std::istream& body_stream, // NOLINT
- bool read_raw_js_snippet) {
+ std::istream* body_stream, bool read_raw_js_snippet) {
if (read_raw_js_snippet) {
snippet_list->push_back(ReadRawJSSnippet(body_stream));
} else {
@@ -432,7 +439,7 @@ void ExtractSnippets(std::vector<std::string>* snippet_list,
}
}
-void GenerateExpectationsFile(std::ostream& stream, // NOLINT
+void GenerateExpectationsFile(std::ostream* stream,
const std::vector<std::string>& snippet_list,
const V8InitializationScope& platform,
const ProgramOptions& options) {
@@ -452,14 +459,16 @@ void GenerateExpectationsFile(std::ostream& stream, // NOLINT
}
if (options.private_methods()) i::FLAG_harmony_private_methods = true;
+ if (options.top_level_await()) i::FLAG_harmony_top_level_await = true;
- stream << "#\n# Autogenerated by generate-bytecode-expectations.\n#\n\n";
+ *stream << "#\n# Autogenerated by generate-bytecode-expectations.\n#\n\n";
options.PrintHeader(stream);
for (const std::string& snippet : snippet_list) {
printer.PrintExpectation(stream, snippet);
}
i::FLAG_harmony_private_methods = false;
+ i::FLAG_harmony_top_level_await = false;
}
bool WriteExpectationsFile(const std::vector<std::string>& snippet_list,
@@ -477,7 +486,7 @@ bool WriteExpectationsFile(const std::vector<std::string>& snippet_list,
std::ostream& output_stream =
options.write_to_stdout() ? std::cout : output_file_handle;
- GenerateExpectationsFile(output_stream, snippet_list, platform, options);
+ GenerateExpectationsFile(&output_stream, snippet_list, platform, options);
return true;
}
@@ -487,7 +496,7 @@ std::string WriteExpectationsToString(
const V8InitializationScope& platform, const ProgramOptions& options) {
std::stringstream output_string;
- GenerateExpectationsFile(output_string, snippet_list, platform, options);
+ GenerateExpectationsFile(&output_string, snippet_list, platform, options);
return output_string.str();
}
@@ -520,6 +529,7 @@ void PrintUsage(const char* exec_path) {
"Specify the name of the test function.\n"
" --top-level Process top level code, not the top-level function.\n"
" --private-methods Enable harmony_private_methods flag.\n"
+      "      --harmony-top-level-await Enable await at the module level.\n"
" --output=file.name\n"
" Specify the output file. If not specified, output goes to "
"stdout.\n"
@@ -612,7 +622,7 @@ int main(int argc, char** argv) {
// Rebaseline will never get here, so we will always take the
// GenerateExpectationsFile at the end of this function.
DCHECK(!options.rebaseline() && !options.check_baseline());
- ExtractSnippets(&snippet_list, std::cin, options.read_raw_js_snippet());
+ ExtractSnippets(&snippet_list, &std::cin, options.read_raw_js_snippet());
} else {
bool check_failed = false;
for (const std::string& input_filename : options.input_filenames()) {
@@ -628,11 +638,11 @@ int main(int argc, char** argv) {
ProgramOptions updated_options = options;
if (options.baseline()) {
- updated_options.UpdateFromHeader(input_stream);
+ updated_options.UpdateFromHeader(&input_stream);
CHECK(updated_options.Validate());
}
- ExtractSnippets(&snippet_list, input_stream,
+ ExtractSnippets(&snippet_list, &input_stream,
options.read_raw_js_snippet());
input_stream.close();
diff --git a/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc b/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc
index fda02933aa..be0b129418 100644
--- a/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc
+++ b/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc
@@ -95,10 +95,10 @@ class InitializedIgnitionHandleScope : public InitializedHandleScope {
}
};
-void SkipGoldenFileHeader(std::istream& stream) { // NOLINT
+void SkipGoldenFileHeader(std::istream* stream) {
std::string line;
int separators_seen = 0;
- while (std::getline(stream, line)) {
+ while (std::getline(*stream, line)) {
if (line == "---") separators_seen += 1;
if (separators_seen == 2) return;
}
@@ -107,7 +107,7 @@ void SkipGoldenFileHeader(std::istream& stream) { // NOLINT
std::string LoadGolden(const std::string& golden_filename) {
std::ifstream expected_file((kGoldenFileDirectory + golden_filename).c_str());
CHECK(expected_file.is_open());
- SkipGoldenFileHeader(expected_file);
+ SkipGoldenFileHeader(&expected_file);
std::ostringstream expected_stream;
// Restore the first separator, which was consumed by SkipGoldenFileHeader
expected_stream << "---\n" << expected_file.rdbuf();
@@ -125,31 +125,30 @@ std::string BuildActual(const BytecodeExpectationsPrinter& printer,
if (prologue) source_code += prologue;
source_code += snippet;
if (epilogue) source_code += epilogue;
- printer.PrintExpectation(actual_stream, source_code);
+ printer.PrintExpectation(&actual_stream, source_code);
}
return actual_stream.str();
}
// inplace left trim
-static inline void ltrim(std::string& str) { // NOLINT(runtime/references)
- str.erase(str.begin(),
- std::find_if(str.begin(), str.end(),
- [](unsigned char ch) { return !std::isspace(ch); }));
+static inline void ltrim(std::string* str) {
+ str->erase(str->begin(),
+ std::find_if(str->begin(), str->end(),
+ [](unsigned char ch) { return !std::isspace(ch); }));
}
// inplace right trim
-static inline void rtrim(std::string& str) { // NOLINT(runtime/references)
- str.erase(std::find_if(str.rbegin(), str.rend(),
- [](unsigned char ch) { return !std::isspace(ch); })
- .base(),
- str.end());
+static inline void rtrim(std::string* str) {
+ str->erase(std::find_if(str->rbegin(), str->rend(),
+ [](unsigned char ch) { return !std::isspace(ch); })
+ .base(),
+ str->end());
}
-static inline std::string trim(
- std::string& str) { // NOLINT(runtime/references)
+static inline std::string trim(std::string* str) {
ltrim(str);
rtrim(str);
- return str;
+ return *str;
}
bool CompareTexts(const std::string& generated, const std::string& expected) {
@@ -181,7 +180,7 @@ bool CompareTexts(const std::string& generated, const std::string& expected) {
return false;
}
- if (trim(generated_line) != trim(expected_line)) {
+ if (trim(&generated_line) != trim(&expected_line)) {
std::cerr << "Inputs differ at line " << line_number << "\n";
std::cerr << " Generated: '" << generated_line << "'\n";
std::cerr << " Expected: '" << expected_line << "'\n";
@@ -2885,6 +2884,130 @@ TEST(PrivateAccessorAccess) {
i::FLAG_harmony_private_methods = old_methods_flag;
}
+TEST(StaticPrivateMethodDeclaration) {
+ bool old_methods_flag = i::FLAG_harmony_private_methods;
+ i::FLAG_harmony_private_methods = true;
+ InitializedIgnitionHandleScope scope;
+ BytecodeExpectationsPrinter printer(CcTest::isolate());
+
+ const char* snippets[] = {
+ "{\n"
+ " class A {\n"
+ " static #a() { return 1; }\n"
+ " }\n"
+ "}\n",
+
+ "{\n"
+ " class A {\n"
+ " static get #a() { return 1; }\n"
+ " }\n"
+ "}\n",
+
+ "{\n"
+ " class A {\n"
+ " static set #a(val) { }\n"
+ " }\n"
+ "}\n",
+
+ "{\n"
+ " class A {\n"
+ " static get #a() { return 1; }\n"
+ " static set #a(val) { }\n"
+ " }\n"
+ "}\n",
+
+ "{\n"
+ " class A {\n"
+ " static #a() { }\n"
+ " #b() { }\n"
+ " }\n"
+ "}\n"};
+
+ CHECK(CompareTexts(BuildActual(printer, snippets),
+ LoadGolden("StaticPrivateMethodDeclaration.golden")));
+ i::FLAG_harmony_private_methods = old_methods_flag;
+}
+
+TEST(StaticPrivateMethodAccess) {
+ bool old_methods_flag = i::FLAG_harmony_private_methods;
+ i::FLAG_harmony_private_methods = true;
+ InitializedIgnitionHandleScope scope;
+ BytecodeExpectationsPrinter printer(CcTest::isolate());
+ printer.set_wrap(false);
+ printer.set_test_function_name("test");
+
+ const char* snippets[] = {
+ "class A {\n"
+ " static #a() { return 1; }\n"
+ " static test() { return this.#a(); }\n"
+ "}\n"
+ "\n"
+ "var test = A.test;\n"
+ "test();\n",
+
+ "class B {\n"
+ " static #b() { return 1; }\n"
+ " static test() { this.#b = 1; }\n"
+ "}\n"
+ "\n"
+ "var test = B.test;\n"
+ "test();\n",
+
+ "class C {\n"
+ " static #c() { return 1; }\n"
+ " static test() { this.#c++; }\n"
+ "}\n"
+ "\n"
+ "var test = C.test;\n"
+ "test();\n",
+
+ "class D {\n"
+ " static get #d() { return 1; }\n"
+ " static set #d(val) { }\n"
+ "\n"
+ " static test() {\n"
+ " this.#d++;\n"
+ " this.#d = 1;\n"
+ " return this.#d;\n"
+ " }\n"
+ "}\n"
+ "\n"
+ "var test = D.test;\n"
+ "test();\n",
+
+ "class E {\n"
+ " static get #e() { return 1; }\n"
+ " static test() { this.#e++; }\n"
+ "}\n"
+ "var test = E.test;\n"
+ "test();\n",
+
+ "class F {\n"
+ " static set #f(val) { }\n"
+ " static test() { this.#f++; }\n"
+ "}\n"
+ "var test = F.test;\n"
+ "test();\n",
+
+ "class G {\n"
+ " static get #d() { return 1; }\n"
+ " static test() { this.#d = 1; }\n"
+ "}\n"
+ "var test = G.test;\n"
+ "test();\n",
+
+ "class H {\n"
+ " set #h(val) { }\n"
+ " static test() { this.#h; }\n"
+ "}\n"
+ "var test = H.test;\n"
+ "test();\n"};
+
+ CHECK(CompareTexts(BuildActual(printer, snippets),
+ LoadGolden("StaticPrivateMethodAccess.golden")));
+ i::FLAG_harmony_private_methods = old_methods_flag;
+}
+
TEST(PrivateAccessorDeclaration) {
bool old_methods_flag = i::FLAG_harmony_private_methods;
i::FLAG_harmony_private_methods = true;
@@ -3099,6 +3222,35 @@ TEST(Modules) {
LoadGolden("Modules.golden")));
}
+TEST(AsyncModules) {
+ bool previous_top_level_await_flag = i::FLAG_harmony_top_level_await;
+ i::FLAG_harmony_top_level_await = true;
+ InitializedIgnitionHandleScope scope;
+ BytecodeExpectationsPrinter printer(CcTest::isolate());
+ printer.set_wrap(false);
+ printer.set_module(true);
+ printer.set_top_level(true);
+
+ const char* snippets[] = {
+ "await 42;\n",
+
+ "await import(\"foo\");\n",
+
+ "await 42;\n"
+ "async function foo() {\n"
+ " await 42;\n"
+ "}\n"
+ "foo();\n",
+
+ "import * as foo from \"bar\";\n"
+ "await import(\"goo\");\n",
+ };
+
+ CHECK(CompareTexts(BuildActual(printer, snippets),
+ LoadGolden("AsyncModules.golden")));
+ i::FLAG_harmony_top_level_await = previous_top_level_await_flag;
+}
+
TEST(SuperCallAndSpread) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
diff --git a/deps/v8/test/cctest/interpreter/test-interpreter.cc b/deps/v8/test/cctest/interpreter/test-interpreter.cc
index 466e768d7d..0ddc8fe608 100644
--- a/deps/v8/test/cctest/interpreter/test-interpreter.cc
+++ b/deps/v8/test/cctest/interpreter/test-interpreter.cc
@@ -1485,19 +1485,20 @@ TEST(InterpreterCall) {
}
}
-static BytecodeArrayBuilder& SetRegister(
- BytecodeArrayBuilder& builder, // NOLINT(runtime/references)
- Register reg, int value, Register scratch) {
- return builder.StoreAccumulatorInRegister(scratch)
+static BytecodeArrayBuilder& SetRegister(BytecodeArrayBuilder* builder,
+ Register reg, int value,
+ Register scratch) {
+ return builder->StoreAccumulatorInRegister(scratch)
.LoadLiteral(Smi::FromInt(value))
.StoreAccumulatorInRegister(reg)
.LoadAccumulatorWithRegister(scratch);
}
-static BytecodeArrayBuilder& IncrementRegister(
- BytecodeArrayBuilder& builder, // NOLINT(runtime/references)
- Register reg, int value, Register scratch, int slot_index) {
- return builder.StoreAccumulatorInRegister(scratch)
+static BytecodeArrayBuilder& IncrementRegister(BytecodeArrayBuilder* builder,
+ Register reg, int value,
+ Register scratch,
+ int slot_index) {
+ return builder->StoreAccumulatorInRegister(scratch)
.LoadLiteral(Smi::FromInt(value))
.BinaryOperation(Token::Value::ADD, reg, slot_index)
.StoreAccumulatorInRegister(reg)
@@ -1525,13 +1526,13 @@ TEST(InterpreterJumps) {
builder.LoadLiteral(Smi::zero())
.StoreAccumulatorInRegister(reg)
.Jump(&label[0]);
- SetRegister(builder, reg, 1024, scratch).Bind(&loop_header);
- IncrementRegister(builder, reg, 1, scratch, GetIndex(slot)).Jump(&label[1]);
- SetRegister(builder, reg, 2048, scratch).Bind(&label[0]);
- IncrementRegister(builder, reg, 2, scratch, GetIndex(slot1))
+ SetRegister(&builder, reg, 1024, scratch).Bind(&loop_header);
+ IncrementRegister(&builder, reg, 1, scratch, GetIndex(slot)).Jump(&label[1]);
+ SetRegister(&builder, reg, 2048, scratch).Bind(&label[0]);
+ IncrementRegister(&builder, reg, 2, scratch, GetIndex(slot1))
.JumpLoop(&loop_header, 0);
- SetRegister(builder, reg, 4096, scratch).Bind(&label[1]);
- IncrementRegister(builder, reg, 4, scratch, GetIndex(slot2))
+ SetRegister(&builder, reg, 4096, scratch).Bind(&label[1]);
+ IncrementRegister(&builder, reg, 4, scratch, GetIndex(slot2))
.LoadAccumulatorWithRegister(reg)
.Return();
@@ -1566,19 +1567,19 @@ TEST(InterpreterConditionalJumps) {
.StoreAccumulatorInRegister(reg)
.LoadFalse()
.JumpIfFalse(ToBooleanMode::kAlreadyBoolean, &label[0]);
- IncrementRegister(builder, reg, 1024, scratch, GetIndex(slot))
+ IncrementRegister(&builder, reg, 1024, scratch, GetIndex(slot))
.Bind(&label[0])
.LoadTrue()
.JumpIfFalse(ToBooleanMode::kAlreadyBoolean, &done);
- IncrementRegister(builder, reg, 1, scratch, GetIndex(slot1))
+ IncrementRegister(&builder, reg, 1, scratch, GetIndex(slot1))
.LoadTrue()
.JumpIfTrue(ToBooleanMode::kAlreadyBoolean, &label[1]);
- IncrementRegister(builder, reg, 2048, scratch, GetIndex(slot2))
+ IncrementRegister(&builder, reg, 2048, scratch, GetIndex(slot2))
.Bind(&label[1]);
- IncrementRegister(builder, reg, 2, scratch, GetIndex(slot3))
+ IncrementRegister(&builder, reg, 2, scratch, GetIndex(slot3))
.LoadFalse()
.JumpIfTrue(ToBooleanMode::kAlreadyBoolean, &done1);
- IncrementRegister(builder, reg, 4, scratch, GetIndex(slot4))
+ IncrementRegister(&builder, reg, 4, scratch, GetIndex(slot4))
.LoadAccumulatorWithRegister(reg)
.Bind(&done)
.Bind(&done1)
@@ -1616,19 +1617,19 @@ TEST(InterpreterConditionalJumps2) {
.StoreAccumulatorInRegister(reg)
.LoadFalse()
.JumpIfFalse(ToBooleanMode::kAlreadyBoolean, &label[0]);
- IncrementRegister(builder, reg, 1024, scratch, GetIndex(slot))
+ IncrementRegister(&builder, reg, 1024, scratch, GetIndex(slot))
.Bind(&label[0])
.LoadTrue()
.JumpIfFalse(ToBooleanMode::kAlreadyBoolean, &done);
- IncrementRegister(builder, reg, 1, scratch, GetIndex(slot1))
+ IncrementRegister(&builder, reg, 1, scratch, GetIndex(slot1))
.LoadTrue()
.JumpIfTrue(ToBooleanMode::kAlreadyBoolean, &label[1]);
- IncrementRegister(builder, reg, 2048, scratch, GetIndex(slot2))
+ IncrementRegister(&builder, reg, 2048, scratch, GetIndex(slot2))
.Bind(&label[1]);
- IncrementRegister(builder, reg, 2, scratch, GetIndex(slot3))
+ IncrementRegister(&builder, reg, 2, scratch, GetIndex(slot3))
.LoadFalse()
.JumpIfTrue(ToBooleanMode::kAlreadyBoolean, &done1);
- IncrementRegister(builder, reg, 4, scratch, GetIndex(slot4))
+ IncrementRegister(&builder, reg, 4, scratch, GetIndex(slot4))
.LoadAccumulatorWithRegister(reg)
.Bind(&done)
.Bind(&done1)
diff --git a/deps/v8/test/cctest/libplatform/DEPS b/deps/v8/test/cctest/libplatform/DEPS
index b2bee408ab..54415e157b 100644
--- a/deps/v8/test/cctest/libplatform/DEPS
+++ b/deps/v8/test/cctest/libplatform/DEPS
@@ -1,3 +1,3 @@
include_rules = [
- "+perfetto",
+ "+protos/perfetto",
]
diff --git a/deps/v8/test/cctest/libplatform/test-tracing.cc b/deps/v8/test/cctest/libplatform/test-tracing.cc
index a98445be97..1f1cb55f9b 100644
--- a/deps/v8/test/cctest/libplatform/test-tracing.cc
+++ b/deps/v8/test/cctest/libplatform/test-tracing.cc
@@ -10,11 +10,11 @@
#include "test/cctest/cctest.h"
#ifdef V8_USE_PERFETTO
-#include "perfetto/trace/chrome/chrome_trace_event.pb.h"
-#include "perfetto/trace/chrome/chrome_trace_event.pbzero.h"
-#include "perfetto/trace/chrome/chrome_trace_packet.pb.h"
-#include "perfetto/trace/trace.pb.h"
#include "perfetto/tracing.h"
+#include "protos/perfetto/trace/chrome/chrome_trace_event.pb.h"
+#include "protos/perfetto/trace/chrome/chrome_trace_event.pbzero.h"
+#include "protos/perfetto/trace/chrome/chrome_trace_packet.pb.h"
+#include "protos/perfetto/trace/trace.pb.h"
#include "src/libplatform/tracing/json-trace-event-listener.h"
#include "src/libplatform/tracing/trace-event-listener.h"
#endif // V8_USE_PERFETTO
@@ -157,7 +157,7 @@ void PopulateJSONWriter(TraceWriter* writer) {
std::unique_ptr<v8::Platform> default_platform(
v8::platform::NewDefaultPlatform());
i::V8::SetPlatformForTesting(default_platform.get());
- auto tracing = base::make_unique<v8::platform::tracing::TracingController>();
+ auto tracing = std::make_unique<v8::platform::tracing::TracingController>();
v8::platform::tracing::TracingController* tracing_controller = tracing.get();
static_cast<v8::platform::DefaultPlatform*>(default_platform.get())
->SetTracingController(std::move(tracing));
@@ -242,7 +242,7 @@ TEST(TestTracingController) {
v8::platform::NewDefaultPlatform());
i::V8::SetPlatformForTesting(default_platform.get());
- auto tracing = base::make_unique<v8::platform::tracing::TracingController>();
+ auto tracing = std::make_unique<v8::platform::tracing::TracingController>();
v8::platform::tracing::TracingController* tracing_controller = tracing.get();
static_cast<v8::platform::DefaultPlatform*>(default_platform.get())
->SetTracingController(std::move(tracing));
@@ -301,8 +301,7 @@ TEST(TestTracingControllerMultipleArgsAndCopy) {
v8::platform::NewDefaultPlatform());
i::V8::SetPlatformForTesting(default_platform.get());
- auto tracing =
- base::make_unique<v8::platform::tracing::TracingController>();
+ auto tracing = std::make_unique<v8::platform::tracing::TracingController>();
v8::platform::tracing::TracingController* tracing_controller =
tracing.get();
static_cast<v8::platform::DefaultPlatform*>(default_platform.get())
@@ -424,7 +423,7 @@ TEST(TracingObservers) {
v8::platform::NewDefaultPlatform());
i::V8::SetPlatformForTesting(default_platform.get());
- auto tracing = base::make_unique<v8::platform::tracing::TracingController>();
+ auto tracing = std::make_unique<v8::platform::tracing::TracingController>();
v8::platform::tracing::TracingController* tracing_controller = tracing.get();
static_cast<v8::platform::DefaultPlatform*>(default_platform.get())
->SetTracingController(std::move(tracing));
@@ -517,7 +516,7 @@ TEST(AddTraceEventMultiThreaded) {
v8::platform::NewDefaultPlatform());
i::V8::SetPlatformForTesting(default_platform.get());
- auto tracing = base::make_unique<v8::platform::tracing::TracingController>();
+ auto tracing = std::make_unique<v8::platform::tracing::TracingController>();
v8::platform::tracing::TracingController* tracing_controller = tracing.get();
static_cast<v8::platform::DefaultPlatform*>(default_platform.get())
->SetTracingController(std::move(tracing));
@@ -576,8 +575,7 @@ class TracingTestHarness {
default_platform_ = v8::platform::NewDefaultPlatform();
i::V8::SetPlatformForTesting(default_platform_.get());
- auto tracing =
- base::make_unique<v8::platform::tracing::TracingController>();
+ auto tracing = std::make_unique<v8::platform::tracing::TracingController>();
tracing_controller_ = tracing.get();
static_cast<v8::platform::DefaultPlatform*>(default_platform_.get())
->SetTracingController(std::move(tracing));
diff --git a/deps/v8/test/cctest/manually-externalized-buffer.h b/deps/v8/test/cctest/manually-externalized-buffer.h
new file mode 100644
index 0000000000..b5eeed7382
--- /dev/null
+++ b/deps/v8/test/cctest/manually-externalized-buffer.h
@@ -0,0 +1,34 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CCTEST_MANUALLY_EXTERNALIZED_BUFFER_H_
+#define V8_CCTEST_MANUALLY_EXTERNALIZED_BUFFER_H_
+
+#include "src/api/api-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace testing {
+
+// Utility to free the allocated memory for a buffer that is manually
+// externalized in a test.
+struct ManuallyExternalizedBuffer {
+ Handle<JSArrayBuffer> buffer_;
+ v8::ArrayBuffer::Contents contents_;
+
+ explicit ManuallyExternalizedBuffer(Handle<JSArrayBuffer> buffer)
+ : buffer_(buffer),
+ contents_(v8::Utils::ToLocal(buffer_)->Externalize()) {}
+ ~ManuallyExternalizedBuffer() {
+ contents_.Deleter()(contents_.Data(), contents_.ByteLength(),
+ contents_.DeleterData());
+ }
+ void* backing_store() { return contents_.Data(); }
+};
+
+} // namespace testing
+} // namespace internal
+} // namespace v8
+
+#endif // V8_CCTEST_MANUALLY_EXTERNALIZED_BUFFER_H_
diff --git a/deps/v8/test/cctest/test-accessor-assembler.cc b/deps/v8/test/cctest/test-accessor-assembler.cc
index c88c85b586..6183ef970c 100644
--- a/deps/v8/test/cctest/test-accessor-assembler.cc
+++ b/deps/v8/test/cctest/test-accessor-assembler.cc
@@ -18,7 +18,6 @@ namespace internal {
using compiler::CodeAssemblerTester;
using compiler::FunctionTester;
using compiler::Node;
-using compiler::TNode;
namespace {
diff --git a/deps/v8/test/cctest/test-api-accessors.cc b/deps/v8/test/cctest/test-api-accessors.cc
index 8c2f92d665..5f82d78711 100644
--- a/deps/v8/test/cctest/test-api-accessors.cc
+++ b/deps/v8/test/cctest/test-api-accessors.cc
@@ -287,21 +287,30 @@ TEST(AccessorSetHasNoSideEffect) {
v8::Local<v8::Object> obj = templ->NewInstance(env.local()).ToLocalChecked();
CHECK(env->Global()->Set(env.local(), v8_str("obj"), obj).FromJust());
obj->SetAccessor(context, v8_str("foo"), Getter).ToChecked();
- CHECK(v8::debug::EvaluateGlobal(isolate, v8_str("obj.foo"), true).IsEmpty());
+ CHECK(v8::debug::EvaluateGlobal(
+ isolate, v8_str("obj.foo"),
+ v8::debug::EvaluateGlobalMode::kDisableBreaksAndThrowOnSideEffect)
+ .IsEmpty());
obj->SetAccessor(context, v8_str("foo"), Getter, nullptr,
v8::MaybeLocal<v8::Value>(), v8::AccessControl::DEFAULT,
v8::PropertyAttribute::None,
v8::SideEffectType::kHasNoSideEffect)
.ToChecked();
- v8::debug::EvaluateGlobal(isolate, v8_str("obj.foo"), true).ToLocalChecked();
+ v8::debug::EvaluateGlobal(
+ isolate, v8_str("obj.foo"),
+ v8::debug::EvaluateGlobalMode::kDisableBreaksAndThrowOnSideEffect)
+ .ToLocalChecked();
// Check that the setter is not whitelisted.
v8::TryCatch try_catch(isolate);
- CHECK(v8::debug::EvaluateGlobal(isolate, v8_str("obj.foo = 1"), true)
+ CHECK(v8::debug::EvaluateGlobal(
+ isolate, v8_str("obj.foo = 1"),
+ v8::debug::EvaluateGlobalMode::kDisableBreaksAndThrowOnSideEffect)
.IsEmpty());
CHECK(try_catch.HasCaught());
- CHECK_NE(1, v8::debug::EvaluateGlobal(isolate, v8_str("obj.foo"), false)
+ CHECK_NE(1, v8::debug::EvaluateGlobal(isolate, v8_str("obj.foo"),
+ v8::debug::EvaluateGlobalMode::kDefault)
.ToLocalChecked()
->Int32Value(env.local())
.FromJust());
@@ -323,12 +332,16 @@ TEST(SetAccessorSetSideEffectReceiverCheck1) {
v8::SideEffectType::kHasNoSideEffect,
v8::SideEffectType::kHasSideEffectToReceiver)
.ToChecked();
- CHECK(v8::debug::EvaluateGlobal(isolate, v8_str("obj.foo"), true)
+ CHECK(v8::debug::EvaluateGlobal(
+ isolate, v8_str("obj.foo"),
+ v8::debug::EvaluateGlobalMode::kDisableBreaksAndThrowOnSideEffect)
.ToLocalChecked()
->Equals(env.local(), v8_str("return value"))
.FromJust());
v8::TryCatch try_catch(isolate);
- CHECK(v8::debug::EvaluateGlobal(isolate, v8_str("obj.foo = 1"), true)
+ CHECK(v8::debug::EvaluateGlobal(
+ isolate, v8_str("obj.foo = 1"),
+ v8::debug::EvaluateGlobalMode::kDisableBreaksAndThrowOnSideEffect)
.IsEmpty());
CHECK(try_catch.HasCaught());
CHECK_EQ(0, set_accessor_call_count);
@@ -357,11 +370,15 @@ TEST(SetAccessorSetSideEffectReceiverCheck2) {
->Set(env.local(), v8_str("f"),
templ->GetFunction(env.local()).ToLocalChecked())
.FromJust());
- CHECK(v8::debug::EvaluateGlobal(isolate, v8_str("new f().bar"), true)
+ CHECK(v8::debug::EvaluateGlobal(
+ isolate, v8_str("new f().bar"),
+ v8::debug::EvaluateGlobalMode::kDisableBreaksAndThrowOnSideEffect)
.ToLocalChecked()
->Equals(env.local(), v8_str("return value"))
.FromJust());
- v8::debug::EvaluateGlobal(isolate, v8_str("new f().bar = 1"), true)
+ v8::debug::EvaluateGlobal(
+ isolate, v8_str("new f().bar = 1"),
+ v8::debug::EvaluateGlobalMode::kDisableBreaksAndThrowOnSideEffect)
.ToLocalChecked();
CHECK_EQ(1, set_accessor_call_count);
}
@@ -377,20 +394,29 @@ TEST(AccessorSetNativeDataPropertyHasNoSideEffect) {
v8::Local<v8::Object> obj = templ->NewInstance(env.local()).ToLocalChecked();
CHECK(env->Global()->Set(env.local(), v8_str("obj"), obj).FromJust());
obj->SetNativeDataProperty(context, v8_str("foo"), Getter).ToChecked();
- CHECK(v8::debug::EvaluateGlobal(isolate, v8_str("obj.foo"), true).IsEmpty());
+ CHECK(v8::debug::EvaluateGlobal(
+ isolate, v8_str("obj.foo"),
+ v8::debug::EvaluateGlobalMode::kDisableBreaksAndThrowOnSideEffect)
+ .IsEmpty());
obj->SetNativeDataProperty(
context, v8_str("foo"), Getter, nullptr, v8::Local<v8::Value>(),
v8::PropertyAttribute::None, v8::SideEffectType::kHasNoSideEffect)
.ToChecked();
- v8::debug::EvaluateGlobal(isolate, v8_str("obj.foo"), true).ToLocalChecked();
+ v8::debug::EvaluateGlobal(
+ isolate, v8_str("obj.foo"),
+ v8::debug::EvaluateGlobalMode::kDisableBreaksAndThrowOnSideEffect)
+ .ToLocalChecked();
// Check that the setter is not whitelisted.
v8::TryCatch try_catch(isolate);
- CHECK(v8::debug::EvaluateGlobal(isolate, v8_str("obj.foo = 1"), true)
+ CHECK(v8::debug::EvaluateGlobal(
+ isolate, v8_str("obj.foo = 1"),
+ v8::debug::EvaluateGlobalMode::kDisableBreaksAndThrowOnSideEffect)
.IsEmpty());
CHECK(try_catch.HasCaught());
- CHECK_NE(1, v8::debug::EvaluateGlobal(isolate, v8_str("obj.foo"), false)
+ CHECK_NE(1, v8::debug::EvaluateGlobal(isolate, v8_str("obj.foo"),
+ v8::debug::EvaluateGlobalMode::kDefault)
.ToLocalChecked()
->Int32Value(env.local())
.FromJust());
@@ -407,20 +433,29 @@ TEST(AccessorSetLazyDataPropertyHasNoSideEffect) {
v8::Local<v8::Object> obj = templ->NewInstance(env.local()).ToLocalChecked();
CHECK(env->Global()->Set(env.local(), v8_str("obj"), obj).FromJust());
obj->SetLazyDataProperty(context, v8_str("foo"), Getter).ToChecked();
- CHECK(v8::debug::EvaluateGlobal(isolate, v8_str("obj.foo"), true).IsEmpty());
+ CHECK(v8::debug::EvaluateGlobal(
+ isolate, v8_str("obj.foo"),
+ v8::debug::EvaluateGlobalMode::kDisableBreaksAndThrowOnSideEffect)
+ .IsEmpty());
obj->SetLazyDataProperty(context, v8_str("foo"), Getter,
v8::Local<v8::Value>(), v8::PropertyAttribute::None,
v8::SideEffectType::kHasNoSideEffect)
.ToChecked();
- v8::debug::EvaluateGlobal(isolate, v8_str("obj.foo"), true).ToLocalChecked();
+ v8::debug::EvaluateGlobal(
+ isolate, v8_str("obj.foo"),
+ v8::debug::EvaluateGlobalMode::kDisableBreaksAndThrowOnSideEffect)
+ .ToLocalChecked();
// Check that the setter is not whitelisted.
v8::TryCatch try_catch(isolate);
- CHECK(v8::debug::EvaluateGlobal(isolate, v8_str("obj.foo = 1"), true)
+ CHECK(v8::debug::EvaluateGlobal(
+ isolate, v8_str("obj.foo = 1"),
+ v8::debug::EvaluateGlobalMode::kDisableBreaksAndThrowOnSideEffect)
.IsEmpty());
CHECK(try_catch.HasCaught());
- CHECK_NE(1, v8::debug::EvaluateGlobal(isolate, v8_str("obj.foo"), false)
+ CHECK_NE(1, v8::debug::EvaluateGlobal(isolate, v8_str("obj.foo"),
+ v8::debug::EvaluateGlobalMode::kDefault)
.ToLocalChecked()
->Int32Value(env.local())
.FromJust());
@@ -440,15 +475,24 @@ TEST(ObjectTemplateSetAccessorHasNoSideEffect) {
v8::Local<v8::Object> obj = templ->NewInstance(env.local()).ToLocalChecked();
CHECK(env->Global()->Set(env.local(), v8_str("obj"), obj).FromJust());
- CHECK(v8::debug::EvaluateGlobal(isolate, v8_str("obj.foo"), true).IsEmpty());
- v8::debug::EvaluateGlobal(isolate, v8_str("obj.foo2"), true).ToLocalChecked();
+ CHECK(v8::debug::EvaluateGlobal(
+ isolate, v8_str("obj.foo"),
+ v8::debug::EvaluateGlobalMode::kDisableBreaksAndThrowOnSideEffect)
+ .IsEmpty());
+ v8::debug::EvaluateGlobal(
+ isolate, v8_str("obj.foo2"),
+ v8::debug::EvaluateGlobalMode::kDisableBreaksAndThrowOnSideEffect)
+ .ToLocalChecked();
// Check that the setter is not whitelisted.
v8::TryCatch try_catch(isolate);
- CHECK(v8::debug::EvaluateGlobal(isolate, v8_str("obj.foo2 = 1"), true)
+ CHECK(v8::debug::EvaluateGlobal(
+ isolate, v8_str("obj.foo2 = 1"),
+ v8::debug::EvaluateGlobalMode::kDisableBreaksAndThrowOnSideEffect)
.IsEmpty());
CHECK(try_catch.HasCaught());
- CHECK_NE(1, v8::debug::EvaluateGlobal(isolate, v8_str("obj.foo2"), false)
+ CHECK_NE(1, v8::debug::EvaluateGlobal(isolate, v8_str("obj.foo2"),
+ v8::debug::EvaluateGlobalMode::kDefault)
.ToLocalChecked()
->Int32Value(env.local())
.FromJust());
@@ -468,15 +512,24 @@ TEST(ObjectTemplateSetNativePropertyHasNoSideEffect) {
v8::Local<v8::Object> obj = templ->NewInstance(env.local()).ToLocalChecked();
CHECK(env->Global()->Set(env.local(), v8_str("obj"), obj).FromJust());
- CHECK(v8::debug::EvaluateGlobal(isolate, v8_str("obj.foo"), true).IsEmpty());
- v8::debug::EvaluateGlobal(isolate, v8_str("obj.foo2"), true).ToLocalChecked();
+ CHECK(v8::debug::EvaluateGlobal(
+ isolate, v8_str("obj.foo"),
+ v8::debug::EvaluateGlobalMode::kDisableBreaksAndThrowOnSideEffect)
+ .IsEmpty());
+ v8::debug::EvaluateGlobal(
+ isolate, v8_str("obj.foo2"),
+ v8::debug::EvaluateGlobalMode::kDisableBreaksAndThrowOnSideEffect)
+ .ToLocalChecked();
// Check that the setter is not whitelisted.
v8::TryCatch try_catch(isolate);
- CHECK(v8::debug::EvaluateGlobal(isolate, v8_str("obj.foo2 = 1"), true)
+ CHECK(v8::debug::EvaluateGlobal(
+ isolate, v8_str("obj.foo2 = 1"),
+ v8::debug::EvaluateGlobalMode::kDisableBreaksAndThrowOnSideEffect)
.IsEmpty());
CHECK(try_catch.HasCaught());
- CHECK_NE(1, v8::debug::EvaluateGlobal(isolate, v8_str("obj.foo2"), false)
+ CHECK_NE(1, v8::debug::EvaluateGlobal(isolate, v8_str("obj.foo2"),
+ v8::debug::EvaluateGlobalMode::kDefault)
.ToLocalChecked()
->Int32Value(env.local())
.FromJust());
@@ -495,15 +548,24 @@ TEST(ObjectTemplateSetLazyPropertyHasNoSideEffect) {
v8::Local<v8::Object> obj = templ->NewInstance(env.local()).ToLocalChecked();
CHECK(env->Global()->Set(env.local(), v8_str("obj"), obj).FromJust());
- CHECK(v8::debug::EvaluateGlobal(isolate, v8_str("obj.foo"), true).IsEmpty());
- v8::debug::EvaluateGlobal(isolate, v8_str("obj.foo2"), true).ToLocalChecked();
+ CHECK(v8::debug::EvaluateGlobal(
+ isolate, v8_str("obj.foo"),
+ v8::debug::EvaluateGlobalMode::kDisableBreaksAndThrowOnSideEffect)
+ .IsEmpty());
+ v8::debug::EvaluateGlobal(
+ isolate, v8_str("obj.foo2"),
+ v8::debug::EvaluateGlobalMode::kDisableBreaksAndThrowOnSideEffect)
+ .ToLocalChecked();
// Check that the setter is not whitelisted.
v8::TryCatch try_catch(isolate);
- CHECK(v8::debug::EvaluateGlobal(isolate, v8_str("obj.foo2 = 1"), true)
+ CHECK(v8::debug::EvaluateGlobal(
+ isolate, v8_str("obj.foo2 = 1"),
+ v8::debug::EvaluateGlobalMode::kDisableBreaksAndThrowOnSideEffect)
.IsEmpty());
CHECK(try_catch.HasCaught());
- CHECK_NE(1, v8::debug::EvaluateGlobal(isolate, v8_str("obj.foo2"), false)
+ CHECK_NE(1, v8::debug::EvaluateGlobal(isolate, v8_str("obj.foo2"),
+ v8::debug::EvaluateGlobalMode::kDefault)
.ToLocalChecked()
->Int32Value(env.local())
.FromJust());
diff --git a/deps/v8/test/cctest/test-api-array-buffer.cc b/deps/v8/test/cctest/test-api-array-buffer.cc
index 5b8433a6a2..508604aa41 100644
--- a/deps/v8/test/cctest/test-api-array-buffer.cc
+++ b/deps/v8/test/cctest/test-api-array-buffer.cc
@@ -13,43 +13,6 @@ using ::v8::Value;
namespace {
-class ScopedArrayBufferContents {
- public:
- explicit ScopedArrayBufferContents(const v8::ArrayBuffer::Contents& contents)
- : contents_(contents) {}
- ~ScopedArrayBufferContents() { free(contents_.AllocationBase()); }
- void* Data() const { return contents_.Data(); }
- size_t ByteLength() const { return contents_.ByteLength(); }
-
- void* AllocationBase() const { return contents_.AllocationBase(); }
- size_t AllocationLength() const { return contents_.AllocationLength(); }
- v8::ArrayBuffer::Allocator::AllocationMode AllocationMode() const {
- return contents_.AllocationMode();
- }
-
- private:
- const v8::ArrayBuffer::Contents contents_;
-};
-
-class ScopedSharedArrayBufferContents {
- public:
- explicit ScopedSharedArrayBufferContents(
- const v8::SharedArrayBuffer::Contents& contents)
- : contents_(contents) {}
- ~ScopedSharedArrayBufferContents() { free(contents_.AllocationBase()); }
- void* Data() const { return contents_.Data(); }
- size_t ByteLength() const { return contents_.ByteLength(); }
-
- void* AllocationBase() const { return contents_.AllocationBase(); }
- size_t AllocationLength() const { return contents_.AllocationLength(); }
- v8::ArrayBuffer::Allocator::AllocationMode AllocationMode() const {
- return contents_.AllocationMode();
- }
-
- private:
- const v8::SharedArrayBuffer::Contents contents_;
-};
-
void CheckDataViewIsDetached(v8::Local<v8::DataView> dv) {
CHECK_EQ(0, static_cast<int>(dv->ByteLength()));
CHECK_EQ(0, static_cast<int>(dv->ByteOffset()));
@@ -83,6 +46,20 @@ Local<TypedArray> CreateAndCheck(Local<v8::ArrayBuffer> ab, int byteOffset,
return ta;
}
+std::shared_ptr<v8::BackingStore> Externalize(Local<v8::ArrayBuffer> ab) {
+ std::shared_ptr<v8::BackingStore> backing_store = ab->GetBackingStore();
+ ab->Externalize(backing_store);
+ CHECK(ab->IsExternal());
+ return backing_store;
+}
+
+std::shared_ptr<v8::BackingStore> Externalize(Local<v8::SharedArrayBuffer> ab) {
+ std::shared_ptr<v8::BackingStore> backing_store = ab->GetBackingStore();
+ ab->Externalize(backing_store);
+ CHECK(ab->IsExternal());
+ return backing_store;
+}
+
} // namespace
THREADED_TEST(ArrayBuffer_ApiInternalToExternal) {
@@ -92,15 +69,14 @@ THREADED_TEST(ArrayBuffer_ApiInternalToExternal) {
Local<v8::ArrayBuffer> ab = v8::ArrayBuffer::New(isolate, 1024);
CheckInternalFieldsAreZero(ab);
- CHECK_EQ(1024, static_cast<int>(ab->ByteLength()));
+ CHECK_EQ(1024, ab->ByteLength());
CHECK(!ab->IsExternal());
CcTest::CollectAllGarbage();
- ScopedArrayBufferContents ab_contents(ab->Externalize());
- CHECK(ab->IsExternal());
+ std::shared_ptr<v8::BackingStore> backing_store = Externalize(ab);
+ CHECK_EQ(1024, backing_store->ByteLength());
- CHECK_EQ(1024, static_cast<int>(ab_contents.ByteLength()));
- uint8_t* data = static_cast<uint8_t*>(ab_contents.Data());
+ uint8_t* data = static_cast<uint8_t*>(backing_store->Data());
CHECK_NOT_NULL(data);
CHECK(env->Global()->Set(env.local(), v8_str("ab"), ab).FromJust());
@@ -133,10 +109,9 @@ THREADED_TEST(ArrayBuffer_JSInternalToExternal) {
"u8_a[1] = 0xFF; u8_a.buffer");
Local<v8::ArrayBuffer> ab1 = Local<v8::ArrayBuffer>::Cast(result);
CheckInternalFieldsAreZero(ab1);
- CHECK_EQ(2, static_cast<int>(ab1->ByteLength()));
+ CHECK_EQ(2, ab1->ByteLength());
CHECK(!ab1->IsExternal());
- ScopedArrayBufferContents ab1_contents(ab1->Externalize());
- CHECK(ab1->IsExternal());
+ std::shared_ptr<v8::BackingStore> backing_store = Externalize(ab1);
result = CompileRun("ab1.byteLength");
CHECK_EQ(2, result->Int32Value(env.local()).FromJust());
@@ -152,8 +127,8 @@ THREADED_TEST(ArrayBuffer_JSInternalToExternal) {
result = CompileRun("u8_b[1]");
CHECK_EQ(0xFF, result->Int32Value(env.local()).FromJust());
- CHECK_EQ(2, static_cast<int>(ab1_contents.ByteLength()));
- uint8_t* ab1_data = static_cast<uint8_t*>(ab1_contents.Data());
+ CHECK_EQ(2, backing_store->ByteLength());
+ uint8_t* ab1_data = static_cast<uint8_t*>(backing_store->Data());
CHECK_EQ(0xBB, ab1_data[0]);
CHECK_EQ(0xFF, ab1_data[1]);
ab1_data[0] = 0xCC;
@@ -172,7 +147,7 @@ THREADED_TEST(ArrayBuffer_External) {
Local<v8::ArrayBuffer> ab3 =
v8::ArrayBuffer::New(isolate, my_data.begin(), 100);
CheckInternalFieldsAreZero(ab3);
- CHECK_EQ(100, static_cast<int>(ab3->ByteLength()));
+ CHECK_EQ(100, ab3->ByteLength());
CHECK(ab3->IsExternal());
CHECK(env->Global()->Set(env.local(), v8_str("ab3"), ab3).FromJust());
@@ -242,12 +217,12 @@ THREADED_TEST(ArrayBuffer_DetachingApi) {
v8::Local<v8::DataView> dv = v8::DataView::New(buffer, 1, 1023);
CheckInternalFieldsAreZero<v8::ArrayBufferView>(dv);
- CHECK_EQ(1, static_cast<int>(dv->ByteOffset()));
- CHECK_EQ(1023, static_cast<int>(dv->ByteLength()));
+ CHECK_EQ(1, dv->ByteOffset());
+ CHECK_EQ(1023, dv->ByteLength());
- ScopedArrayBufferContents contents(buffer->Externalize());
+ Externalize(buffer);
buffer->Detach();
- CHECK_EQ(0, static_cast<int>(buffer->ByteLength()));
+ CHECK_EQ(0, buffer->ByteLength());
CheckIsDetached(u8a);
CheckIsDetached(u8c);
CheckIsDetached(i8a);
@@ -283,9 +258,9 @@ THREADED_TEST(ArrayBuffer_DetachingScript) {
v8::Local<v8::DataView> dv = v8::Local<v8::DataView>::Cast(CompileRun("dv"));
- ScopedArrayBufferContents contents(ab->Externalize());
+ Externalize(ab);
ab->Detach();
- CHECK_EQ(0, static_cast<int>(ab->ByteLength()));
+ CHECK_EQ(0, ab->ByteLength());
CHECK_EQ(0, v8_run_int32value(v8_compile("ab.byteLength")));
CheckIsTypedArrayVarDetached("u8a");
@@ -302,6 +277,7 @@ THREADED_TEST(ArrayBuffer_DetachingScript) {
CheckDataViewIsDetached(dv);
}
+// TODO(v8:9380) the Contents data structure should be deprecated.
THREADED_TEST(ArrayBuffer_AllocationInformation) {
LocalContext env;
v8::Isolate* isolate = env->GetIsolate();
@@ -309,7 +285,7 @@ THREADED_TEST(ArrayBuffer_AllocationInformation) {
const size_t ab_size = 1024;
Local<v8::ArrayBuffer> ab = v8::ArrayBuffer::New(isolate, ab_size);
- ScopedArrayBufferContents contents(ab->Externalize());
+ v8::ArrayBuffer::Contents contents(ab->GetContents());
// Array buffers should have normal allocation mode.
CHECK_EQ(contents.AllocationMode(),
@@ -329,13 +305,13 @@ THREADED_TEST(ArrayBuffer_ExternalizeEmpty) {
v8::Isolate* isolate = env->GetIsolate();
v8::HandleScope handle_scope(isolate);
- Local<v8::ArrayBuffer> ab = v8::ArrayBuffer::New(isolate, 0);
+ Local<v8::ArrayBuffer> ab = v8::ArrayBuffer::New(isolate, 2);
CheckInternalFieldsAreZero(ab);
- CHECK_EQ(0, static_cast<int>(ab->ByteLength()));
+ CHECK_EQ(2, ab->ByteLength());
CHECK(!ab->IsExternal());
// Externalize the buffer (taking ownership of the backing store memory).
- ScopedArrayBufferContents ab_contents(ab->Externalize());
+ std::shared_ptr<v8::BackingStore> backing_store = Externalize(ab);
Local<v8::Uint8Array> u8a = v8::Uint8Array::New(ab, 0, 0);
// Calling Buffer() will materialize the ArrayBuffer (transitioning it from
@@ -344,6 +320,7 @@ THREADED_TEST(ArrayBuffer_ExternalizeEmpty) {
USE(u8a->Buffer());
CHECK(ab->IsExternal());
+ CHECK_EQ(2, backing_store->ByteLength());
}
THREADED_TEST(SharedArrayBuffer_ApiInternalToExternal) {
@@ -354,15 +331,14 @@ THREADED_TEST(SharedArrayBuffer_ApiInternalToExternal) {
Local<v8::SharedArrayBuffer> ab = v8::SharedArrayBuffer::New(isolate, 1024);
CheckInternalFieldsAreZero(ab);
- CHECK_EQ(1024, static_cast<int>(ab->ByteLength()));
+ CHECK_EQ(1024, ab->ByteLength());
CHECK(!ab->IsExternal());
CcTest::CollectAllGarbage();
- ScopedSharedArrayBufferContents ab_contents(ab->Externalize());
- CHECK(ab->IsExternal());
+ std::shared_ptr<v8::BackingStore> backing_store = Externalize(ab);
- CHECK_EQ(1024, static_cast<int>(ab_contents.ByteLength()));
- uint8_t* data = static_cast<uint8_t*>(ab_contents.Data());
+ CHECK_EQ(1024, backing_store->ByteLength());
+ uint8_t* data = static_cast<uint8_t*>(backing_store->Data());
CHECK_NOT_NULL(data);
CHECK(env->Global()->Set(env.local(), v8_str("ab"), ab).FromJust());
@@ -383,6 +359,35 @@ THREADED_TEST(SharedArrayBuffer_ApiInternalToExternal) {
CHECK_EQ(0xDD, result->Int32Value(env.local()).FromJust());
}
+THREADED_TEST(ArrayBuffer_ExternalReused) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope handle_scope(isolate);
+
+ i::ScopedVector<uint8_t> data(100);
+ Local<v8::ArrayBuffer> ab1 = v8::ArrayBuffer::New(isolate, data.begin(), 100);
+ std::shared_ptr<v8::BackingStore> bs1 = ab1->GetBackingStore();
+ ab1->Detach();
+ Local<v8::ArrayBuffer> ab2 = v8::ArrayBuffer::New(isolate, data.begin(), 100);
+ std::shared_ptr<v8::BackingStore> bs2 = ab2->GetBackingStore();
+ CHECK_EQ(bs1->Data(), bs2->Data());
+}
+
+THREADED_TEST(SharedArrayBuffer_ExternalReused) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope handle_scope(isolate);
+
+ i::ScopedVector<uint8_t> data(100);
+ Local<v8::SharedArrayBuffer> ab1 =
+ v8::SharedArrayBuffer::New(isolate, data.begin(), 100);
+ std::shared_ptr<v8::BackingStore> bs1 = ab1->GetBackingStore();
+ Local<v8::SharedArrayBuffer> ab2 =
+ v8::SharedArrayBuffer::New(isolate, data.begin(), 100);
+ std::shared_ptr<v8::BackingStore> bs2 = ab2->GetBackingStore();
+ CHECK_EQ(bs1->Data(), bs2->Data());
+}
+
THREADED_TEST(SharedArrayBuffer_JSInternalToExternal) {
i::FLAG_harmony_sharedarraybuffer = true;
LocalContext env;
@@ -396,10 +401,9 @@ THREADED_TEST(SharedArrayBuffer_JSInternalToExternal) {
"u8_a[1] = 0xFF; u8_a.buffer");
Local<v8::SharedArrayBuffer> ab1 = Local<v8::SharedArrayBuffer>::Cast(result);
CheckInternalFieldsAreZero(ab1);
- CHECK_EQ(2, static_cast<int>(ab1->ByteLength()));
+ CHECK_EQ(2, ab1->ByteLength());
CHECK(!ab1->IsExternal());
- ScopedSharedArrayBufferContents ab1_contents(ab1->Externalize());
- CHECK(ab1->IsExternal());
+ std::shared_ptr<v8::BackingStore> backing_store = Externalize(ab1);
result = CompileRun("ab1.byteLength");
CHECK_EQ(2, result->Int32Value(env.local()).FromJust());
@@ -415,8 +419,8 @@ THREADED_TEST(SharedArrayBuffer_JSInternalToExternal) {
result = CompileRun("u8_b[1]");
CHECK_EQ(0xFF, result->Int32Value(env.local()).FromJust());
- CHECK_EQ(2, static_cast<int>(ab1_contents.ByteLength()));
- uint8_t* ab1_data = static_cast<uint8_t*>(ab1_contents.Data());
+ CHECK_EQ(2, backing_store->ByteLength());
+ uint8_t* ab1_data = static_cast<uint8_t*>(backing_store->Data());
CHECK_EQ(0xBB, ab1_data[0]);
CHECK_EQ(0xFF, ab1_data[1]);
ab1_data[0] = 0xCC;
@@ -458,6 +462,7 @@ THREADED_TEST(SharedArrayBuffer_External) {
CHECK_EQ(0xDD, result->Int32Value(env.local()).FromJust());
}
+// TODO(v8:9380) the Contents data structure should be deprecated.
THREADED_TEST(SharedArrayBuffer_AllocationInformation) {
i::FLAG_harmony_sharedarraybuffer = true;
LocalContext env;
@@ -467,7 +472,7 @@ THREADED_TEST(SharedArrayBuffer_AllocationInformation) {
const size_t ab_size = 1024;
Local<v8::SharedArrayBuffer> ab =
v8::SharedArrayBuffer::New(isolate, ab_size);
- ScopedSharedArrayBufferContents contents(ab->Externalize());
+ v8::SharedArrayBuffer::Contents contents(ab->GetContents());
// Array buffers should have normal allocation mode.
CHECK_EQ(contents.AllocationMode(),
@@ -500,7 +505,7 @@ THREADED_TEST(SkipArrayBufferBackingStoreDuringGC) {
CcTest::CollectAllGarbage();
// Should not move the pointer
- CHECK_EQ(ab->GetContents().Data(), store_ptr);
+ CHECK_EQ(ab->GetBackingStore()->Data(), store_ptr);
}
THREADED_TEST(SkipArrayBufferDuringScavenge) {
@@ -525,5 +530,16 @@ THREADED_TEST(SkipArrayBufferDuringScavenge) {
CcTest::CollectGarbage(i::NEW_SPACE); // in old gen now
// Use `ab` to silence compiler warning
- CHECK_EQ(ab->GetContents().Data(), store_ptr);
+ CHECK_EQ(ab->GetBackingStore()->Data(), store_ptr);
+}
+
+THREADED_TEST(Regress1006600) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope handle_scope(isolate);
+
+ Local<v8::Value> ab = CompileRunChecked(isolate, "new ArrayBuffer()");
+ for (int i = 0; i < v8::ArrayBuffer::kEmbedderFieldCount; i++) {
+ CHECK_NULL(ab.As<v8::Object>()->GetAlignedPointerFromInternalField(i));
+ }
}
diff --git a/deps/v8/test/cctest/test-api-interceptors.cc b/deps/v8/test/cctest/test-api-interceptors.cc
index e331d1a26a..cbf9f75be5 100644
--- a/deps/v8/test/cctest/test-api-interceptors.cc
+++ b/deps/v8/test/cctest/test-api-interceptors.cc
@@ -2712,16 +2712,26 @@ THREADED_TEST(NoSideEffectPropertyHandler) {
templ->NewInstance(context.local()).ToLocalChecked();
context->Global()->Set(context.local(), v8_str("obj"), object).FromJust();
- CHECK(v8::debug::EvaluateGlobal(isolate, v8_str("obj.x"), true).IsEmpty());
- CHECK(
- v8::debug::EvaluateGlobal(isolate, v8_str("obj.x = 1"), true).IsEmpty());
- CHECK(
- v8::debug::EvaluateGlobal(isolate, v8_str("'x' in obj"), true).IsEmpty());
- CHECK(v8::debug::EvaluateGlobal(isolate, v8_str("delete obj.x"), true)
+ CHECK(v8::debug::EvaluateGlobal(
+ isolate, v8_str("obj.x"),
+ v8::debug::EvaluateGlobalMode::kDisableBreaksAndThrowOnSideEffect)
+ .IsEmpty());
+ CHECK(v8::debug::EvaluateGlobal(
+ isolate, v8_str("obj.x = 1"),
+ v8::debug::EvaluateGlobalMode::kDisableBreaksAndThrowOnSideEffect)
+ .IsEmpty());
+ CHECK(v8::debug::EvaluateGlobal(
+ isolate, v8_str("'x' in obj"),
+ v8::debug::EvaluateGlobalMode::kDisableBreaksAndThrowOnSideEffect)
+ .IsEmpty());
+ CHECK(v8::debug::EvaluateGlobal(
+ isolate, v8_str("delete obj.x"),
+ v8::debug::EvaluateGlobalMode::kDisableBreaksAndThrowOnSideEffect)
.IsEmpty());
// Wrap the variable declaration since declaring globals is a side effect.
CHECK(v8::debug::EvaluateGlobal(
- isolate, v8_str("(function() { for (var p in obj) ; })()"), true)
+ isolate, v8_str("(function() { for (var p in obj) ; })()"),
+ v8::debug::EvaluateGlobalMode::kDisableBreaksAndThrowOnSideEffect)
.IsEmpty());
// Side-effect-free version.
@@ -2734,15 +2744,25 @@ THREADED_TEST(NoSideEffectPropertyHandler) {
templ2->NewInstance(context.local()).ToLocalChecked();
context->Global()->Set(context.local(), v8_str("obj2"), object2).FromJust();
- v8::debug::EvaluateGlobal(isolate, v8_str("obj2.x"), true).ToLocalChecked();
- CHECK(
- v8::debug::EvaluateGlobal(isolate, v8_str("obj2.x = 1"), true).IsEmpty());
- v8::debug::EvaluateGlobal(isolate, v8_str("'x' in obj2"), true)
+ v8::debug::EvaluateGlobal(
+ isolate, v8_str("obj2.x"),
+ v8::debug::EvaluateGlobalMode::kDisableBreaksAndThrowOnSideEffect)
+ .ToLocalChecked();
+ CHECK(v8::debug::EvaluateGlobal(
+ isolate, v8_str("obj2.x = 1"),
+ v8::debug::EvaluateGlobalMode::kDisableBreaksAndThrowOnSideEffect)
+ .IsEmpty());
+ v8::debug::EvaluateGlobal(
+ isolate, v8_str("'x' in obj2"),
+ v8::debug::EvaluateGlobalMode::kDisableBreaksAndThrowOnSideEffect)
.ToLocalChecked();
- CHECK(v8::debug::EvaluateGlobal(isolate, v8_str("delete obj2.x"), true)
+ CHECK(v8::debug::EvaluateGlobal(
+ isolate, v8_str("delete obj2.x"),
+ v8::debug::EvaluateGlobalMode::kDisableBreaksAndThrowOnSideEffect)
.IsEmpty());
v8::debug::EvaluateGlobal(
- isolate, v8_str("(function() { for (var p in obj2) ; })()"), true)
+ isolate, v8_str("(function() { for (var p in obj2) ; })()"),
+ v8::debug::EvaluateGlobalMode::kDisableBreaksAndThrowOnSideEffect)
.ToLocalChecked();
}
diff --git a/deps/v8/test/cctest/test-api.cc b/deps/v8/test/cctest/test-api.cc
index 345ee0bfc9..1daa19402e 100644
--- a/deps/v8/test/cctest/test-api.cc
+++ b/deps/v8/test/cctest/test-api.cc
@@ -46,6 +46,7 @@
#include "src/execution/arguments.h"
#include "src/execution/execution.h"
#include "src/execution/futex-emulation.h"
+#include "src/execution/protectors-inl.h"
#include "src/execution/vm-state.h"
#include "src/handles/global-handles.h"
#include "src/heap/heap-inl.h"
@@ -2955,11 +2956,8 @@ THREADED_TEST(SetAlignedPointerInInternalFields) {
obj->SetAlignedPointerInInternalFields(2, indices, values);
CcTest::CollectAllGarbage();
- {
- v8::SealHandleScope no_handle_leak(isolate);
- CHECK_EQ(heap_allocated_1, obj->GetAlignedPointerFromInternalField(0));
- CHECK_EQ(heap_allocated_2, obj->GetAlignedPointerFromInternalField(1));
- }
+ CHECK_EQ(heap_allocated_1, obj->GetAlignedPointerFromInternalField(0));
+ CHECK_EQ(heap_allocated_2, obj->GetAlignedPointerFromInternalField(1));
indices[0] = 1;
indices[1] = 0;
@@ -3012,7 +3010,6 @@ THREADED_TEST(EmbedderDataAlignedPointers) {
}
CcTest::CollectAllGarbage();
for (int i = 0; i < 100; i++) {
- v8::SealHandleScope no_handle_leak(env->GetIsolate());
CHECK_EQ(AlignedTestPointer(i), env->GetAlignedPointerFromEmbedderData(i));
}
}
@@ -7064,7 +7061,7 @@ static const char* kSimpleExtensionSource =
TEST(SimpleExtensions) {
v8::HandleScope handle_scope(CcTest::isolate());
v8::RegisterExtension(
- v8::base::make_unique<Extension>("simpletest", kSimpleExtensionSource));
+ std::make_unique<Extension>("simpletest", kSimpleExtensionSource));
const char* extension_names[] = {"simpletest"};
v8::ExtensionConfiguration extensions(1, extension_names);
v8::Local<Context> context = Context::New(CcTest::isolate(), &extensions);
@@ -7086,7 +7083,7 @@ static const char* kStackTraceFromExtensionSource =
TEST(StackTraceInExtension) {
v8::HandleScope handle_scope(CcTest::isolate());
- v8::RegisterExtension(v8::base::make_unique<Extension>(
+ v8::RegisterExtension(std::make_unique<Extension>(
"stacktracetest", kStackTraceFromExtensionSource));
const char* extension_names[] = {"stacktracetest"};
v8::ExtensionConfiguration extensions(1, extension_names);
@@ -7104,7 +7101,7 @@ TEST(StackTraceInExtension) {
TEST(NullExtensions) {
v8::HandleScope handle_scope(CcTest::isolate());
- v8::RegisterExtension(v8::base::make_unique<Extension>("nulltest", nullptr));
+ v8::RegisterExtension(std::make_unique<Extension>("nulltest", nullptr));
const char* extension_names[] = {"nulltest"};
v8::ExtensionConfiguration extensions(1, extension_names);
v8::Local<Context> context = Context::New(CcTest::isolate(), &extensions);
@@ -7122,8 +7119,8 @@ static const int kEmbeddedExtensionSourceValidLen = 34;
TEST(ExtensionMissingSourceLength) {
v8::HandleScope handle_scope(CcTest::isolate());
- v8::RegisterExtension(v8::base::make_unique<Extension>(
- "srclentest_fail", kEmbeddedExtensionSource));
+ v8::RegisterExtension(
+ std::make_unique<Extension>("srclentest_fail", kEmbeddedExtensionSource));
const char* extension_names[] = {"srclentest_fail"};
v8::ExtensionConfiguration extensions(1, extension_names);
v8::Local<Context> context = Context::New(CcTest::isolate(), &extensions);
@@ -7137,9 +7134,9 @@ TEST(ExtensionWithSourceLength) {
v8::HandleScope handle_scope(CcTest::isolate());
i::ScopedVector<char> extension_name(32);
i::SNPrintF(extension_name, "ext #%d", source_len);
- v8::RegisterExtension(v8::base::make_unique<Extension>(
- extension_name.begin(), kEmbeddedExtensionSource, 0, nullptr,
- source_len));
+ v8::RegisterExtension(std::make_unique<Extension>(extension_name.begin(),
+ kEmbeddedExtensionSource,
+ 0, nullptr, source_len));
const char* extension_names[1] = {extension_name.begin()};
v8::ExtensionConfiguration extensions(1, extension_names);
v8::Local<Context> context = Context::New(CcTest::isolate(), &extensions);
@@ -7177,9 +7174,9 @@ static const char* kEvalExtensionSource2 =
TEST(UseEvalFromExtension) {
v8::HandleScope handle_scope(CcTest::isolate());
v8::RegisterExtension(
- v8::base::make_unique<Extension>("evaltest1", kEvalExtensionSource1));
+ std::make_unique<Extension>("evaltest1", kEvalExtensionSource1));
v8::RegisterExtension(
- v8::base::make_unique<Extension>("evaltest2", kEvalExtensionSource2));
+ std::make_unique<Extension>("evaltest2", kEvalExtensionSource2));
const char* extension_names[] = {"evaltest1", "evaltest2"};
v8::ExtensionConfiguration extensions(2, extension_names);
v8::Local<Context> context = Context::New(CcTest::isolate(), &extensions);
@@ -7213,9 +7210,9 @@ static const char* kWithExtensionSource2 =
TEST(UseWithFromExtension) {
v8::HandleScope handle_scope(CcTest::isolate());
v8::RegisterExtension(
- v8::base::make_unique<Extension>("withtest1", kWithExtensionSource1));
+ std::make_unique<Extension>("withtest1", kWithExtensionSource1));
v8::RegisterExtension(
- v8::base::make_unique<Extension>("withtest2", kWithExtensionSource2));
+ std::make_unique<Extension>("withtest2", kWithExtensionSource2));
const char* extension_names[] = {"withtest1", "withtest2"};
v8::ExtensionConfiguration extensions(2, extension_names);
v8::Local<Context> context = Context::New(CcTest::isolate(), &extensions);
@@ -7232,7 +7229,7 @@ TEST(UseWithFromExtension) {
TEST(AutoExtensions) {
v8::HandleScope handle_scope(CcTest::isolate());
auto extension =
- v8::base::make_unique<Extension>("autotest", kSimpleExtensionSource);
+ std::make_unique<Extension>("autotest", kSimpleExtensionSource);
extension->set_auto_enable(true);
v8::RegisterExtension(std::move(extension));
v8::Local<Context> context = Context::New(CcTest::isolate());
@@ -7250,7 +7247,7 @@ static const char* kSyntaxErrorInExtensionSource = "[";
// error but results in an empty context.
TEST(SyntaxErrorExtensions) {
v8::HandleScope handle_scope(CcTest::isolate());
- v8::RegisterExtension(v8::base::make_unique<Extension>(
+ v8::RegisterExtension(std::make_unique<Extension>(
"syntaxerror", kSyntaxErrorInExtensionSource));
const char* extension_names[] = {"syntaxerror"};
v8::ExtensionConfiguration extensions(1, extension_names);
@@ -7266,8 +7263,8 @@ static const char* kExceptionInExtensionSource = "throw 42";
// a fatal error but results in an empty context.
TEST(ExceptionExtensions) {
v8::HandleScope handle_scope(CcTest::isolate());
- v8::RegisterExtension(v8::base::make_unique<Extension>(
- "exception", kExceptionInExtensionSource));
+ v8::RegisterExtension(
+ std::make_unique<Extension>("exception", kExceptionInExtensionSource));
const char* extension_names[] = {"exception"};
v8::ExtensionConfiguration extensions(1, extension_names);
v8::Local<Context> context = Context::New(CcTest::isolate(), &extensions);
@@ -7285,8 +7282,8 @@ static const char* kNativeCallTest =
// Test that native runtime calls are supported in extensions.
TEST(NativeCallInExtensions) {
v8::HandleScope handle_scope(CcTest::isolate());
- v8::RegisterExtension(v8::base::make_unique<Extension>(
- "nativecall", kNativeCallInExtensionSource));
+ v8::RegisterExtension(
+ std::make_unique<Extension>("nativecall", kNativeCallInExtensionSource));
const char* extension_names[] = {"nativecall"};
v8::ExtensionConfiguration extensions(1, extension_names);
v8::Local<Context> context = Context::New(CcTest::isolate(), &extensions);
@@ -7320,7 +7317,7 @@ class NativeFunctionExtension : public Extension {
TEST(NativeFunctionDeclaration) {
v8::HandleScope handle_scope(CcTest::isolate());
const char* name = "nativedecl";
- v8::RegisterExtension(v8::base::make_unique<NativeFunctionExtension>(
+ v8::RegisterExtension(std::make_unique<NativeFunctionExtension>(
name, "native function foo();"));
const char* extension_names[] = {name};
v8::ExtensionConfiguration extensions(1, extension_names);
@@ -7336,7 +7333,7 @@ TEST(NativeFunctionDeclarationError) {
v8::HandleScope handle_scope(CcTest::isolate());
const char* name = "nativedeclerr";
// Syntax error in extension code.
- v8::RegisterExtension(v8::base::make_unique<NativeFunctionExtension>(
+ v8::RegisterExtension(std::make_unique<NativeFunctionExtension>(
name, "native\nfunction foo();"));
const char* extension_names[] = {name};
v8::ExtensionConfiguration extensions(1, extension_names);
@@ -7350,7 +7347,7 @@ TEST(NativeFunctionDeclarationErrorEscape) {
const char* name = "nativedeclerresc";
// Syntax error in extension code - escape code in "native" means that
// it's not treated as a keyword.
- v8::RegisterExtension(v8::base::make_unique<NativeFunctionExtension>(
+ v8::RegisterExtension(std::make_unique<NativeFunctionExtension>(
name, "nativ\\u0065 function foo();"));
const char* extension_names[] = {name};
v8::ExtensionConfiguration extensions(1, extension_names);
@@ -7382,17 +7379,17 @@ static void CheckDependencies(const char* name, const char* expected) {
THREADED_TEST(ExtensionDependency) {
static const char* kEDeps[] = {"D"};
v8::RegisterExtension(
- v8::base::make_unique<Extension>("E", "this.loaded += 'E';", 1, kEDeps));
+ std::make_unique<Extension>("E", "this.loaded += 'E';", 1, kEDeps));
static const char* kDDeps[] = {"B", "C"};
v8::RegisterExtension(
- v8::base::make_unique<Extension>("D", "this.loaded += 'D';", 2, kDDeps));
+ std::make_unique<Extension>("D", "this.loaded += 'D';", 2, kDDeps));
static const char* kBCDeps[] = {"A"};
v8::RegisterExtension(
- v8::base::make_unique<Extension>("B", "this.loaded += 'B';", 1, kBCDeps));
+ std::make_unique<Extension>("B", "this.loaded += 'B';", 1, kBCDeps));
v8::RegisterExtension(
- v8::base::make_unique<Extension>("C", "this.loaded += 'C';", 1, kBCDeps));
+ std::make_unique<Extension>("C", "this.loaded += 'C';", 1, kBCDeps));
v8::RegisterExtension(
- v8::base::make_unique<Extension>("A", "this.loaded += 'A';"));
+ std::make_unique<Extension>("A", "this.loaded += 'A';"));
CheckDependencies("A", "undefinedA");
CheckDependencies("B", "undefinedAB");
CheckDependencies("C", "undefinedAC");
@@ -7464,7 +7461,7 @@ v8::Local<v8::FunctionTemplate> FunctionExtension::GetNativeFunctionTemplate(
THREADED_TEST(FunctionLookup) {
- v8::RegisterExtension(v8::base::make_unique<FunctionExtension>());
+ v8::RegisterExtension(std::make_unique<FunctionExtension>());
v8::HandleScope handle_scope(CcTest::isolate());
static const char* exts[1] = {"functiontest"};
v8::ExtensionConfiguration config(1, exts);
@@ -7483,7 +7480,7 @@ THREADED_TEST(FunctionLookup) {
THREADED_TEST(NativeFunctionConstructCall) {
- v8::RegisterExtension(v8::base::make_unique<FunctionExtension>());
+ v8::RegisterExtension(std::make_unique<FunctionExtension>());
v8::HandleScope handle_scope(CcTest::isolate());
static const char* exts[1] = {"functiontest"};
v8::ExtensionConfiguration config(1, exts);
@@ -7520,9 +7517,9 @@ void StoringErrorCallback(const char* location, const char* message) {
TEST(ErrorReporting) {
CcTest::isolate()->SetFatalErrorHandler(StoringErrorCallback);
static const char* aDeps[] = {"B"};
- v8::RegisterExtension(v8::base::make_unique<Extension>("A", "", 1, aDeps));
+ v8::RegisterExtension(std::make_unique<Extension>("A", "", 1, aDeps));
static const char* bDeps[] = {"A"};
- v8::RegisterExtension(v8::base::make_unique<Extension>("B", "", 1, bDeps));
+ v8::RegisterExtension(std::make_unique<Extension>("B", "", 1, bDeps));
last_location = nullptr;
v8::ExtensionConfiguration config(1, bDeps);
v8::Local<Context> context = Context::New(CcTest::isolate(), &config);
@@ -10625,7 +10622,6 @@ THREADED_TEST(ShadowObjectAndDataProperty) {
i::FeedbackSlot slot = i::FeedbackVector::ToSlot(0);
i::FeedbackNexus nexus(foo->feedback_vector(), slot);
CHECK_EQ(i::FeedbackSlotKind::kStoreGlobalSloppy, nexus.kind());
- CHECK_EQ(i::PREMONOMORPHIC, nexus.ic_state());
CompileRun("foo(1)");
CHECK_EQ(i::MONOMORPHIC, nexus.ic_state());
// We go a bit further, checking that the form of monomorphism is
@@ -10676,7 +10672,6 @@ THREADED_TEST(ShadowObjectAndDataPropertyTurbo) {
i::FeedbackSlot slot = i::FeedbackVector::ToSlot(0);
i::FeedbackNexus nexus(foo->feedback_vector(), slot);
CHECK_EQ(i::FeedbackSlotKind::kStoreGlobalSloppy, nexus.kind());
- CHECK_EQ(i::PREMONOMORPHIC, nexus.ic_state());
CompileRun("%OptimizeFunctionOnNextCall(foo); foo(1)");
CHECK_EQ(i::MONOMORPHIC, nexus.ic_state());
i::HeapObject heap_object;
@@ -12310,8 +12305,14 @@ TEST(CallHandlerHasNoSideEffect) {
->Set(context.local(), v8_str("f"),
templ->GetFunction(context.local()).ToLocalChecked())
.FromJust());
- CHECK(v8::debug::EvaluateGlobal(isolate, v8_str("f()"), true).IsEmpty());
- CHECK(v8::debug::EvaluateGlobal(isolate, v8_str("new f()"), true).IsEmpty());
+ CHECK(v8::debug::EvaluateGlobal(
+ isolate, v8_str("f()"),
+ v8::debug::EvaluateGlobalMode::kDisableBreaksAndThrowOnSideEffect)
+ .IsEmpty());
+ CHECK(v8::debug::EvaluateGlobal(
+ isolate, v8_str("new f()"),
+ v8::debug::EvaluateGlobalMode::kDisableBreaksAndThrowOnSideEffect)
+ .IsEmpty());
// Side-effect-free version.
Local<v8::FunctionTemplate> templ2 = v8::FunctionTemplate::New(isolate);
@@ -12321,8 +12322,14 @@ TEST(CallHandlerHasNoSideEffect) {
->Set(context.local(), v8_str("f2"),
templ2->GetFunction(context.local()).ToLocalChecked())
.FromJust());
- v8::debug::EvaluateGlobal(isolate, v8_str("f2()"), true).ToLocalChecked();
- v8::debug::EvaluateGlobal(isolate, v8_str("new f2()"), true).ToLocalChecked();
+ v8::debug::EvaluateGlobal(
+ isolate, v8_str("f2()"),
+ v8::debug::EvaluateGlobalMode::kDisableBreaksAndThrowOnSideEffect)
+ .ToLocalChecked();
+ v8::debug::EvaluateGlobal(
+ isolate, v8_str("new f2()"),
+ v8::debug::EvaluateGlobalMode::kDisableBreaksAndThrowOnSideEffect)
+ .ToLocalChecked();
}
TEST(FunctionTemplateNewHasNoSideEffect) {
@@ -12337,8 +12344,14 @@ TEST(FunctionTemplateNewHasNoSideEffect) {
->Set(context.local(), v8_str("f"),
templ->GetFunction(context.local()).ToLocalChecked())
.FromJust());
- CHECK(v8::debug::EvaluateGlobal(isolate, v8_str("f()"), true).IsEmpty());
- CHECK(v8::debug::EvaluateGlobal(isolate, v8_str("new f()"), true).IsEmpty());
+ CHECK(v8::debug::EvaluateGlobal(
+ isolate, v8_str("f()"),
+ v8::debug::EvaluateGlobalMode::kDisableBreaksAndThrowOnSideEffect)
+ .IsEmpty());
+ CHECK(v8::debug::EvaluateGlobal(
+ isolate, v8_str("new f()"),
+ v8::debug::EvaluateGlobalMode::kDisableBreaksAndThrowOnSideEffect)
+ .IsEmpty());
// Side-effect-free version.
Local<v8::FunctionTemplate> templ2 = v8::FunctionTemplate::New(
@@ -12348,8 +12361,14 @@ TEST(FunctionTemplateNewHasNoSideEffect) {
->Set(context.local(), v8_str("f2"),
templ2->GetFunction(context.local()).ToLocalChecked())
.FromJust());
- v8::debug::EvaluateGlobal(isolate, v8_str("f2()"), true).ToLocalChecked();
- v8::debug::EvaluateGlobal(isolate, v8_str("new f2()"), true).ToLocalChecked();
+ v8::debug::EvaluateGlobal(
+ isolate, v8_str("f2()"),
+ v8::debug::EvaluateGlobalMode::kDisableBreaksAndThrowOnSideEffect)
+ .ToLocalChecked();
+ v8::debug::EvaluateGlobal(
+ isolate, v8_str("new f2()"),
+ v8::debug::EvaluateGlobalMode::kDisableBreaksAndThrowOnSideEffect)
+ .ToLocalChecked();
}
TEST(FunctionTemplateNewWithCacheHasNoSideEffect) {
@@ -12366,8 +12385,14 @@ TEST(FunctionTemplateNewWithCacheHasNoSideEffect) {
->Set(context.local(), v8_str("f"),
templ->GetFunction(context.local()).ToLocalChecked())
.FromJust());
- CHECK(v8::debug::EvaluateGlobal(isolate, v8_str("f()"), true).IsEmpty());
- CHECK(v8::debug::EvaluateGlobal(isolate, v8_str("new f()"), true).IsEmpty());
+ CHECK(v8::debug::EvaluateGlobal(
+ isolate, v8_str("f()"),
+ v8::debug::EvaluateGlobalMode::kDisableBreaksAndThrowOnSideEffect)
+ .IsEmpty());
+ CHECK(v8::debug::EvaluateGlobal(
+ isolate, v8_str("new f()"),
+ v8::debug::EvaluateGlobalMode::kDisableBreaksAndThrowOnSideEffect)
+ .IsEmpty());
// Side-effect-free version.
Local<v8::FunctionTemplate> templ2 = v8::FunctionTemplate::NewWithCache(
@@ -12377,8 +12402,14 @@ TEST(FunctionTemplateNewWithCacheHasNoSideEffect) {
->Set(context.local(), v8_str("f2"),
templ2->GetFunction(context.local()).ToLocalChecked())
.FromJust());
- v8::debug::EvaluateGlobal(isolate, v8_str("f2()"), true).ToLocalChecked();
- v8::debug::EvaluateGlobal(isolate, v8_str("new f2()"), true).ToLocalChecked();
+ v8::debug::EvaluateGlobal(
+ isolate, v8_str("f2()"),
+ v8::debug::EvaluateGlobalMode::kDisableBreaksAndThrowOnSideEffect)
+ .ToLocalChecked();
+ v8::debug::EvaluateGlobal(
+ isolate, v8_str("new f2()"),
+ v8::debug::EvaluateGlobalMode::kDisableBreaksAndThrowOnSideEffect)
+ .ToLocalChecked();
}
TEST(FunctionNewHasNoSideEffect) {
@@ -12390,8 +12421,14 @@ TEST(FunctionNewHasNoSideEffect) {
Local<Function> func =
Function::New(context.local(), EmptyHandler).ToLocalChecked();
CHECK(context->Global()->Set(context.local(), v8_str("f"), func).FromJust());
- CHECK(v8::debug::EvaluateGlobal(isolate, v8_str("f()"), true).IsEmpty());
- CHECK(v8::debug::EvaluateGlobal(isolate, v8_str("new f()"), true).IsEmpty());
+ CHECK(v8::debug::EvaluateGlobal(
+ isolate, v8_str("f()"),
+ v8::debug::EvaluateGlobalMode::kDisableBreaksAndThrowOnSideEffect)
+ .IsEmpty());
+ CHECK(v8::debug::EvaluateGlobal(
+ isolate, v8_str("new f()"),
+ v8::debug::EvaluateGlobalMode::kDisableBreaksAndThrowOnSideEffect)
+ .IsEmpty());
// Side-effect-free version.
Local<Function> func2 =
@@ -12401,8 +12438,14 @@ TEST(FunctionNewHasNoSideEffect) {
.ToLocalChecked();
CHECK(
context->Global()->Set(context.local(), v8_str("f2"), func2).FromJust());
- v8::debug::EvaluateGlobal(isolate, v8_str("f2()"), true).ToLocalChecked();
- v8::debug::EvaluateGlobal(isolate, v8_str("new f2()"), true).ToLocalChecked();
+ v8::debug::EvaluateGlobal(
+ isolate, v8_str("f2()"),
+ v8::debug::EvaluateGlobalMode::kDisableBreaksAndThrowOnSideEffect)
+ .ToLocalChecked();
+ v8::debug::EvaluateGlobal(
+ isolate, v8_str("new f2()"),
+ v8::debug::EvaluateGlobalMode::kDisableBreaksAndThrowOnSideEffect)
+ .ToLocalChecked();
}
// These handlers instantiate a function the embedder considers safe in some
@@ -12461,7 +12504,10 @@ TEST(FunctionNewInstanceHasNoSideEffect) {
v8::SideEffectType::kHasNoSideEffect)
.ToLocalChecked();
CHECK(context->Global()->Set(context.local(), v8_str("f"), func0).FromJust());
- CHECK(v8::debug::EvaluateGlobal(isolate, v8_str("f()"), true).IsEmpty());
+ CHECK(v8::debug::EvaluateGlobal(
+ isolate, v8_str("f()"),
+ v8::debug::EvaluateGlobalMode::kDisableBreaksAndThrowOnSideEffect)
+ .IsEmpty());
// A whitelisted function that creates a new object. Should throw.
Local<Function> func =
@@ -12470,7 +12516,10 @@ TEST(FunctionNewInstanceHasNoSideEffect) {
v8::SideEffectType::kHasNoSideEffect)
.ToLocalChecked();
CHECK(context->Global()->Set(context.local(), v8_str("f"), func).FromJust());
- CHECK(v8::debug::EvaluateGlobal(isolate, v8_str("f()"), true).IsEmpty());
+ CHECK(v8::debug::EvaluateGlobal(
+ isolate, v8_str("f()"),
+ v8::debug::EvaluateGlobalMode::kDisableBreaksAndThrowOnSideEffect)
+ .IsEmpty());
// A whitelisted function that creates a new object with explicit intent to
// have no side-effects (e.g. building an "object wrapper"). Should not throw.
@@ -12481,18 +12530,26 @@ TEST(FunctionNewInstanceHasNoSideEffect) {
.ToLocalChecked();
CHECK(
context->Global()->Set(context.local(), v8_str("f2"), func2).FromJust());
- v8::debug::EvaluateGlobal(isolate, v8_str("f2()"), true).ToLocalChecked();
+ v8::debug::EvaluateGlobal(
+ isolate, v8_str("f2()"),
+ v8::debug::EvaluateGlobalMode::kDisableBreaksAndThrowOnSideEffect)
+ .ToLocalChecked();
// Check that side effect skipping did not leak outside to future evaluations.
Local<Function> func3 =
Function::New(context.local(), EmptyHandler).ToLocalChecked();
CHECK(
context->Global()->Set(context.local(), v8_str("f3"), func3).FromJust());
- CHECK(v8::debug::EvaluateGlobal(isolate, v8_str("f3()"), true).IsEmpty());
+ CHECK(v8::debug::EvaluateGlobal(
+ isolate, v8_str("f3()"),
+ v8::debug::EvaluateGlobalMode::kDisableBreaksAndThrowOnSideEffect)
+ .IsEmpty());
// Check that using side effect free NewInstance works in normal evaluation
// (without throwOnSideEffect).
- v8::debug::EvaluateGlobal(isolate, v8_str("f2()"), false).ToLocalChecked();
+ v8::debug::EvaluateGlobal(isolate, v8_str("f2()"),
+ v8::debug::EvaluateGlobalMode::kDefault)
+ .ToLocalChecked();
}
TEST(CallHandlerAsFunctionHasNoSideEffectNotSupported) {
@@ -12505,7 +12562,10 @@ TEST(CallHandlerAsFunctionHasNoSideEffectNotSupported) {
templ->SetCallAsFunctionHandler(EmptyHandler);
Local<v8::Object> obj = templ->NewInstance(context.local()).ToLocalChecked();
CHECK(context->Global()->Set(context.local(), v8_str("obj"), obj).FromJust());
- CHECK(v8::debug::EvaluateGlobal(isolate, v8_str("obj()"), true).IsEmpty());
+ CHECK(v8::debug::EvaluateGlobal(
+ isolate, v8_str("obj()"),
+ v8::debug::EvaluateGlobalMode::kDisableBreaksAndThrowOnSideEffect)
+ .IsEmpty());
// Side-effect-free version is not supported.
i::FunctionTemplateInfo cons = i::FunctionTemplateInfo::cast(
@@ -12516,7 +12576,10 @@ TEST(CallHandlerAsFunctionHasNoSideEffectNotSupported) {
CHECK(!handler_info.IsSideEffectFreeCallHandlerInfo());
handler_info.set_map(
i::ReadOnlyRoots(heap).side_effect_free_call_handler_info_map());
- CHECK(v8::debug::EvaluateGlobal(isolate, v8_str("obj()"), true).IsEmpty());
+ CHECK(v8::debug::EvaluateGlobal(
+ isolate, v8_str("obj()"),
+ v8::debug::EvaluateGlobalMode::kDisableBreaksAndThrowOnSideEffect)
+ .IsEmpty());
}
static void IsConstructHandler(
@@ -18192,10 +18255,10 @@ static void BreakArrayGuarantees(const char* script) {
v8::Context::Scope context_scope(context);
v8::internal::Isolate* i_isolate =
reinterpret_cast<v8::internal::Isolate*>(isolate1);
- CHECK(i_isolate->IsNoElementsProtectorIntact());
+ CHECK(v8::internal::Protectors::IsNoElementsIntact(i_isolate));
// Run something in new isolate.
CompileRun(script);
- CHECK(!i_isolate->IsNoElementsProtectorIntact());
+ CHECK(!v8::internal::Protectors::IsNoElementsIntact(i_isolate));
}
isolate1->Exit();
isolate1->Dispose();
@@ -23170,7 +23233,7 @@ void RunStreamingTest(const char** chunks,
v8::TryCatch try_catch(isolate);
v8::ScriptCompiler::StreamedSource source(
- v8::base::make_unique<TestSourceStream>(chunks), encoding);
+ std::make_unique<TestSourceStream>(chunks), encoding);
v8::ScriptCompiler::ScriptStreamingTask* task =
v8::ScriptCompiler::StartStreamingScript(isolate, &source);
@@ -23441,7 +23504,7 @@ TEST(StreamingWithDebuggingEnabledLate) {
v8::TryCatch try_catch(isolate);
v8::ScriptCompiler::StreamedSource source(
- v8::base::make_unique<TestSourceStream>(chunks),
+ std::make_unique<TestSourceStream>(chunks),
v8::ScriptCompiler::StreamedSource::ONE_BYTE);
v8::ScriptCompiler::ScriptStreamingTask* task =
v8::ScriptCompiler::StartStreamingScript(isolate, &source);
@@ -23549,7 +23612,7 @@ TEST(StreamingWithHarmonyScopes) {
v8::TryCatch try_catch(isolate);
v8::ScriptCompiler::StreamedSource source(
- v8::base::make_unique<TestSourceStream>(chunks),
+ std::make_unique<TestSourceStream>(chunks),
v8::ScriptCompiler::StreamedSource::ONE_BYTE);
v8::ScriptCompiler::ScriptStreamingTask* task =
v8::ScriptCompiler::StartStreamingScript(isolate, &source);
@@ -23658,9 +23721,7 @@ v8::MaybeLocal<Value> SyntheticModuleEvaluationStepsCallbackFail(
v8::MaybeLocal<Value> SyntheticModuleEvaluationStepsCallbackSetExport(
Local<Context> context, Local<Module> module) {
- Maybe<bool> set_export_result = module->SetSyntheticModuleExport(
- context->GetIsolate(), v8_str("test_export"), v8_num(42));
- CHECK(set_export_result.FromJust());
+ module->SetSyntheticModuleExport(v8_str("test_export"), v8_num(42));
return v8::Undefined(reinterpret_cast<v8::Isolate*>(context->GetIsolate()));
}
@@ -23771,7 +23832,13 @@ TEST(ModuleCodeCache) {
// Evaluate for possible lazy compilation.
Local<Value> completion_value =
module->Evaluate(context).ToLocalChecked();
- CHECK_EQ(42, completion_value->Int32Value(context).FromJust());
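+      // With --harmony-top-level-await, Evaluate() returns a promise that
+      // resolves to undefined rather than the module's completion value.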
+ if (i::FLAG_harmony_top_level_await) {
+ Local<v8::Promise> promise(Local<v8::Promise>::Cast(completion_value));
+ CHECK_EQ(promise->State(), v8::Promise::kFulfilled);
+ CHECK(promise->Result()->IsUndefined());
+ } else {
+ CHECK_EQ(42, completion_value->Int32Value(context).FromJust());
+ }
// Now create the cache. Note that it is freed, obscurely, when
// ScriptCompiler::Source goes out of scope below.
@@ -23802,7 +23869,13 @@ TEST(ModuleCodeCache) {
Local<Value> completion_value =
module->Evaluate(context).ToLocalChecked();
- CHECK_EQ(42, completion_value->Int32Value(context).FromJust());
+ if (i::FLAG_harmony_top_level_await) {
+ Local<v8::Promise> promise(Local<v8::Promise>::Cast(completion_value));
+ CHECK_EQ(promise->State(), v8::Promise::kFulfilled);
+ CHECK(promise->Result()->IsUndefined());
+ } else {
+ CHECK_EQ(42, completion_value->Int32Value(context).FromJust());
+ }
}
isolate->Dispose();
}
@@ -23867,9 +23940,7 @@ TEST(SyntheticModuleSetExports) {
// undefined.
CHECK(foo_cell->value().IsUndefined());
- Maybe<bool> set_export_result =
- module->SetSyntheticModuleExport(isolate, foo_string, bar_string);
- CHECK(set_export_result.FromJust());
+ module->SetSyntheticModuleExport(foo_string, bar_string);
// After setting the export, the Cell should still have the same identity.
CHECK_EQ(exports->Lookup(v8::Utils::OpenHandle(*foo_string)), *foo_cell);
@@ -23880,34 +23951,6 @@ TEST(SyntheticModuleSetExports) {
->Equals(*v8::Utils::OpenHandle(*bar_string)));
}
-TEST(SyntheticModuleSetMissingExport) {
- LocalContext env;
- v8::Isolate* isolate = env->GetIsolate();
- auto i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- v8::Isolate::Scope iscope(isolate);
- v8::HandleScope scope(isolate);
- v8::Local<v8::Context> context = v8::Context::New(isolate);
- v8::Context::Scope cscope(context);
-
- Local<String> foo_string = v8_str("foo");
- Local<String> bar_string = v8_str("bar");
-
- Local<Module> module = CreateAndInstantiateSyntheticModule(
- isolate, v8_str("SyntheticModuleSetExports-TestSyntheticModule"), context,
- std::vector<v8::Local<v8::String>>(),
- UnexpectedSyntheticModuleEvaluationStepsCallback);
-
- i::Handle<i::SyntheticModule> i_module =
- i::Handle<i::SyntheticModule>::cast(v8::Utils::OpenHandle(*module));
- i::Handle<i::ObjectHashTable> exports(i_module->exports(), i_isolate);
-
- TryCatch try_catch(isolate);
- Maybe<bool> set_export_result =
- module->SetSyntheticModuleExport(isolate, foo_string, bar_string);
- CHECK(set_export_result.IsNothing());
- CHECK(try_catch.HasCaught());
-}
-
TEST(SyntheticModuleEvaluationStepsNoThrow) {
synthetic_module_callback_count = 0;
LocalContext env;
@@ -24011,7 +24054,13 @@ TEST(ImportFromSyntheticModule) {
.ToChecked();
Local<Value> completion_value = module->Evaluate(context).ToLocalChecked();
- CHECK_EQ(42, completion_value->Int32Value(context).FromJust());
+ if (i::FLAG_harmony_top_level_await) {
+ Local<v8::Promise> promise(Local<v8::Promise>::Cast(completion_value));
+ CHECK_EQ(promise->State(), v8::Promise::kFulfilled);
+ CHECK(promise->Result()->IsUndefined());
+ } else {
+ CHECK_EQ(42, completion_value->Int32Value(context).FromJust());
+ }
}
TEST(ImportFromSyntheticModuleThrow) {
@@ -24041,7 +24090,15 @@ TEST(ImportFromSyntheticModuleThrow) {
CHECK_EQ(module->GetStatus(), Module::kInstantiated);
TryCatch try_catch(isolate);
v8::MaybeLocal<Value> completion_value = module->Evaluate(context);
- CHECK(completion_value.IsEmpty());
+ if (i::FLAG_harmony_top_level_await) {
+ Local<v8::Promise> promise(
+ Local<v8::Promise>::Cast(completion_value.ToLocalChecked()));
+ CHECK_EQ(promise->State(), v8::Promise::kRejected);
+ CHECK_EQ(promise->Result(), try_catch.Exception());
+ } else {
+ CHECK(completion_value.IsEmpty());
+ }
+
CHECK_EQ(module->GetStatus(), Module::kErrored);
CHECK(try_catch.HasCaught());
}
@@ -24074,7 +24131,13 @@ TEST(CodeCacheModuleScriptMismatch) {
// Evaluate for possible lazy compilation.
Local<Value> completion_value =
module->Evaluate(context).ToLocalChecked();
- CHECK_EQ(42, completion_value->Int32Value(context).FromJust());
+ if (i::FLAG_harmony_top_level_await) {
+ Local<v8::Promise> promise(Local<v8::Promise>::Cast(completion_value));
+ CHECK_EQ(promise->State(), v8::Promise::kFulfilled);
+ CHECK(promise->Result()->IsUndefined());
+ } else {
+ CHECK_EQ(42, completion_value->Int32Value(context).FromJust());
+ }
// Now create the cache. Note that it is freed, obscurely, when
// ScriptCompiler::Source goes out of scope below.
@@ -24170,7 +24233,13 @@ TEST(CodeCacheScriptModuleMismatch) {
Local<Value> completion_value =
module->Evaluate(context).ToLocalChecked();
- CHECK_EQ(42, completion_value->Int32Value(context).FromJust());
+ if (i::FLAG_harmony_top_level_await) {
+ Local<v8::Promise> promise(Local<v8::Promise>::Cast(completion_value));
+ CHECK_EQ(promise->State(), v8::Promise::kFulfilled);
+ CHECK(promise->Result()->IsUndefined());
+ } else {
+ CHECK_EQ(42, completion_value->Int32Value(context).FromJust());
+ }
}
isolate->Dispose();
}
@@ -24206,10 +24275,14 @@ TEST(InvalidCodeCacheDataInCompileModule) {
.ToChecked();
CHECK(cached_data->rejected);
- CHECK_EQ(42, module->Evaluate(context)
- .ToLocalChecked()
- ->Int32Value(context)
- .FromJust());
+ Local<Value> completion_value = module->Evaluate(context).ToLocalChecked();
+ if (i::FLAG_harmony_top_level_await) {
+ Local<v8::Promise> promise(Local<v8::Promise>::Cast(completion_value));
+ CHECK_EQ(promise->State(), v8::Promise::kFulfilled);
+ CHECK(promise->Result()->IsUndefined());
+ } else {
+ CHECK_EQ(42, completion_value->Int32Value(context).FromJust());
+ }
}
void TestInvalidCacheData(v8::ScriptCompiler::CompileOptions option) {
@@ -24403,280 +24476,6 @@ TEST(SealHandleScopeNested) {
}
}
-
-static void ExtrasBindingTestRuntimeFunction(
- const v8::FunctionCallbackInfo<v8::Value>& args) {
- CHECK_EQ(
- 3,
- args[0]->Int32Value(args.GetIsolate()->GetCurrentContext()).FromJust());
- args.GetReturnValue().Set(v8_num(7));
-}
-
-TEST(ExtrasFunctionSource) {
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope handle_scope(isolate);
- LocalContext env;
-
- v8::Local<v8::Object> binding = env->GetExtrasBindingObject();
-
- // Functions defined in extras do not expose source code.
- auto func = binding->Get(env.local(), v8_str("testFunctionToString"))
- .ToLocalChecked()
- .As<v8::Function>();
- auto undefined = v8::Undefined(isolate);
- auto result = func->Call(env.local(), undefined, 0, {})
- .ToLocalChecked()
- .As<v8::String>();
- CHECK(result->StrictEquals(v8_str("function foo() { [native code] }")));
-
- // Functions defined in extras do not show up in the stack trace.
- auto wrapper = binding->Get(env.local(), v8_str("testStackTrace"))
- .ToLocalChecked()
- .As<v8::Function>();
- CHECK(env->Global()->Set(env.local(), v8_str("wrapper"), wrapper).FromJust());
- ExpectString(
- "function f(x) { return wrapper(x) }"
- "function g() { return new Error().stack; }"
- "f(g)",
- "Error\n"
- " at g (<anonymous>:1:58)\n"
- " at f (<anonymous>:1:24)\n"
- " at <anonymous>:1:78");
-}
-
-TEST(ExtrasBindingObject) {
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope handle_scope(isolate);
- LocalContext env;
-
- // standalone.gypi ensures we include the test-extra.js file, which should
- // export the tested functions.
- v8::Local<v8::Object> binding = env->GetExtrasBindingObject();
-
- auto func = binding->Get(env.local(), v8_str("testExtraShouldReturnFive"))
- .ToLocalChecked()
- .As<v8::Function>();
- auto undefined = v8::Undefined(isolate);
- auto result = func->Call(env.local(), undefined, 0, {})
- .ToLocalChecked()
- .As<v8::Number>();
- CHECK_EQ(5, result->Int32Value(env.local()).FromJust());
-
- v8::Local<v8::FunctionTemplate> runtimeFunction =
- v8::FunctionTemplate::New(isolate, ExtrasBindingTestRuntimeFunction);
- binding->Set(env.local(), v8_str("runtime"),
- runtimeFunction->GetFunction(env.local()).ToLocalChecked())
- .FromJust();
- func = binding->Get(env.local(), v8_str("testExtraShouldCallToRuntime"))
- .ToLocalChecked()
- .As<v8::Function>();
- result = func->Call(env.local(), undefined, 0, {})
- .ToLocalChecked()
- .As<v8::Number>();
- CHECK_EQ(7, result->Int32Value(env.local()).FromJust());
-}
-
-
-TEST(ExtrasCreatePromise) {
- i::FLAG_allow_natives_syntax = true;
- LocalContext context;
- v8::Isolate* isolate = context->GetIsolate();
- v8::HandleScope handle_scope(isolate);
-
- LocalContext env;
- v8::Local<v8::Object> binding = env->GetExtrasBindingObject();
-
- auto func = binding->Get(env.local(), v8_str("testCreatePromise"))
- .ToLocalChecked()
- .As<v8::Function>();
- CHECK(env->Global()->Set(env.local(), v8_str("func"), func).FromJust());
-
- auto promise = CompileRun(
- "%PrepareFunctionForOptimization(func);\n"
- "func();\n"
- "func();\n"
- "%OptimizeFunctionOnNextCall(func);\n"
- "func()\n")
- .As<v8::Promise>();
- CHECK_EQ(v8::Promise::kPending, promise->State());
-}
-
-TEST(ExtrasCreatePromiseWithParent) {
- i::FLAG_allow_natives_syntax = true;
- LocalContext context;
- v8::Isolate* isolate = context->GetIsolate();
- v8::HandleScope handle_scope(isolate);
-
- LocalContext env;
- v8::Local<v8::Object> binding = env->GetExtrasBindingObject();
-
- auto func = binding->Get(env.local(), v8_str("testCreatePromiseWithParent"))
- .ToLocalChecked()
- .As<v8::Function>();
- CHECK(env->Global()->Set(env.local(), v8_str("func"), func).FromJust());
-
- auto promise = CompileRun(
- "var parent = new Promise((a, b) => {});\n"
- "%PrepareFunctionForOptimization(func);\n"
- "func(parent);\n"
- "func(parent);\n"
- "%OptimizeFunctionOnNextCall(func);\n"
- "func(parent)\n")
- .As<v8::Promise>();
- CHECK_EQ(v8::Promise::kPending, promise->State());
-}
-
-TEST(ExtrasRejectPromise) {
- i::FLAG_allow_natives_syntax = true;
- LocalContext context;
- v8::Isolate* isolate = context->GetIsolate();
- v8::HandleScope handle_scope(isolate);
-
- LocalContext env;
- v8::Local<v8::Object> binding = env->GetExtrasBindingObject();
-
- auto func = binding->Get(env.local(), v8_str("testRejectPromise"))
- .ToLocalChecked()
- .As<v8::Function>();
- CHECK(env->Global()->Set(env.local(), v8_str("func"), func).FromJust());
-
- auto rejected_promise = CompileRun(
- "function newPromise() {\n"
- " return new Promise((a, b) => {});\n"
- "}\n"
- "%PrepareFunctionForOptimization(func);\n"
- "func(newPromise(), 1);\n"
- "func(newPromise(), 1);\n"
- "%OptimizeFunctionOnNextCall(func);\n"
- "var promise = newPromise();\n"
- "func(promise, 1);\n"
- "promise;\n")
- .As<v8::Promise>();
- CHECK_EQ(v8::Promise::kRejected, rejected_promise->State());
- CHECK_EQ(1, rejected_promise->Result()->Int32Value(env.local()).FromJust());
-}
-
-TEST(ExtrasResolvePromise) {
- i::FLAG_allow_natives_syntax = true;
- LocalContext context;
- v8::Isolate* isolate = context->GetIsolate();
- v8::HandleScope handle_scope(isolate);
-
- LocalContext env;
- v8::Local<v8::Object> binding = env->GetExtrasBindingObject();
-
- auto func = binding->Get(env.local(), v8_str("testResolvePromise"))
- .ToLocalChecked()
- .As<v8::Function>();
- CHECK(env->Global()->Set(env.local(), v8_str("func"), func).FromJust());
-
- auto pending_promise = CompileRun(
- "function newPromise() {\n"
- " return new Promise((a, b) => {});\n"
- "}\n"
- "%PrepareFunctionForOptimization(func);\n"
- "func(newPromise(), newPromise());\n"
- "func(newPromise(), newPromise());\n"
- "%OptimizeFunctionOnNextCall(func);\n"
- "var promise = newPromise();\n"
- "func(promise, newPromise());\n"
- "promise;\n")
- .As<v8::Promise>();
- CHECK_EQ(v8::Promise::kPending, pending_promise->State());
-
- auto fulfilled_promise = CompileRun(
- "function newPromise() {\n"
- " return new Promise((a, b) => {});\n"
- "}\n"
- "%PrepareFunctionForOptimization(func);\n"
- "func(newPromise(), 1);\n"
- "func(newPromise(), 1);\n"
- "%OptimizeFunctionOnNextCall(func);\n"
- "var promise = newPromise();\n"
- "func(promise, 1);\n"
- "promise;\n")
- .As<v8::Promise>();
- CHECK_EQ(v8::Promise::kFulfilled, fulfilled_promise->State());
- CHECK_EQ(1, fulfilled_promise->Result()->Int32Value(env.local()).FromJust());
-}
-
-TEST(ExtrasUtilsObject) {
- LocalContext context;
- v8::Isolate* isolate = context->GetIsolate();
- v8::HandleScope handle_scope(isolate);
-
- LocalContext env;
- v8::Local<v8::Object> binding = env->GetExtrasBindingObject();
-
- auto func = binding->Get(env.local(), v8_str("testExtraCanUseUtils"))
- .ToLocalChecked()
- .As<v8::Function>();
- auto undefined = v8::Undefined(isolate);
- auto result = func->Call(env.local(), undefined, 0, {})
- .ToLocalChecked()
- .As<v8::Object>();
-
- auto private_symbol = result->Get(env.local(), v8_str("privateSymbol"))
- .ToLocalChecked()
- .As<v8::Symbol>();
- i::Handle<i::Symbol> ips = v8::Utils::OpenHandle(*private_symbol);
- CHECK(ips->IsPrivate());
-
- CompileRun("var result = 0; function store(x) { result = x; }");
- auto store = CompileRun("store").As<v8::Function>();
-
- auto fulfilled_promise = result->Get(env.local(), v8_str("fulfilledPromise"))
- .ToLocalChecked()
- .As<v8::Promise>();
- fulfilled_promise->Then(env.local(), store).ToLocalChecked();
- isolate->RunMicrotasks();
- CHECK_EQ(1, CompileRun("result")->Int32Value(env.local()).FromJust());
-
- auto fulfilled_promise_2 =
- result->Get(env.local(), v8_str("fulfilledPromise2"))
- .ToLocalChecked()
- .As<v8::Promise>();
- fulfilled_promise_2->Then(env.local(), store).ToLocalChecked();
- isolate->RunMicrotasks();
- CHECK_EQ(2, CompileRun("result")->Int32Value(env.local()).FromJust());
-
- auto rejected_promise = result->Get(env.local(), v8_str("rejectedPromise"))
- .ToLocalChecked()
- .As<v8::Promise>();
- rejected_promise->Catch(env.local(), store).ToLocalChecked();
- isolate->RunMicrotasks();
- CHECK_EQ(3, CompileRun("result")->Int32Value(env.local()).FromJust());
-
- auto rejected_but_handled_promise =
- result->Get(env.local(), v8_str("rejectedButHandledPromise"))
- .ToLocalChecked()
- .As<v8::Promise>();
- CHECK(rejected_but_handled_promise->HasHandler());
-
- auto promise_states = result->Get(env.local(), v8_str("promiseStates"))
- .ToLocalChecked()
- .As<v8::String>();
- String::Utf8Value promise_states_string(isolate, promise_states);
- CHECK_EQ(0, strcmp(*promise_states_string, "pending fulfilled rejected"));
-
- auto promise_is_promise = result->Get(env.local(), v8_str("promiseIsPromise"))
- .ToLocalChecked()
- .As<v8::Boolean>();
- CHECK_EQ(true, promise_is_promise->Value());
-
- auto thenable_is_promise =
- result->Get(env.local(), v8_str("thenableIsPromise"))
- .ToLocalChecked()
- .As<v8::Boolean>();
- CHECK_EQ(false, thenable_is_promise->Value());
-
- auto uncurry_this = result->Get(env.local(), v8_str("uncurryThis"))
- .ToLocalChecked()
- .As<v8::Boolean>();
- CHECK_EQ(true, uncurry_this->Value());
-}
-
-
TEST(Map) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope handle_scope(isolate);
@@ -25854,7 +25653,14 @@ TEST(ImportMeta) {
module->InstantiateModule(context.local(), UnexpectedModuleResolveCallback)
.ToChecked();
Local<Value> result = module->Evaluate(context.local()).ToLocalChecked();
- CHECK(result->StrictEquals(Local<v8::Value>::Cast(v8::Utils::ToLocal(meta))));
+ if (i::FLAG_harmony_top_level_await) {
+ Local<v8::Promise> promise(Local<v8::Promise>::Cast(result));
+ CHECK_EQ(promise->State(), v8::Promise::kFulfilled);
+ CHECK(promise->Result()->IsUndefined());
+ } else {
+ CHECK(
+ result->StrictEquals(Local<v8::Value>::Cast(v8::Utils::ToLocal(meta))));
+ }
}
TEST(GetModuleNamespace) {
@@ -26180,7 +25986,7 @@ void AtomicsWaitCallbackForTesting(
wake_handle->Wake();
break;
case AtomicsWaitCallbackAction::StopFromThreadAndThrow:
- info->stop_thread = v8::base::make_unique<StopAtomicsWaitThread>(info);
+ info->stop_thread = std::make_unique<StopAtomicsWaitThread>(info);
CHECK(info->stop_thread->Start());
break;
case AtomicsWaitCallbackAction::KeepWaiting:
diff --git a/deps/v8/test/cctest/test-assembler-arm.cc b/deps/v8/test/cctest/test-assembler-arm.cc
index c96a0199bb..bae39ba2ad 100644
--- a/deps/v8/test/cctest/test-assembler-arm.cc
+++ b/deps/v8/test/cctest/test-assembler-arm.cc
@@ -3391,7 +3391,9 @@ TEST(ARMv8_vminmax_f32) {
template <typename T, typename Inputs, typename Results>
static GeneratedCode<F_ppiii> GenerateMacroFloatMinMax(
- MacroAssembler& assm) { // NOLINT(runtime/references)
+ MacroAssembler* assm_ptr) {
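+  // The assembler is taken by pointer (a non-const reference parameter would
+  // trip the runtime/references lint) and rebound to a reference below.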
+ MacroAssembler& assm = *assm_ptr;
+
T a = T::from_code(0); // d0/s0
T b = T::from_code(1); // d1/s1
T c = T::from_code(2); // d2/s2
@@ -3509,7 +3511,7 @@ TEST(macro_float_minmax_f64) {
double max_aba_;
};
- auto f = GenerateMacroFloatMinMax<DwVfpRegister, Inputs, Results>(assm);
+ auto f = GenerateMacroFloatMinMax<DwVfpRegister, Inputs, Results>(&assm);
#define CHECK_MINMAX(left, right, min, max) \
do { \
@@ -3574,7 +3576,7 @@ TEST(macro_float_minmax_f32) {
float max_aba_;
};
- auto f = GenerateMacroFloatMinMax<SwVfpRegister, Inputs, Results>(assm);
+ auto f = GenerateMacroFloatMinMax<SwVfpRegister, Inputs, Results>(&assm);
#define CHECK_MINMAX(left, right, min, max) \
do { \
diff --git a/deps/v8/test/cctest/test-assembler-arm64.cc b/deps/v8/test/cctest/test-assembler-arm64.cc
index 4fdf30ef64..44ee286587 100644
--- a/deps/v8/test/cctest/test-assembler-arm64.cc
+++ b/deps/v8/test/cctest/test-assembler-arm64.cc
@@ -2019,17 +2019,19 @@ TEST(far_branch_backward) {
START();
Label done, fail;
- Label near, far, in_range, out_of_range;
+  // Avoid using near and far as variable names because both are defined as
+  // macros in minwindef.h from the Windows SDK.
+ Label near_label, far_label, in_range, out_of_range;
__ Mov(x0, 0);
__ Mov(x1, 1);
__ Mov(x10, 0);
- __ B(&near);
+ __ B(&near_label);
__ Bind(&in_range);
__ Orr(x0, x0, 1 << 0);
- __ B(&far);
+ __ B(&far_label);
__ Bind(&out_of_range);
__ Orr(x0, x0, 1 << 1);
@@ -2053,19 +2055,19 @@ TEST(far_branch_backward) {
// close to the limit.
GenerateLandingNops(&masm, budget - kSlack, &fail);
- __ Bind(&near);
+ __ Bind(&near_label);
switch (type) {
case TestBranchType:
__ Tbz(x10, 3, &in_range);
// This should be:
// TBZ <in_range>
- CHECK_EQ(1 * kInstrSize, __ SizeOfCodeGeneratedSince(&near));
+ CHECK_EQ(1 * kInstrSize, __ SizeOfCodeGeneratedSince(&near_label));
break;
case CompareBranchType:
__ Cbz(x10, &in_range);
// This should be:
// CBZ <in_range>
- CHECK_EQ(1 * kInstrSize, __ SizeOfCodeGeneratedSince(&near));
+ CHECK_EQ(1 * kInstrSize, __ SizeOfCodeGeneratedSince(&near_label));
break;
case CondBranchType:
__ Cmp(x10, 0);
@@ -2073,7 +2075,7 @@ TEST(far_branch_backward) {
// This should be:
// CMP
// B.EQ <in_range>
- CHECK_EQ(2 * kInstrSize, __ SizeOfCodeGeneratedSince(&near));
+ CHECK_EQ(2 * kInstrSize, __ SizeOfCodeGeneratedSince(&near_label));
break;
default:
UNREACHABLE();
@@ -2083,7 +2085,7 @@ TEST(far_branch_backward) {
// Now go past the limit so that branches are now out of range.
GenerateLandingNops(&masm, kSlack * 2, &fail);
- __ Bind(&far);
+ __ Bind(&far_label);
switch (type) {
case TestBranchType:
__ Tbz(x10, 5, &out_of_range);
@@ -2091,7 +2093,7 @@ TEST(far_branch_backward) {
// TBNZ <skip>
// B <out_of_range>
// skip:
- CHECK_EQ(2 * kInstrSize, __ SizeOfCodeGeneratedSince(&far));
+ CHECK_EQ(2 * kInstrSize, __ SizeOfCodeGeneratedSince(&far_label));
break;
case CompareBranchType:
__ Cbz(x10, &out_of_range);
@@ -2099,7 +2101,7 @@ TEST(far_branch_backward) {
// CBNZ <skip>
// B <out_of_range>
// skip:
- CHECK_EQ(2 * kInstrSize, __ SizeOfCodeGeneratedSince(&far));
+ CHECK_EQ(2 * kInstrSize, __ SizeOfCodeGeneratedSince(&far_label));
break;
case CondBranchType:
__ Cmp(x10, 0);
@@ -2109,7 +2111,7 @@ TEST(far_branch_backward) {
// B.NE <skip>
// B <out_of_range>
// skip:
- CHECK_EQ(3 * kInstrSize, __ SizeOfCodeGeneratedSince(&far));
+ CHECK_EQ(3 * kInstrSize, __ SizeOfCodeGeneratedSince(&far_label));
break;
default:
UNREACHABLE();
diff --git a/deps/v8/test/cctest/test-assembler-mips.cc b/deps/v8/test/cctest/test-assembler-mips.cc
index 1cc1aa3213..dd0c056369 100644
--- a/deps/v8/test/cctest/test-assembler-mips.cc
+++ b/deps/v8/test/cctest/test-assembler-mips.cc
@@ -3206,7 +3206,7 @@ TEST(jump_tables3) {
Handle<Object> values[kNumCases];
for (int i = 0; i < kNumCases; ++i) {
double value = isolate->random_number_generator()->NextDouble();
- values[i] = isolate->factory()->NewHeapNumber(value, AllocationType::kOld);
+ values[i] = isolate->factory()->NewHeapNumber<AllocationType::kOld>(value);
}
Label labels[kNumCases];
Object obj;
@@ -4825,9 +4825,10 @@ TEST(r6_beqzc) {
}
}
-void load_elements_of_vector(
- MacroAssembler& assm, // NOLINT(runtime/references)
- const uint64_t elements[], MSARegister w, Register t0, Register t1) {
+void load_elements_of_vector(MacroAssembler* assm_ptr,
+ const uint64_t elements[], MSARegister w,
+ Register t0, Register t1) {
+ MacroAssembler& assm = *assm_ptr;
__ li(t0, static_cast<uint32_t>(elements[0] & 0xFFFFFFFF));
__ li(t1, static_cast<uint32_t>((elements[0] >> 32) & 0xFFFFFFFF));
__ insert_w(w, 0, t0);
@@ -4838,9 +4839,9 @@ void load_elements_of_vector(
__ insert_w(w, 3, t1);
}
-inline void store_elements_of_vector(
- MacroAssembler& assm, // NOLINT(runtime/references)
- MSARegister w, Register a) {
+inline void store_elements_of_vector(MacroAssembler* assm_ptr, MSARegister w,
+ Register a) {
+ MacroAssembler& assm = *assm_ptr;
__ st_d(w, MemOperand(a, 0));
}
@@ -4876,15 +4877,15 @@ void run_bz_bnz(TestCaseMsaBranch* input, Branch GenerateBranch,
msa_reg_t res;
Label do_not_move_w0_to_w2;
- load_elements_of_vector(assm, &t.ws_lo, w0, t0, t1);
- load_elements_of_vector(assm, &t.wd_lo, w2, t0, t1);
- load_elements_of_vector(assm, &input->wt_lo, w1, t0, t1);
+ load_elements_of_vector(&assm, &t.ws_lo, w0, t0, t1);
+ load_elements_of_vector(&assm, &t.wd_lo, w2, t0, t1);
+ load_elements_of_vector(&assm, &input->wt_lo, w1, t0, t1);
GenerateBranch(assm, do_not_move_w0_to_w2);
__ nop();
__ move_v(w2, w0);
__ bind(&do_not_move_w0_to_w2);
- store_elements_of_vector(assm, w2, a0);
+ store_elements_of_vector(&assm, w2, a0);
__ jr(ra);
__ nop();
@@ -5841,7 +5842,7 @@ void run_msa_insert(int32_t rs_value, int n, msa_reg_t* w) {
UNREACHABLE();
}
- store_elements_of_vector(assm, w0, a0);
+ store_elements_of_vector(&assm, w0, a0);
__ jr(ra);
__ nop();
@@ -5937,10 +5938,10 @@ TEST(MSA_move_v) {
MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
CpuFeatureScope fscope(&assm, MIPS_SIMD);
- load_elements_of_vector(assm, &t[i].ws_lo, w0, t0, t1);
- load_elements_of_vector(assm, &t[i].wd_lo, w2, t0, t1);
+ load_elements_of_vector(&assm, &t[i].ws_lo, w0, t0, t1);
+ load_elements_of_vector(&assm, &t[i].wd_lo, w2, t0, t1);
__ move_v(w2, w0);
- store_elements_of_vector(assm, w2, a0);
+ store_elements_of_vector(&assm, w2, a0);
__ jr(ra);
__ nop();
@@ -5981,10 +5982,10 @@ void run_msa_sldi(OperFunc GenerateOperation,
for (unsigned i = 0; i < arraysize(t); ++i) {
MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
CpuFeatureScope fscope(&assm, MIPS_SIMD);
- load_elements_of_vector(assm, &t[i].ws_lo, w0, t0, t1);
- load_elements_of_vector(assm, &t[i].wd_lo, w2, t0, t1);
+ load_elements_of_vector(&assm, &t[i].ws_lo, w0, t0, t1);
+ load_elements_of_vector(&assm, &t[i].wd_lo, w2, t0, t1);
GenerateOperation(assm);
- store_elements_of_vector(assm, w2, a0);
+ store_elements_of_vector(&assm, w2, a0);
__ jr(ra);
__ nop();
@@ -6175,7 +6176,7 @@ void run_msa_i8(SecondaryField opcode, uint64_t ws_lo, uint64_t ws_hi,
UNREACHABLE();
}
- store_elements_of_vector(assm, w2, a0);
+ store_elements_of_vector(&assm, w2, a0);
__ jr(ra);
__ nop();
@@ -6460,11 +6461,11 @@ void run_msa_i5(struct TestCaseMsaI5* input, bool i5_sign_ext,
int32_t i5 =
i5_sign_ext ? static_cast<int32_t>(input->i5 << 27) >> 27 : input->i5;
- load_elements_of_vector(assm, &(input->ws_lo), w0, t0, t1);
+ load_elements_of_vector(&assm, &(input->ws_lo), w0, t0, t1);
GenerateI5InstructionFunc(assm, i5);
- store_elements_of_vector(assm, w2, a0);
+ store_elements_of_vector(&assm, w2, a0);
__ jr(ra);
__ nop();
@@ -6880,10 +6881,10 @@ void run_msa_2r(const struct TestCaseMsa2R* input,
CpuFeatureScope fscope(&assm, MIPS_SIMD);
msa_reg_t res;
- load_elements_of_vector(assm, reinterpret_cast<const uint64_t*>(input), w0,
+ load_elements_of_vector(&assm, reinterpret_cast<const uint64_t*>(input), w0,
t0, t1);
Generate2RInstructionFunc(assm);
- store_elements_of_vector(assm, w2, a0);
+ store_elements_of_vector(&assm, w2, a0);
__ jr(ra);
__ nop();
@@ -7926,13 +7927,13 @@ void run_msa_vector(struct TestCaseMsaVector* input,
CpuFeatureScope fscope(&assm, MIPS_SIMD);
msa_reg_t res;
- load_elements_of_vector(assm, &(input->ws_lo), w0, t0, t1);
- load_elements_of_vector(assm, &(input->wt_lo), w2, t0, t1);
- load_elements_of_vector(assm, &(input->wd_lo), w4, t0, t1);
+ load_elements_of_vector(&assm, &(input->ws_lo), w0, t0, t1);
+ load_elements_of_vector(&assm, &(input->wt_lo), w2, t0, t1);
+ load_elements_of_vector(&assm, &(input->wd_lo), w4, t0, t1);
GenerateVectorInstructionFunc(assm);
- store_elements_of_vector(assm, w4, a0);
+ store_elements_of_vector(&assm, w4, a0);
__ jr(ra);
__ nop();
@@ -8014,12 +8015,12 @@ void run_msa_bit(struct TestCaseMsaBit* input, InstFunc GenerateInstructionFunc,
CpuFeatureScope fscope(&assm, MIPS_SIMD);
msa_reg_t res;
- load_elements_of_vector(assm, &(input->ws_lo), w0, t0, t1);
- load_elements_of_vector(assm, &(input->wd_lo), w2, t0, t1);
+ load_elements_of_vector(&assm, &(input->ws_lo), w0, t0, t1);
+ load_elements_of_vector(&assm, &(input->wd_lo), w2, t0, t1);
GenerateInstructionFunc(assm, input->m);
- store_elements_of_vector(assm, w2, a0);
+ store_elements_of_vector(&assm, w2, a0);
__ jr(ra);
__ nop();
@@ -8491,7 +8492,7 @@ void run_msa_i10(int32_t input, InstFunc GenerateVectorInstructionFunc,
GenerateVectorInstructionFunc(assm, input);
- store_elements_of_vector(assm, w0, a0);
+ store_elements_of_vector(&assm, w0, a0);
__ jr(ra);
__ nop();
@@ -8640,13 +8641,13 @@ void run_msa_3r(struct TestCaseMsa3R* input, InstFunc GenerateI5InstructionFunc,
CpuFeatureScope fscope(&assm, MIPS_SIMD);
msa_reg_t res;
- load_elements_of_vector(assm, &(input->wt_lo), w0, t0, t1);
- load_elements_of_vector(assm, &(input->ws_lo), w1, t0, t1);
- load_elements_of_vector(assm, &(input->wd_lo), w2, t0, t1);
+ load_elements_of_vector(&assm, &(input->wt_lo), w0, t0, t1);
+ load_elements_of_vector(&assm, &(input->ws_lo), w1, t0, t1);
+ load_elements_of_vector(&assm, &(input->wd_lo), w2, t0, t1);
GenerateI5InstructionFunc(assm);
- store_elements_of_vector(assm, w2, a0);
+ store_elements_of_vector(&assm, w2, a0);
__ jr(ra);
__ nop();
@@ -9645,13 +9646,13 @@ void run_msa_3rf(const struct TestCaseMsa3RF* input,
msa_reg_t res;
load_elements_of_vector(
- assm, reinterpret_cast<const uint64_t*>(&input->ws_lo), w0, t0, t1);
+ &assm, reinterpret_cast<const uint64_t*>(&input->ws_lo), w0, t0, t1);
load_elements_of_vector(
- assm, reinterpret_cast<const uint64_t*>(&input->wt_lo), w1, t0, t1);
+ &assm, reinterpret_cast<const uint64_t*>(&input->wt_lo), w1, t0, t1);
load_elements_of_vector(
- assm, reinterpret_cast<const uint64_t*>(&input->wd_lo), w2, t0, t1);
+ &assm, reinterpret_cast<const uint64_t*>(&input->wd_lo), w2, t0, t1);
Generate2RInstructionFunc(assm);
- store_elements_of_vector(assm, w2, a0);
+ store_elements_of_vector(&assm, w2, a0);
__ jr(ra);
__ nop();
diff --git a/deps/v8/test/cctest/test-assembler-mips64.cc b/deps/v8/test/cctest/test-assembler-mips64.cc
index f337fdfcac..35e81cb46f 100644
--- a/deps/v8/test/cctest/test-assembler-mips64.cc
+++ b/deps/v8/test/cctest/test-assembler-mips64.cc
@@ -3330,7 +3330,7 @@ TEST(jump_tables3) {
Handle<Object> values[kNumCases];
for (int i = 0; i < kNumCases; ++i) {
double value = isolate->random_number_generator()->NextDouble();
- values[i] = isolate->factory()->NewHeapNumber(value, AllocationType::kOld);
+ values[i] = isolate->factory()->NewHeapNumber<AllocationType::kOld>(value);
}
Label labels[kNumCases];
Object obj;
@@ -5430,9 +5430,10 @@ TEST(r6_beqzc) {
}
}
-void load_elements_of_vector(
- MacroAssembler& assm, // NOLINT(runtime/references)
- const uint64_t elements[], MSARegister w, Register t0, Register t1) {
+void load_elements_of_vector(MacroAssembler* assm_ptr,
+ const uint64_t elements[], MSARegister w,
+ Register t0, Register t1) {
+ MacroAssembler& assm = *assm_ptr;
__ li(t0, static_cast<uint32_t>(elements[0] & 0xFFFFFFFF));
__ li(t1, static_cast<uint32_t>((elements[0] >> 32) & 0xFFFFFFFF));
__ insert_w(w, 0, t0);
@@ -5443,9 +5444,9 @@ void load_elements_of_vector(
__ insert_w(w, 3, t1);
}
-inline void store_elements_of_vector(
- MacroAssembler& assm, // NOLINT(runtime/references)
- MSARegister w, Register a) {
+inline void store_elements_of_vector(MacroAssembler* assm_ptr, MSARegister w,
+ Register a) {
+ MacroAssembler& assm = *assm_ptr;
__ st_d(w, MemOperand(a, 0));
}
@@ -5481,15 +5482,15 @@ void run_bz_bnz(TestCaseMsaBranch* input, Branch GenerateBranch,
msa_reg_t res;
Label do_not_move_w0_to_w2;
- load_elements_of_vector(assm, &t.ws_lo, w0, t0, t1);
- load_elements_of_vector(assm, &t.wd_lo, w2, t0, t1);
- load_elements_of_vector(assm, &input->wt_lo, w1, t0, t1);
+ load_elements_of_vector(&assm, &t.ws_lo, w0, t0, t1);
+ load_elements_of_vector(&assm, &t.wd_lo, w2, t0, t1);
+ load_elements_of_vector(&assm, &input->wt_lo, w1, t0, t1);
GenerateBranch(assm, do_not_move_w0_to_w2);
__ nop();
__ move_v(w2, w0);
__ bind(&do_not_move_w0_to_w2);
- store_elements_of_vector(assm, w2, a0);
+ store_elements_of_vector(&assm, w2, a0);
__ jr(ra);
__ nop();
@@ -6799,7 +6800,7 @@ void run_msa_insert(int64_t rs_value, int n, msa_reg_t* w) {
UNREACHABLE();
}
- store_elements_of_vector(assm, w0, a0);
+ store_elements_of_vector(&assm, w0, a0);
__ jr(ra);
__ nop();
@@ -6953,10 +6954,10 @@ TEST(MSA_move_v) {
MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
CpuFeatureScope fscope(&assm, MIPS_SIMD);
- load_elements_of_vector(assm, &t[i].ws_lo, w0, t0, t1);
- load_elements_of_vector(assm, &t[i].wd_lo, w2, t0, t1);
+ load_elements_of_vector(&assm, &t[i].ws_lo, w0, t0, t1);
+ load_elements_of_vector(&assm, &t[i].wd_lo, w2, t0, t1);
__ move_v(w2, w0);
- store_elements_of_vector(assm, w2, a0);
+ store_elements_of_vector(&assm, w2, a0);
__ jr(ra);
__ nop();
@@ -6997,10 +6998,10 @@ void run_msa_sldi(OperFunc GenerateOperation,
for (unsigned i = 0; i < arraysize(t); ++i) {
MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
CpuFeatureScope fscope(&assm, MIPS_SIMD);
- load_elements_of_vector(assm, &t[i].ws_lo, w0, t0, t1);
- load_elements_of_vector(assm, &t[i].wd_lo, w2, t0, t1);
+ load_elements_of_vector(&assm, &t[i].ws_lo, w0, t0, t1);
+ load_elements_of_vector(&assm, &t[i].wd_lo, w2, t0, t1);
GenerateOperation(assm);
- store_elements_of_vector(assm, w2, a0);
+ store_elements_of_vector(&assm, w2, a0);
__ jr(ra);
__ nop();
@@ -7157,7 +7158,7 @@ void run_msa_i8(SecondaryField opcode, uint64_t ws_lo, uint64_t ws_hi,
UNREACHABLE();
}
- store_elements_of_vector(assm, w2, a0);
+ store_elements_of_vector(&assm, w2, a0);
__ jr(ra);
__ nop();
@@ -7358,11 +7359,11 @@ void run_msa_i5(struct TestCaseMsaI5* input, bool i5_sign_ext,
int32_t i5 =
i5_sign_ext ? static_cast<int32_t>(input->i5 << 27) >> 27 : input->i5;
- load_elements_of_vector(assm, &(input->ws_lo), w0, t0, t1);
+ load_elements_of_vector(&assm, &(input->ws_lo), w0, t0, t1);
GenerateI5InstructionFunc(assm, i5);
- store_elements_of_vector(assm, w2, a0);
+ store_elements_of_vector(&assm, w2, a0);
__ jr(ra);
__ nop();
@@ -7784,10 +7785,10 @@ void run_msa_2r(const struct TestCaseMsa2R* input,
CpuFeatureScope fscope(&assm, MIPS_SIMD);
msa_reg_t res;
- load_elements_of_vector(assm, reinterpret_cast<const uint64_t*>(input), w0,
+ load_elements_of_vector(&assm, reinterpret_cast<const uint64_t*>(input), w0,
t0, t1);
Generate2RInstructionFunc(assm);
- store_elements_of_vector(assm, w2, a0);
+ store_elements_of_vector(&assm, w2, a0);
__ jr(ra);
__ nop();
@@ -8830,13 +8831,13 @@ void run_msa_vector(struct TestCaseMsaVector* input,
CpuFeatureScope fscope(&assm, MIPS_SIMD);
msa_reg_t res;
- load_elements_of_vector(assm, &(input->ws_lo), w0, t0, t1);
- load_elements_of_vector(assm, &(input->wt_lo), w2, t0, t1);
- load_elements_of_vector(assm, &(input->wd_lo), w4, t0, t1);
+ load_elements_of_vector(&assm, &(input->ws_lo), w0, t0, t1);
+ load_elements_of_vector(&assm, &(input->wt_lo), w2, t0, t1);
+ load_elements_of_vector(&assm, &(input->wd_lo), w4, t0, t1);
GenerateVectorInstructionFunc(assm);
- store_elements_of_vector(assm, w4, a0);
+ store_elements_of_vector(&assm, w4, a0);
__ jr(ra);
__ nop();
@@ -8918,12 +8919,12 @@ void run_msa_bit(struct TestCaseMsaBit* input, InstFunc GenerateInstructionFunc,
CpuFeatureScope fscope(&assm, MIPS_SIMD);
msa_reg_t res;
- load_elements_of_vector(assm, &(input->ws_lo), w0, t0, t1);
- load_elements_of_vector(assm, &(input->wd_lo), w2, t0, t1);
+ load_elements_of_vector(&assm, &(input->ws_lo), w0, t0, t1);
+ load_elements_of_vector(&assm, &(input->wd_lo), w2, t0, t1);
GenerateInstructionFunc(assm, input->m);
- store_elements_of_vector(assm, w2, a0);
+ store_elements_of_vector(&assm, w2, a0);
__ jr(ra);
__ nop();
@@ -9395,7 +9396,7 @@ void run_msa_i10(int32_t input, InstFunc GenerateVectorInstructionFunc,
GenerateVectorInstructionFunc(assm, input);
- store_elements_of_vector(assm, w0, a0);
+ store_elements_of_vector(&assm, w0, a0);
__ jr(ra);
__ nop();
@@ -9544,13 +9545,13 @@ void run_msa_3r(struct TestCaseMsa3R* input, InstFunc GenerateI5InstructionFunc,
CpuFeatureScope fscope(&assm, MIPS_SIMD);
msa_reg_t res;
- load_elements_of_vector(assm, &(input->wt_lo), w0, t0, t1);
- load_elements_of_vector(assm, &(input->ws_lo), w1, t0, t1);
- load_elements_of_vector(assm, &(input->wd_lo), w2, t0, t1);
+ load_elements_of_vector(&assm, &(input->wt_lo), w0, t0, t1);
+ load_elements_of_vector(&assm, &(input->ws_lo), w1, t0, t1);
+ load_elements_of_vector(&assm, &(input->wd_lo), w2, t0, t1);
GenerateI5InstructionFunc(assm);
- store_elements_of_vector(assm, w2, a0);
+ store_elements_of_vector(&assm, w2, a0);
__ jr(ra);
__ nop();
@@ -10548,13 +10549,13 @@ void run_msa_3rf(const struct TestCaseMsa3RF* input,
msa_reg_t res;
load_elements_of_vector(
- assm, reinterpret_cast<const uint64_t*>(&input->ws_lo), w0, t0, t1);
+ &assm, reinterpret_cast<const uint64_t*>(&input->ws_lo), w0, t0, t1);
load_elements_of_vector(
- assm, reinterpret_cast<const uint64_t*>(&input->wt_lo), w1, t0, t1);
+ &assm, reinterpret_cast<const uint64_t*>(&input->wt_lo), w1, t0, t1);
load_elements_of_vector(
- assm, reinterpret_cast<const uint64_t*>(&input->wd_lo), w2, t0, t1);
+ &assm, reinterpret_cast<const uint64_t*>(&input->wd_lo), w2, t0, t1);
Generate2RInstructionFunc(assm);
- store_elements_of_vector(assm, w2, a0);
+ store_elements_of_vector(&assm, w2, a0);
__ jr(ra);
__ nop();
diff --git a/deps/v8/test/cctest/test-backing-store.cc b/deps/v8/test/cctest/test-backing-store.cc
new file mode 100644
index 0000000000..f8010d3031
--- /dev/null
+++ b/deps/v8/test/cctest/test-backing-store.cc
@@ -0,0 +1,85 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/api/api-inl.h"
+#include "src/objects/backing-store.h"
+#include "src/wasm/wasm-objects.h"
+
+#include "test/cctest/cctest.h"
+#include "test/cctest/manually-externalized-buffer.h"
+
+namespace v8 {
+namespace internal {
+
+using testing::ManuallyExternalizedBuffer;
+
+TEST(Run_WasmModule_Buffer_Externalized_Detach) {
+ {
+ // Regression test for
+ // https://bugs.chromium.org/p/chromium/issues/detail?id=731046
+ Isolate* isolate = CcTest::InitIsolateOnce();
+ HandleScope scope(isolate);
+ MaybeHandle<JSArrayBuffer> result =
+ isolate->factory()->NewJSArrayBufferAndBackingStore(
+ wasm::kWasmPageSize, InitializedFlag::kZeroInitialized);
+ Handle<JSArrayBuffer> buffer = result.ToHandleChecked();
+
+ // Embedder requests contents.
+ ManuallyExternalizedBuffer external(buffer);
+
+ buffer->Detach();
+ CHECK(buffer->was_detached());
+
+    // Make sure we can write to the buffer without crashing.
+ uint32_t* int_buffer =
+ reinterpret_cast<uint32_t*>(external.backing_store());
+ int_buffer[0] = 0;
+ // Embedder frees contents.
+ }
+ CcTest::CollectAllAvailableGarbage();
+}
+
+TEST(Run_WasmModule_Buffer_Externalized_Regression_UseAfterFree) {
+ {
+ // Regression test for https://crbug.com/813876
+ Isolate* isolate = CcTest::InitIsolateOnce();
+ HandleScope scope(isolate);
+ MaybeHandle<WasmMemoryObject> result =
+ WasmMemoryObject::New(isolate, 1, 1, SharedFlag::kNotShared);
+ Handle<WasmMemoryObject> memory_object = result.ToHandleChecked();
+ Handle<JSArrayBuffer> buffer(memory_object->array_buffer(), isolate);
+
+ {
+ // Embedder requests contents.
+ ManuallyExternalizedBuffer external(buffer);
+
+ // Growing (even by 0) detaches the old buffer.
+ WasmMemoryObject::Grow(isolate, memory_object, 0);
+ CHECK(buffer->was_detached());
+
+ // Embedder frees contents.
+ }
+
+ // Make sure the memory object has a new buffer that can be written to.
+ uint32_t* int_buffer = reinterpret_cast<uint32_t*>(
+ memory_object->array_buffer().backing_store());
+ int_buffer[0] = 0;
+ }
+ CcTest::CollectAllAvailableGarbage();
+}
+
+#if V8_TARGET_ARCH_64_BIT
+TEST(BackingStore_Reclaim) {
+ // Make sure we can allocate memories without running out of address space.
+ Isolate* isolate = CcTest::InitIsolateOnce();
+ for (int i = 0; i < 256; ++i) {
+ auto backing_store =
+ BackingStore::AllocateWasmMemory(isolate, 1, 1, SharedFlag::kNotShared);
+ CHECK(backing_store);
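+    // Each store is dropped at the end of the iteration; its address-space
+    // reservation must be reclaimed for all 256 allocations to succeed.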
+ }
+}
+#endif
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/cctest/test-code-stub-assembler.cc b/deps/v8/test/cctest/test-code-stub-assembler.cc
index 3a4f11e126..45512eaf56 100644
--- a/deps/v8/test/cctest/test-code-stub-assembler.cc
+++ b/deps/v8/test/cctest/test-code-stub-assembler.cc
@@ -123,6 +123,62 @@ TEST(CallCFunctionWithCallerSavedRegisters) {
CHECK_EQ(3, Handle<Smi>::cast(result)->value());
}
+TEST(NumberToString) {
+ Isolate* isolate(CcTest::InitIsolateOnce());
+ Factory* factory = isolate->factory();
+
+ const int kNumParams = 1;
+ CodeAssemblerTester asm_tester(isolate, kNumParams);
+ CodeStubAssembler m(asm_tester.state());
+
+ {
+ TNode<Number> input = m.CAST(m.Parameter(0));
+
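+    // The bailout path returns undefined so the test below can detect a
+    // cache miss.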
+ Label bailout(&m);
+ m.Return(m.NumberToString(input, &bailout));
+
+ m.BIND(&bailout);
+ m.Return(m.UndefinedConstant());
+ }
+
+ FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
+
+ // clang-format off
+ double inputs[] = {
+ 1, 2, 42, 153, -1, -100, 0, 51095154, -1241950,
+ std::nan("-1"), std::nan("1"), std::nan("2"),
+ -std::numeric_limits<double>::infinity(),
+ std::numeric_limits<double>::infinity(),
+ -0.0, -0.001, -0.5, -0.999, -1.0,
+ 0.0, 0.001, 0.5, 0.999, 1.0,
+ -2147483647.9, -2147483648.0, -2147483648.5, -2147483648.9, // SmiMin.
+ 2147483646.9, 2147483647.0, 2147483647.5, 2147483647.9, // SmiMax.
+ -4294967295.9, -4294967296.0, -4294967296.5, -4294967297.0, // - 2^32.
+ 4294967295.9, 4294967296.0, 4294967296.5, 4294967297.0, // 2^32.
+ };
+ // clang-format on
+
+ const int kFullCacheSize = isolate->heap()->MaxNumberToStringCacheSize();
+ const int test_count = arraysize(inputs);
+ for (int i = 0; i < test_count; i++) {
+ int cache_length_before_addition = factory->number_string_cache()->length();
+ Handle<Object> input = factory->NewNumber(inputs[i]);
+ Handle<String> expected = factory->NumberToString(input);
+
+ Handle<Object> result = ft.Call(input).ToHandleChecked();
+ if (result->IsUndefined(isolate)) {
+      // The query may fail if the cache was resized, in which case the entry
+      // is not added to the cache.
+ CHECK_LT(cache_length_before_addition, kFullCacheSize);
+ CHECK_EQ(factory->number_string_cache()->length(), kFullCacheSize);
+ expected = factory->NumberToString(input);
+ result = ft.Call(input).ToHandleChecked();
+ }
+ CHECK(!result->IsUndefined(isolate));
+ CHECK_EQ(*expected, *result);
+ }
+}
+
namespace {
void CheckToUint32Result(uint32_t expected, Handle<Object> result) {
@@ -439,7 +495,7 @@ TEST(TryToName) {
Label if_keyisindex(&m), if_keyisunique(&m), if_bailout(&m);
{
TYPED_VARIABLE_DEF(IntPtrT, var_index, &m);
- TYPED_VARIABLE_DEF(Object, var_unique, &m);
+ TYPED_VARIABLE_DEF(Name, var_unique, &m);
m.TryToName(key, &if_keyisindex, &var_index, &if_keyisunique, &var_unique,
&if_bailout);
@@ -1568,8 +1624,8 @@ TEST(TryLookupElement) {
v8::ArrayBuffer::Contents contents = buffer->Externalize();
buffer->Detach();
- isolate->array_buffer_allocator()->Free(contents.Data(),
- contents.ByteLength());
+ contents.Deleter()(contents.Data(), contents.ByteLength(),
+ contents.DeleterData());
CHECK_ABSENT(object, 0);
CHECK_ABSENT(object, 1);
@@ -1809,7 +1865,7 @@ TEST(OneToTwoByteStringCopy) {
const int kNumParams = 2;
CodeAssemblerTester asm_tester(isolate, kNumParams);
- CodeStubAssembler m(asm_tester.state());
+ StringBuiltinsAssembler m(asm_tester.state());
m.CopyStringCharacters(m.Parameter(0), m.Parameter(1), m.IntPtrConstant(0),
m.IntPtrConstant(0), m.IntPtrConstant(5),
@@ -1841,7 +1897,7 @@ TEST(OneToOneByteStringCopy) {
const int kNumParams = 2;
CodeAssemblerTester asm_tester(isolate, kNumParams);
- CodeStubAssembler m(asm_tester.state());
+ StringBuiltinsAssembler m(asm_tester.state());
m.CopyStringCharacters(m.Parameter(0), m.Parameter(1), m.IntPtrConstant(0),
m.IntPtrConstant(0), m.IntPtrConstant(5),
@@ -1873,7 +1929,7 @@ TEST(OneToOneByteStringCopyNonZeroStart) {
const int kNumParams = 2;
CodeAssemblerTester asm_tester(isolate, kNumParams);
- CodeStubAssembler m(asm_tester.state());
+ StringBuiltinsAssembler m(asm_tester.state());
m.CopyStringCharacters(m.Parameter(0), m.Parameter(1), m.IntPtrConstant(0),
m.IntPtrConstant(3), m.IntPtrConstant(2),
@@ -1902,7 +1958,7 @@ TEST(TwoToTwoByteStringCopy) {
const int kNumParams = 2;
CodeAssemblerTester asm_tester(isolate, kNumParams);
- CodeStubAssembler m(asm_tester.state());
+ StringBuiltinsAssembler m(asm_tester.state());
m.CopyStringCharacters(m.Parameter(0), m.Parameter(1), m.IntPtrConstant(0),
m.IntPtrConstant(0), m.IntPtrConstant(5),
@@ -1941,12 +1997,9 @@ TEST(Arguments) {
CodeStubArguments arguments(&m, m.IntPtrConstant(3));
- CSA_ASSERT(
- &m, m.TaggedEqual(arguments.AtIndex(0), m.SmiConstant(Smi::FromInt(12))));
- CSA_ASSERT(
- &m, m.TaggedEqual(arguments.AtIndex(1), m.SmiConstant(Smi::FromInt(13))));
- CSA_ASSERT(
- &m, m.TaggedEqual(arguments.AtIndex(2), m.SmiConstant(Smi::FromInt(14))));
+ CSA_ASSERT(&m, m.TaggedEqual(arguments.AtIndex(0), m.SmiConstant(12)));
+ CSA_ASSERT(&m, m.TaggedEqual(arguments.AtIndex(1), m.SmiConstant(13)));
+ CSA_ASSERT(&m, m.TaggedEqual(arguments.AtIndex(2), m.SmiConstant(14)));
arguments.PopAndReturn(arguments.GetReceiver());
@@ -1966,21 +2019,14 @@ TEST(ArgumentsWithSmiConstantIndices) {
CodeAssemblerTester asm_tester(isolate, kNumParams);
CodeStubAssembler m(asm_tester.state());
- CodeStubArguments arguments(&m, m.SmiConstant(3), nullptr,
- CodeStubAssembler::SMI_PARAMETERS);
-
- CSA_ASSERT(&m,
- m.TaggedEqual(arguments.AtIndex(m.SmiConstant(0),
- CodeStubAssembler::SMI_PARAMETERS),
- m.SmiConstant(Smi::FromInt(12))));
- CSA_ASSERT(&m,
- m.TaggedEqual(arguments.AtIndex(m.SmiConstant(1),
- CodeStubAssembler::SMI_PARAMETERS),
- m.SmiConstant(Smi::FromInt(13))));
- CSA_ASSERT(&m,
- m.TaggedEqual(arguments.AtIndex(m.SmiConstant(2),
- CodeStubAssembler::SMI_PARAMETERS),
- m.SmiConstant(Smi::FromInt(14))));
+ CodeStubArguments arguments(&m, m.SmiConstant(3));
+
+ CSA_ASSERT(&m, m.TaggedEqual(arguments.AtIndex(m.SmiConstant(0)),
+ m.SmiConstant(12)));
+ CSA_ASSERT(&m, m.TaggedEqual(arguments.AtIndex(m.SmiConstant(1)),
+ m.SmiConstant(13)));
+ CSA_ASSERT(&m, m.TaggedEqual(arguments.AtIndex(m.SmiConstant(2)),
+ m.SmiConstant(14)));
arguments.PopAndReturn(arguments.GetReceiver());
@@ -2019,21 +2065,14 @@ TEST(ArgumentsWithSmiIndices) {
CodeAssemblerTester asm_tester(isolate, kNumParams);
CodeStubAssembler m(asm_tester.state());
- CodeStubArguments arguments(&m, m.SmiConstant(3), nullptr,
- CodeStubAssembler::SMI_PARAMETERS);
-
- CSA_ASSERT(&m,
- m.TaggedEqual(arguments.AtIndex(NonConstantSmi(&m, 0),
- CodeStubAssembler::SMI_PARAMETERS),
- m.SmiConstant(Smi::FromInt(12))));
- CSA_ASSERT(&m,
- m.TaggedEqual(arguments.AtIndex(NonConstantSmi(&m, 1),
- CodeStubAssembler::SMI_PARAMETERS),
- m.SmiConstant(Smi::FromInt(13))));
- CSA_ASSERT(&m,
- m.TaggedEqual(arguments.AtIndex(NonConstantSmi(&m, 2),
- CodeStubAssembler::SMI_PARAMETERS),
- m.SmiConstant(Smi::FromInt(14))));
+ CodeStubArguments arguments(&m, m.SmiConstant(3));
+
+ CSA_ASSERT(&m, m.TaggedEqual(arguments.AtIndex(NonConstantSmi(&m, 0)),
+ m.SmiConstant(12)));
+ CSA_ASSERT(&m, m.TaggedEqual(arguments.AtIndex(NonConstantSmi(&m, 1)),
+ m.SmiConstant(13)));
+ CSA_ASSERT(&m, m.TaggedEqual(arguments.AtIndex(NonConstantSmi(&m, 2)),
+ m.SmiConstant(14)));
arguments.PopAndReturn(arguments.GetReceiver());
@@ -2060,7 +2099,7 @@ TEST(ArgumentsForEach) {
sum = m.SmiConstant(0);
- arguments.ForEach(list, [&m, &sum](Node* arg) {
+ arguments.ForEach(list, [&](TNode<Object> arg) {
sum = m.SmiAdd(sum.value(), m.CAST(arg));
});
@@ -2130,8 +2169,8 @@ class AppendJSArrayCodeStubAssembler : public CodeStubAssembler {
TVariable<IntPtrT> arg_index(this);
Label bailout(this);
arg_index = IntPtrConstant(0);
- Node* length = BuildAppendJSArray(kind_, HeapConstant(array), &args,
- &arg_index, &bailout);
+ TNode<Smi> length = BuildAppendJSArray(kind_, HeapConstant(array), &args,
+ &arg_index, &bailout);
Return(length);
BIND(&bailout);
@@ -2281,7 +2320,7 @@ TEST(AllocateAndInitJSPromise) {
PromiseBuiltinsAssembler m(asm_tester.state());
Node* const context = m.Parameter(kNumParams + 2);
- Node* const promise = m.AllocateAndInitJSPromise(context);
+ TNode<JSPromise> const promise = m.AllocateAndInitJSPromise(m.CAST(context));
m.Return(promise);
FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
@@ -2298,8 +2337,8 @@ TEST(AllocateAndSetJSPromise) {
PromiseBuiltinsAssembler m(asm_tester.state());
Node* const context = m.Parameter(kNumParams + 2);
- Node* const promise = m.AllocateAndSetJSPromise(
- context, v8::Promise::kRejected, m.SmiConstant(1));
+ TNode<JSPromise> const promise = m.AllocateAndSetJSPromise(
+ m.CAST(context), v8::Promise::kRejected, m.SmiConstant(1));
m.Return(promise);
FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
@@ -2361,8 +2400,8 @@ TEST(PromiseHasHandler) {
PromiseBuiltinsAssembler m(asm_tester.state());
Node* const context = m.Parameter(kNumParams + 2);
- Node* const promise =
- m.AllocateAndInitJSPromise(context, m.UndefinedConstant());
+ TNode<JSPromise> const promise =
+ m.AllocateAndInitJSPromise(m.CAST(context), m.UndefinedConstant());
m.Return(m.SelectBooleanConstant(m.PromiseHasHandler(promise)));
FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
@@ -2380,10 +2419,11 @@ TEST(CreatePromiseResolvingFunctionsContext) {
Node* const context = m.Parameter(kNumParams + 2);
TNode<NativeContext> const native_context = m.LoadNativeContext(context);
- Node* const promise =
- m.AllocateAndInitJSPromise(context, m.UndefinedConstant());
- Node* const promise_context = m.CreatePromiseResolvingFunctionsContext(
- promise, m.BooleanConstant(false), native_context);
+ const TNode<JSPromise> promise =
+ m.AllocateAndInitJSPromise(m.CAST(context), m.UndefinedConstant());
+ TNode<Context> const promise_context =
+ m.CreatePromiseResolvingFunctionsContext(
+ promise, m.BooleanConstant(false), native_context);
m.Return(promise_context);
FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
@@ -2408,8 +2448,8 @@ TEST(CreatePromiseResolvingFunctions) {
Node* const context = m.Parameter(kNumParams + 2);
TNode<NativeContext> const native_context = m.LoadNativeContext(context);
- Node* const promise =
- m.AllocateAndInitJSPromise(context, m.UndefinedConstant());
+ const TNode<JSPromise> promise =
+ m.AllocateAndInitJSPromise(m.CAST(context), m.UndefinedConstant());
Node *resolve, *reject;
std::tie(resolve, reject) = m.CreatePromiseResolvingFunctions(
promise, m.BooleanConstant(false), native_context);
@@ -2498,17 +2538,17 @@ TEST(AllocateFunctionWithMapAndContext) {
Node* const context = m.Parameter(kNumParams + 2);
TNode<NativeContext> const native_context = m.LoadNativeContext(context);
- Node* const promise =
- m.AllocateAndInitJSPromise(context, m.UndefinedConstant());
- Node* promise_context = m.CreatePromiseResolvingFunctionsContext(
+ const TNode<JSPromise> promise =
+ m.AllocateAndInitJSPromise(m.CAST(context), m.UndefinedConstant());
+ TNode<Context> promise_context = m.CreatePromiseResolvingFunctionsContext(
promise, m.BooleanConstant(false), native_context);
TNode<Object> resolve_info = m.LoadContextElement(
native_context,
Context::PROMISE_CAPABILITY_DEFAULT_RESOLVE_SHARED_FUN_INDEX);
TNode<Object> const map = m.LoadContextElement(
native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX);
- Node* const resolve =
- m.AllocateFunctionWithMapAndContext(map, resolve_info, promise_context);
+ TNode<JSFunction> const resolve = m.AllocateFunctionWithMapAndContext(
+ m.CAST(map), m.CAST(resolve_info), promise_context);
m.Return(resolve);
FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
@@ -3122,7 +3162,7 @@ TEST(CloneEmptyFixedArray) {
CodeAssemblerTester asm_tester(isolate, kNumParams);
{
CodeStubAssembler m(asm_tester.state());
- m.Return(m.CloneFixedArray(m.Parameter(0)));
+ m.Return(m.CloneFixedArray(m.CAST(m.Parameter(0))));
}
FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
@@ -3139,7 +3179,7 @@ TEST(CloneFixedArray) {
CodeAssemblerTester asm_tester(isolate, kNumParams);
{
CodeStubAssembler m(asm_tester.state());
- m.Return(m.CloneFixedArray(m.Parameter(0)));
+ m.Return(m.CloneFixedArray(m.CAST(m.Parameter(0))));
}
FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
@@ -3161,7 +3201,7 @@ TEST(CloneFixedArrayCOW) {
CodeAssemblerTester asm_tester(isolate, kNumParams);
{
CodeStubAssembler m(asm_tester.state());
- m.Return(m.CloneFixedArray(m.Parameter(0)));
+ m.Return(m.CloneFixedArray(m.CAST(m.Parameter(0))));
}
FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
@@ -3542,37 +3582,6 @@ TEST(TestCallBuiltinIndirectLoad) {
Handle<String>::cast(result.ToHandleChecked())));
}
-TEST(TestGotoIfDebugExecutionModeChecksSideEffects) {
- Isolate* isolate(CcTest::InitIsolateOnce());
- CodeAssemblerTester asm_tester(isolate, 0);
- {
- CodeStubAssembler m(asm_tester.state());
- Label is_true(&m), is_false(&m);
- m.GotoIfDebugExecutionModeChecksSideEffects(&is_true);
- m.Goto(&is_false);
- m.BIND(&is_false);
- m.Return(m.BooleanConstant(false));
-
- m.BIND(&is_true);
- m.Return(m.BooleanConstant(true));
- }
-
- FunctionTester ft(asm_tester.GenerateCode(), 0);
-
- CHECK(isolate->debug_execution_mode() != DebugInfo::kSideEffects);
-
- Handle<Object> result = ft.Call().ToHandleChecked();
- CHECK(result->IsBoolean());
- CHECK_EQ(false, result->BooleanValue(isolate));
-
- isolate->debug()->StartSideEffectCheckMode();
- CHECK(isolate->debug_execution_mode() == DebugInfo::kSideEffects);
-
- result = ft.Call().ToHandleChecked();
- CHECK(result->IsBoolean());
- CHECK_EQ(true, result->BooleanValue(isolate));
-}
-
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/test-compiler.cc b/deps/v8/test/cctest/test-compiler.cc
index b961da9437..bd2766518b 100644
--- a/deps/v8/test/cctest/test-compiler.cc
+++ b/deps/v8/test/cctest/test-compiler.cc
@@ -907,7 +907,7 @@ TEST(DeepEagerCompilationPeakMemory) {
" }"
"}");
v8::ScriptCompiler::Source script_source(source);
- CcTest::i_isolate()->compilation_cache()->Disable();
+ CcTest::i_isolate()->compilation_cache()->DisableScriptAndEval();
v8::HeapStatistics heap_statistics;
CcTest::isolate()->GetHeapStatistics(&heap_statistics);
diff --git a/deps/v8/test/cctest/test-cpu-profiler.cc b/deps/v8/test/cctest/test-cpu-profiler.cc
index 6d0ee0e512..c0d43b21a1 100644
--- a/deps/v8/test/cctest/test-cpu-profiler.cc
+++ b/deps/v8/test/cctest/test-cpu-profiler.cc
@@ -54,8 +54,8 @@
#include "src/tracing/trace-event.h"
#ifdef V8_USE_PERFETTO
-#include "perfetto/trace/chrome/chrome_trace_event.pb.h"
-#include "perfetto/trace/trace.pb.h"
+#include "protos/perfetto/trace/chrome/chrome_trace_event.pb.h"
+#include "protos/perfetto/trace/trace.pb.h"
#endif
namespace v8 {
diff --git a/deps/v8/test/cctest/test-debug-helper.cc b/deps/v8/test/cctest/test-debug-helper.cc
index 67236e5a31..560db1b0d2 100644
--- a/deps/v8/test/cctest/test-debug-helper.cc
+++ b/deps/v8/test/cctest/test-debug-helper.cc
@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include "src/api/api-inl.h"
+#include "src/flags/flags.h"
#include "src/heap/spaces.h"
#include "test/cctest/cctest.h"
#include "tools/debug_helper/debug-helper.h"
@@ -61,6 +62,10 @@ void CheckProp(const d::ObjectProperty& property, const char* expected_type,
CHECK(*reinterpret_cast<TValue*>(property.address) == expected_value);
}
+bool StartsWith(const std::string& full_string, const std::string& prefix) {
+ return full_string.substr(0, prefix.size()) == prefix;
+}
+
} // namespace
TEST(GetObjectProperties) {
@@ -68,12 +73,13 @@ TEST(GetObjectProperties) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
LocalContext context;
- d::Roots roots{0, 0, 0, 0}; // We don't know the heap roots.
+ // Claim we don't know anything about the heap layout.
+ d::HeapAddresses heap_addresses{0, 0, 0, 0};
v8::Local<v8::Value> v = CompileRun("42");
Handle<Object> o = v8::Utils::OpenHandle(*v);
d::ObjectPropertiesResultPtr props =
- d::GetObjectProperties(o->ptr(), &ReadMemory, roots);
+ d::GetObjectProperties(o->ptr(), &ReadMemory, heap_addresses);
CHECK(props->type_check_result == d::TypeCheckResult::kSmi);
CHECK(props->brief == std::string("42 (0x2a)"));
CHECK(props->type == std::string("v8::internal::Smi"));
@@ -81,7 +87,7 @@ TEST(GetObjectProperties) {
v = CompileRun("[\"a\", \"bc\"]");
o = v8::Utils::OpenHandle(*v);
- props = d::GetObjectProperties(o->ptr(), &ReadMemory, roots);
+ props = d::GetObjectProperties(o->ptr(), &ReadMemory, heap_addresses);
CHECK(props->type_check_result == d::TypeCheckResult::kUsedMap);
CHECK(props->type == std::string("v8::internal::JSArray"));
CHECK_EQ(props->num_properties, 4);
@@ -92,9 +98,9 @@ TEST(GetObjectProperties) {
CheckProp(*props->properties[3], "v8::internal::Object", "length",
static_cast<i::Tagged_t>(IntToSmi(2)));
- // We need to supply a root address for decompression before reading the
+ // We need to supply some valid address for decompression before reading the
// elements from the JSArray.
- roots.any_heap_pointer = o->ptr();
+ heap_addresses.any_heap_pointer = o->ptr();
i::Tagged_t properties_or_hash =
*reinterpret_cast<i::Tagged_t*>(props->properties[1]->address);
@@ -106,32 +112,39 @@ TEST(GetObjectProperties) {
// any ability to read memory.
{
MemoryFailureRegion failure(0, UINTPTR_MAX);
- props = d::GetObjectProperties(properties_or_hash, &ReadMemory, roots);
+ props =
+ d::GetObjectProperties(properties_or_hash, &ReadMemory, heap_addresses);
CHECK(props->type_check_result ==
d::TypeCheckResult::kObjectPointerValidButInaccessible);
CHECK(props->type == std::string("v8::internal::HeapObject"));
CHECK_EQ(props->num_properties, 1);
CheckProp(*props->properties[0], "v8::internal::Map", "map");
- CHECK(std::string(props->brief).substr(0, 21) ==
- std::string("maybe EmptyFixedArray"));
+ // "maybe" prefix indicates that GetObjectProperties recognized the offset
+ // within the page as matching a known object, but didn't know whether the
+ // object is on the right page. This response can only happen in builds
+ // without pointer compression, because otherwise heap addresses would be at
+ // deterministic locations within the heap reservation.
+ CHECK(COMPRESS_POINTERS_BOOL
+ ? StartsWith(props->brief, "EmptyFixedArray")
+ : StartsWith(props->brief, "maybe EmptyFixedArray"));
- // Provide a heap root so the API can be more sure.
- roots.read_only_space =
+ // Provide a heap first page so the API can be more sure.
+ heap_addresses.read_only_space_first_page =
reinterpret_cast<uintptr_t>(reinterpret_cast<i::Isolate*>(isolate)
->heap()
->read_only_space()
->first_page());
- props = d::GetObjectProperties(properties_or_hash, &ReadMemory, roots);
+ props =
+ d::GetObjectProperties(properties_or_hash, &ReadMemory, heap_addresses);
CHECK(props->type_check_result ==
d::TypeCheckResult::kObjectPointerValidButInaccessible);
CHECK(props->type == std::string("v8::internal::HeapObject"));
CHECK_EQ(props->num_properties, 1);
CheckProp(*props->properties[0], "v8::internal::Map", "map");
- CHECK(std::string(props->brief).substr(0, 15) ==
- std::string("EmptyFixedArray"));
+ CHECK(StartsWith(props->brief, "EmptyFixedArray"));
}
- props = d::GetObjectProperties(elements, &ReadMemory, roots);
+ props = d::GetObjectProperties(elements, &ReadMemory, heap_addresses);
CHECK(props->type_check_result == d::TypeCheckResult::kUsedMap);
CHECK(props->type == std::string("v8::internal::FixedArray"));
CHECK_EQ(props->num_properties, 3);
@@ -142,9 +155,10 @@ TEST(GetObjectProperties) {
d::PropertyKind::kArrayOfKnownSize, 2);
// Get the second string value from the FixedArray.
- i::Tagged_t second_string_address = *reinterpret_cast<i::Tagged_t*>(
- props->properties[2]->address + sizeof(i::Tagged_t));
- props = d::GetObjectProperties(second_string_address, &ReadMemory, roots);
+ i::Tagged_t second_string_address =
+ reinterpret_cast<i::Tagged_t*>(props->properties[2]->address)[1];
+ props = d::GetObjectProperties(second_string_address, &ReadMemory,
+ heap_addresses);
CHECK(props->type_check_result == d::TypeCheckResult::kUsedMap);
CHECK(props->type == std::string("v8::internal::SeqOneByteString"));
CHECK_EQ(props->num_properties, 4);
@@ -162,18 +176,38 @@ TEST(GetObjectProperties) {
// its properties should match what we read last time.
d::ObjectPropertiesResultPtr props2;
{
+ heap_addresses.read_only_space_first_page = 0;
uintptr_t map_address =
d::GetObjectProperties(
*reinterpret_cast<i::Tagged_t*>(props->properties[0]->address),
- &ReadMemory, roots)
+ &ReadMemory, heap_addresses)
->properties[0]
->address;
MemoryFailureRegion failure(map_address, map_address + i::Map::kSize);
- props2 = d::GetObjectProperties(second_string_address, &ReadMemory, roots,
- "v8::internal::String");
- CHECK(props2->type_check_result == d::TypeCheckResult::kUsedTypeHint);
- CHECK(props2->type == std::string("v8::internal::String"));
- CHECK_EQ(props2->num_properties, 3);
+ props2 = d::GetObjectProperties(second_string_address, &ReadMemory,
+ heap_addresses, "v8::internal::String");
+ if (COMPRESS_POINTERS_BOOL) {
+ // The first page of each heap space can be automatically detected when
+ // pointer compression is active, so we expect to use known maps instead
+ // of the type hint.
+ CHECK_EQ(props2->type_check_result, d::TypeCheckResult::kKnownMapPointer);
+ CHECK(props2->type == std::string("v8::internal::SeqOneByteString"));
+ CHECK_EQ(props2->num_properties, 4);
+ CheckProp(*props2->properties[3], "char", "chars",
+ d::PropertyKind::kArrayOfKnownSize, 2);
+ CHECK_EQ(props2->num_guessed_types, 0);
+ } else {
+ CHECK_EQ(props2->type_check_result, d::TypeCheckResult::kUsedTypeHint);
+ CHECK(props2->type == std::string("v8::internal::String"));
+ CHECK_EQ(props2->num_properties, 3);
+
+ // The type hint we provided was the abstract class String, but
+ // GetObjectProperties should have recognized that the Map pointer looked
+ // like the right value for a SeqOneByteString.
+ CHECK_EQ(props2->num_guessed_types, 1);
+ CHECK(std::string(props2->guessed_types[0]) ==
+ std::string("v8::internal::SeqOneByteString"));
+ }
CheckProp(*props2->properties[0], "v8::internal::Map", "map",
*reinterpret_cast<i::Tagged_t*>(props->properties[0]->address));
CheckProp(*props2->properties[1], "uint32_t", "hash_field",
@@ -183,7 +217,7 @@ TEST(GetObjectProperties) {
// Try a weak reference.
props2 = d::GetObjectProperties(second_string_address | kWeakHeapObjectMask,
- &ReadMemory, roots);
+ &ReadMemory, heap_addresses);
std::string weak_ref_prefix = "weak ref to ";
CHECK(weak_ref_prefix + props->brief == props2->brief);
CHECK(props2->type_check_result == d::TypeCheckResult::kUsedMap);
@@ -201,9 +235,8 @@ TEST(GetObjectProperties) {
const alphabet = "abcdefghijklmnopqrstuvwxyz";
alphabet.substr(3,20) + alphabet.toUpperCase().substr(5,15) + "7")");
o = v8::Utils::OpenHandle(*v);
- props = d::GetObjectProperties(o->ptr(), &ReadMemory, roots);
- CHECK(std::string(props->brief).substr(0, 38) ==
- std::string("\"defghijklmnopqrstuvwFGHIJKLMNOPQRST7\""));
+ props = d::GetObjectProperties(o->ptr(), &ReadMemory, heap_addresses);
+ CHECK(StartsWith(props->brief, "\"defghijklmnopqrstuvwFGHIJKLMNOPQRST7\""));
// Cause a failure when reading the "second" pointer within the top-level
// ConsString.
@@ -211,15 +244,15 @@ TEST(GetObjectProperties) {
CheckProp(*props->properties[4], "v8::internal::String", "second");
uintptr_t second_address = props->properties[4]->address;
MemoryFailureRegion failure(second_address, second_address + 4);
- props = d::GetObjectProperties(o->ptr(), &ReadMemory, roots);
- CHECK(std::string(props->brief).substr(0, 40) ==
- std::string("\"defghijklmnopqrstuvwFGHIJKLMNOPQRST...\""));
+ props = d::GetObjectProperties(o->ptr(), &ReadMemory, heap_addresses);
+ CHECK(
+ StartsWith(props->brief, "\"defghijklmnopqrstuvwFGHIJKLMNOPQRST...\""));
}
// Build a very long string.
v = CompileRun("'a'.repeat(1000)");
o = v8::Utils::OpenHandle(*v);
- props = d::GetObjectProperties(o->ptr(), &ReadMemory, roots);
+ props = d::GetObjectProperties(o->ptr(), &ReadMemory, heap_addresses);
CHECK(std::string(props->brief).substr(79, 7) == std::string("aa...\" "));
}
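The hunks above migrate the debug-helper API from d::Roots to d::HeapAddresses. A condensed sketch of the resulting call pattern, reusing only names from this test (ReadMemory is the test's memory-access callback, o the handle compiled above):

  // All zeros means "heap layout unknown"; filling in a field afterwards
  // lets GetObjectProperties be more precise.
  d::HeapAddresses heap_addresses{0, 0, 0, 0};
  heap_addresses.any_heap_pointer = o->ptr();  // needed for decompression
  d::ObjectPropertiesResultPtr props =
      d::GetObjectProperties(o->ptr(), &ReadMemory, heap_addresses);
  CHECK(props->type_check_result == d::TypeCheckResult::kUsedMap);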
diff --git a/deps/v8/test/cctest/test-debug.cc b/deps/v8/test/cctest/test-debug.cc
index 4ad55ef6b5..c76f922d86 100644
--- a/deps/v8/test/cctest/test-debug.cc
+++ b/deps/v8/test/cctest/test-debug.cc
@@ -893,7 +893,6 @@ TEST(BreakPointInlineBoundBuiltin) {
TEST(BreakPointInlinedConstructorBuiltin) {
i::FLAG_allow_natives_syntax = true;
- i::FLAG_experimental_inline_promise_constructor = true;
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
@@ -1032,8 +1031,6 @@ TEST(BreakPointBuiltinNewContext) {
i::Handle<i::BreakPoint> bp;
// === Test builtin from a new context ===
-// This does not work with no-snapshot build.
-#ifdef V8_USE_SNAPSHOT
break_point_hit_count = 0;
builtin = CompileRun("String.prototype.repeat").As<v8::Function>();
CompileRun("'a'.repeat(10)");
@@ -1059,7 +1056,6 @@ TEST(BreakPointBuiltinNewContext) {
CompileRun("'b'.repeat(10)");
CHECK_EQ(2, break_point_hit_count);
}
-#endif
v8::debug::SetDebugDelegate(env->GetIsolate(), nullptr);
CheckDebuggerUnloaded();
@@ -3135,8 +3131,8 @@ TEST(NoBreakWhenBootstrapping) {
{
// Create a context with an extension to make sure that some JavaScript
// code is executed during bootstrapping.
- v8::RegisterExtension(v8::base::make_unique<v8::Extension>(
- "simpletest", kSimpleExtensionSource));
+ v8::RegisterExtension(
+ std::make_unique<v8::Extension>("simpletest", kSimpleExtensionSource));
const char* extension_names[] = { "simpletest" };
v8::ExtensionConfiguration extensions(1, extension_names);
v8::HandleScope handle_scope(isolate);
@@ -4510,7 +4506,7 @@ UNINITIALIZED_TEST(LoadedAtStartupScripts) {
}
}
CHECK_EQ(count_by_type[i::Script::TYPE_NATIVE], 0);
- CHECK_EQ(count_by_type[i::Script::TYPE_EXTENSION], 2);
+ CHECK_EQ(count_by_type[i::Script::TYPE_EXTENSION], 1);
CHECK_EQ(count_by_type[i::Script::TYPE_NORMAL], 1);
CHECK_EQ(count_by_type[i::Script::TYPE_WASM], 0);
CHECK_EQ(count_by_type[i::Script::TYPE_INSPECTOR], 0);
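The v8::RegisterExtension call sites in this file move from v8::base::make_unique to std::make_unique, presumably now that the codebase can rely on C++14. A sketch of the updated registration, built only from names in the test above:

  v8::RegisterExtension(
      std::make_unique<v8::Extension>("simpletest", kSimpleExtensionSource));
  const char* extension_names[] = {"simpletest"};
  v8::ExtensionConfiguration extensions(1, extension_names);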
diff --git a/deps/v8/test/cctest/test-disasm-arm.cc b/deps/v8/test/cctest/test-disasm-arm.cc
index 76e06df47e..16dee03f50 100644
--- a/deps/v8/test/cctest/test-disasm-arm.cc
+++ b/deps/v8/test/cctest/test-disasm-arm.cc
@@ -1166,6 +1166,12 @@ TEST(Neon) {
"f2dae550 vshl.i16 q15, q0, #10");
COMPARE(vshl(NeonS32, q15, q0, 17),
"f2f1e550 vshl.i32 q15, q0, #17");
+ COMPARE(vshl(NeonS8, q15, q0, q1),
+ "f242e440 vshl.s8 q15, q0, q1");
+ COMPARE(vshl(NeonU16, q15, q2, q3),
+ "f356e444 vshl.u16 q15, q2, q3");
+ COMPARE(vshl(NeonS32, q15, q4, q5),
+ "f26ae448 vshl.s32 q15, q4, q5");
COMPARE(vshr(NeonS8, q15, q0, 6),
"f2cae050 vshr.s8 q15, q0, #6");
COMPARE(vshr(NeonU16, q15, q0, 10),
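The new COMPARE lines cover the register-shift form of vshl next to the existing immediate form: the shift amount comes from a second source register (q1, q3, q5 above) instead of an immediate, and the mnemonic keeps the signed/unsigned element type. Condensed:

  __ vshl(NeonS32, q15, q0, 17);  // immediate:      vshl.i32 q15, q0, #17
  __ vshl(NeonU16, q15, q2, q3);  // register shift: vshl.u16 q15, q2, q3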
diff --git a/deps/v8/test/cctest/test-disasm-arm64.cc b/deps/v8/test/cctest/test-disasm-arm64.cc
index ba4d92d3a2..2b46d7ed11 100644
--- a/deps/v8/test/cctest/test-disasm-arm64.cc
+++ b/deps/v8/test/cctest/test-disasm-arm64.cc
@@ -1888,6 +1888,8 @@ TEST(system_pauth) {
COMPARE(paciasp(), "paciasp");
COMPARE(autia1716(), "autia1716");
COMPARE(autiasp(), "autiasp");
+
+ CLEANUP();
}
TEST_(debug) {
diff --git a/deps/v8/test/cctest/test-disasm-ia32.cc b/deps/v8/test/cctest/test-disasm-ia32.cc
index 4078bd429c..563d3a87cf 100644
--- a/deps/v8/test/cctest/test-disasm-ia32.cc
+++ b/deps/v8/test/cctest/test-disasm-ia32.cc
@@ -435,6 +435,8 @@ TEST(DisasmIa320) {
__ maxps(xmm1, Operand(ebx, ecx, times_4, 10000));
__ rcpps(xmm1, xmm0);
__ rcpps(xmm1, Operand(ebx, ecx, times_4, 10000));
+ __ sqrtps(xmm1, xmm0);
+ __ sqrtps(xmm1, Operand(ebx, ecx, times_4, 10000));
__ rsqrtps(xmm1, xmm0);
__ rsqrtps(xmm1, Operand(ebx, ecx, times_4, 10000));
@@ -444,6 +446,8 @@ TEST(DisasmIa320) {
__ cmpltps(xmm5, Operand(ebx, ecx, times_4, 10000));
__ cmpleps(xmm5, xmm1);
__ cmpleps(xmm5, Operand(ebx, ecx, times_4, 10000));
+ __ cmpunordps(xmm5, xmm1);
+ __ cmpunordps(xmm5, Operand(ebx, ecx, times_4, 10000));
__ cmpneqps(xmm5, xmm1);
__ cmpneqps(xmm5, Operand(ebx, ecx, times_4, 10000));
@@ -467,6 +471,9 @@ TEST(DisasmIa320) {
__ movdqu(xmm0, Operand(ebx, ecx, times_4, 10000));
__ movdqu(Operand(ebx, ecx, times_4, 10000), xmm0);
+ __ movapd(xmm0, xmm1);
+ __ movapd(xmm0, Operand(edx, 4));
+
__ movd(xmm0, edi);
__ movd(xmm0, Operand(ebx, ecx, times_4, 10000));
__ movd(eax, xmm1);
@@ -490,6 +497,36 @@ TEST(DisasmIa320) {
__ cmpltsd(xmm0, xmm1);
__ andpd(xmm0, xmm1);
+ __ andpd(xmm0, Operand(ebx, ecx, times_4, 10000));
+ __ andnpd(xmm0, xmm1);
+ __ andnpd(xmm0, Operand(ebx, ecx, times_4, 10000));
+ __ orpd(xmm0, xmm1);
+ __ orpd(xmm0, Operand(ebx, ecx, times_4, 10000));
+ __ xorpd(xmm0, xmm1);
+ __ xorpd(xmm0, Operand(ebx, ecx, times_4, 10000));
+ __ addpd(xmm1, xmm0);
+ __ addpd(xmm1, Operand(ebx, ecx, times_4, 10000));
+ __ subpd(xmm1, xmm0);
+ __ subpd(xmm1, Operand(ebx, ecx, times_4, 10000));
+ __ mulpd(xmm1, xmm0);
+ __ mulpd(xmm1, Operand(ebx, ecx, times_4, 10000));
+ __ divpd(xmm1, xmm0);
+ __ divpd(xmm1, Operand(ebx, ecx, times_4, 10000));
+ __ minpd(xmm1, xmm0);
+ __ minpd(xmm1, Operand(ebx, ecx, times_4, 10000));
+ __ maxpd(xmm1, xmm0);
+ __ maxpd(xmm1, Operand(ebx, ecx, times_4, 10000));
+
+ __ cmpeqpd(xmm5, xmm1);
+ __ cmpeqpd(xmm5, Operand(ebx, ecx, times_4, 10000));
+ __ cmpltpd(xmm5, xmm1);
+ __ cmpltpd(xmm5, Operand(ebx, ecx, times_4, 10000));
+ __ cmplepd(xmm5, xmm1);
+ __ cmplepd(xmm5, Operand(ebx, ecx, times_4, 10000));
+ __ cmpunordpd(xmm5, xmm1);
+ __ cmpunordpd(xmm5, Operand(ebx, ecx, times_4, 10000));
+ __ cmpneqpd(xmm5, xmm1);
+ __ cmpneqpd(xmm5, Operand(ebx, ecx, times_4, 10000));
__ psllw(xmm0, 17);
__ pslld(xmm0, 17);
@@ -623,6 +660,8 @@ TEST(DisasmIa320) {
__ vandps(xmm0, xmm1, Operand(ebx, ecx, times_4, 10000));
__ vandnps(xmm0, xmm1, xmm2);
__ vandnps(xmm0, xmm1, Operand(ebx, ecx, times_4, 10000));
+ __ vorps(xmm0, xmm1, xmm2);
+ __ vorps(xmm0, xmm1, Operand(ebx, ecx, times_4, 10000));
__ vxorps(xmm0, xmm1, xmm2);
__ vxorps(xmm0, xmm1, Operand(ebx, ecx, times_4, 10000));
__ vaddps(xmm0, xmm1, xmm2);
@@ -639,9 +678,13 @@ TEST(DisasmIa320) {
__ vmaxps(xmm0, xmm1, Operand(ebx, ecx, times_4, 10000));
__ vrcpps(xmm1, xmm0);
__ vrcpps(xmm1, Operand(ebx, ecx, times_4, 10000));
+ __ vsqrtps(xmm1, xmm0);
+ __ vsqrtps(xmm1, Operand(ebx, ecx, times_4, 10000));
__ vrsqrtps(xmm1, xmm0);
__ vrsqrtps(xmm1, Operand(ebx, ecx, times_4, 10000));
__ vmovaps(xmm0, xmm1);
+ __ vmovapd(xmm0, xmm1);
+ __ vmovapd(xmm0, Operand(ebx, ecx, times_4, 10000));
__ vshufps(xmm0, xmm1, xmm2, 3);
__ vshufps(xmm0, xmm1, Operand(edx, 4), 3);
__ vhaddps(xmm0, xmm1, xmm2);
@@ -653,11 +696,17 @@ TEST(DisasmIa320) {
__ vcmpltps(xmm5, xmm4, Operand(ebx, ecx, times_4, 10000));
__ vcmpleps(xmm5, xmm4, xmm1);
__ vcmpleps(xmm5, xmm4, Operand(ebx, ecx, times_4, 10000));
+ __ vcmpunordps(xmm5, xmm4, xmm1);
+ __ vcmpunordps(xmm5, xmm4, Operand(ebx, ecx, times_4, 10000));
__ vcmpneqps(xmm5, xmm4, xmm1);
__ vcmpneqps(xmm5, xmm4, Operand(ebx, ecx, times_4, 10000));
__ vandpd(xmm0, xmm1, xmm2);
__ vandpd(xmm0, xmm1, Operand(ebx, ecx, times_4, 10000));
+ __ vandnpd(xmm0, xmm1, xmm2);
+ __ vandnpd(xmm0, xmm1, Operand(ebx, ecx, times_4, 10000));
+ __ vorpd(xmm0, xmm1, xmm2);
+ __ vorpd(xmm0, xmm1, Operand(ebx, ecx, times_4, 10000));
__ vxorpd(xmm0, xmm1, xmm2);
__ vxorpd(xmm0, xmm1, Operand(ebx, ecx, times_4, 10000));
__ vaddpd(xmm0, xmm1, xmm2);
@@ -673,10 +722,22 @@ TEST(DisasmIa320) {
__ vmaxpd(xmm0, xmm1, xmm2);
__ vmaxpd(xmm0, xmm1, Operand(ebx, ecx, times_4, 10000));
+ __ vcmpeqpd(xmm5, xmm4, xmm1);
+ __ vcmpeqpd(xmm5, xmm4, Operand(ebx, ecx, times_4, 10000));
+ __ vcmpltpd(xmm5, xmm4, xmm1);
+ __ vcmpltpd(xmm5, xmm4, Operand(ebx, ecx, times_4, 10000));
+ __ vcmplepd(xmm5, xmm4, xmm1);
+ __ vcmplepd(xmm5, xmm4, Operand(ebx, ecx, times_4, 10000));
+ __ vcmpunordpd(xmm5, xmm4, xmm1);
+ __ vcmpunordpd(xmm5, xmm4, Operand(ebx, ecx, times_4, 10000));
+ __ vcmpneqpd(xmm5, xmm4, xmm1);
+ __ vcmpneqpd(xmm5, xmm4, Operand(ebx, ecx, times_4, 10000));
+
__ vpsllw(xmm0, xmm7, 21);
__ vpslld(xmm0, xmm7, 21);
__ vpsrlw(xmm0, xmm7, 21);
__ vpsrld(xmm0, xmm7, 21);
+ __ vpsrlq(xmm0, xmm7, 21);
__ vpsraw(xmm0, xmm7, 21);
__ vpsrad(xmm0, xmm7, 21);
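The ia32 additions follow this file's coverage convention: each new SSE2 packed-double instruction is emitted twice, register-register and with a memory operand, so the disassembler sees both ModR/M forms, and the AVX variants add the three-operand encoding. For example:

  __ cmpunordpd(xmm5, xmm1);                               // reg, reg
  __ cmpunordpd(xmm5, Operand(ebx, ecx, times_4, 10000));  // reg, mem
  __ vcmpunordpd(xmm5, xmm4, xmm1);                        // AVX, three operands
  __ vcmpunordpd(xmm5, xmm4, Operand(ebx, ecx, times_4, 10000));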
diff --git a/deps/v8/test/cctest/test-disasm-x64.cc b/deps/v8/test/cctest/test-disasm-x64.cc
index 08793fba4a..86d98d8daf 100644
--- a/deps/v8/test/cctest/test-disasm-x64.cc
+++ b/deps/v8/test/cctest/test-disasm-x64.cc
@@ -182,6 +182,8 @@ TEST(DisasmX64) {
__ decq(rdx);
__ cdq();
+ __ repstosq();
+
__ nop();
__ idivq(rdx);
__ mull(rdx);
diff --git a/deps/v8/test/cctest/test-field-type-tracking.cc b/deps/v8/test/cctest/test-field-type-tracking.cc
index 512bf2a9c6..9eb9071d77 100644
--- a/deps/v8/test/cctest/test-field-type-tracking.cc
+++ b/deps/v8/test/cctest/test-field-type-tracking.cc
@@ -249,21 +249,22 @@ class Expectations {
}
}
- bool Check(DescriptorArray descriptors, int descriptor) const {
+ bool Check(DescriptorArray descriptors, InternalIndex descriptor) const {
PropertyDetails details = descriptors.GetDetails(descriptor);
- if (details.kind() != kinds_[descriptor]) return false;
- if (details.location() != locations_[descriptor]) return false;
- if (details.constness() != constnesses_[descriptor]) return false;
+ if (details.kind() != kinds_[descriptor.as_int()]) return false;
+ if (details.location() != locations_[descriptor.as_int()]) return false;
+ if (details.constness() != constnesses_[descriptor.as_int()]) return false;
- PropertyAttributes expected_attributes = attributes_[descriptor];
+ PropertyAttributes expected_attributes = attributes_[descriptor.as_int()];
if (details.attributes() != expected_attributes) return false;
- Representation expected_representation = representations_[descriptor];
+ Representation expected_representation =
+ representations_[descriptor.as_int()];
if (!details.representation().Equals(expected_representation)) return false;
- Object expected_value = *values_[descriptor];
+ Object expected_value = *values_[descriptor.as_int()];
if (details.location() == kField) {
if (details.kind() == kData) {
FieldType type = descriptors.GetFieldType(descriptor);
@@ -278,7 +279,7 @@ class Expectations {
if (value == expected_value) return true;
if (!value.IsAccessorPair()) return false;
AccessorPair pair = AccessorPair::cast(value);
- return pair.Equals(expected_value, *setter_values_[descriptor]);
+ return pair.Equals(expected_value, *setter_values_[descriptor.as_int()]);
}
UNREACHABLE();
}
@@ -291,13 +292,12 @@ class Expectations {
DescriptorArray descriptors = map.instance_descriptors();
CHECK(expected_nof <= number_of_properties_);
- for (int i = 0; i < expected_nof; i++) {
+ for (InternalIndex i : InternalIndex::Range(expected_nof)) {
if (!Check(descriptors, i)) {
Print();
#ifdef OBJECT_PRINT
descriptors.Print();
#endif
- Check(descriptors, i);
return false;
}
}
@@ -459,7 +459,7 @@ class Expectations {
Handle<Object> getter(pair->getter(), isolate);
Handle<Object> setter(pair->setter(), isolate);
- int descriptor =
+ InternalIndex descriptor =
map->instance_descriptors().SearchWithCache(isolate, *name, *map);
map = Map::TransitionToAccessorProperty(isolate, map, name, descriptor,
getter, setter, attributes);
@@ -495,8 +495,9 @@ TEST(ReconfigureAccessorToNonExistingDataField) {
CHECK(map->is_stable());
CHECK(expectations.Check(*map));
+ InternalIndex first(0);
Handle<Map> new_map = Map::ReconfigureProperty(
- isolate, map, 0, kData, NONE, Representation::None(), none_type);
+ isolate, map, first, kData, NONE, Representation::None(), none_type);
// |map| did not change except marked unstable.
CHECK(!map->is_deprecated());
CHECK(!map->is_stable());
@@ -511,12 +512,12 @@ TEST(ReconfigureAccessorToNonExistingDataField) {
CHECK(expectations.Check(*new_map));
Handle<Map> new_map2 = Map::ReconfigureProperty(
- isolate, map, 0, kData, NONE, Representation::None(), none_type);
+ isolate, map, first, kData, NONE, Representation::None(), none_type);
CHECK_EQ(*new_map, *new_map2);
Handle<Object> value(Smi::kZero, isolate);
Handle<Map> prepared_map = Map::PrepareForDataProperty(
- isolate, new_map, 0, PropertyConstness::kConst, value);
+ isolate, new_map, first, PropertyConstness::kConst, value);
// None to Smi generalization is trivial; the map does not change.
CHECK_EQ(*new_map, *prepared_map);
@@ -530,7 +531,7 @@ TEST(ReconfigureAccessorToNonExistingDataField) {
Factory* factory = isolate->factory();
Handle<JSObject> obj = factory->NewJSObjectFromMap(map);
JSObject::MigrateToMap(isolate, obj, prepared_map);
- FieldIndex index = FieldIndex::ForDescriptor(*prepared_map, 0);
+ FieldIndex index = FieldIndex::ForDescriptor(*prepared_map, first);
CHECK(obj->RawFastPropertyAt(index).IsUninitialized(isolate));
#ifdef VERIFY_HEAP
obj->ObjectVerify(isolate);
@@ -565,14 +566,16 @@ TEST(ReconfigureAccessorToNonExistingDataFieldHeavy) {
Handle<JSObject> obj = Handle<JSObject>::cast(obj_value);
CHECK_EQ(1, obj->map().NumberOfOwnDescriptors());
- CHECK(obj->map().instance_descriptors().GetStrongValue(0).IsAccessorPair());
+ InternalIndex first(0);
+ CHECK(
+ obj->map().instance_descriptors().GetStrongValue(first).IsAccessorPair());
Handle<Object> value(Smi::FromInt(42), isolate);
JSObject::SetOwnPropertyIgnoreAttributes(obj, foo_str, value, NONE).Check();
// Check that the property contains |value|.
CHECK_EQ(1, obj->map().NumberOfOwnDescriptors());
- FieldIndex index = FieldIndex::ForDescriptor(obj->map(), 0);
+ FieldIndex index = FieldIndex::ForDescriptor(obj->map(), first);
Object the_value = obj->RawFastPropertyAt(index);
CHECK(the_value.IsSmi());
CHECK_EQ(42, Smi::ToInt(the_value));
@@ -641,7 +644,7 @@ void TestGeneralizeField(int detach_property_at_index, int property_index,
from.representation, from.type);
} else {
map = expectations.AddDataField(map, NONE, PropertyConstness::kConst,
- Representation::Double(), any_type);
+ Representation::Smi(), any_type);
if (i == detach_property_at_index) {
detach_point_map = map;
}
@@ -653,11 +656,11 @@ void TestGeneralizeField(int detach_property_at_index, int property_index,
if (is_detached_map) {
detach_point_map = Map::ReconfigureProperty(
- isolate, detach_point_map, detach_property_at_index, kData, NONE,
- Representation::Tagged(), any_type);
+ isolate, detach_point_map, InternalIndex(detach_property_at_index),
+ kData, NONE, Representation::Double(), any_type);
expectations.SetDataField(detach_property_at_index,
PropertyConstness::kConst,
- Representation::Tagged(), any_type);
+ Representation::Double(), any_type);
CHECK(map->is_deprecated());
CHECK(expectations.Check(*detach_point_map,
detach_point_map->NumberOfOwnDescriptors()));
@@ -666,16 +669,17 @@ void TestGeneralizeField(int detach_property_at_index, int property_index,
// Create dummy optimized code object to test correct dependencies
// on the field owner.
Handle<Code> code = CreateDummyOptimizedCode(isolate);
- Handle<Map> field_owner(map->FindFieldOwner(isolate, property_index),
- isolate);
+ Handle<Map> field_owner(
+ map->FindFieldOwner(isolate, InternalIndex(property_index)), isolate);
DependentCode::InstallDependency(isolate, MaybeObjectHandle::Weak(code),
field_owner,
DependentCode::kFieldOwnerGroup);
CHECK(!code->marked_for_deoptimization());
// Create new maps by generalizing representation of propX field.
- Handle<Map> new_map = Map::ReconfigureProperty(
- isolate, map, property_index, kData, NONE, to.representation, to.type);
+ Handle<Map> new_map =
+ Map::ReconfigureProperty(isolate, map, InternalIndex(property_index),
+ kData, NONE, to.representation, to.type);
expectations.SetDataField(property_index, expected.constness,
expected.representation, expected.type);
@@ -814,7 +818,9 @@ TEST(GeneralizeDoubleFieldToTagged) {
TestGeneralizeField(
{PropertyConstness::kMutable, Representation::Double(), any_type},
{PropertyConstness::kMutable, Representation::HeapObject(), value_type},
- {PropertyConstness::kMutable, Representation::Tagged(), any_type});
+ {PropertyConstness::kMutable, Representation::Tagged(), any_type},
+ FLAG_unbox_double_fields || !FLAG_modify_field_representation_inplace,
+ !FLAG_unbox_double_fields && FLAG_modify_field_representation_inplace);
}
TEST(GeneralizeHeapObjectFieldToTagged) {
@@ -965,8 +971,9 @@ TEST(GeneralizeFieldWithAccessorProperties) {
maps[i] = maps[i - 1];
continue;
}
- Handle<Map> new_map = Map::ReconfigureProperty(
- isolate, map, i, kData, NONE, Representation::Double(), any_type);
+ Handle<Map> new_map =
+ Map::ReconfigureProperty(isolate, map, InternalIndex(i), kData, NONE,
+ Representation::Double(), any_type);
maps[i] = new_map;
expectations.SetDataField(i, PropertyConstness::kMutable,
@@ -1053,7 +1060,8 @@ void TestReconfigureDataFieldAttribute_GeneralizeField(
// Create dummy optimized code object to test correct dependencies
// on the field owner.
Handle<Code> code = CreateDummyOptimizedCode(isolate);
- Handle<Map> field_owner(map->FindFieldOwner(isolate, kSplitProp), isolate);
+ Handle<Map> field_owner(
+ map->FindFieldOwner(isolate, InternalIndex(kSplitProp)), isolate);
DependentCode::InstallDependency(isolate, MaybeObjectHandle::Weak(code),
field_owner,
DependentCode::kFieldOwnerGroup);
@@ -1061,8 +1069,9 @@ void TestReconfigureDataFieldAttribute_GeneralizeField(
// Reconfigure attributes of property |kSplitProp| of |map2| to NONE, which
// should generalize representations in |map1|.
- Handle<Map> new_map = Map::ReconfigureExistingProperty(
- isolate, map2, kSplitProp, kData, NONE, PropertyConstness::kConst);
+ Handle<Map> new_map =
+ Map::ReconfigureExistingProperty(isolate, map2, InternalIndex(kSplitProp),
+ kData, NONE, PropertyConstness::kConst);
// |map2| should be left unchanged but marked unstable.
CHECK(!map2->is_stable());
@@ -1141,7 +1150,8 @@ void TestReconfigureDataFieldAttribute_GeneralizeFieldTrivial(
// Create dummy optimized code object to test correct dependencies
// on the field owner.
Handle<Code> code = CreateDummyOptimizedCode(isolate);
- Handle<Map> field_owner(map->FindFieldOwner(isolate, kSplitProp), isolate);
+ Handle<Map> field_owner(
+ map->FindFieldOwner(isolate, InternalIndex(kSplitProp)), isolate);
DependentCode::InstallDependency(isolate, MaybeObjectHandle::Weak(code),
field_owner,
DependentCode::kFieldOwnerGroup);
@@ -1149,8 +1159,9 @@ void TestReconfigureDataFieldAttribute_GeneralizeFieldTrivial(
// Reconfigure attributes of property |kSplitProp| of |map2| to NONE, which
// should generalize representations in |map1|.
- Handle<Map> new_map = Map::ReconfigureExistingProperty(
- isolate, map2, kSplitProp, kData, NONE, PropertyConstness::kConst);
+ Handle<Map> new_map =
+ Map::ReconfigureExistingProperty(isolate, map2, InternalIndex(kSplitProp),
+ kData, NONE, PropertyConstness::kConst);
// |map2| should be left unchanged but marked unstable.
CHECK(!map2->is_stable());
@@ -1436,8 +1447,7 @@ struct CheckNormalize {
//
template <typename TestConfig, typename Checker>
static void TestReconfigureProperty_CustomPropertyAfterTargetMap(
- TestConfig& config, // NOLINT(runtime/references)
- Checker& checker) { // NOLINT(runtime/references)
+ TestConfig* config, Checker* checker) {
Isolate* isolate = CcTest::i_isolate();
Handle<FieldType> any_type = FieldType::Any(isolate);
@@ -1469,7 +1479,7 @@ static void TestReconfigureProperty_CustomPropertyAfterTargetMap(
map1 = expectations1.AddDataField(map1, NONE, constness, representation,
any_type);
}
- map1 = config.AddPropertyAtBranch(1, expectations1, map1);
+ map1 = config->AddPropertyAtBranch(1, &expectations1, map1);
for (int i = kCustomPropIndex + 1; i < kPropCount; i++) {
map1 = expectations1.AddDataField(map1, NONE, constness, representation,
any_type);
@@ -1489,7 +1499,7 @@ static void TestReconfigureProperty_CustomPropertyAfterTargetMap(
map2 = expectations2.AddDataField(map2, NONE, constness, representation,
any_type);
}
- map2 = config.AddPropertyAtBranch(2, expectations2, map2);
+ map2 = config->AddPropertyAtBranch(2, &expectations2, map2);
for (int i = kCustomPropIndex + 1; i < kPropCount; i++) {
map2 = expectations2.AddDataField(map2, NONE, constness, representation,
any_type);
@@ -1501,8 +1511,9 @@ static void TestReconfigureProperty_CustomPropertyAfterTargetMap(
// Reconfigure attributes of property |kSplitProp| of |map2| to NONE, which
// should generalize representations in |map1|.
- Handle<Map> new_map = Map::ReconfigureExistingProperty(
- isolate, map2, kSplitProp, kData, NONE, PropertyConstness::kConst);
+ Handle<Map> new_map =
+ Map::ReconfigureExistingProperty(isolate, map2, InternalIndex(kSplitProp),
+ kData, NONE, PropertyConstness::kConst);
// |map2| should be left unchanged but marked unstable.
CHECK(!map2->is_stable());
@@ -1510,8 +1521,8 @@ static void TestReconfigureProperty_CustomPropertyAfterTargetMap(
CHECK_NE(*map2, *new_map);
CHECK(expectations2.Check(*map2));
- config.UpdateExpectations(kCustomPropIndex, expectations1);
- checker.Check(isolate, map1, new_map, expectations1);
+ config->UpdateExpectations(kCustomPropIndex, &expectations1);
+ checker->Check(isolate, map1, new_map, expectations1);
}
TEST(ReconfigureDataFieldAttribute_SameDataConstantAfterTargetMap) {
@@ -1526,18 +1537,14 @@ TEST(ReconfigureDataFieldAttribute_SameDataConstantAfterTargetMap) {
js_func_ = factory->NewFunctionForTest(factory->empty_string());
}
- Handle<Map> AddPropertyAtBranch(
- int branch_id,
- Expectations& expectations, // NOLINT(runtime/references)
- Handle<Map> map) {
+ Handle<Map> AddPropertyAtBranch(int branch_id, Expectations* expectations,
+ Handle<Map> map) {
CHECK(branch_id == 1 || branch_id == 2);
// Add the same data constant property at both transition tree branches.
- return expectations.AddDataConstant(map, NONE, js_func_);
+ return expectations->AddDataConstant(map, NONE, js_func_);
}
- void UpdateExpectations(
- int property_index,
- Expectations& expectations) { // NOLINT(runtime/references)
+ void UpdateExpectations(int property_index, Expectations* expectations) {
// Expectations stay the same.
}
};
@@ -1545,7 +1552,7 @@ TEST(ReconfigureDataFieldAttribute_SameDataConstantAfterTargetMap) {
TestConfig config;
// Two branches are "compatible" so the |map1| should NOT be deprecated.
CheckSameMap checker;
- TestReconfigureProperty_CustomPropertyAfterTargetMap(config, checker);
+ TestReconfigureProperty_CustomPropertyAfterTargetMap(&config, &checker);
}
@@ -1575,26 +1582,22 @@ TEST(ReconfigureDataFieldAttribute_DataConstantToDataFieldAfterTargetMap) {
factory->NewFunction(sloppy_map, info, isolate->native_context());
}
- Handle<Map> AddPropertyAtBranch(
- int branch_id,
- Expectations& expectations, // NOLINT(runtime/references)
- Handle<Map> map) {
+ Handle<Map> AddPropertyAtBranch(int branch_id, Expectations* expectations,
+ Handle<Map> map) {
CHECK(branch_id == 1 || branch_id == 2);
Handle<JSFunction> js_func = branch_id == 1 ? js_func1_ : js_func2_;
- return expectations.AddDataConstant(map, NONE, js_func);
+ return expectations->AddDataConstant(map, NONE, js_func);
}
- void UpdateExpectations(
- int property_index,
- Expectations& expectations) { // NOLINT(runtime/references)
- expectations.SetDataField(property_index, PropertyConstness::kConst,
- Representation::HeapObject(), function_type_);
+ void UpdateExpectations(int property_index, Expectations* expectations) {
+ expectations->SetDataField(property_index, PropertyConstness::kConst,
+ Representation::HeapObject(), function_type_);
}
};
TestConfig config;
CheckSameMap checker;
- TestReconfigureProperty_CustomPropertyAfterTargetMap(config, checker);
+ TestReconfigureProperty_CustomPropertyAfterTargetMap(&config, &checker);
}
@@ -1612,28 +1615,23 @@ TEST(ReconfigureDataFieldAttribute_DataConstantToAccConstantAfterTargetMap) {
pair_ = CreateAccessorPair(true, true);
}
- Handle<Map> AddPropertyAtBranch(
- int branch_id,
- Expectations& expectations, // NOLINT(runtime/references)
- Handle<Map> map) {
+ Handle<Map> AddPropertyAtBranch(int branch_id, Expectations* expectations,
+ Handle<Map> map) {
CHECK(branch_id == 1 || branch_id == 2);
if (branch_id == 1) {
- return expectations.AddDataConstant(map, NONE, js_func_);
+ return expectations->AddDataConstant(map, NONE, js_func_);
} else {
- return expectations.AddAccessorConstant(map, NONE, pair_);
+ return expectations->AddAccessorConstant(map, NONE, pair_);
}
}
- void UpdateExpectations(
- int property_index,
- Expectations& expectations // NOLINT(runtime/references)
- ) {}
+ void UpdateExpectations(int property_index, Expectations* expectations) {}
};
TestConfig config;
// These are completely separate branches in transition tree.
CheckUnrelated checker;
- TestReconfigureProperty_CustomPropertyAfterTargetMap(config, checker);
+ TestReconfigureProperty_CustomPropertyAfterTargetMap(&config, &checker);
}
@@ -1645,26 +1643,22 @@ TEST(ReconfigureDataFieldAttribute_SameAccessorConstantAfterTargetMap) {
Handle<AccessorPair> pair_;
TestConfig() { pair_ = CreateAccessorPair(true, true); }
- Handle<Map> AddPropertyAtBranch(
- int branch_id,
- Expectations& expectations, // NOLINT(runtime/references)
- Handle<Map> map) {
+ Handle<Map> AddPropertyAtBranch(int branch_id, Expectations* expectations,
+ Handle<Map> map) {
CHECK(branch_id == 1 || branch_id == 2);
// Add the same accessor constant property at both transition tree
// branches.
- return expectations.AddAccessorConstant(map, NONE, pair_);
+ return expectations->AddAccessorConstant(map, NONE, pair_);
}
- void UpdateExpectations(
- int property_index,
- Expectations& expectations) { // NOLINT(runtime/references)
+ void UpdateExpectations(int property_index, Expectations* expectations) {
// Two branches are "compatible" so the |map1| should NOT be deprecated.
}
};
TestConfig config;
CheckSameMap checker;
- TestReconfigureProperty_CustomPropertyAfterTargetMap(config, checker);
+ TestReconfigureProperty_CustomPropertyAfterTargetMap(&config, &checker);
}
@@ -1680,24 +1674,20 @@ TEST(ReconfigureDataFieldAttribute_AccConstantToAccFieldAfterTargetMap) {
pair2_ = CreateAccessorPair(true, true);
}
- Handle<Map> AddPropertyAtBranch(
- int branch_id,
- Expectations& expectations, // NOLINT(runtime/references)
- Handle<Map> map) {
+ Handle<Map> AddPropertyAtBranch(int branch_id, Expectations* expectations,
+ Handle<Map> map) {
CHECK(branch_id == 1 || branch_id == 2);
Handle<AccessorPair> pair = branch_id == 1 ? pair1_ : pair2_;
- return expectations.AddAccessorConstant(map, NONE, pair);
+ return expectations->AddAccessorConstant(map, NONE, pair);
}
- void UpdateExpectations(
- int property_index,
- Expectations& expectations) { // NOLINT(runtime/references)
+ void UpdateExpectations(int property_index, Expectations* expectations) {
if (IS_ACCESSOR_FIELD_SUPPORTED) {
- expectations.SetAccessorField(property_index);
+ expectations->SetAccessorField(property_index);
} else {
// Currently we have a normalize case and the ACCESSOR property becomes
// ACCESSOR_CONSTANT.
- expectations.SetAccessorConstant(property_index, pair2_);
+ expectations->SetAccessorConstant(property_index, pair2_);
}
}
};
@@ -1705,11 +1695,11 @@ TEST(ReconfigureDataFieldAttribute_AccConstantToAccFieldAfterTargetMap) {
TestConfig config;
if (IS_ACCESSOR_FIELD_SUPPORTED) {
CheckSameMap checker;
- TestReconfigureProperty_CustomPropertyAfterTargetMap(config, checker);
+ TestReconfigureProperty_CustomPropertyAfterTargetMap(&config, &checker);
} else {
// Currently we have a normalize case.
CheckNormalize checker;
- TestReconfigureProperty_CustomPropertyAfterTargetMap(config, checker);
+ TestReconfigureProperty_CustomPropertyAfterTargetMap(&config, &checker);
}
}
@@ -1722,31 +1712,26 @@ TEST(ReconfigureDataFieldAttribute_AccConstantToDataFieldAfterTargetMap) {
Handle<AccessorPair> pair_;
TestConfig() { pair_ = CreateAccessorPair(true, true); }
- Handle<Map> AddPropertyAtBranch(
- int branch_id,
- Expectations& expectations, // NOLINT(runtime/references)
- Handle<Map> map) {
+ Handle<Map> AddPropertyAtBranch(int branch_id, Expectations* expectations,
+ Handle<Map> map) {
CHECK(branch_id == 1 || branch_id == 2);
if (branch_id == 1) {
- return expectations.AddAccessorConstant(map, NONE, pair_);
+ return expectations->AddAccessorConstant(map, NONE, pair_);
} else {
Isolate* isolate = CcTest::i_isolate();
Handle<FieldType> any_type = FieldType::Any(isolate);
- return expectations.AddDataField(map, NONE, PropertyConstness::kConst,
- Representation::Smi(), any_type);
+ return expectations->AddDataField(map, NONE, PropertyConstness::kConst,
+ Representation::Smi(), any_type);
}
}
- void UpdateExpectations(
- int property_index,
- Expectations& expectations // NOLINT(runtime/references)
- ) {}
+ void UpdateExpectations(int property_index, Expectations* expectations) {}
};
TestConfig config;
// These are completely separate branches in transition tree.
CheckUnrelated checker;
- TestReconfigureProperty_CustomPropertyAfterTargetMap(config, checker);
+ TestReconfigureProperty_CustomPropertyAfterTargetMap(&config, &checker);
}
@@ -1811,7 +1796,8 @@ static void TestReconfigureElementsKind_GeneralizeFieldTrivial(
// Create dummy optimized code object to test correct dependencies
// on the field owner.
Handle<Code> code = CreateDummyOptimizedCode(isolate);
- Handle<Map> field_owner(map->FindFieldOwner(isolate, kDiffProp), isolate);
+ Handle<Map> field_owner(
+ map->FindFieldOwner(isolate, InternalIndex(kDiffProp)), isolate);
DependentCode::InstallDependency(isolate, MaybeObjectHandle::Weak(code),
field_owner,
DependentCode::kFieldOwnerGroup);
@@ -2084,8 +2070,9 @@ TEST(ReconfigurePropertySplitMapTransitionsOverflow) {
map2 = handle(target, isolate);
}
- map2 = Map::ReconfigureProperty(isolate, map2, kSplitProp, kData, NONE,
- Representation::Double(), any_type);
+ map2 = Map::ReconfigureProperty(isolate, map2, InternalIndex(kSplitProp),
+ kData, NONE, Representation::Double(),
+ any_type);
expectations.SetDataField(kSplitProp, PropertyConstness::kMutable,
Representation::Double(), any_type);
@@ -2141,9 +2128,8 @@ TEST(ReconfigurePropertySplitMapTransitionsOverflow) {
// fixed.
template <typename TestConfig>
static void TestGeneralizeFieldWithSpecialTransition(
- TestConfig& config, // NOLINT(runtime/references)
- const CRFTData& from, const CRFTData& to, const CRFTData& expected,
- bool expected_deprecation) {
+ TestConfig* config, const CRFTData& from, const CRFTData& to,
+ const CRFTData& expected, bool expected_deprecation) {
Isolate* isolate = CcTest::i_isolate();
Expectations expectations(isolate);
@@ -2163,13 +2149,13 @@ static void TestGeneralizeFieldWithSpecialTransition(
// Apply some special transition to |map|.
CHECK(map->owns_descriptors());
- Handle<Map> map2 = config.Transition(map, expectations2);
+ Handle<Map> map2 = config->Transition(map, &expectations2);
// |map| should still match expectations.
CHECK(!map->is_deprecated());
CHECK(expectations.Check(*map));
- if (config.generalizes_representations()) {
+ if (config->generalizes_representations()) {
for (int i = 0; i < kPropCount; i++) {
expectations2.GeneralizeField(i);
}
@@ -2182,8 +2168,9 @@ static void TestGeneralizeFieldWithSpecialTransition(
// Create new maps by generalizing representation of propX field.
Handle<Map> maps[kPropCount];
for (int i = 0; i < kPropCount; i++) {
- Handle<Map> new_map = Map::ReconfigureProperty(isolate, map, i, kData, NONE,
- to.representation, to.type);
+ Handle<Map> new_map =
+ Map::ReconfigureProperty(isolate, map, InternalIndex(i), kData, NONE,
+ to.representation, to.type);
maps[i] = new_map;
expectations.SetDataField(i, expected.constness, expected.representation,
@@ -2206,10 +2193,10 @@ static void TestGeneralizeFieldWithSpecialTransition(
CHECK_EQ(*new_map2, *tmp_map);
} else {
// Equivalent transitions should always find the updated map.
- CHECK(config.is_non_equivalent_transition());
+ CHECK(config->is_non_equivalent_transition());
}
- if (config.is_non_equivalent_transition()) {
+ if (config->is_non_equivalent_transition()) {
// In the case of a non-equivalent transition we currently generalize all
// representations.
for (int i = 0; i < kPropCount; i++) {
@@ -2260,9 +2247,9 @@ TEST(ElementsKindTransitionFromMapOwningDescriptor) {
ElementsKind kind)
: attributes(attributes), symbol(symbol), elements_kind(kind) {}
- Handle<Map> Transition(Handle<Map> map, Expectations& expectations) {
- expectations.SetElementsKind(elements_kind);
- expectations.ChangeAttributesForAllProperties(attributes);
+ Handle<Map> Transition(Handle<Map> map, Expectations* expectations) {
+ expectations->SetElementsKind(elements_kind);
+ expectations->ChangeAttributesForAllProperties(attributes);
return Map::CopyForPreventExtensions(CcTest::i_isolate(), map, attributes,
symbol, "CopyForPreventExtensions");
}
@@ -2287,17 +2274,17 @@ TEST(ElementsKindTransitionFromMapOwningDescriptor) {
: DICTIONARY_ELEMENTS}};
for (size_t i = 0; i < arraysize(configs); i++) {
TestGeneralizeFieldWithSpecialTransition(
- configs[i],
+ &configs[i],
{PropertyConstness::kMutable, Representation::Smi(), any_type},
{PropertyConstness::kMutable, Representation::HeapObject(), value_type},
{PropertyConstness::kMutable, Representation::Tagged(), any_type},
!FLAG_modify_field_representation_inplace);
TestGeneralizeFieldWithSpecialTransition(
- configs[i],
+ &configs[i],
{PropertyConstness::kMutable, Representation::Double(), any_type},
{PropertyConstness::kMutable, Representation::HeapObject(), value_type},
{PropertyConstness::kMutable, Representation::Tagged(), any_type},
- true);
+ FLAG_unbox_double_fields || !FLAG_modify_field_representation_inplace);
}
}
@@ -2316,7 +2303,7 @@ TEST(ElementsKindTransitionFromMapNotOwningDescriptor) {
ElementsKind kind)
: attributes(attributes), symbol(symbol), elements_kind(kind) {}
- Handle<Map> Transition(Handle<Map> map, Expectations& expectations) {
+ Handle<Map> Transition(Handle<Map> map, Expectations* expectations) {
Isolate* isolate = CcTest::i_isolate();
Handle<FieldType> any_type = FieldType::Any(isolate);
@@ -2329,8 +2316,8 @@ TEST(ElementsKindTransitionFromMapNotOwningDescriptor) {
.ToHandleChecked();
CHECK(!map->owns_descriptors());
- expectations.SetElementsKind(elements_kind);
- expectations.ChangeAttributesForAllProperties(attributes);
+ expectations->SetElementsKind(elements_kind);
+ expectations->ChangeAttributesForAllProperties(attributes);
return Map::CopyForPreventExtensions(isolate, map, attributes, symbol,
"CopyForPreventExtensions");
}
@@ -2355,17 +2342,17 @@ TEST(ElementsKindTransitionFromMapNotOwningDescriptor) {
: DICTIONARY_ELEMENTS}};
for (size_t i = 0; i < arraysize(configs); i++) {
TestGeneralizeFieldWithSpecialTransition(
- configs[i],
+ &configs[i],
{PropertyConstness::kMutable, Representation::Smi(), any_type},
{PropertyConstness::kMutable, Representation::HeapObject(), value_type},
{PropertyConstness::kMutable, Representation::Tagged(), any_type},
!FLAG_modify_field_representation_inplace);
TestGeneralizeFieldWithSpecialTransition(
- configs[i],
+ &configs[i],
{PropertyConstness::kMutable, Representation::Double(), any_type},
{PropertyConstness::kMutable, Representation::HeapObject(), value_type},
{PropertyConstness::kMutable, Representation::Tagged(), any_type},
- true);
+ FLAG_unbox_double_fields || !FLAG_modify_field_representation_inplace);
}
}
@@ -2388,9 +2375,7 @@ TEST(PrototypeTransitionFromMapOwningDescriptor) {
prototype_ = factory->NewJSObjectFromMap(Map::Create(isolate, 0));
}
- Handle<Map> Transition(
- Handle<Map> map,
- Expectations& expectations) { // NOLINT(runtime/references)
+ Handle<Map> Transition(Handle<Map> map, Expectations* expectations) {
return Map::TransitionToPrototype(CcTest::i_isolate(), map, prototype_);
}
// TODO(ishell): remove once IS_PROTO_TRANS_ISSUE_FIXED is removed.
@@ -2401,14 +2386,16 @@ TEST(PrototypeTransitionFromMapOwningDescriptor) {
};
TestConfig config;
TestGeneralizeFieldWithSpecialTransition(
- config, {PropertyConstness::kMutable, Representation::Smi(), any_type},
+ &config, {PropertyConstness::kMutable, Representation::Smi(), any_type},
{PropertyConstness::kMutable, Representation::HeapObject(), value_type},
{PropertyConstness::kMutable, Representation::Tagged(), any_type},
!FLAG_modify_field_representation_inplace);
TestGeneralizeFieldWithSpecialTransition(
- config, {PropertyConstness::kMutable, Representation::Double(), any_type},
+ &config,
+ {PropertyConstness::kMutable, Representation::Double(), any_type},
{PropertyConstness::kMutable, Representation::HeapObject(), value_type},
- {PropertyConstness::kMutable, Representation::Tagged(), any_type}, true);
+ {PropertyConstness::kMutable, Representation::Tagged(), any_type},
+ FLAG_unbox_double_fields || !FLAG_modify_field_representation_inplace);
}
TEST(PrototypeTransitionFromMapNotOwningDescriptor) {
@@ -2429,9 +2416,7 @@ TEST(PrototypeTransitionFromMapNotOwningDescriptor) {
prototype_ = factory->NewJSObjectFromMap(Map::Create(isolate, 0));
}
- Handle<Map> Transition(
- Handle<Map> map,
- Expectations& expectations) { // NOLINT(runtime/references)
+ Handle<Map> Transition(Handle<Map> map, Expectations* expectations) {
Isolate* isolate = CcTest::i_isolate();
Handle<FieldType> any_type = FieldType::Any(isolate);
@@ -2454,14 +2439,16 @@ TEST(PrototypeTransitionFromMapNotOwningDescriptor) {
};
TestConfig config;
TestGeneralizeFieldWithSpecialTransition(
- config, {PropertyConstness::kMutable, Representation::Smi(), any_type},
+ &config, {PropertyConstness::kMutable, Representation::Smi(), any_type},
{PropertyConstness::kMutable, Representation::HeapObject(), value_type},
{PropertyConstness::kMutable, Representation::Tagged(), any_type},
!FLAG_modify_field_representation_inplace);
TestGeneralizeFieldWithSpecialTransition(
- config, {PropertyConstness::kMutable, Representation::Double(), any_type},
+ &config,
+ {PropertyConstness::kMutable, Representation::Double(), any_type},
{PropertyConstness::kMutable, Representation::HeapObject(), value_type},
- {PropertyConstness::kMutable, Representation::Tagged(), any_type}, true);
+ {PropertyConstness::kMutable, Representation::Tagged(), any_type},
+ FLAG_unbox_double_fields || !FLAG_modify_field_representation_inplace);
}
////////////////////////////////////////////////////////////////////////////////
@@ -2486,10 +2473,8 @@ struct TransitionToDataFieldOperator {
heap_type_(heap_type),
value_(value) {}
- Handle<Map> DoTransition(
- Expectations& expectations, // NOLINT(runtime/references)
- Handle<Map> map) {
- return expectations.TransitionToDataField(
+ Handle<Map> DoTransition(Expectations* expectations, Handle<Map> map) {
+ return expectations->TransitionToDataField(
map, attributes_, constness_, representation_, heap_type_, value_);
}
};
@@ -2503,8 +2488,8 @@ struct TransitionToDataConstantOperator {
PropertyAttributes attributes = NONE)
: attributes_(attributes), value_(value) {}
- Handle<Map> DoTransition(Expectations& expectations, Handle<Map> map) {
- return expectations.TransitionToDataConstant(map, attributes_, value_);
+ Handle<Map> DoTransition(Expectations* expectations, Handle<Map> map) {
+ return expectations->TransitionToDataConstant(map, attributes_, value_);
}
};
@@ -2517,14 +2502,14 @@ struct TransitionToAccessorConstantOperator {
PropertyAttributes attributes = NONE)
: attributes_(attributes), pair_(pair) {}
- Handle<Map> DoTransition(Expectations& expectations, Handle<Map> map) {
- return expectations.TransitionToAccessorConstant(map, attributes_, pair_);
+ Handle<Map> DoTransition(Expectations* expectations, Handle<Map> map) {
+ return expectations->TransitionToAccessorConstant(map, attributes_, pair_);
}
};
struct ReconfigureAsDataPropertyOperator {
- int descriptor_;
+ InternalIndex descriptor_;
Representation representation_;
PropertyAttributes attributes_;
Handle<FieldType> heap_type_;
@@ -2538,12 +2523,11 @@ struct ReconfigureAsDataPropertyOperator {
attributes_(attributes),
heap_type_(heap_type) {}
- Handle<Map> DoTransition(
- Isolate* isolate,
- Expectations& expectations, // NOLINT(runtime/references)
- Handle<Map> map) {
- expectations.SetDataField(descriptor_, PropertyConstness::kMutable,
- representation_, heap_type_);
+ Handle<Map> DoTransition(Isolate* isolate, Expectations* expectations,
+ Handle<Map> map) {
+ expectations->SetDataField(descriptor_.as_int(),
+ PropertyConstness::kMutable, representation_,
+ heap_type_);
return Map::ReconfigureExistingProperty(isolate, map, descriptor_, kData,
attributes_,
PropertyConstness::kConst);
@@ -2552,16 +2536,16 @@ struct ReconfigureAsDataPropertyOperator {
struct ReconfigureAsAccessorPropertyOperator {
- int descriptor_;
+ InternalIndex descriptor_;
PropertyAttributes attributes_;
ReconfigureAsAccessorPropertyOperator(int descriptor,
PropertyAttributes attributes = NONE)
: descriptor_(descriptor), attributes_(attributes) {}
- Handle<Map> DoTransition(Isolate* isolate, Expectations& expectations,
+ Handle<Map> DoTransition(Isolate* isolate, Expectations* expectations,
Handle<Map> map) {
- expectations.SetAccessorField(descriptor_);
+ expectations->SetAccessorField(descriptor_.as_int());
return Map::ReconfigureExistingProperty(isolate, map, descriptor_,
kAccessor, attributes_,
PropertyConstness::kConst);
@@ -2586,9 +2570,8 @@ struct FieldGeneralizationChecker {
attributes_(attributes),
heap_type_(heap_type) {}
- void Check(Isolate* isolate,
- Expectations& expectations2, // NOLINT(runtime/references)
- Handle<Map> map1, Handle<Map> map2) {
+ void Check(Isolate* isolate, Expectations* expectations, Handle<Map> map1,
+ Handle<Map> map2) {
CHECK(!map2->is_deprecated());
CHECK(map1->is_deprecated());
@@ -2597,21 +2580,20 @@ struct FieldGeneralizationChecker {
CHECK_EQ(*map2, *updated_map);
CheckMigrationTarget(isolate, *map1, *updated_map);
- expectations2.SetDataField(descriptor_, attributes_, constness_,
+ expectations->SetDataField(descriptor_, attributes_, constness_,
representation_, heap_type_);
- CHECK(expectations2.Check(*map2));
+ CHECK(expectations->Check(*map2));
}
};
// Checks that existing transition was taken as is.
struct SameMapChecker {
- void Check(Isolate* isolate,
- Expectations& expectations, // NOLINT(runtime/references)
- Handle<Map> map1, Handle<Map> map2) {
+ void Check(Isolate* isolate, Expectations* expectations, Handle<Map> map1,
+ Handle<Map> map2) {
CHECK(!map2->is_deprecated());
CHECK_EQ(*map1, *map2);
- CHECK(expectations.Check(*map2));
+ CHECK(expectations->Check(*map2));
}
};
@@ -2619,12 +2601,11 @@ struct SameMapChecker {
// Checks that both |map1| and |map2| stay non-deprecated; this is
// the case when the property kind is changed.
struct PropertyKindReconfigurationChecker {
- void Check(Expectations& expectations, // NOLINT(runtime/references)
- Handle<Map> map1, Handle<Map> map2) {
+ void Check(Expectations* expectations, Handle<Map> map1, Handle<Map> map2) {
CHECK(!map1->is_deprecated());
CHECK(!map2->is_deprecated());
CHECK_NE(*map1, *map2);
- CHECK(expectations.Check(*map2));
+ CHECK(expectations->Check(*map2));
}
};
@@ -2645,10 +2626,8 @@ struct PropertyKindReconfigurationChecker {
// where "p4A" and "p4B" differ only in the attributes.
//
template <typename TransitionOp1, typename TransitionOp2, typename Checker>
-static void TestTransitionTo(
- TransitionOp1& transition_op1, // NOLINT(runtime/references)
- TransitionOp2& transition_op2, // NOLINT(runtime/references)
- Checker& checker) { // NOLINT(runtime/references)
+static void TestTransitionTo(TransitionOp1* transition_op1,
+ TransitionOp2* transition_op2, Checker* checker) {
Isolate* isolate = CcTest::i_isolate();
Handle<FieldType> any_type = FieldType::Any(isolate);
@@ -2664,14 +2643,14 @@ static void TestTransitionTo(
CHECK(expectations.Check(*map));
Expectations expectations1 = expectations;
- Handle<Map> map1 = transition_op1.DoTransition(expectations1, map);
+ Handle<Map> map1 = transition_op1->DoTransition(&expectations1, map);
CHECK(expectations1.Check(*map1));
Expectations expectations2 = expectations;
- Handle<Map> map2 = transition_op2.DoTransition(expectations2, map);
+ Handle<Map> map2 = transition_op2->DoTransition(&expectations2, map);
// Let the test customization do the check.
- checker.Check(isolate, expectations2, map1, map2);
+ checker->Check(isolate, &expectations2, map1, map2);
}
TEST(TransitionDataFieldToDataField) {
@@ -2692,7 +2671,7 @@ TEST(TransitionDataFieldToDataField) {
FieldGeneralizationChecker checker(kPropCount - 1,
PropertyConstness::kMutable,
Representation::Double(), any_type);
- TestTransitionTo(transition_op1, transition_op2, checker);
+ TestTransitionTo(&transition_op1, &transition_op2, &checker);
}
TEST(TransitionDataConstantToSameDataConstant) {
@@ -2706,7 +2685,7 @@ TEST(TransitionDataConstantToSameDataConstant) {
TransitionToDataConstantOperator transition_op(js_func);
SameMapChecker checker;
- TestTransitionTo(transition_op, transition_op, checker);
+ TestTransitionTo(&transition_op, &transition_op, &checker);
}
@@ -2732,7 +2711,7 @@ TEST(TransitionDataConstantToAnotherDataConstant) {
TransitionToDataConstantOperator transition_op2(js_func2);
SameMapChecker checker;
- TestTransitionTo(transition_op1, transition_op2, checker);
+ TestTransitionTo(&transition_op1, &transition_op2, &checker);
}
@@ -2754,12 +2733,12 @@ TEST(TransitionDataConstantToDataField) {
if (FLAG_modify_field_representation_inplace) {
SameMapChecker checker;
- TestTransitionTo(transition_op1, transition_op2, checker);
+ TestTransitionTo(&transition_op1, &transition_op2, &checker);
} else {
FieldGeneralizationChecker checker(kPropCount - 1,
PropertyConstness::kMutable,
Representation::Tagged(), any_type);
- TestTransitionTo(transition_op1, transition_op2, checker);
+ TestTransitionTo(&transition_op1, &transition_op2, &checker);
}
}
@@ -2772,7 +2751,7 @@ TEST(TransitionAccessorConstantToSameAccessorConstant) {
TransitionToAccessorConstantOperator transition_op(pair);
SameMapChecker checker;
- TestTransitionTo(transition_op, transition_op, checker);
+ TestTransitionTo(&transition_op, &transition_op, &checker);
}
// TODO(ishell): add this test once IS_ACCESSOR_FIELD_SUPPORTED is supported.
@@ -2840,11 +2819,11 @@ void TestStoreToConstantField(const char* store_func_source,
CHECK(!map->is_dictionary_map());
CHECK(!map->is_deprecated());
CHECK_EQ(1, map->NumberOfOwnDescriptors());
-
- CHECK(map->instance_descriptors().GetDetails(0).representation().Equals(
+ InternalIndex first(0);
+ CHECK(map->instance_descriptors().GetDetails(first).representation().Equals(
expected_rep));
CHECK_EQ(PropertyConstness::kConst,
- map->instance_descriptors().GetDetails(0).constness());
+ map->instance_descriptors().GetDetails(first).constness());
// Store value2 to obj2 and check that it got same map and property details
// did not change.
@@ -2856,10 +2835,10 @@ void TestStoreToConstantField(const char* store_func_source,
CHECK(!map->is_deprecated());
CHECK_EQ(1, map->NumberOfOwnDescriptors());
- CHECK(map->instance_descriptors().GetDetails(0).representation().Equals(
+ CHECK(map->instance_descriptors().GetDetails(first).representation().Equals(
expected_rep));
CHECK_EQ(PropertyConstness::kConst,
- map->instance_descriptors().GetDetails(0).constness());
+ map->instance_descriptors().GetDetails(first).constness());
// Store value2 to obj1 and check that property became mutable.
Call(isolate, store_func, obj1, value2).Check();
@@ -2869,10 +2848,10 @@ void TestStoreToConstantField(const char* store_func_source,
CHECK(!map->is_deprecated());
CHECK_EQ(1, map->NumberOfOwnDescriptors());
- CHECK(map->instance_descriptors().GetDetails(0).representation().Equals(
+ CHECK(map->instance_descriptors().GetDetails(first).representation().Equals(
expected_rep));
CHECK_EQ(expected_constness,
- map->instance_descriptors().GetDetails(0).constness());
+ map->instance_descriptors().GetDetails(first).constness());
}
void TestStoreToConstantField_PlusMinusZero(const char* store_func_source,
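The dominant change in this file is mechanical: descriptor slots are now addressed through the InternalIndex wrapper rather than raw ints (with as_int() where plain arrays are indexed), and the NOLINT(runtime/references) test plumbing switches from references to pointers. A condensed sketch of the new idiom, using only calls from the hunks above:

  InternalIndex first(0);  // wraps descriptor slot 0
  CHECK(map->instance_descriptors().GetDetails(first).representation().Equals(
      expected_rep));
  for (InternalIndex i : InternalIndex::Range(expected_nof)) {
    if (!Check(descriptors, i)) break;  // Check() reads kinds_[i.as_int()] etc.
  }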
diff --git a/deps/v8/test/cctest/test-flags.cc b/deps/v8/test/cctest/test-flags.cc
index 4e5fcffa62..93c7048f81 100644
--- a/deps/v8/test/cctest/test-flags.cc
+++ b/deps/v8/test/cctest/test-flags.cc
@@ -209,11 +209,5 @@ TEST(FlagsJitlessImplications) {
}
}
-TEST(FlagsRegexpInterpretAllImplications) {
- if (FLAG_regexp_interpret_all) {
- CHECK(!FLAG_regexp_tier_up);
- }
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/test-global-handles.cc b/deps/v8/test/cctest/test-global-handles.cc
index 417679432b..98a66bf535 100644
--- a/deps/v8/test/cctest/test-global-handles.cc
+++ b/deps/v8/test/cctest/test-global-handles.cc
@@ -49,7 +49,7 @@ class NonRootingEmbedderHeapTracer final : public v8::EmbedderHeapTracer {
const std::vector<std::pair<void*, void*>>& embedder_fields) final {}
bool AdvanceTracing(double deadline_in_ms) final { return true; }
bool IsTracingDone() final { return true; }
- void TracePrologue() final {}
+ void TracePrologue(TraceFlags) final {}
void TraceEpilogue() final {}
void EnterFinalPause(EmbedderStackState) final {}
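EmbedderHeapTracer::TracePrologue gained a TraceFlags parameter, so overrides must change signature for their final specifiers to keep compiling. A no-op tracer with the updated contract, assembled only from the overrides shown above (the class name is illustrative):

  class NoopTracer final : public v8::EmbedderHeapTracer {
   public:
    void RegisterV8References(
        const std::vector<std::pair<void*, void*>>& embedder_fields) final {}
    bool AdvanceTracing(double deadline_in_ms) final { return true; }
    bool IsTracingDone() final { return true; }
    void TracePrologue(TraceFlags) final {}  // was TracePrologue()
    void TraceEpilogue() final {}
    void EnterFinalPause(EmbedderStackState) final {}
  };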
diff --git a/deps/v8/test/cctest/test-heap-profiler.cc b/deps/v8/test/cctest/test-heap-profiler.cc
index 3aec4ae003..7784a7f855 100644
--- a/deps/v8/test/cctest/test-heap-profiler.cc
+++ b/deps/v8/test/cctest/test-heap-profiler.cc
@@ -3062,7 +3062,8 @@ TEST(ArrayBufferSharedBackingStore) {
CHECK(ab2_data);
CHECK_EQ(ab1_data, ab2_data);
CHECK_EQ(2, GetRetainersCount(snapshot, ab1_data));
- free(data);
+ ab_contents.Deleter()(ab_contents.Data(), ab_contents.ByteLength(),
+ ab_contents.DeleterData());
}
@@ -3577,10 +3578,9 @@ TEST(AddressToTraceMap) {
}
static const v8::AllocationProfile::Node* FindAllocationProfileNode(
- v8::Isolate* isolate,
- v8::AllocationProfile& profile, // NOLINT(runtime/references)
+ v8::Isolate* isolate, v8::AllocationProfile* profile,
const Vector<const char*>& names) {
- v8::AllocationProfile::Node* node = profile.GetRootNode();
+ v8::AllocationProfile::Node* node = profile->GetRootNode();
for (int i = 0; node != nullptr && i < names.length(); ++i) {
const char* name = names[i];
auto children = node->children;
@@ -3650,7 +3650,7 @@ TEST(SamplingHeapProfiler) {
CHECK(profile);
const char* names[] = {"", "foo", "bar"};
- auto node_bar = FindAllocationProfileNode(env->GetIsolate(), *profile,
+ auto node_bar = FindAllocationProfileNode(env->GetIsolate(), profile.get(),
ArrayVector(names));
CHECK(node_bar);
@@ -3674,12 +3674,12 @@ TEST(SamplingHeapProfiler) {
CHECK(profile);
const char* names1[] = {"", "start", "f_0_0", "f_0_1", "f_0_2"};
- auto node1 = FindAllocationProfileNode(env->GetIsolate(), *profile,
+ auto node1 = FindAllocationProfileNode(env->GetIsolate(), profile.get(),
ArrayVector(names1));
CHECK(node1);
const char* names2[] = {"", "generateFunctions"};
- auto node2 = FindAllocationProfileNode(env->GetIsolate(), *profile,
+ auto node2 = FindAllocationProfileNode(env->GetIsolate(), profile.get(),
ArrayVector(names2));
CHECK(node2);
@@ -3737,11 +3737,11 @@ TEST(SamplingHeapProfilerRateAgnosticEstimates) {
CHECK(profile);
const char* path_to_foo[] = {"", "foo"};
- auto node_foo = FindAllocationProfileNode(env->GetIsolate(), *profile,
+ auto node_foo = FindAllocationProfileNode(env->GetIsolate(), profile.get(),
ArrayVector(path_to_foo));
CHECK(node_foo);
const char* path_to_bar[] = {"", "foo", "bar"};
- auto node_bar = FindAllocationProfileNode(env->GetIsolate(), *profile,
+ auto node_bar = FindAllocationProfileNode(env->GetIsolate(), profile.get(),
ArrayVector(path_to_bar));
CHECK(node_bar);
@@ -3761,11 +3761,11 @@ TEST(SamplingHeapProfilerRateAgnosticEstimates) {
CHECK(profile);
const char* path_to_foo[] = {"", "foo"};
- auto node_foo = FindAllocationProfileNode(env->GetIsolate(), *profile,
+ auto node_foo = FindAllocationProfileNode(env->GetIsolate(), profile.get(),
ArrayVector(path_to_foo));
CHECK(node_foo);
const char* path_to_bar[] = {"", "foo", "bar"};
- auto node_bar = FindAllocationProfileNode(env->GetIsolate(), *profile,
+ auto node_bar = FindAllocationProfileNode(env->GetIsolate(), profile.get(),
ArrayVector(path_to_bar));
CHECK(node_bar);
@@ -3804,7 +3804,7 @@ TEST(SamplingHeapProfilerApiAllocation) {
heap_profiler->GetAllocationProfile());
CHECK(profile);
const char* names[] = {"(V8 API)"};
- auto node = FindAllocationProfileNode(env->GetIsolate(), *profile,
+ auto node = FindAllocationProfileNode(env->GetIsolate(), profile.get(),
ArrayVector(names));
CHECK(node);
@@ -3944,7 +3944,7 @@ TEST(SamplingHeapProfilerPretenuredInlineAllocations) {
heap_profiler->StopSamplingHeapProfiler();
const char* names[] = {"f"};
- auto node_f = FindAllocationProfileNode(env->GetIsolate(), *profile,
+ auto node_f = FindAllocationProfileNode(env->GetIsolate(), profile.get(),
ArrayVector(names));
CHECK(node_f);
@@ -3974,7 +3974,7 @@ TEST(SamplingHeapProfilerLargeInterval) {
heap_profiler->GetAllocationProfile());
CHECK(profile);
const char* names[] = {"(EXTERNAL)"};
- auto node = FindAllocationProfileNode(env->GetIsolate(), *profile,
+ auto node = FindAllocationProfileNode(env->GetIsolate(), profile.get(),
ArrayVector(names));
CHECK(node);
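Note: two patterns recur in the hunks above: externalized ArrayBuffer memory is now released through the deleter recorded in the Contents object rather than a bare free(), and FindAllocationProfileNode takes the profile by pointer (hence profile.get()) instead of by non-const reference. A sketch of the deleter call, assuming ab_contents was obtained from Externalize() as in the test:

    ab_contents.Deleter()(ab_contents.Data(), ab_contents.ByteLength(),
                          ab_contents.DeleterData());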
diff --git a/deps/v8/test/cctest/test-inobject-slack-tracking.cc b/deps/v8/test/cctest/test-inobject-slack-tracking.cc
index 6a25536dd5..e2de4df4fb 100644
--- a/deps/v8/test/cctest/test-inobject-slack-tracking.cc
+++ b/deps/v8/test/cctest/test-inobject-slack-tracking.cc
@@ -1112,7 +1112,7 @@ TEST(SubclassRegExpBuiltin) {
v8::HandleScope scope(CcTest::isolate());
const int first_field = 1;
- TestSubclassBuiltin("A1", JS_REGEXP_TYPE, "RegExp", "'o(..)h', 'g'",
+ TestSubclassBuiltin("A1", JS_REG_EXP_TYPE, "RegExp", "'o(..)h', 'g'",
first_field);
}
diff --git a/deps/v8/test/cctest/test-lockers.cc b/deps/v8/test/cctest/test-lockers.cc
index 092c107841..4ed00e0a11 100644
--- a/deps/v8/test/cctest/test-lockers.cc
+++ b/deps/v8/test/cctest/test-lockers.cc
@@ -944,7 +944,7 @@ TEST(ExtensionsRegistration) {
"test4", "test5", "test6", "test7"};
for (const char* name : extension_names) {
v8::RegisterExtension(
- v8::base::make_unique<v8::Extension>(name, kSimpleExtensionSource));
+ std::make_unique<v8::Extension>(name, kSimpleExtensionSource));
}
std::vector<JoinableThread*> threads;
threads.reserve(kNThreads);
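Note: v8::RegisterExtension now takes ownership via std::unique_ptr, and the v8::base::make_unique shim is replaced by std::make_unique (requires <memory>). A one-line sketch with an illustrative extension name:

    v8::RegisterExtension(
        std::make_unique<v8::Extension>("demo", kSimpleExtensionSource));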
diff --git a/deps/v8/test/cctest/test-macro-assembler-x64.cc b/deps/v8/test/cctest/test-macro-assembler-x64.cc
index 1344c0e9d1..e238c8c021 100644
--- a/deps/v8/test/cctest/test-macro-assembler-x64.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-x64.cc
@@ -242,37 +242,37 @@ TEST(SmiTag) {
__ movq(rax, Immediate(1)); // Test number.
__ movq(rcx, Immediate(0));
- __ SmiTag(rcx, rcx);
+ __ SmiTag(rcx);
__ Set(rdx, Smi::kZero.ptr());
- __ cmpq(rcx, rdx);
+ __ cmp_tagged(rcx, rdx);
__ j(not_equal, &exit);
__ movq(rax, Immediate(2)); // Test number.
__ movq(rcx, Immediate(1024));
- __ SmiTag(rcx, rcx);
+ __ SmiTag(rcx);
__ Set(rdx, Smi::FromInt(1024).ptr());
- __ cmpq(rcx, rdx);
+ __ cmp_tagged(rcx, rdx);
__ j(not_equal, &exit);
__ movq(rax, Immediate(3)); // Test number.
__ movq(rcx, Immediate(-1));
- __ SmiTag(rcx, rcx);
+ __ SmiTag(rcx);
__ Set(rdx, Smi::FromInt(-1).ptr());
- __ cmpq(rcx, rdx);
+ __ cmp_tagged(rcx, rdx);
__ j(not_equal, &exit);
__ movq(rax, Immediate(4)); // Test number.
__ movq(rcx, Immediate(Smi::kMaxValue));
- __ SmiTag(rcx, rcx);
+ __ SmiTag(rcx);
__ Set(rdx, Smi::FromInt(Smi::kMaxValue).ptr());
- __ cmpq(rcx, rdx);
+ __ cmp_tagged(rcx, rdx);
__ j(not_equal, &exit);
__ movq(rax, Immediate(5)); // Test number.
__ movq(rcx, Immediate(Smi::kMinValue));
- __ SmiTag(rcx, rcx);
+ __ SmiTag(rcx);
__ Set(rdx, Smi::FromInt(Smi::kMinValue).ptr());
- __ cmpq(rcx, rdx);
+ __ cmp_tagged(rcx, rdx);
__ j(not_equal, &exit);
// Different target register.
@@ -281,35 +281,35 @@ TEST(SmiTag) {
__ movq(rcx, Immediate(0));
__ SmiTag(r8, rcx);
__ Set(rdx, Smi::zero().ptr());
- __ cmpq(r8, rdx);
+ __ cmp_tagged(r8, rdx);
__ j(not_equal, &exit);
__ movq(rax, Immediate(7)); // Test number.
__ movq(rcx, Immediate(1024));
__ SmiTag(r8, rcx);
__ Set(rdx, Smi::FromInt(1024).ptr());
- __ cmpq(r8, rdx);
+ __ cmp_tagged(r8, rdx);
__ j(not_equal, &exit);
__ movq(rax, Immediate(8)); // Test number.
__ movq(rcx, Immediate(-1));
__ SmiTag(r8, rcx);
__ Set(rdx, Smi::FromInt(-1).ptr());
- __ cmpq(r8, rdx);
+ __ cmp_tagged(r8, rdx);
__ j(not_equal, &exit);
__ movq(rax, Immediate(9)); // Test number.
__ movq(rcx, Immediate(Smi::kMaxValue));
__ SmiTag(r8, rcx);
__ Set(rdx, Smi::FromInt(Smi::kMaxValue).ptr());
- __ cmpq(r8, rdx);
+ __ cmp_tagged(r8, rdx);
__ j(not_equal, &exit);
__ movq(rax, Immediate(10)); // Test number.
__ movq(rcx, Immediate(Smi::kMinValue));
__ SmiTag(r8, rcx);
__ Set(rdx, Smi::FromInt(Smi::kMinValue).ptr());
- __ cmpq(r8, rdx);
+ __ cmp_tagged(r8, rdx);
__ j(not_equal, &exit);
@@ -344,7 +344,7 @@ TEST(SmiCheck) {
// CheckSmi
__ movl(rcx, Immediate(0));
- __ SmiTag(rcx, rcx);
+ __ SmiTag(rcx);
cond = masm->CheckSmi(rcx);
__ j(NegateCondition(cond), &exit);
@@ -355,7 +355,7 @@ TEST(SmiCheck) {
__ incq(rax);
__ movl(rcx, Immediate(-1));
- __ SmiTag(rcx, rcx);
+ __ SmiTag(rcx);
cond = masm->CheckSmi(rcx);
__ j(NegateCondition(cond), &exit);
@@ -366,7 +366,7 @@ TEST(SmiCheck) {
__ incq(rax);
__ movl(rcx, Immediate(Smi::kMaxValue));
- __ SmiTag(rcx, rcx);
+ __ SmiTag(rcx);
cond = masm->CheckSmi(rcx);
__ j(NegateCondition(cond), &exit);
@@ -377,7 +377,7 @@ TEST(SmiCheck) {
__ incq(rax);
__ movl(rcx, Immediate(Smi::kMinValue));
- __ SmiTag(rcx, rcx);
+ __ SmiTag(rcx);
cond = masm->CheckSmi(rcx);
__ j(NegateCondition(cond), &exit);
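Note: the one-operand SmiTag form tags a register in place, and cmp_tagged compares only the tagged (possibly compressed) bits, which is why the plain cmpq comparisons were replaced. A sketch in the style of the test body, assuming the test's __ masm-> macro and exit label:

    __ movq(rcx, Immediate(42));
    __ SmiTag(rcx);                         // tag rcx in place
    __ Set(rdx, Smi::FromInt(42).ptr());
    __ cmp_tagged(rcx, rdx);                // compare the tagged width only
    __ j(not_equal, &exit);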
diff --git a/deps/v8/test/cctest/test-modules.cc b/deps/v8/test/cctest/test-modules.cc
index 0f2bfd2a5f..d7cb6e610e 100644
--- a/deps/v8/test/cctest/test-modules.cc
+++ b/deps/v8/test/cctest/test-modules.cc
@@ -14,6 +14,7 @@ using v8::Isolate;
using v8::Local;
using v8::MaybeLocal;
using v8::Module;
+using v8::Promise;
using v8::ScriptCompiler;
using v8::ScriptOrigin;
using v8::String;
@@ -196,99 +197,480 @@ static MaybeLocal<Module> CompileSpecifierAsModuleResolveCallback(
}
TEST(ModuleEvaluation) {
- Isolate* isolate = CcTest::isolate();
- HandleScope scope(isolate);
- LocalContext env;
- v8::TryCatch try_catch(isolate);
+ bool prev_top_level_await = i::FLAG_harmony_top_level_await;
+ for (auto top_level_await : {true, false}) {
+ i::FLAG_harmony_top_level_await = top_level_await;
- Local<String> source_text = v8_str(
- "import 'Object.expando = 5';"
- "import 'Object.expando *= 2';");
- ScriptOrigin origin = ModuleOrigin(v8_str("file.js"), CcTest::isolate());
- ScriptCompiler::Source source(source_text, origin);
- Local<Module> module =
- ScriptCompiler::CompileModule(isolate, &source).ToLocalChecked();
- CHECK_EQ(Module::kUninstantiated, module->GetStatus());
- CHECK(module
- ->InstantiateModule(env.local(),
- CompileSpecifierAsModuleResolveCallback)
- .FromJust());
- CHECK_EQ(Module::kInstantiated, module->GetStatus());
- CHECK(!module->Evaluate(env.local()).IsEmpty());
- CHECK_EQ(Module::kEvaluated, module->GetStatus());
- ExpectInt32("Object.expando", 10);
+ Isolate* isolate = CcTest::isolate();
+ HandleScope scope(isolate);
+ LocalContext env;
+ v8::TryCatch try_catch(isolate);
- CHECK(!try_catch.HasCaught());
+ Local<String> source_text = v8_str(
+ "import 'Object.expando = 5';"
+ "import 'Object.expando *= 2';");
+ ScriptOrigin origin = ModuleOrigin(v8_str("file.js"), CcTest::isolate());
+ ScriptCompiler::Source source(source_text, origin);
+ Local<Module> module =
+ ScriptCompiler::CompileModule(isolate, &source).ToLocalChecked();
+ CHECK_EQ(Module::kUninstantiated, module->GetStatus());
+ CHECK(module
+ ->InstantiateModule(env.local(),
+ CompileSpecifierAsModuleResolveCallback)
+ .FromJust());
+ CHECK_EQ(Module::kInstantiated, module->GetStatus());
+
+ MaybeLocal<Value> result = module->Evaluate(env.local());
+ CHECK_EQ(Module::kEvaluated, module->GetStatus());
+ if (i::FLAG_harmony_top_level_await) {
+ Local<Promise> promise = Local<Promise>::Cast(result.ToLocalChecked());
+ CHECK_EQ(promise->State(), v8::Promise::kFulfilled);
+ CHECK(promise->Result()->IsUndefined());
+ } else {
+ CHECK(!result.IsEmpty());
+ ExpectInt32("Object.expando", 10);
+ }
+ CHECK(!try_catch.HasCaught());
+ }
+ i::FLAG_harmony_top_level_await = prev_top_level_await;
}
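Note: with i::FLAG_harmony_top_level_await enabled, Module::Evaluate resolves to a promise rather than the completion value, so the result has to be unwrapped as above. A condensed sketch of the dual-mode handling, reusing env, module, and the flag from the surrounding test:

    MaybeLocal<Value> result = module->Evaluate(env.local());
    if (i::FLAG_harmony_top_level_await) {
      Local<Promise> promise = Local<Promise>::Cast(result.ToLocalChecked());
      CHECK_EQ(promise->State(), v8::Promise::kFulfilled);
      CHECK(promise->Result()->IsUndefined());
    } else {
      CHECK(!result.IsEmpty());
    }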
-TEST(ModuleEvaluationError) {
- Isolate* isolate = CcTest::isolate();
- HandleScope scope(isolate);
- LocalContext env;
- v8::TryCatch try_catch(isolate);
+TEST(ModuleEvaluationError1) {
+ bool prev_top_level_await = i::FLAG_harmony_top_level_await;
+ for (auto top_level_await : {true, false}) {
+ i::FLAG_harmony_top_level_await = top_level_await;
- Local<String> source_text =
- v8_str("Object.x = (Object.x || 0) + 1; throw 'boom';");
- ScriptOrigin origin = ModuleOrigin(v8_str("file.js"), CcTest::isolate());
- ScriptCompiler::Source source(source_text, origin);
- Local<Module> module =
- ScriptCompiler::CompileModule(isolate, &source).ToLocalChecked();
- CHECK_EQ(Module::kUninstantiated, module->GetStatus());
- CHECK(module
- ->InstantiateModule(env.local(),
- CompileSpecifierAsModuleResolveCallback)
- .FromJust());
- CHECK_EQ(Module::kInstantiated, module->GetStatus());
+ Isolate* isolate = CcTest::isolate();
+ HandleScope scope(isolate);
+ LocalContext env;
+ v8::TryCatch try_catch(isolate);
- {
- v8::TryCatch inner_try_catch(isolate);
- CHECK(module->Evaluate(env.local()).IsEmpty());
- CHECK(inner_try_catch.HasCaught());
- CHECK(inner_try_catch.Exception()->StrictEquals(v8_str("boom")));
- CHECK_EQ(Module::kErrored, module->GetStatus());
- Local<Value> exception = module->GetException();
- CHECK(exception->StrictEquals(v8_str("boom")));
- ExpectInt32("Object.x", 1);
+ Local<String> source_text =
+ v8_str("Object.x = (Object.x || 0) + 1; throw 'boom';");
+ ScriptOrigin origin = ModuleOrigin(v8_str("file.js"), CcTest::isolate());
+ ScriptCompiler::Source source(source_text, origin);
+ Local<Module> module =
+ ScriptCompiler::CompileModule(isolate, &source).ToLocalChecked();
+ CHECK_EQ(Module::kUninstantiated, module->GetStatus());
+ CHECK(module
+ ->InstantiateModule(env.local(),
+ CompileSpecifierAsModuleResolveCallback)
+ .FromJust());
+ CHECK_EQ(Module::kInstantiated, module->GetStatus());
+
+ MaybeLocal<Value> result_1;
+ {
+ v8::TryCatch inner_try_catch(isolate);
+ result_1 = module->Evaluate(env.local());
+ CHECK_EQ(Module::kErrored, module->GetStatus());
+ Local<Value> exception = module->GetException();
+ CHECK(exception->StrictEquals(v8_str("boom")));
+ ExpectInt32("Object.x", 1);
+ CHECK(inner_try_catch.HasCaught());
+ CHECK(inner_try_catch.Exception()->StrictEquals(v8_str("boom")));
+ }
+
+ MaybeLocal<Value> result_2;
+ {
+ v8::TryCatch inner_try_catch(isolate);
+ result_2 = module->Evaluate(env.local());
+ CHECK_EQ(Module::kErrored, module->GetStatus());
+ Local<Value> exception = module->GetException();
+ CHECK(exception->StrictEquals(v8_str("boom")));
+ ExpectInt32("Object.x", 1);
+
+ if (i::FLAG_harmony_top_level_await) {
+ // With top level await we do not rethrow the exception.
+ CHECK(!inner_try_catch.HasCaught());
+ } else {
+ CHECK(inner_try_catch.HasCaught());
+ CHECK(inner_try_catch.Exception()->StrictEquals(v8_str("boom")));
+ }
+ }
+ if (i::FLAG_harmony_top_level_await) {
+ // With top level await, errored evaluation returns a rejected promise
+ // with the exception.
+ Local<Promise> promise_1 =
+ Local<Promise>::Cast(result_1.ToLocalChecked());
+ Local<Promise> promise_2 =
+ Local<Promise>::Cast(result_2.ToLocalChecked());
+ CHECK_EQ(promise_1->State(), v8::Promise::kRejected);
+ CHECK_EQ(promise_2->State(), v8::Promise::kRejected);
+ CHECK_EQ(promise_1->Result(), module->GetException());
+ CHECK_EQ(promise_2->Result(), module->GetException());
+ } else {
+ CHECK(result_1.IsEmpty() && result_2.IsEmpty());
+ }
+
+ CHECK(!try_catch.HasCaught());
}
+ i::FLAG_harmony_top_level_await = prev_top_level_await;
+}
- {
- v8::TryCatch inner_try_catch(isolate);
- CHECK(module->Evaluate(env.local()).IsEmpty());
- CHECK(inner_try_catch.HasCaught());
- CHECK(inner_try_catch.Exception()->StrictEquals(v8_str("boom")));
- CHECK_EQ(Module::kErrored, module->GetStatus());
- Local<Value> exception = module->GetException();
- CHECK(exception->StrictEquals(v8_str("boom")));
- ExpectInt32("Object.x", 1);
+static Local<Module> failure_module;
+static Local<Module> dependent_module;
+MaybeLocal<Module> ResolveCallbackForModuleEvaluationError2(
+ Local<Context> context, Local<String> specifier, Local<Module> referrer) {
+ if (specifier->StrictEquals(v8_str("./failure.js"))) {
+ return failure_module;
+ } else {
+ CHECK(specifier->StrictEquals(v8_str("./dependent.js")));
+ return dependent_module;
}
+}
- CHECK(!try_catch.HasCaught());
+TEST(ModuleEvaluationError2) {
+ bool prev_top_level_await = i::FLAG_harmony_top_level_await;
+ for (auto top_level_await : {true, false}) {
+ i::FLAG_harmony_top_level_await = top_level_await;
+
+ Isolate* isolate = CcTest::isolate();
+ HandleScope scope(isolate);
+ LocalContext env;
+ v8::TryCatch try_catch(isolate);
+
+ Local<String> failure_text = v8_str("throw 'boom';");
+ ScriptOrigin failure_origin =
+ ModuleOrigin(v8_str("failure.js"), CcTest::isolate());
+ ScriptCompiler::Source failure_source(failure_text, failure_origin);
+ failure_module = ScriptCompiler::CompileModule(isolate, &failure_source)
+ .ToLocalChecked();
+ CHECK_EQ(Module::kUninstantiated, failure_module->GetStatus());
+ CHECK(failure_module
+ ->InstantiateModule(env.local(),
+ ResolveCallbackForModuleEvaluationError2)
+ .FromJust());
+ CHECK_EQ(Module::kInstantiated, failure_module->GetStatus());
+
+ MaybeLocal<Value> result_1;
+ {
+ v8::TryCatch inner_try_catch(isolate);
+ result_1 = failure_module->Evaluate(env.local());
+ CHECK_EQ(Module::kErrored, failure_module->GetStatus());
+ Local<Value> exception = failure_module->GetException();
+ CHECK(exception->StrictEquals(v8_str("boom")));
+ CHECK(inner_try_catch.HasCaught());
+ CHECK(inner_try_catch.Exception()->StrictEquals(v8_str("boom")));
+ }
+
+ Local<String> dependent_text =
+ v8_str("import './failure.js'; export const c = 123;");
+ ScriptOrigin dependent_origin =
+ ModuleOrigin(v8_str("dependent.js"), CcTest::isolate());
+ ScriptCompiler::Source dependent_source(dependent_text, dependent_origin);
+ dependent_module = ScriptCompiler::CompileModule(isolate, &dependent_source)
+ .ToLocalChecked();
+ CHECK_EQ(Module::kUninstantiated, dependent_module->GetStatus());
+ CHECK(dependent_module
+ ->InstantiateModule(env.local(),
+ ResolveCallbackForModuleEvaluationError2)
+ .FromJust());
+ CHECK_EQ(Module::kInstantiated, dependent_module->GetStatus());
+
+ MaybeLocal<Value> result_2;
+ {
+ v8::TryCatch inner_try_catch(isolate);
+ result_2 = dependent_module->Evaluate(env.local());
+ CHECK_EQ(Module::kErrored, dependent_module->GetStatus());
+ Local<Value> exception = dependent_module->GetException();
+ CHECK(exception->StrictEquals(v8_str("boom")));
+ CHECK_EQ(exception, failure_module->GetException());
+
+ if (i::FLAG_harmony_top_level_await) {
+ // With top level await we do not rethrow the exception.
+ CHECK(!inner_try_catch.HasCaught());
+ } else {
+ CHECK(inner_try_catch.HasCaught());
+ CHECK(inner_try_catch.Exception()->StrictEquals(v8_str("boom")));
+ }
+ }
+
+ if (i::FLAG_harmony_top_level_await) {
+ // With top level await, errored evaluation returns a rejected promise
+ // with the exception.
+ Local<Promise> promise_1 =
+ Local<Promise>::Cast(result_1.ToLocalChecked());
+ Local<Promise> promise_2 =
+ Local<Promise>::Cast(result_2.ToLocalChecked());
+ CHECK_EQ(promise_1->State(), v8::Promise::kRejected);
+ CHECK_EQ(promise_2->State(), v8::Promise::kRejected);
+ CHECK_EQ(promise_1->Result(), failure_module->GetException());
+ CHECK_EQ(promise_2->Result(), failure_module->GetException());
+ } else {
+ CHECK(result_1.IsEmpty() && result_2.IsEmpty());
+ }
+
+ CHECK(!try_catch.HasCaught());
+ }
+ i::FLAG_harmony_top_level_await = prev_top_level_await;
}
TEST(ModuleEvaluationCompletion1) {
+ bool prev_top_level_await = i::FLAG_harmony_top_level_await;
+ for (auto top_level_await : {true, false}) {
+ i::FLAG_harmony_top_level_await = top_level_await;
+
+ Isolate* isolate = CcTest::isolate();
+ HandleScope scope(isolate);
+ LocalContext env;
+ v8::TryCatch try_catch(isolate);
+
+ const char* sources[] = {
+ "",
+ "var a = 1",
+ "import '42'",
+ "export * from '42'",
+ "export {} from '42'",
+ "export {}",
+ "var a = 1; export {a}",
+ "export function foo() {}",
+ "export class C extends null {}",
+ "export let a = 1",
+ "export default 1",
+ "export default function foo() {}",
+ "export default function () {}",
+ "export default (function () {})",
+ "export default class C extends null {}",
+ "export default (class C extends null {})",
+ "for (var i = 0; i < 5; ++i) {}",
+ };
+
+ for (auto src : sources) {
+ Local<String> source_text = v8_str(src);
+ ScriptOrigin origin = ModuleOrigin(v8_str("file.js"), CcTest::isolate());
+ ScriptCompiler::Source source(source_text, origin);
+ Local<Module> module =
+ ScriptCompiler::CompileModule(isolate, &source).ToLocalChecked();
+ CHECK_EQ(Module::kUninstantiated, module->GetStatus());
+ CHECK(module
+ ->InstantiateModule(env.local(),
+ CompileSpecifierAsModuleResolveCallback)
+ .FromJust());
+ CHECK_EQ(Module::kInstantiated, module->GetStatus());
+
+ // Evaluate twice.
+ Local<Value> result_1 = module->Evaluate(env.local()).ToLocalChecked();
+ CHECK_EQ(Module::kEvaluated, module->GetStatus());
+ Local<Value> result_2 = module->Evaluate(env.local()).ToLocalChecked();
+ CHECK_EQ(Module::kEvaluated, module->GetStatus());
+
+ if (i::FLAG_harmony_top_level_await) {
+ Local<Promise> promise = Local<Promise>::Cast(result_1);
+ CHECK_EQ(promise->State(), v8::Promise::kFulfilled);
+ CHECK(promise->Result()->IsUndefined());
+
+ // Second evaluation should return the same promise.
+ Local<Promise> promise_too = Local<Promise>::Cast(result_2);
+ CHECK_EQ(promise, promise_too);
+ CHECK_EQ(promise_too->State(), v8::Promise::kFulfilled);
+ CHECK(promise_too->Result()->IsUndefined());
+ } else {
+ CHECK(result_1->IsUndefined());
+ CHECK(result_2->IsUndefined());
+ }
+ }
+ CHECK(!try_catch.HasCaught());
+ }
+ i::FLAG_harmony_top_level_await = prev_top_level_await;
+}
+
+TEST(ModuleEvaluationCompletion2) {
+ bool prev_top_level_await = i::FLAG_harmony_top_level_await;
+ for (auto top_level_await : {true, false}) {
+ i::FLAG_harmony_top_level_await = top_level_await;
+
+ Isolate* isolate = CcTest::isolate();
+ HandleScope scope(isolate);
+ LocalContext env;
+ v8::TryCatch try_catch(isolate);
+
+ const char* sources[] = {
+ "'gaga'; ",
+ "'gaga'; var a = 1",
+ "'gaga'; import '42'",
+ "'gaga'; export * from '42'",
+ "'gaga'; export {} from '42'",
+ "'gaga'; export {}",
+ "'gaga'; var a = 1; export {a}",
+ "'gaga'; export function foo() {}",
+ "'gaga'; export class C extends null {}",
+ "'gaga'; export let a = 1",
+ "'gaga'; export default 1",
+ "'gaga'; export default function foo() {}",
+ "'gaga'; export default function () {}",
+ "'gaga'; export default (function () {})",
+ "'gaga'; export default class C extends null {}",
+ "'gaga'; export default (class C extends null {})",
+ };
+
+ for (auto src : sources) {
+ Local<String> source_text = v8_str(src);
+ ScriptOrigin origin = ModuleOrigin(v8_str("file.js"), CcTest::isolate());
+ ScriptCompiler::Source source(source_text, origin);
+ Local<Module> module =
+ ScriptCompiler::CompileModule(isolate, &source).ToLocalChecked();
+ CHECK_EQ(Module::kUninstantiated, module->GetStatus());
+ CHECK(module
+ ->InstantiateModule(env.local(),
+ CompileSpecifierAsModuleResolveCallback)
+ .FromJust());
+ CHECK_EQ(Module::kInstantiated, module->GetStatus());
+
+ Local<Value> result_1 = module->Evaluate(env.local()).ToLocalChecked();
+ CHECK_EQ(Module::kEvaluated, module->GetStatus());
+
+ Local<Value> result_2 = module->Evaluate(env.local()).ToLocalChecked();
+ CHECK_EQ(Module::kEvaluated, module->GetStatus());
+ if (i::FLAG_harmony_top_level_await) {
+ Local<Promise> promise = Local<Promise>::Cast(result_1);
+ CHECK_EQ(promise->State(), v8::Promise::kFulfilled);
+ CHECK(promise->Result()->IsUndefined());
+
+ // Second evaluation should return the same promise.
+ Local<Promise> promise_too = Local<Promise>::Cast(result_2);
+ CHECK_EQ(promise, promise_too);
+ CHECK_EQ(promise_too->State(), v8::Promise::kFulfilled);
+ CHECK(promise_too->Result()->IsUndefined());
+ } else {
+ CHECK(result_1->StrictEquals(v8_str("gaga")));
+ CHECK(result_2->IsUndefined());
+ }
+ }
+ CHECK(!try_catch.HasCaught());
+ }
+ i::FLAG_harmony_top_level_await = prev_top_level_await;
+}
+
+TEST(ModuleNamespace) {
+ bool prev_top_level_await = i::FLAG_harmony_top_level_await;
+ for (auto top_level_await : {true, false}) {
+ i::FLAG_harmony_top_level_await = top_level_await;
+
+ Isolate* isolate = CcTest::isolate();
+ HandleScope scope(isolate);
+ LocalContext env;
+ v8::TryCatch try_catch(isolate);
+
+ Local<v8::Object> ReferenceError =
+ CompileRun("ReferenceError")->ToObject(env.local()).ToLocalChecked();
+
+ Local<String> source_text = v8_str(
+ "import {a, b} from 'export var a = 1; export let b = 2';"
+ "export function geta() {return a};"
+ "export function getb() {return b};"
+ "export let radio = 3;"
+ "export var gaga = 4;");
+ ScriptOrigin origin = ModuleOrigin(v8_str("file.js"), CcTest::isolate());
+ ScriptCompiler::Source source(source_text, origin);
+ Local<Module> module =
+ ScriptCompiler::CompileModule(isolate, &source).ToLocalChecked();
+ CHECK_EQ(Module::kUninstantiated, module->GetStatus());
+ CHECK(module
+ ->InstantiateModule(env.local(),
+ CompileSpecifierAsModuleResolveCallback)
+ .FromJust());
+ CHECK_EQ(Module::kInstantiated, module->GetStatus());
+ Local<Value> ns = module->GetModuleNamespace();
+ CHECK_EQ(Module::kInstantiated, module->GetStatus());
+ Local<v8::Object> nsobj = ns->ToObject(env.local()).ToLocalChecked();
+ CHECK_EQ(nsobj->CreationContext(), env.local());
+
+ // a, b
+ CHECK(nsobj->Get(env.local(), v8_str("a")).ToLocalChecked()->IsUndefined());
+ CHECK(nsobj->Get(env.local(), v8_str("b")).ToLocalChecked()->IsUndefined());
+
+ // geta
+ {
+ auto geta = nsobj->Get(env.local(), v8_str("geta")).ToLocalChecked();
+ auto a = geta.As<v8::Function>()
+ ->Call(env.local(), geta, 0, nullptr)
+ .ToLocalChecked();
+ CHECK(a->IsUndefined());
+ }
+
+ // getb
+ {
+ v8::TryCatch inner_try_catch(isolate);
+ auto getb = nsobj->Get(env.local(), v8_str("getb")).ToLocalChecked();
+ CHECK(getb.As<v8::Function>()
+ ->Call(env.local(), getb, 0, nullptr)
+ .IsEmpty());
+ CHECK(inner_try_catch.HasCaught());
+ CHECK(inner_try_catch.Exception()
+ ->InstanceOf(env.local(), ReferenceError)
+ .FromJust());
+ }
+
+ // radio
+ {
+ v8::TryCatch inner_try_catch(isolate);
+ // https://bugs.chromium.org/p/v8/issues/detail?id=7235
+ // CHECK(nsobj->Get(env.local(), v8_str("radio")).IsEmpty());
+ CHECK(nsobj->Get(env.local(), v8_str("radio"))
+ .ToLocalChecked()
+ ->IsUndefined());
+ CHECK(inner_try_catch.HasCaught());
+ CHECK(inner_try_catch.Exception()
+ ->InstanceOf(env.local(), ReferenceError)
+ .FromJust());
+ }
+
+ // gaga
+ {
+ auto gaga = nsobj->Get(env.local(), v8_str("gaga")).ToLocalChecked();
+ CHECK(gaga->IsUndefined());
+ }
+
+ CHECK(!try_catch.HasCaught());
+ CHECK_EQ(Module::kInstantiated, module->GetStatus());
+ module->Evaluate(env.local()).ToLocalChecked();
+ CHECK_EQ(Module::kEvaluated, module->GetStatus());
+
+ // geta
+ {
+ auto geta = nsobj->Get(env.local(), v8_str("geta")).ToLocalChecked();
+ auto a = geta.As<v8::Function>()
+ ->Call(env.local(), geta, 0, nullptr)
+ .ToLocalChecked();
+ CHECK_EQ(1, a->Int32Value(env.local()).FromJust());
+ }
+
+ // getb
+ {
+ auto getb = nsobj->Get(env.local(), v8_str("getb")).ToLocalChecked();
+ auto b = getb.As<v8::Function>()
+ ->Call(env.local(), getb, 0, nullptr)
+ .ToLocalChecked();
+ CHECK_EQ(2, b->Int32Value(env.local()).FromJust());
+ }
+
+ // radio
+ {
+ auto radio = nsobj->Get(env.local(), v8_str("radio")).ToLocalChecked();
+ CHECK_EQ(3, radio->Int32Value(env.local()).FromJust());
+ }
+
+ // gaga
+ {
+ auto gaga = nsobj->Get(env.local(), v8_str("gaga")).ToLocalChecked();
+ CHECK_EQ(4, gaga->Int32Value(env.local()).FromJust());
+ }
+ CHECK(!try_catch.HasCaught());
+ }
+ i::FLAG_harmony_top_level_await = prev_top_level_await;
+}
+
+TEST(ModuleEvaluationTopLevelAwait) {
+ bool previous_top_level_await_flag_value = i::FLAG_harmony_top_level_await;
+ i::FLAG_harmony_top_level_await = true;
Isolate* isolate = CcTest::isolate();
HandleScope scope(isolate);
LocalContext env;
v8::TryCatch try_catch(isolate);
-
const char* sources[] = {
- "",
- "var a = 1",
- "import '42'",
- "export * from '42'",
- "export {} from '42'",
- "export {}",
- "var a = 1; export {a}",
- "export function foo() {}",
- "export class C extends null {}",
- "export let a = 1",
- "export default 1",
- "export default function foo() {}",
- "export default function () {}",
- "export default (function () {})",
- "export default class C extends null {}",
- "export default (class C extends null {})",
- "for (var i = 0; i < 5; ++i) {}",
+ "await 42",
+ "import 'await 42';",
+ "import '42'; import 'await 42';",
};
for (auto src : sources) {
@@ -303,41 +685,30 @@ TEST(ModuleEvaluationCompletion1) {
CompileSpecifierAsModuleResolveCallback)
.FromJust());
CHECK_EQ(Module::kInstantiated, module->GetStatus());
- CHECK(module->Evaluate(env.local()).ToLocalChecked()->IsUndefined());
- CHECK_EQ(Module::kEvaluated, module->GetStatus());
- CHECK(module->Evaluate(env.local()).ToLocalChecked()->IsUndefined());
+ Local<Promise> promise =
+ Local<Promise>::Cast(module->Evaluate(env.local()).ToLocalChecked());
CHECK_EQ(Module::kEvaluated, module->GetStatus());
+ CHECK_EQ(promise->State(), v8::Promise::kFulfilled);
+ CHECK(promise->Result()->IsUndefined());
+ CHECK(!try_catch.HasCaught());
}
-
- CHECK(!try_catch.HasCaught());
+ i::FLAG_harmony_top_level_await = previous_top_level_await_flag_value;
}
-TEST(ModuleEvaluationCompletion2) {
+TEST(ModuleEvaluationTopLevelAwaitError) {
+ bool previous_top_level_await_flag_value = i::FLAG_harmony_top_level_await;
+ i::FLAG_harmony_top_level_await = true;
Isolate* isolate = CcTest::isolate();
HandleScope scope(isolate);
LocalContext env;
- v8::TryCatch try_catch(isolate);
-
const char* sources[] = {
- "'gaga'; ",
- "'gaga'; var a = 1",
- "'gaga'; import '42'",
- "'gaga'; export * from '42'",
- "'gaga'; export {} from '42'",
- "'gaga'; export {}",
- "'gaga'; var a = 1; export {a}",
- "'gaga'; export function foo() {}",
- "'gaga'; export class C extends null {}",
- "'gaga'; export let a = 1",
- "'gaga'; export default 1",
- "'gaga'; export default function foo() {}",
- "'gaga'; export default function () {}",
- "'gaga'; export default (function () {})",
- "'gaga'; export default class C extends null {}",
- "'gaga'; export default (class C extends null {})",
+ "await 42; throw 'boom';",
+ "import 'await 42; throw \"boom\";';",
+ "import '42'; import 'await 42; throw \"boom\";';",
};
for (auto src : sources) {
+ v8::TryCatch try_catch(isolate);
Local<String> source_text = v8_str(src);
ScriptOrigin origin = ModuleOrigin(v8_str("file.js"), CcTest::isolate());
ScriptCompiler::Source source(source_text, origin);
@@ -349,126 +720,170 @@ TEST(ModuleEvaluationCompletion2) {
CompileSpecifierAsModuleResolveCallback)
.FromJust());
CHECK_EQ(Module::kInstantiated, module->GetStatus());
- CHECK(module->Evaluate(env.local())
- .ToLocalChecked()
- ->StrictEquals(v8_str("gaga")));
- CHECK_EQ(Module::kEvaluated, module->GetStatus());
- CHECK(module->Evaluate(env.local()).ToLocalChecked()->IsUndefined());
- CHECK_EQ(Module::kEvaluated, module->GetStatus());
+ Local<Promise> promise =
+ Local<Promise>::Cast(module->Evaluate(env.local()).ToLocalChecked());
+ CHECK_EQ(Module::kErrored, module->GetStatus());
+ CHECK_EQ(promise->State(), v8::Promise::kRejected);
+ CHECK(promise->Result()->StrictEquals(v8_str("boom")));
+ CHECK(module->GetException()->StrictEquals(v8_str("boom")));
+
+ // TODO(joshualitt): Verify whether this should throw; the rejection
+ // may only surface asynchronously.
+ CHECK(!try_catch.HasCaught());
}
+ i::FLAG_harmony_top_level_await = previous_top_level_await_flag_value;
+}
- CHECK(!try_catch.HasCaught());
+namespace {
+struct DynamicImportData {
+ DynamicImportData(Isolate* isolate_, Local<Promise::Resolver> resolver_,
+ Local<Context> context_, bool should_resolve_)
+ : isolate(isolate_), should_resolve(should_resolve_) {
+ resolver.Reset(isolate, resolver_);
+ context.Reset(isolate, context_);
+ }
+
+ Isolate* isolate;
+ v8::Global<Promise::Resolver> resolver;
+ v8::Global<Context> context;
+ bool should_resolve;
+};
+
+void DoHostImportModuleDynamically(void* import_data) {
+ std::unique_ptr<DynamicImportData> import_data_(
+ static_cast<DynamicImportData*>(import_data));
+ Isolate* isolate(import_data_->isolate);
+ HandleScope handle_scope(isolate);
+
+ Local<Promise::Resolver> resolver(import_data_->resolver.Get(isolate));
+ Local<Context> realm(import_data_->context.Get(isolate));
+ Context::Scope context_scope(realm);
+
+ if (import_data_->should_resolve) {
+ resolver->Resolve(realm, True(isolate)).ToChecked();
+ } else {
+ resolver->Reject(realm, v8_str("boom")).ToChecked();
+ }
}
-TEST(ModuleNamespace) {
+v8::MaybeLocal<v8::Promise> HostImportModuleDynamicallyCallbackResolve(
+ Local<Context> context, Local<v8::ScriptOrModule> referrer,
+ Local<String> specifier) {
+ Isolate* isolate = context->GetIsolate();
+ Local<v8::Promise::Resolver> resolver =
+ v8::Promise::Resolver::New(context).ToLocalChecked();
+
+ DynamicImportData* data =
+ new DynamicImportData(isolate, resolver, context, true);
+ isolate->EnqueueMicrotask(DoHostImportModuleDynamically, data);
+ return resolver->GetPromise();
+}
+
+v8::MaybeLocal<v8::Promise> HostImportModuleDynamicallyCallbackReject(
+ Local<Context> context, Local<v8::ScriptOrModule> referrer,
+ Local<String> specifier) {
+ Isolate* isolate = context->GetIsolate();
+ Local<v8::Promise::Resolver> resolver =
+ v8::Promise::Resolver::New(context).ToLocalChecked();
+
+ DynamicImportData* data =
+ new DynamicImportData(isolate, resolver, context, false);
+ isolate->EnqueueMicrotask(DoHostImportModuleDynamically, data);
+ return resolver->GetPromise();
+}
+
+} // namespace
+
+TEST(ModuleEvaluationTopLevelAwaitDynamicImport) {
+ bool previous_top_level_await_flag_value = i::FLAG_harmony_top_level_await;
+ bool previous_dynamic_import_flag_value = i::FLAG_harmony_dynamic_import;
+ i::FLAG_harmony_top_level_await = true;
+ i::FLAG_harmony_dynamic_import = true;
Isolate* isolate = CcTest::isolate();
HandleScope scope(isolate);
+ isolate->SetMicrotasksPolicy(v8::MicrotasksPolicy::kExplicit);
+ isolate->SetHostImportModuleDynamicallyCallback(
+ HostImportModuleDynamicallyCallbackResolve);
LocalContext env;
v8::TryCatch try_catch(isolate);
+ const char* sources[] = {
+ "await import('foo');",
+ "import 'await import(\"foo\");';",
+ "import '42'; import 'await import(\"foo\");';",
+ };
- Local<v8::Object> ReferenceError =
- CompileRun("ReferenceError")->ToObject(env.local()).ToLocalChecked();
-
- Local<String> source_text = v8_str(
- "import {a, b} from 'export var a = 1; export let b = 2';"
- "export function geta() {return a};"
- "export function getb() {return b};"
- "export let radio = 3;"
- "export var gaga = 4;");
- ScriptOrigin origin = ModuleOrigin(v8_str("file.js"), CcTest::isolate());
- ScriptCompiler::Source source(source_text, origin);
- Local<Module> module =
- ScriptCompiler::CompileModule(isolate, &source).ToLocalChecked();
- CHECK_EQ(Module::kUninstantiated, module->GetStatus());
- CHECK(module
- ->InstantiateModule(env.local(),
- CompileSpecifierAsModuleResolveCallback)
- .FromJust());
- CHECK_EQ(Module::kInstantiated, module->GetStatus());
- Local<Value> ns = module->GetModuleNamespace();
- CHECK_EQ(Module::kInstantiated, module->GetStatus());
- Local<v8::Object> nsobj = ns->ToObject(env.local()).ToLocalChecked();
-
- // a, b
- CHECK(nsobj->Get(env.local(), v8_str("a")).ToLocalChecked()->IsUndefined());
- CHECK(nsobj->Get(env.local(), v8_str("b")).ToLocalChecked()->IsUndefined());
-
- // geta
- {
- auto geta = nsobj->Get(env.local(), v8_str("geta")).ToLocalChecked();
- auto a = geta.As<v8::Function>()
- ->Call(env.local(), geta, 0, nullptr)
- .ToLocalChecked();
- CHECK(a->IsUndefined());
- }
-
- // getb
- {
- v8::TryCatch inner_try_catch(isolate);
- auto getb = nsobj->Get(env.local(), v8_str("getb")).ToLocalChecked();
- CHECK(
- getb.As<v8::Function>()->Call(env.local(), getb, 0, nullptr).IsEmpty());
- CHECK(inner_try_catch.HasCaught());
- CHECK(inner_try_catch.Exception()
- ->InstanceOf(env.local(), ReferenceError)
+ for (auto src : sources) {
+ Local<String> source_text = v8_str(src);
+ ScriptOrigin origin = ModuleOrigin(v8_str("file.js"), CcTest::isolate());
+ ScriptCompiler::Source source(source_text, origin);
+ Local<Module> module =
+ ScriptCompiler::CompileModule(isolate, &source).ToLocalChecked();
+ CHECK_EQ(Module::kUninstantiated, module->GetStatus());
+ CHECK(module
+ ->InstantiateModule(env.local(),
+ CompileSpecifierAsModuleResolveCallback)
.FromJust());
- }
+ CHECK_EQ(Module::kInstantiated, module->GetStatus());
- // radio
- {
- v8::TryCatch inner_try_catch(isolate);
- // https://bugs.chromium.org/p/v8/issues/detail?id=7235
- // CHECK(nsobj->Get(env.local(), v8_str("radio")).IsEmpty());
- CHECK(nsobj->Get(env.local(), v8_str("radio"))
- .ToLocalChecked()
- ->IsUndefined());
- CHECK(inner_try_catch.HasCaught());
- CHECK(inner_try_catch.Exception()
- ->InstanceOf(env.local(), ReferenceError)
- .FromJust());
- }
+ Local<Promise> promise =
+ Local<Promise>::Cast(module->Evaluate(env.local()).ToLocalChecked());
+ CHECK_EQ(Module::kEvaluated, module->GetStatus());
+ CHECK_EQ(promise->State(), v8::Promise::kPending);
+ CHECK(!try_catch.HasCaught());
- // gaga
- {
- auto gaga = nsobj->Get(env.local(), v8_str("gaga")).ToLocalChecked();
- CHECK(gaga->IsUndefined());
+ isolate->RunMicrotasks();
+ CHECK_EQ(promise->State(), v8::Promise::kFulfilled);
}
+ i::FLAG_harmony_top_level_await = previous_top_level_await_flag_value;
+ i::FLAG_harmony_dynamic_import = previous_dynamic_import_flag_value;
+}
- CHECK(!try_catch.HasCaught());
- CHECK_EQ(Module::kInstantiated, module->GetStatus());
- module->Evaluate(env.local()).ToLocalChecked();
- CHECK_EQ(Module::kEvaluated, module->GetStatus());
-
- // geta
- {
- auto geta = nsobj->Get(env.local(), v8_str("geta")).ToLocalChecked();
- auto a = geta.As<v8::Function>()
- ->Call(env.local(), geta, 0, nullptr)
- .ToLocalChecked();
- CHECK_EQ(1, a->Int32Value(env.local()).FromJust());
- }
+TEST(ModuleEvaluationTopLevelAwaitDynamicImportError) {
+ bool previous_top_level_await_flag_value = i::FLAG_harmony_top_level_await;
+ bool previous_dynamic_import_flag_value = i::FLAG_harmony_dynamic_import;
+ i::FLAG_harmony_top_level_await = true;
+ i::FLAG_harmony_dynamic_import = true;
+ Isolate* isolate = CcTest::isolate();
+ HandleScope scope(isolate);
+ isolate->SetMicrotasksPolicy(v8::MicrotasksPolicy::kExplicit);
+ isolate->SetHostImportModuleDynamicallyCallback(
+ HostImportModuleDynamicallyCallbackReject);
+ LocalContext env;
+ v8::TryCatch try_catch(isolate);
+ const char* sources[] = {
+ "await import('foo');",
+ "import 'await import(\"foo\");';",
+ "import '42'; import 'await import(\"foo\");';",
+ };
- // getb
- {
- auto getb = nsobj->Get(env.local(), v8_str("getb")).ToLocalChecked();
- auto b = getb.As<v8::Function>()
- ->Call(env.local(), getb, 0, nullptr)
- .ToLocalChecked();
- CHECK_EQ(2, b->Int32Value(env.local()).FromJust());
- }
+ for (auto src : sources) {
+ Local<String> source_text = v8_str(src);
+ ScriptOrigin origin = ModuleOrigin(v8_str("file.js"), CcTest::isolate());
+ ScriptCompiler::Source source(source_text, origin);
+ Local<Module> module =
+ ScriptCompiler::CompileModule(isolate, &source).ToLocalChecked();
+ CHECK_EQ(Module::kUninstantiated, module->GetStatus());
+ CHECK(module
+ ->InstantiateModule(env.local(),
+ CompileSpecifierAsModuleResolveCallback)
+ .FromJust());
+ CHECK_EQ(Module::kInstantiated, module->GetStatus());
- // radio
- {
- auto radio = nsobj->Get(env.local(), v8_str("radio")).ToLocalChecked();
- CHECK_EQ(3, radio->Int32Value(env.local()).FromJust());
- }
+ Local<Promise> promise =
+ Local<Promise>::Cast(module->Evaluate(env.local()).ToLocalChecked());
+ CHECK_EQ(Module::kEvaluated, module->GetStatus());
+ CHECK_EQ(promise->State(), v8::Promise::kPending);
+ CHECK(!try_catch.HasCaught());
- // gaga
- {
- auto gaga = nsobj->Get(env.local(), v8_str("gaga")).ToLocalChecked();
- CHECK_EQ(4, gaga->Int32Value(env.local()).FromJust());
+ isolate->RunMicrotasks();
+ CHECK_EQ(Module::kErrored, module->GetStatus());
+ CHECK_EQ(promise->State(), v8::Promise::kRejected);
+ CHECK(promise->Result()->StrictEquals(v8_str("boom")));
+ CHECK(module->GetException()->StrictEquals(v8_str("boom")));
+ CHECK(!try_catch.HasCaught());
}
-
- CHECK(!try_catch.HasCaught());
+ i::FLAG_harmony_top_level_await = previous_top_level_await_flag_value;
+ i::FLAG_harmony_dynamic_import = previous_dynamic_import_flag_value;
}
+
} // anonymous namespace
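Note: the dynamic-import tests settle the pending promise by hand: the microtask policy is made explicit, the host callback enqueues DoHostImportModuleDynamically, and RunMicrotasks() drives it to completion. A condensed sketch of that control flow, assuming a compiled and instantiated module as in the tests above:

    isolate->SetMicrotasksPolicy(v8::MicrotasksPolicy::kExplicit);
    isolate->SetHostImportModuleDynamicallyCallback(
        HostImportModuleDynamicallyCallbackResolve);
    Local<Promise> promise =
        Local<Promise>::Cast(module->Evaluate(env.local()).ToLocalChecked());
    CHECK_EQ(promise->State(), v8::Promise::kPending);
    isolate->RunMicrotasks();  // runs the queued import resolution
    CHECK_EQ(promise->State(), v8::Promise::kFulfilled);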
diff --git a/deps/v8/test/cctest/test-orderedhashtable.cc b/deps/v8/test/cctest/test-orderedhashtable.cc
index 44a845eb74..189f950b2e 100644
--- a/deps/v8/test/cctest/test-orderedhashtable.cc
+++ b/deps/v8/test/cctest/test-orderedhashtable.cc
@@ -1262,6 +1262,7 @@ TEST(OrderedHashMapHandlerInsertion) {
Verify(isolate, map);
CHECK(OrderedHashMapHandler::HasKey(isolate, map, key1));
CHECK(SmallOrderedHashMap::Is(map));
+
for (int i = 0; i < 1024; i++) {
Handle<Smi> key_i(Smi::FromInt(i), isolate);
Handle<Smi> value_i(Smi::FromInt(i), isolate);
@@ -1276,6 +1277,83 @@ TEST(OrderedHashMapHandlerInsertion) {
CHECK(OrderedHashMap::Is(map));
}
+TEST(OrderedHashSetHandlerDeletion) {
+ LocalContext context;
+ Isolate* isolate = GetIsolateFrom(&context);
+ HandleScope scope(isolate);
+
+ Handle<HeapObject> set =
+ OrderedHashSetHandler::Allocate(isolate, 4).ToHandleChecked();
+ Verify(isolate, set);
+
+ // Add a new key.
+ Handle<Smi> key1(Smi::FromInt(1), isolate);
+ CHECK(!OrderedHashSetHandler::HasKey(isolate, set, key1));
+ set = OrderedHashSetHandler::Add(isolate, set, key1).ToHandleChecked();
+ Verify(isolate, set);
+ CHECK(OrderedHashSetHandler::HasKey(isolate, set, key1));
+
+ // Add existing key.
+ set = OrderedHashSetHandler::Add(isolate, set, key1).ToHandleChecked();
+ Verify(isolate, set);
+ CHECK(OrderedHashSetHandler::HasKey(isolate, set, key1));
+ CHECK(SmallOrderedHashSet::Is(set));
+
+ // Remove a non-existing key.
+ Handle<Smi> key2(Smi::FromInt(2), isolate);
+ OrderedHashSetHandler::Delete(isolate, set, key2);
+ Verify(isolate, set);
+ CHECK(OrderedHashSetHandler::HasKey(isolate, set, key1));
+ CHECK(!OrderedHashSetHandler::HasKey(isolate, set, key2));
+ CHECK(SmallOrderedHashSet::Is(set));
+
+ // Remove an existing key.
+ OrderedHashSetHandler::Delete(isolate, set, key1);
+ Verify(isolate, set);
+ CHECK(!OrderedHashSetHandler::HasKey(isolate, set, key1));
+ CHECK(SmallOrderedHashSet::Is(set));
+}
+
+TEST(OrderedHashMapHandlerDeletion) {
+ LocalContext context;
+ Isolate* isolate = GetIsolateFrom(&context);
+ HandleScope scope(isolate);
+
+ Handle<HeapObject> map =
+ OrderedHashMapHandler::Allocate(isolate, 4).ToHandleChecked();
+ Verify(isolate, map);
+
+ // Add a new key.
+ Handle<Smi> key1(Smi::FromInt(1), isolate);
+ Handle<Smi> value1(Smi::FromInt(1), isolate);
+ CHECK(!OrderedHashMapHandler::HasKey(isolate, map, key1));
+ map =
+ OrderedHashMapHandler::Add(isolate, map, key1, value1).ToHandleChecked();
+ Verify(isolate, map);
+ CHECK(OrderedHashMapHandler::HasKey(isolate, map, key1));
+
+ // Add existing key.
+ map =
+ OrderedHashMapHandler::Add(isolate, map, key1, value1).ToHandleChecked();
+ Verify(isolate, map);
+ CHECK(OrderedHashMapHandler::HasKey(isolate, map, key1));
+ CHECK(SmallOrderedHashMap::Is(map));
+
+ // Remove a non-existing key.
+ Handle<Smi> key2(Smi::FromInt(2), isolate);
+ OrderedHashMapHandler::Delete(isolate, map, key2);
+ Verify(isolate, map);
+ CHECK(OrderedHashMapHandler::HasKey(isolate, map, key1));
+ CHECK(!OrderedHashMapHandler::HasKey(isolate, map, key2));
+ CHECK(SmallOrderedHashMap::Is(map));
+
+ // Remove an existing key.
+ OrderedHashMapHandler::Delete(isolate, map, key1);
+ Verify(isolate, map);
+ CHECK(!OrderedHashMapHandler::HasKey(isolate, map, key1));
+ CHECK(SmallOrderedHashMap::Is(map));
+}
+
TEST(OrderedNameDictionaryInsertion) {
LocalContext context;
Isolate* isolate = GetIsolateFrom(&context);
@@ -1798,6 +1876,49 @@ TEST(OrderedNameDictionaryHandlerInsertion) {
CHECK(table->IsOrderedNameDictionary());
}
+TEST(OrderedNameDictionaryHandlerDeletion) {
+ LocalContext context;
+ Isolate* isolate = GetIsolateFrom(&context);
+ HandleScope scope(isolate);
+
+ Handle<HeapObject> table =
+ OrderedNameDictionaryHandler::Allocate(isolate, 4).ToHandleChecked();
+ CHECK(table->IsSmallOrderedNameDictionary());
+ Verify(isolate, table);
+
+ // Add a new key.
+ Handle<String> value = isolate->factory()->InternalizeUtf8String("bar");
+ Handle<String> key = isolate->factory()->InternalizeUtf8String("foo");
+ Handle<String> key2 = isolate->factory()->InternalizeUtf8String("foo2");
+ PropertyDetails details = PropertyDetails::Empty();
+
+ table = OrderedNameDictionaryHandler::Add(isolate, table, key, value, details)
+ .ToHandleChecked();
+ DCHECK(key->IsUniqueName());
+ Verify(isolate, table);
+ CHECK(table->IsSmallOrderedNameDictionary());
+ CHECK_NE(OrderedNameDictionaryHandler::kNotFound,
+ OrderedNameDictionaryHandler::FindEntry(isolate, *table, *key));
+
+ // Remove a non-existing key.
+ OrderedNameDictionaryHandler::Delete(isolate, table, key2);
+ Verify(isolate, table);
+ CHECK(table->IsSmallOrderedNameDictionary());
+ CHECK_EQ(OrderedNameDictionaryHandler::kNotFound,
+ OrderedNameDictionaryHandler::FindEntry(isolate, *table, *key2));
+ CHECK_NE(OrderedNameDictionaryHandler::kNotFound,
+ OrderedNameDictionaryHandler::FindEntry(isolate, *table, *key));
+
+ // Remove an existing key.
+ OrderedNameDictionaryHandler::Delete(isolate, table, key);
+ Verify(isolate, table);
+ CHECK(table->IsSmallOrderedNameDictionary());
+ CHECK_EQ(OrderedNameDictionaryHandler::kNotFound,
+ OrderedNameDictionaryHandler::FindEntry(isolate, *table, *key));
+
+ CHECK(table->IsSmallOrderedNameDictionary());
+}
+
TEST(OrderedNameDictionarySetEntry) {
LocalContext context;
Isolate* isolate = GetIsolateFrom(&context);
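Note: the new deletion tests exercise the handler-level API end to end. A condensed sketch of the set variant, mirroring the steps above:

    Handle<HeapObject> set =
        OrderedHashSetHandler::Allocate(isolate, 4).ToHandleChecked();
    Handle<Smi> key(Smi::FromInt(1), isolate);
    set = OrderedHashSetHandler::Add(isolate, set, key).ToHandleChecked();
    CHECK(OrderedHashSetHandler::HasKey(isolate, set, key));
    OrderedHashSetHandler::Delete(isolate, set, key);
    CHECK(!OrderedHashSetHandler::HasKey(isolate, set, key));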
diff --git a/deps/v8/test/cctest/test-parsing.cc b/deps/v8/test/cctest/test-parsing.cc
index 857bd7a454..18f15af477 100644
--- a/deps/v8/test/cctest/test-parsing.cc
+++ b/deps/v8/test/cctest/test-parsing.cc
@@ -1506,8 +1506,11 @@ TEST(DiscardFunctionBody) {
fun = exp->AsObjectLiteral()->properties()->at(0)->value()->
AsFunctionLiteral();
} else {
- fun = exp->AsClassLiteral()->properties()->at(0)->value()->
- AsFunctionLiteral();
+ fun = exp->AsClassLiteral()
+ ->public_members()
+ ->at(0)
+ ->value()
+ ->AsFunctionLiteral();
}
}
CHECK(!fun->ShouldEagerCompile());
@@ -3608,6 +3611,14 @@ TEST(MaybeAssignedParameters) {
"g(arg)}"},
{true, "function f(arg) {g(arg); eval('arguments[0] = 42'); g(arg)}"},
{true, "function f(arg) {g(arg); g(() => arguments[0] = 42); g(arg)}"},
+
+ // default values
+ {false, "function f({x:arg = 1}) {}"},
+ {true, "function f({x:arg = 1}, {y:b=(arg=2)}) {}"},
+ {true, "function f({x:arg = (arg = 2)}) {}"},
+ {false, "var f = ({x:arg = 1}) => {}"},
+ {true, "var f = ({x:arg = 1}, {y:b=(arg=2)}) => {}"},
+ {true, "var f = ({x:arg = (arg = 2)}) => {}"},
};
const char* suffix = "; f";
@@ -5877,6 +5888,70 @@ TEST(PrivateMembersWrongAccessNoEarlyErrors) {
private_methods, arraysize(private_methods));
}
+TEST(PrivateStaticClassMethodsAndAccessorsNoErrors) {
+ // clang-format off
+ // Tests proposed class fields syntax.
+ const char* context_data[][2] = {{"(class {", "});"},
+ {"(class extends Base {", "});"},
+ {"class C {", "}"},
+ {"class C extends Base {", "}"},
+ {nullptr, nullptr}};
+ const char* class_body_data[] = {
+ "static #a() { }",
+ "static get #a() { }",
+ "static set #a(val) { }",
+ "static get #a() { } static set #a(val) { }",
+ "static *#a() { }",
+ "static async #a() { }",
+ "static async *#a() { }",
+ nullptr
+ };
+ // clang-format on
+
+ RunParserSyncTest(context_data, class_body_data, kError);
+
+ static const ParserFlag private_methods[] = {kAllowHarmonyPrivateMethods};
+ RunParserSyncTest(context_data, class_body_data, kSuccess, nullptr, 0,
+ private_methods, arraysize(private_methods));
+}
+
+TEST(PrivateStaticClassMethodsAndAccessorsDuplicateErrors) {
+ // clang-format off
+ // Tests proposed class fields syntax.
+ const char* context_data[][2] = {{"(class {", "});"},
+ {"(class extends Base {", "});"},
+ {"class C {", "}"},
+ {"class C extends Base {", "}"},
+ {nullptr, nullptr}};
+ const char* class_body_data[] = {
+ "static get #a() {} static get #a() {}",
+ "static get #a() {} static #a() {}",
+ "static get #a() {} get #a() {}",
+ "static get #a() {} set #a(val) {}",
+ "static get #a() {} #a() {}",
+
+ "static set #a(val) {} static set #a(val) {}",
+ "static set #a(val) {} static #a() {}",
+ "static set #a(val) {} get #a() {}",
+ "static set #a(val) {} set #a(val) {}",
+ "static set #a(val) {} #a() {}",
+
+ "static #a() {} static #a() {}",
+ "static #a() {} #a(val) {}",
+ "static #a() {} set #a(val) {}",
+ "static #a() {} get #a() {}",
+
+ nullptr
+ };
+ // clang-format on
+
+ RunParserSyncTest(context_data, class_body_data, kError);
+
+ static const ParserFlag private_methods[] = {kAllowHarmonyPrivateMethods};
+ RunParserSyncTest(context_data, class_body_data, kError, nullptr, 0,
+ private_methods, arraysize(private_methods));
+}
+
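Note: both new parser tests follow the same flag-gating pattern: the class bodies must fail to parse with private methods disabled, then yield the expected result (kSuccess or kError, as appropriate) once kAllowHarmonyPrivateMethods is passed. The skeleton, as used above:

    RunParserSyncTest(context_data, class_body_data, kError);  // flag off
    static const ParserFlag flags[] = {kAllowHarmonyPrivateMethods};
    RunParserSyncTest(context_data, class_body_data, kSuccess, nullptr, 0,
                      flags, arraysize(flags));                // flag on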
TEST(PrivateClassFieldsNoErrors) {
// clang-format off
// Tests proposed class fields syntax.
@@ -6216,14 +6291,6 @@ TEST(PrivateStaticClassFieldsErrors) {
"#a; static #a",
"static #a; #a",
- // TODO(joyee): support static private methods
- "static #a() { }",
- "static get #a() { }",
- "static set #a() { }",
- "static *#a() { }",
- "static async #a() { }",
- "static async *#a() { }",
-
// ASI
"static #['a'] = 0\n",
"static #['a'] = 0\n b",
diff --git a/deps/v8/test/cctest/test-poison-disasm-arm.cc b/deps/v8/test/cctest/test-poison-disasm-arm.cc
index 3410e5487d..dd54bf28bc 100644
--- a/deps/v8/test/cctest/test-poison-disasm-arm.cc
+++ b/deps/v8/test/cctest/test-poison-disasm-arm.cc
@@ -24,6 +24,8 @@ const std::string kPReg = // NOLINT(runtime/string)
TEST(DisasmPoisonMonomorphicLoad) {
#ifdef ENABLE_DISASSEMBLER
if (i::FLAG_always_opt || !i::FLAG_opt) return;
+ // TODO(9684): Re-enable for TurboProp if necessary.
+ if (i::FLAG_turboprop) return;
i::FLAG_allow_natives_syntax = true;
i::FLAG_untrusted_code_mitigations = true;
@@ -58,6 +60,8 @@ TEST(DisasmPoisonMonomorphicLoad) {
TEST(DisasmPoisonPolymorphicLoad) {
#ifdef ENABLE_DISASSEMBLER
if (i::FLAG_always_opt || !i::FLAG_opt) return;
+ // TODO(9684): Re-enable for TurboProp if necessary.
+ if (i::FLAG_turboprop) return;
i::FLAG_allow_natives_syntax = true;
i::FLAG_untrusted_code_mitigations = true;
@@ -101,7 +105,7 @@ TEST(DisasmPoisonPolymorphicLoad) {
"csdb", // spec. barrier
"ldr <<BSt:r[0-9]+>>, \\[<<Obj>>, #\\+[0-9]+\\]", // load backing store
"and <<BSt>>, <<BSt>>, " + kPReg, // apply the poison
- "ldr <<Prop:r[0-9]+>>, \\[<<Obj>>, #\\+[0-9]+\\]", // load the property
+ "ldr <<Prop:r[0-9]+>>, \\[<<BSt>>, #\\+[0-9]+\\]", // load the property
"and <<Prop>>, <<Prop>>, " + kPReg, // apply the poison
// Ldone:
};
@@ -109,5 +113,43 @@ TEST(DisasmPoisonPolymorphicLoad) {
#endif // ENABLE_DISASSEMBLER
}
+TEST(DisasmPoisonMonomorphicLoadFloat64) {
+#ifdef ENABLE_DISASSEMBLER
+ if (i::FLAG_always_opt || !i::FLAG_opt) return;
+
+ i::FLAG_allow_natives_syntax = true;
+ i::FLAG_untrusted_code_mitigations = true;
+
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+
+ CompileRun(
+ "function mono(o) { return o.x; }"
+ "%PrepareFunctionForOptimization(mono);"
+ "mono({ x : 1.1 });"
+ "mono({ x : 1.1 });"
+ "%OptimizeFunctionOnNextCall(mono);"
+ "mono({ x : 1.1 });");
+
+ // Matches that the property access sequence is instrumented with
+ // poisoning.
+ std::vector<std::string> patterns_array = {
+ "ldr <<Map:r[0-9]+>>, \\[<<Obj:r[0-9]+>>, #-1\\]", // load map
+ "ldr <<ExpMap:r[0-9]+>>, \\[pc, #", // load expected map
+ "cmp <<Map>>, <<ExpMap>>", // compare maps
+ "bne", // deopt if different
+ "eorne " + kPReg + ", " + kPReg + ", " + kPReg, // update the poison
+ "csdb", // spec. barrier
+ "ldr <<Field:r[0-9]+>>, \\[<<Obj>>, #\\+[0-9]+\\]", // load the field
+ "and <<Field>>, <<Field>>, " + kPReg, // apply the poison
+ "mov <<Mov:r[0-9]+>>, #[0-9]+", // addr. calculation
+ "add ip, <<Field>>, <<Mov>>", // addr. calculation
+ "and ip, ip, " + kPReg, // apply the poison
+ "vldr d[0-9]+, \\[ip", // load Float64
+ };
+ CHECK(CheckDisassemblyRegexPatterns("mono", patterns_array));
+#endif // ENABLE_DISASSEMBLER
+}
+
} // namespace internal
} // namespace v8
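Note: these tests verify the Spectre mitigations by disassembling the optimized function and matching its instructions against ordered regex patterns; a <<Name:pattern>> token captures a register on first use so later lines can refer back to it. A two-pattern sketch of the matcher (the second pattern is illustrative, not taken from the test):

    std::vector<std::string> patterns = {
        "ldr <<Map:r[0-9]+>>, \\[<<Obj:r[0-9]+>>, #-1\\]",  // load map, capture regs
        "and <<Map>>, <<Map>>, " + kPReg,                   // poison applied to it
    };
    CHECK(CheckDisassemblyRegexPatterns("mono", patterns));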
diff --git a/deps/v8/test/cctest/test-poison-disasm-arm64.cc b/deps/v8/test/cctest/test-poison-disasm-arm64.cc
index a428ce7b89..32f4315e3a 100644
--- a/deps/v8/test/cctest/test-poison-disasm-arm64.cc
+++ b/deps/v8/test/cctest/test-poison-disasm-arm64.cc
@@ -24,6 +24,8 @@ const std::string kPReg = // NOLINT(runtime/string)
TEST(DisasmPoisonMonomorphicLoad) {
#ifdef ENABLE_DISASSEMBLER
if (i::FLAG_always_opt || !i::FLAG_opt) return;
+ // TODO(9684): Re-enable for TurboProp if necessary.
+ if (i::FLAG_turboprop) return;
i::FLAG_allow_natives_syntax = true;
i::FLAG_untrusted_code_mitigations = true;
@@ -49,7 +51,7 @@ TEST(DisasmPoisonMonomorphicLoad) {
"b.ne", // deopt if different
"csel " + kPReg + ", xzr, " + kPReg + ", ne", // update the poison
"csdb", // spec. barrier
- "ldursw x<<Field:[0-9]+>>, \\[<<Obj>>, #[0-9]+\\]", // load the field
+ "ldur w<<Field:[0-9]+>>, \\[<<Obj>>, #[0-9]+\\]", // load the field
"and x<<Field>>, x<<Field>>, " + kPReg, // apply the poison
};
#else
@@ -71,6 +73,8 @@ TEST(DisasmPoisonMonomorphicLoad) {
TEST(DisasmPoisonPolymorphicLoad) {
#ifdef ENABLE_DISASSEMBLER
if (i::FLAG_always_opt || !i::FLAG_opt) return;
+ // TODO(9684): Re-enable for TurboProp if necessary.
+ if (i::FLAG_turboprop) return;
i::FLAG_allow_natives_syntax = true;
i::FLAG_untrusted_code_mitigations = true;
@@ -113,7 +117,7 @@ TEST(DisasmPoisonPolymorphicLoad) {
// Lcase1:
"csel " + kPReg + ", xzr, " + kPReg + ", ne", // update the poison
"csdb", // spec. barrier
- "ldursw x<<BSt:[0-9]+>>, \\[<<Obj>>, #[0-9]+\\]", // load backing store
+ "ldur w<<BSt:[0-9]+>>, \\[<<Obj>>, #[0-9]+\\]", // load backing store
// branchful decompress
"add x<<BSt>>, x26, x<<BSt>>", // Add root to ref
"and x<<BSt>>, x<<BSt>>, " + kPReg, // apply the poison
@@ -135,9 +139,13 @@ TEST(DisasmPoisonPolymorphicLoad) {
"b.ne", // deopt if different
"csel " + kPReg + ", xzr, " + kPReg + ", ne", // update the poison
"csdb", // spec. barrier
- "ldur <<Field:x[0-9]+>>, \\[<<Obj>>, #[0-9]+\\]", // load the field
- "and <<Field>>, <<Field>>, " + kPReg, // apply the poison
- "asr x[0-9]+, <<Field>>, #32", // untag
+ "ldur x<<Field:[0-9]+>>, \\[<<Obj>>, #[0-9]+\\]", // load the field
+ "and x<<Field>>, x<<Field>>, " + kPReg, // apply the poison
+#ifdef V8_31BIT_SMIS_ON_64BIT_ARCH
+ "asr w<<Field>>, w<<Field>>, #1", // untag
+#else
+ "asr x[0-9]+, x<<Field>>, #32", // untag
+#endif
"b", // goto merge point
// Lcase1:
"csel " + kPReg + ", xzr, " + kPReg + ", ne", // update the poison
@@ -153,5 +161,65 @@ TEST(DisasmPoisonPolymorphicLoad) {
#endif // ENABLE_DISASSEMBLER
}
+TEST(DisasmPoisonMonomorphicLoadFloat64) {
+#ifdef ENABLE_DISASSEMBLER
+ if (i::FLAG_always_opt || !i::FLAG_opt) return;
+ // TODO(9684): Re-enable for TurboProp if necessary.
+ if (i::FLAG_turboprop) return;
+
+ i::FLAG_allow_natives_syntax = true;
+ i::FLAG_untrusted_code_mitigations = true;
+
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+
+ CompileRun(
+ "function mono(o) { return o.x; }"
+ "%PrepareFunctionForOptimization(mono);"
+ "mono({ x : 1.1 });"
+ "mono({ x : 1.1 });"
+ "%OptimizeFunctionOnNextCall(mono);"
+ "mono({ x : 1.1 });");
+
+ // Matches that the property access sequence is instrumented with
+ // poisoning.
+#if defined(V8_COMPRESS_POINTERS)
+ std::vector<std::string> patterns_array = {
+ "ldur <<Map:w[0-9]+>>, \\[<<Obj:x[0-9]+>>, #-1\\]", // load map
+ "ldr <<ExpMap:w[0-9]+>>, pc", // load expected map
+ "cmp <<Map>>, <<ExpMap>>", // compare maps
+ "b.ne", // deopt if differ
+ "csel " + kPReg + ", xzr, " + kPReg + ", ne", // update the poison
+ "csdb", // spec. barrier
+ "ldur w<<F1:[0-9]+>>, \\[<<Obj>>, #11\\]", // load heap number
+ "add x<<F1>>, x26, x<<F1>>", // Decompress ref
+ "and x<<F1>>, x<<F1>>, " + kPReg, // apply the poison
+ "add <<Addr:x[0-9]+>>, x<<F1>>, #0x[0-9a-f]+", // addr. calculation
+ "and <<Addr>>, <<Addr>>, " + kPReg, // apply the poison
+ "ldr d[0-9]+, \\[<<Addr>>\\]", // load Float64
+ };
+#else
+ std::vector<std::string> patterns_array = {
+ "ldur <<Map:x[0-9]+>>, \\[<<Obj:x[0-9]+>>, #-1\\]", // load map
+ "ldr <<ExpMap:x[0-9]+>>, pc", // load expected map
+ "cmp <<Map>>, <<ExpMap>>", // compare maps
+ "b.ne", // deopt if differ
+ "csel " + kPReg + ", xzr, " + kPReg + ", ne", // update the poison
+ "csdb", // spec. barrier
+#if V8_DOUBLE_FIELDS_UNBOXING
+ "add <<Addr:x[0-9]+>>, <<Obj>>, #0x[0-9a-f]+", // addr. calculation
+#else
+ "ldur <<F1:x[0-9]+>>, \\[<<Obj>>, #23\\]", // load heap number
+ "and <<F1>>, <<F1>>, " + kPReg, // apply the poison
+ "add <<Addr:x[0-9]+>>, <<F1>>, #0x7", // addr. calculation
+#endif
+ "and <<Addr>>, <<Addr>>, " + kPReg, // apply the poison
+ "ldr d[0-9]+, \\[<<Addr>>\\]", // load Float64
+ };
+#endif
+ CHECK(CheckDisassemblyRegexPatterns("mono", patterns_array));
+#endif // ENABLE_DISASSEMBLER
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/test-profile-generator.cc b/deps/v8/test/cctest/test-profile-generator.cc
index ccebabec30..5cfc4df2a3 100644
--- a/deps/v8/test/cctest/test-profile-generator.cc
+++ b/deps/v8/test/cctest/test-profile-generator.cc
@@ -674,13 +674,12 @@ static const char* line_number_test_source_profile_time_functions =
"bar_at_the_second_line();\n"
"function lazy_func_at_6th_line() {}";
-int GetFunctionLineNumber(CpuProfiler& profiler, // NOLINT(runtime/references)
- LocalContext& env, // NOLINT(runtime/references)
+int GetFunctionLineNumber(CpuProfiler* profiler, LocalContext* env,
const char* name) {
- CodeMap* code_map = profiler.generator()->code_map();
+ CodeMap* code_map = profiler->generator()->code_map();
i::Handle<i::JSFunction> func = i::Handle<i::JSFunction>::cast(
v8::Utils::OpenHandle(*v8::Local<v8::Function>::Cast(
- env->Global()->Get(env.local(), v8_str(name)).ToLocalChecked())));
+ (*env)->Global()->Get(env->local(), v8_str(name)).ToLocalChecked())));
CodeEntry* func_entry =
code_map->FindEntry(func->abstract_code().InstructionStart());
if (!func_entry) FATAL("%s", name);
@@ -705,12 +704,12 @@ TEST(LineNumber) {
profiler.processor()->StopSynchronously();
bool is_lazy = i::FLAG_lazy;
- CHECK_EQ(1, GetFunctionLineNumber(profiler, env, "foo_at_the_first_line"));
+ CHECK_EQ(1, GetFunctionLineNumber(&profiler, &env, "foo_at_the_first_line"));
CHECK_EQ(is_lazy ? 0 : 4,
- GetFunctionLineNumber(profiler, env, "lazy_func_at_forth_line"));
- CHECK_EQ(2, GetFunctionLineNumber(profiler, env, "bar_at_the_second_line"));
+ GetFunctionLineNumber(&profiler, &env, "lazy_func_at_forth_line"));
+ CHECK_EQ(2, GetFunctionLineNumber(&profiler, &env, "bar_at_the_second_line"));
CHECK_EQ(is_lazy ? 0 : 6,
- GetFunctionLineNumber(profiler, env, "lazy_func_at_6th_line"));
+ GetFunctionLineNumber(&profiler, &env, "lazy_func_at_6th_line"));
profiler.StopProfiling("LineNumber");
}
diff --git a/deps/v8/test/cctest/test-regexp.cc b/deps/v8/test/cctest/test-regexp.cc
index 1374673c61..95e752bece 100644
--- a/deps/v8/test/cctest/test-regexp.cc
+++ b/deps/v8/test/cctest/test-regexp.cc
@@ -38,6 +38,7 @@
#include "src/objects/js-regexp-inl.h"
#include "src/objects/objects-inl.h"
#include "src/regexp/regexp-bytecode-generator.h"
+#include "src/regexp/regexp-bytecodes.h"
#include "src/regexp/regexp-compiler.h"
#include "src/regexp/regexp-interpreter.h"
#include "src/regexp/regexp-macro-assembler-arch.h"
@@ -1744,19 +1745,6 @@ TEST(UseCountRegExp) {
CHECK_EQ(2, use_counts[v8::Isolate::kRegExpPrototypeStickyGetter]);
CHECK_EQ(1, use_counts[v8::Isolate::kRegExpPrototypeToString]);
CHECK(resultToStringError->IsObject());
-
- // Increment a UseCounter when .matchAll() is used with a non-global
- // regular expression.
- CHECK_EQ(0, use_counts[v8::Isolate::kRegExpMatchAllWithNonGlobalRegExp]);
- v8::Local<v8::Value> resultReMatchAllNonGlobal =
- CompileRun("'a'.matchAll(/./)");
- CHECK_EQ(1, use_counts[v8::Isolate::kRegExpMatchAllWithNonGlobalRegExp]);
- CHECK(resultReMatchAllNonGlobal->IsObject());
- // Don't increment the counter for global regular expressions.
- v8::Local<v8::Value> resultReMatchAllGlobal =
- CompileRun("'a'.matchAll(/./g)");
- CHECK_EQ(1, use_counts[v8::Isolate::kRegExpMatchAllWithNonGlobalRegExp]);
- CHECK(resultReMatchAllGlobal->IsObject());
}
class UncachedExternalString
@@ -1783,6 +1771,567 @@ TEST(UncachedExternalString) {
ExpectString("external.substring(1).match(re)[1]", "z");
}
+// Test bytecode peephole optimization
+
+void CreatePeepholeNoChangeBytecode(RegExpMacroAssembler* m) {
+ Label fail, backtrack;
+ m->PushBacktrack(&fail);
+ m->CheckNotAtStart(0, nullptr);
+ m->LoadCurrentCharacter(2, nullptr);
+ m->CheckNotCharacter('o', nullptr);
+ m->LoadCurrentCharacter(1, nullptr, false);
+ m->CheckNotCharacter('o', nullptr);
+ m->LoadCurrentCharacter(0, nullptr, false);
+ m->CheckNotCharacter('f', nullptr);
+ m->WriteCurrentPositionToRegister(0, 0);
+ m->WriteCurrentPositionToRegister(1, 3);
+ m->AdvanceCurrentPosition(3);
+ m->PushBacktrack(&backtrack);
+ m->Succeed();
+ m->Bind(&backtrack);
+ m->Backtrack();
+ m->Bind(&fail);
+ m->Fail();
+}
+
+TEST(PeepholeNoChange) {
+ Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
+ Isolate* isolate = CcTest::i_isolate();
+ Factory* factory = isolate->factory();
+ HandleScope scope(isolate);
+
+ RegExpBytecodeGenerator orig(CcTest::i_isolate(), &zone);
+ RegExpBytecodeGenerator opt(CcTest::i_isolate(), &zone);
+
+ CreatePeepholeNoChangeBytecode(&orig);
+ CreatePeepholeNoChangeBytecode(&opt);
+
+ Handle<String> source = factory->NewStringFromStaticChars("^foo");
+
+ i::FLAG_regexp_peephole_optimization = false;
+ Handle<ByteArray> array = Handle<ByteArray>::cast(orig.GetCode(source));
+ int length = array->length();
+ byte* byte_array = array->GetDataStartAddress();
+
+ i::FLAG_regexp_peephole_optimization = true;
+ Handle<ByteArray> array_optimized =
+ Handle<ByteArray>::cast(opt.GetCode(source));
+ byte* byte_array_optimized = array_optimized->GetDataStartAddress();
+
+ CHECK_EQ(0, memcmp(byte_array, byte_array_optimized, length));
+}
+
+void CreatePeepholeSkipUntilCharBytecode(RegExpMacroAssembler* m) {
+ Label start;
+ m->Bind(&start);
+ m->LoadCurrentCharacter(0, nullptr, true);
+ m->CheckCharacter('x', nullptr);
+ m->AdvanceCurrentPosition(1);
+ m->GoTo(&start);
+}
+
+TEST(PeepholeSkipUntilChar) {
+ Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
+ Isolate* isolate = CcTest::i_isolate();
+ Factory* factory = isolate->factory();
+ HandleScope scope(isolate);
+
+ RegExpBytecodeGenerator orig(CcTest::i_isolate(), &zone);
+ RegExpBytecodeGenerator opt(CcTest::i_isolate(), &zone);
+
+ CreatePeepholeSkipUntilCharBytecode(&orig);
+ CreatePeepholeSkipUntilCharBytecode(&opt);
+
+ Handle<String> source = factory->NewStringFromStaticChars("dummy");
+
+ i::FLAG_regexp_peephole_optimization = false;
+ Handle<ByteArray> array = Handle<ByteArray>::cast(orig.GetCode(source));
+ int length = array->length();
+
+ i::FLAG_regexp_peephole_optimization = true;
+ Handle<ByteArray> array_optimized =
+ Handle<ByteArray>::cast(opt.GetCode(source));
+ int length_optimized = array_optimized->length();
+
+ int length_expected = RegExpBytecodeLength(BC_LOAD_CURRENT_CHAR) +
+ RegExpBytecodeLength(BC_CHECK_CHAR) +
+ RegExpBytecodeLength(BC_ADVANCE_CP_AND_GOTO) +
+ RegExpBytecodeLength(BC_POP_BT);
+ int length_optimized_expected = RegExpBytecodeLength(BC_SKIP_UNTIL_CHAR) +
+ RegExpBytecodeLength(BC_POP_BT);
+
+ CHECK_EQ(length, length_expected);
+ CHECK_EQ(length_optimized, length_optimized_expected);
+
+ CHECK_EQ(BC_SKIP_UNTIL_CHAR, array_optimized->get(0));
+ CHECK_EQ(BC_POP_BT,
+ array_optimized->get(RegExpBytecodeLength(BC_SKIP_UNTIL_CHAR)));
+}
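+
+// The peephole pass fuses the loop emitted above roughly as follows
+// (illustrative sketch of the bytecode shapes, grounded in the length and
+// bytecode CHECKs of this test, not a literal disassembly):
+//   before: BC_LOAD_CURRENT_CHAR; BC_CHECK_CHAR 'x'; BC_ADVANCE_CP_AND_GOTO
+//   after:  BC_SKIP_UNTIL_CHAR 'x'  (a single fused bytecode)
+// Both sequences end with the implicit BC_POP_BT backtrack bytecode.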
+
+void CreatePeepholeSkipUntilBitInTableBytecode(RegExpMacroAssembler* m,
+ Factory* factory) {
+ Handle<ByteArray> bit_table = factory->NewByteArray(
+ RegExpMacroAssembler::kTableSize, AllocationType::kOld);
+ for (uint32_t i = 0; i < RegExpMacroAssembler::kTableSize; i++) {
+ bit_table->set(i, 0);
+ }
+
+ Label start;
+ m->Bind(&start);
+ m->LoadCurrentCharacter(0, nullptr, true);
+ m->CheckBitInTable(bit_table, nullptr);
+ m->AdvanceCurrentPosition(1);
+ m->GoTo(&start);
+}
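+
+// CheckBitInTable tests the current character against a byte table of
+// RegExpMacroAssembler::kTableSize entries (all zero here, so the check never
+// matches and the loop always advances). The peephole pass only needs the
+// LOAD / CHECK_BIT_IN_TABLE / ADVANCE_CP_AND_GOTO shape to fuse the loop into
+// BC_SKIP_UNTIL_BIT_IN_TABLE, as the CHECKs below assert.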
+
+TEST(PeepholeSkipUntilBitInTable) {
+ Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
+ Isolate* isolate = CcTest::i_isolate();
+ Factory* factory = isolate->factory();
+ HandleScope scope(isolate);
+
+ RegExpBytecodeGenerator orig(CcTest::i_isolate(), &zone);
+ RegExpBytecodeGenerator opt(CcTest::i_isolate(), &zone);
+
+ CreatePeepholeSkipUntilBitInTableBytecode(&orig, factory);
+ CreatePeepholeSkipUntilBitInTableBytecode(&opt, factory);
+
+ Handle<String> source = factory->NewStringFromStaticChars("dummy");
+
+ i::FLAG_regexp_peephole_optimization = false;
+ Handle<ByteArray> array = Handle<ByteArray>::cast(orig.GetCode(source));
+ int length = array->length();
+
+ i::FLAG_regexp_peephole_optimization = true;
+ Handle<ByteArray> array_optimized =
+ Handle<ByteArray>::cast(opt.GetCode(source));
+ int length_optimized = array_optimized->length();
+
+ int length_expected = RegExpBytecodeLength(BC_LOAD_CURRENT_CHAR) +
+ RegExpBytecodeLength(BC_CHECK_BIT_IN_TABLE) +
+ RegExpBytecodeLength(BC_ADVANCE_CP_AND_GOTO) +
+ RegExpBytecodeLength(BC_POP_BT);
+ int length_optimized_expected =
+ RegExpBytecodeLength(BC_SKIP_UNTIL_BIT_IN_TABLE) +
+ RegExpBytecodeLength(BC_POP_BT);
+
+ CHECK_EQ(length, length_expected);
+ CHECK_EQ(length_optimized, length_optimized_expected);
+
+ CHECK_EQ(BC_SKIP_UNTIL_BIT_IN_TABLE, array_optimized->get(0));
+ CHECK_EQ(BC_POP_BT, array_optimized->get(
+ RegExpBytecodeLength(BC_SKIP_UNTIL_BIT_IN_TABLE)));
+}
+
+void CreatePeepholeSkipUntilCharPosCheckedBytecode(RegExpMacroAssembler* m) {
+ Label start;
+ m->Bind(&start);
+ m->LoadCurrentCharacter(0, nullptr, true, 1, 2);
+ m->CheckCharacter('x', nullptr);
+ m->AdvanceCurrentPosition(1);
+ m->GoTo(&start);
+}
+
+TEST(PeepholeSkipUntilCharPosChecked) {
+ Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
+ Isolate* isolate = CcTest::i_isolate();
+ Factory* factory = isolate->factory();
+ HandleScope scope(isolate);
+
+ RegExpBytecodeGenerator orig(CcTest::i_isolate(), &zone);
+ RegExpBytecodeGenerator opt(CcTest::i_isolate(), &zone);
+
+ CreatePeepholeSkipUntilCharPosCheckedBytecode(&orig);
+ CreatePeepholeSkipUntilCharPosCheckedBytecode(&opt);
+
+ Handle<String> source = factory->NewStringFromStaticChars("dummy");
+
+ i::FLAG_regexp_peephole_optimization = false;
+ Handle<ByteArray> array = Handle<ByteArray>::cast(orig.GetCode(source));
+ int length = array->length();
+
+ i::FLAG_regexp_peephole_optimization = true;
+ Handle<ByteArray> array_optimized =
+ Handle<ByteArray>::cast(opt.GetCode(source));
+ int length_optimized = array_optimized->length();
+
+ int length_expected = RegExpBytecodeLength(BC_CHECK_CURRENT_POSITION) +
+ RegExpBytecodeLength(BC_LOAD_CURRENT_CHAR_UNCHECKED) +
+ RegExpBytecodeLength(BC_CHECK_CHAR) +
+ RegExpBytecodeLength(BC_ADVANCE_CP_AND_GOTO) +
+ RegExpBytecodeLength(BC_POP_BT);
+ int length_optimized_expected =
+ RegExpBytecodeLength(BC_SKIP_UNTIL_CHAR_POS_CHECKED) +
+ RegExpBytecodeLength(BC_POP_BT);
+
+ CHECK_EQ(length, length_expected);
+ CHECK_EQ(length_optimized, length_optimized_expected);
+
+ CHECK_EQ(BC_SKIP_UNTIL_CHAR_POS_CHECKED, array_optimized->get(0));
+ CHECK_EQ(BC_POP_BT, array_optimized->get(RegExpBytecodeLength(
+ BC_SKIP_UNTIL_CHAR_POS_CHECKED)));
+}
+
+void CreatePeepholeSkipUntilCharAndBytecode(RegExpMacroAssembler* m) {
+ Label start;
+ m->Bind(&start);
+ m->LoadCurrentCharacter(0, nullptr, true, 1, 2);
+ m->CheckCharacterAfterAnd('x', 0xFF, nullptr);
+ m->AdvanceCurrentPosition(1);
+ m->GoTo(&start);
+}
+
+TEST(PeepholeSkipUntilCharAnd) {
+ Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
+ Isolate* isolate = CcTest::i_isolate();
+ Factory* factory = isolate->factory();
+ HandleScope scope(isolate);
+
+ RegExpBytecodeGenerator orig(CcTest::i_isolate(), &zone);
+ RegExpBytecodeGenerator opt(CcTest::i_isolate(), &zone);
+
+ CreatePeepholeSkipUntilCharAndBytecode(&orig);
+ CreatePeepholeSkipUntilCharAndBytecode(&opt);
+
+ Handle<String> source = factory->NewStringFromStaticChars("dummy");
+
+ i::FLAG_regexp_peephole_optimization = false;
+ Handle<ByteArray> array = Handle<ByteArray>::cast(orig.GetCode(source));
+ int length = array->length();
+
+ i::FLAG_regexp_peephole_optimization = true;
+ Handle<ByteArray> array_optimized =
+ Handle<ByteArray>::cast(opt.GetCode(source));
+ int length_optimized = array_optimized->length();
+
+ int length_expected = RegExpBytecodeLength(BC_CHECK_CURRENT_POSITION) +
+ RegExpBytecodeLength(BC_LOAD_CURRENT_CHAR_UNCHECKED) +
+ RegExpBytecodeLength(BC_AND_CHECK_CHAR) +
+ RegExpBytecodeLength(BC_ADVANCE_CP_AND_GOTO) +
+ RegExpBytecodeLength(BC_POP_BT);
+ int length_optimized_expected = RegExpBytecodeLength(BC_SKIP_UNTIL_CHAR_AND) +
+ RegExpBytecodeLength(BC_POP_BT);
+
+ CHECK_EQ(length, length_expected);
+ CHECK_EQ(length_optimized, length_optimized_expected);
+
+ CHECK_EQ(BC_SKIP_UNTIL_CHAR_AND, array_optimized->get(0));
+ CHECK_EQ(BC_POP_BT,
+ array_optimized->get(RegExpBytecodeLength(BC_SKIP_UNTIL_CHAR_AND)));
+}
+
+void CreatePeepholeSkipUntilCharOrCharBytecode(RegExpMacroAssembler* m) {
+ Label start;
+ m->Bind(&start);
+ m->LoadCurrentCharacter(0, nullptr, true);
+ m->CheckCharacter('x', nullptr);
+ m->CheckCharacter('y', nullptr);
+ m->AdvanceCurrentPosition(1);
+ m->GoTo(&start);
+}
+
+TEST(PeepholeSkipUntilCharOrChar) {
+ Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
+ Isolate* isolate = CcTest::i_isolate();
+ Factory* factory = isolate->factory();
+ HandleScope scope(isolate);
+
+ RegExpBytecodeGenerator orig(CcTest::i_isolate(), &zone);
+ RegExpBytecodeGenerator opt(CcTest::i_isolate(), &zone);
+
+ CreatePeepholeSkipUntilCharOrCharBytecode(&orig);
+ CreatePeepholeSkipUntilCharOrCharBytecode(&opt);
+
+ Handle<String> source = factory->NewStringFromStaticChars("dummy");
+
+ i::FLAG_regexp_peephole_optimization = false;
+ Handle<ByteArray> array = Handle<ByteArray>::cast(orig.GetCode(source));
+ int length = array->length();
+
+ i::FLAG_regexp_peephole_optimization = true;
+ Handle<ByteArray> array_optimized =
+ Handle<ByteArray>::cast(opt.GetCode(source));
+ int length_optimized = array_optimized->length();
+
+ int length_expected = RegExpBytecodeLength(BC_LOAD_CURRENT_CHAR) +
+ RegExpBytecodeLength(BC_CHECK_CHAR) +
+ RegExpBytecodeLength(BC_CHECK_CHAR) +
+ RegExpBytecodeLength(BC_ADVANCE_CP_AND_GOTO) +
+ RegExpBytecodeLength(BC_POP_BT);
+ int length_optimized_expected =
+ RegExpBytecodeLength(BC_SKIP_UNTIL_CHAR_OR_CHAR) +
+ RegExpBytecodeLength(BC_POP_BT);
+
+ CHECK_EQ(length, length_expected);
+ CHECK_EQ(length_optimized, length_optimized_expected);
+
+ CHECK_EQ(BC_SKIP_UNTIL_CHAR_OR_CHAR, array_optimized->get(0));
+ CHECK_EQ(BC_POP_BT, array_optimized->get(
+ RegExpBytecodeLength(BC_SKIP_UNTIL_CHAR_OR_CHAR)));
+}
+
+void CreatePeepholeSkipUntilGtOrNotBitInTableBytecode(RegExpMacroAssembler* m,
+ Factory* factory) {
+ Handle<ByteArray> bit_table = factory->NewByteArray(
+ RegExpMacroAssembler::kTableSize, AllocationType::kOld);
+ for (uint32_t i = 0; i < RegExpMacroAssembler::kTableSize; i++) {
+ bit_table->set(i, 0);
+ }
+
+ Label start, end, advance;
+ m->Bind(&start);
+ m->LoadCurrentCharacter(0, nullptr, true);
+ m->CheckCharacterGT('x', nullptr);
+ m->CheckBitInTable(bit_table, &advance);
+ m->GoTo(&end);
+ m->Bind(&advance);
+ m->AdvanceCurrentPosition(1);
+ m->GoTo(&start);
+ m->Bind(&end);
+}
+
+TEST(PeepholeSkipUntilGtOrNotBitInTable) {
+ Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
+ Isolate* isolate = CcTest::i_isolate();
+ Factory* factory = isolate->factory();
+ HandleScope scope(isolate);
+
+ RegExpBytecodeGenerator orig(CcTest::i_isolate(), &zone);
+ RegExpBytecodeGenerator opt(CcTest::i_isolate(), &zone);
+
+ CreatePeepholeSkipUntilGtOrNotBitInTableBytecode(&orig, factory);
+ CreatePeepholeSkipUntilGtOrNotBitInTableBytecode(&opt, factory);
+
+ Handle<String> source = factory->NewStringFromStaticChars("dummy");
+
+ i::FLAG_regexp_peephole_optimization = false;
+ Handle<ByteArray> array = Handle<ByteArray>::cast(orig.GetCode(source));
+ int length = array->length();
+
+ i::FLAG_regexp_peephole_optimization = true;
+ Handle<ByteArray> array_optimized =
+ Handle<ByteArray>::cast(opt.GetCode(source));
+ int length_optimized = array_optimized->length();
+
+ int length_expected = RegExpBytecodeLength(BC_LOAD_CURRENT_CHAR) +
+ RegExpBytecodeLength(BC_CHECK_GT) +
+ RegExpBytecodeLength(BC_CHECK_BIT_IN_TABLE) +
+ RegExpBytecodeLength(BC_GOTO) +
+ RegExpBytecodeLength(BC_ADVANCE_CP_AND_GOTO) +
+ RegExpBytecodeLength(BC_POP_BT);
+ int length_optimized_expected =
+ RegExpBytecodeLength(BC_SKIP_UNTIL_GT_OR_NOT_BIT_IN_TABLE) +
+ RegExpBytecodeLength(BC_POP_BT);
+
+ CHECK_EQ(length, length_expected);
+ CHECK_EQ(length_optimized, length_optimized_expected);
+
+ CHECK_EQ(BC_SKIP_UNTIL_GT_OR_NOT_BIT_IN_TABLE, array_optimized->get(0));
+ CHECK_EQ(BC_POP_BT, array_optimized->get(RegExpBytecodeLength(
+ BC_SKIP_UNTIL_GT_OR_NOT_BIT_IN_TABLE)));
+}
+
+void CreatePeepholeLabelFixupsInsideBytecode(RegExpMacroAssembler* m,
+ Label* dummy_before,
+ Label* dummy_after,
+ Label* dummy_inside) {
+ Label loop;
+ m->Bind(dummy_before);
+ m->LoadCurrentCharacter(0, dummy_before);
+ m->CheckCharacter('a', dummy_after);
+ m->CheckCharacter('b', dummy_inside);
+ m->Bind(&loop);
+ m->LoadCurrentCharacter(0, nullptr, true);
+ m->CheckCharacter('x', nullptr);
+ m->Bind(dummy_inside);
+ m->CheckCharacter('y', nullptr);
+ m->AdvanceCurrentPosition(1);
+ m->GoTo(&loop);
+ m->Bind(dummy_after);
+ m->LoadCurrentCharacter(0, dummy_before);
+ m->CheckCharacter('a', dummy_after);
+ m->CheckCharacter('b', dummy_inside);
+}
+
+TEST(PeepholeLabelFixupsInside) {
+ Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
+ Isolate* isolate = CcTest::i_isolate();
+ Factory* factory = isolate->factory();
+ HandleScope scope(isolate);
+
+ RegExpBytecodeGenerator orig(CcTest::i_isolate(), &zone);
+ RegExpBytecodeGenerator opt(CcTest::i_isolate(), &zone);
+
+ {
+ Label dummy_before, dummy_after, dummy_inside;
+ CreatePeepholeLabelFixupsInsideBytecode(&opt, &dummy_before, &dummy_after,
+ &dummy_inside);
+ }
+ Label dummy_before, dummy_after, dummy_inside;
+ CreatePeepholeLabelFixupsInsideBytecode(&orig, &dummy_before, &dummy_after,
+ &dummy_inside);
+
+ CHECK_EQ(0x00, dummy_before.pos());
+ CHECK_EQ(0x28, dummy_inside.pos());
+ CHECK_EQ(0x38, dummy_after.pos());
+
+ const Label* labels[] = {&dummy_before, &dummy_after, &dummy_inside};
+ const int label_positions[3][2] = {
+ {0x04, 0x3C}, // dummy_before
+ {0x0C, 0x44}, // dummy_after
+ {0x14, 0x4C} // dummy_inside
+ };
+
+ Handle<String> source = factory->NewStringFromStaticChars("dummy");
+
+ i::FLAG_regexp_peephole_optimization = false;
+ Handle<ByteArray> array = Handle<ByteArray>::cast(orig.GetCode(source));
+
+ for (int label_idx = 0; label_idx < 3; label_idx++) {
+ for (int pos_idx = 0; pos_idx < 2; pos_idx++) {
+ CHECK_EQ(labels[label_idx]->pos(),
+ array->get(label_positions[label_idx][pos_idx]));
+ }
+ }
+
+ i::FLAG_regexp_peephole_optimization = true;
+ Handle<ByteArray> array_optimized =
+ Handle<ByteArray>::cast(opt.GetCode(source));
+
+ const int pos_fixups[] = {
+ 0, // Position before optimization should be unchanged.
+ 4, // Position after first replacement should be 4 (optimized size (20) -
+ // original size (32) + preserved bytecodes (16)).
+ };
+ const int target_fixups[] = {
+ 0, // dummy_before should be unchanged
+ 4, // dummy_after should be 4
+ 4 // dummy_inside should be 4
+ };
+
+ for (int label_idx = 0; label_idx < 3; label_idx++) {
+ for (int pos_idx = 0; pos_idx < 2; pos_idx++) {
+ int label_pos = label_positions[label_idx][pos_idx] + pos_fixups[pos_idx];
+ int jump_address = *reinterpret_cast<uint32_t*>(
+ array_optimized->GetDataStartAddress() + label_pos);
+ int expected_jump_address =
+ labels[label_idx]->pos() + target_fixups[label_idx];
+ CHECK_EQ(expected_jump_address, jump_address);
+ }
+ }
+}
+
+void CreatePeepholeLabelFixupsComplexBytecode(RegExpMacroAssembler* m,
+ Label* dummy_before,
+ Label* dummy_between,
+ Label* dummy_after,
+ Label* dummy_inside) {
+ Label loop1, loop2;
+ m->Bind(dummy_before);
+ m->LoadCurrentCharacter(0, dummy_before);
+ m->CheckCharacter('a', dummy_between);
+ m->CheckCharacter('b', dummy_after);
+ m->CheckCharacter('c', dummy_inside);
+ m->Bind(&loop1);
+ m->LoadCurrentCharacter(0, nullptr, true);
+ m->CheckCharacter('x', nullptr);
+ m->CheckCharacter('y', nullptr);
+ m->AdvanceCurrentPosition(1);
+ m->GoTo(&loop1);
+ m->Bind(dummy_between);
+ m->LoadCurrentCharacter(0, dummy_before);
+ m->CheckCharacter('a', dummy_between);
+ m->CheckCharacter('b', dummy_after);
+ m->CheckCharacter('c', dummy_inside);
+ m->Bind(&loop2);
+ m->LoadCurrentCharacter(0, nullptr, true);
+ m->CheckCharacter('x', nullptr);
+ m->Bind(dummy_inside);
+ m->CheckCharacter('y', nullptr);
+ m->AdvanceCurrentPosition(1);
+ m->GoTo(&loop2);
+ m->Bind(dummy_after);
+ m->LoadCurrentCharacter(0, dummy_before);
+ m->CheckCharacter('a', dummy_between);
+ m->CheckCharacter('b', dummy_after);
+ m->CheckCharacter('c', dummy_inside);
+}
+
+TEST(PeepholeLabelFixupsComplex) {
+ Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
+ Isolate* isolate = CcTest::i_isolate();
+ Factory* factory = isolate->factory();
+ HandleScope scope(isolate);
+
+ RegExpBytecodeGenerator orig(CcTest::i_isolate(), &zone);
+ RegExpBytecodeGenerator opt(CcTest::i_isolate(), &zone);
+
+ {
+ Label dummy_before, dummy_between, dummy_after, dummy_inside;
+ CreatePeepholeLabelFixupsComplexBytecode(
+ &opt, &dummy_before, &dummy_between, &dummy_after, &dummy_inside);
+ }
+ Label dummy_before, dummy_between, dummy_after, dummy_inside;
+ CreatePeepholeLabelFixupsComplexBytecode(&orig, &dummy_before, &dummy_between,
+ &dummy_after, &dummy_inside);
+
+ CHECK_EQ(0x00, dummy_before.pos());
+ CHECK_EQ(0x40, dummy_between.pos());
+ CHECK_EQ(0x70, dummy_inside.pos());
+ CHECK_EQ(0x80, dummy_after.pos());
+
+ const Label* labels[] = {&dummy_before, &dummy_between, &dummy_after,
+ &dummy_inside};
+ const int label_positions[4][3] = {
+ {0x04, 0x44, 0x84}, // dummy_before
+ {0x0C, 0x4C, 0x8C}, // dummy_between
+ {0x14, 0x54, 0x94}, // dummy_after
+ {0x1C, 0x5C, 0x9C} // dummy_inside
+ };
+
+ Handle<String> source = factory->NewStringFromStaticChars("dummy");
+
+ i::FLAG_regexp_peephole_optimization = false;
+ Handle<ByteArray> array = Handle<ByteArray>::cast(orig.GetCode(source));
+
+ for (int label_idx = 0; label_idx < 4; label_idx++) {
+ for (int pos_idx = 0; pos_idx < 3; pos_idx++) {
+ CHECK_EQ(labels[label_idx]->pos(),
+ array->get(label_positions[label_idx][pos_idx]));
+ }
+ }
+
+ i::FLAG_regexp_peephole_optimization = true;
+ Handle<ByteArray> array_optimized =
+ Handle<ByteArray>::cast(opt.GetCode(source));
+
+ const int pos_fixups[] = {
+ 0, // Position before optimization should be unchanged.
+ -12, // Position after first replacement should be -12 (optimized size
+ // (20) - original size (32)).
+ -8 // Position after second replacement should be -8 (-12 from the
+ // first optimization, -12 from the second optimization, plus 16
+ // preserved bytecodes).
+ };
+ const int target_fixups[] = {
+ 0, // dummy_before should be unchanged
+ -12, // dummy_between should be -12
+ -8, // dummy_after should be -8
+ -8 // dummy_inside should be -8
+ };
+
+ for (int label_idx = 0; label_idx < 4; label_idx++) {
+ for (int pos_idx = 0; pos_idx < 3; pos_idx++) {
+ int label_pos = label_positions[label_idx][pos_idx] + pos_fixups[pos_idx];
+ int jump_address = *reinterpret_cast<uint32_t*>(
+ array_optimized->GetDataStartAddress() + label_pos);
+ int expected_jump_address =
+ labels[label_idx]->pos() + target_fixups[label_idx];
+ CHECK_EQ(expected_jump_address, jump_address);
+ }
+ }
+}
+
#undef CHECK_PARSE_ERROR
#undef CHECK_SIMPLE
#undef CHECK_MIN_MAX
diff --git a/deps/v8/test/cctest/test-roots.cc b/deps/v8/test/cctest/test-roots.cc
index d041903639..f3d1a56543 100644
--- a/deps/v8/test/cctest/test-roots.cc
+++ b/deps/v8/test/cctest/test-roots.cc
@@ -46,8 +46,8 @@ bool IsInitiallyMutable(Factory* factory, Address object_address) {
V(detached_contexts) \
V(dirty_js_finalization_groups) \
V(feedback_vectors_for_profiling_tools) \
+ V(shared_wasm_memories) \
V(materialized_objects) \
- V(noscript_shared_function_infos) \
V(public_symbol_table) \
V(retained_maps) \
V(retaining_path_targets) \
diff --git a/deps/v8/test/cctest/test-serialize.cc b/deps/v8/test/cctest/test-serialize.cc
index 407437c4b1..e2ab996796 100644
--- a/deps/v8/test/cctest/test-serialize.cc
+++ b/deps/v8/test/cctest/test-serialize.cc
@@ -98,8 +98,7 @@ class TestSerializer {
return v8_isolate;
}
- static v8::Isolate* NewIsolateFromBlob(
- StartupBlobs& blobs) { // NOLINT(runtime/references)
+ static v8::Isolate* NewIsolateFromBlob(const StartupBlobs& blobs) {
SnapshotData startup_snapshot(blobs.startup);
SnapshotData read_only_snapshot(blobs.read_only);
ReadOnlyDeserializer read_only_deserializer(&read_only_snapshot);
@@ -204,8 +203,7 @@ Vector<const uint8_t> ConstructSource(Vector<const uint8_t> head,
source_length);
}
-static v8::Isolate* Deserialize(
- StartupBlobs& blobs) { // NOLINT(runtime/references)
+static v8::Isolate* Deserialize(const StartupBlobs& blobs) {
v8::Isolate* isolate = TestSerializer::NewIsolateFromBlob(blobs);
CHECK(isolate);
return isolate;
@@ -1522,7 +1520,8 @@ TEST(CodeSerializerWithProfiler) {
LocalContext context;
Isolate* isolate = CcTest::i_isolate();
- isolate->compilation_cache()->Disable(); // Disable same-isolate code cache.
+ isolate->compilation_cache()
+ ->DisableScriptAndEval(); // Disable same-isolate code cache.
v8::HandleScope scope(CcTest::isolate());
@@ -1563,7 +1562,8 @@ TEST(CodeSerializerWithProfiler) {
void TestCodeSerializerOnePlusOneImpl(bool verify_builtins_count = true) {
LocalContext context;
Isolate* isolate = CcTest::i_isolate();
- isolate->compilation_cache()->Disable(); // Disable same-isolate code cache.
+ isolate->compilation_cache()
+ ->DisableScriptAndEval(); // Disable same-isolate code cache.
v8::HandleScope scope(CcTest::isolate());
@@ -1676,7 +1676,8 @@ TEST(CodeSerializerPromotedToCompilationCache) {
TEST(CodeSerializerInternalizedString) {
LocalContext context;
Isolate* isolate = CcTest::i_isolate();
- isolate->compilation_cache()->Disable(); // Disable same-isolate code cache.
+ isolate->compilation_cache()
+ ->DisableScriptAndEval(); // Disable same-isolate code cache.
v8::HandleScope scope(CcTest::isolate());
@@ -1734,7 +1735,8 @@ TEST(CodeSerializerInternalizedString) {
TEST(CodeSerializerLargeCodeObject) {
LocalContext context;
Isolate* isolate = CcTest::i_isolate();
- isolate->compilation_cache()->Disable(); // Disable same-isolate code cache.
+ isolate->compilation_cache()
+ ->DisableScriptAndEval(); // Disable same-isolate code cache.
v8::HandleScope scope(CcTest::isolate());
@@ -1793,7 +1795,8 @@ TEST(CodeSerializerLargeCodeObjectWithIncrementalMarking) {
LocalContext context;
Isolate* isolate = CcTest::i_isolate();
Heap* heap = isolate->heap();
- isolate->compilation_cache()->Disable(); // Disable same-isolate code cache.
+ isolate->compilation_cache()
+ ->DisableScriptAndEval(); // Disable same-isolate code cache.
v8::HandleScope scope(CcTest::isolate());
@@ -1863,7 +1866,8 @@ TEST(CodeSerializerLargeStrings) {
LocalContext context;
Isolate* isolate = CcTest::i_isolate();
Factory* f = isolate->factory();
- isolate->compilation_cache()->Disable(); // Disable same-isolate code cache.
+ isolate->compilation_cache()
+ ->DisableScriptAndEval(); // Disable same-isolate code cache.
v8::HandleScope scope(CcTest::isolate());
@@ -1919,7 +1923,8 @@ TEST(CodeSerializerThreeBigStrings) {
LocalContext context;
Isolate* isolate = CcTest::i_isolate();
Factory* f = isolate->factory();
- isolate->compilation_cache()->Disable(); // Disable same-isolate code cache.
+ isolate->compilation_cache()
+ ->DisableScriptAndEval(); // Disable same-isolate code cache.
v8::HandleScope scope(CcTest::isolate());
@@ -2038,7 +2043,8 @@ class SerializerTwoByteResource : public v8::String::ExternalStringResource {
TEST(CodeSerializerExternalString) {
LocalContext context;
Isolate* isolate = CcTest::i_isolate();
- isolate->compilation_cache()->Disable(); // Disable same-isolate code cache.
+ isolate->compilation_cache()
+ ->DisableScriptAndEval(); // Disable same-isolate code cache.
v8::HandleScope scope(CcTest::isolate());
@@ -2104,7 +2110,8 @@ TEST(CodeSerializerExternalString) {
TEST(CodeSerializerLargeExternalString) {
LocalContext context;
Isolate* isolate = CcTest::i_isolate();
- isolate->compilation_cache()->Disable(); // Disable same-isolate code cache.
+ isolate->compilation_cache()
+ ->DisableScriptAndEval(); // Disable same-isolate code cache.
Factory* f = isolate->factory();
@@ -2164,7 +2171,8 @@ TEST(CodeSerializerLargeExternalString) {
TEST(CodeSerializerExternalScriptName) {
LocalContext context;
Isolate* isolate = CcTest::i_isolate();
- isolate->compilation_cache()->Disable(); // Disable same-isolate code cache.
+ isolate->compilation_cache()
+ ->DisableScriptAndEval(); // Disable same-isolate code cache.
Factory* f = isolate->factory();
@@ -3606,13 +3614,13 @@ UNINITIALIZED_TEST(SnapshotCreatorIncludeGlobalProxy) {
// We can introduce new extensions, which could override functions already
// in the snapshot.
auto extension =
- base::make_unique<v8::Extension>("new extension",
- "function i() { return 24; }"
- "function j() { return 25; }"
- "let a = 26;"
- "try {"
- " if (o.p == 7) o.p++;"
- "} catch {}");
+ std::make_unique<v8::Extension>("new extension",
+ "function i() { return 24; }"
+ "function j() { return 25; }"
+ "let a = 26;"
+ "try {"
+ " if (o.p == 7) o.p++;"
+ "} catch {}");
extension->set_auto_enable(true);
v8::RegisterExtension(std::move(extension));
{
@@ -3900,7 +3908,7 @@ UNINITIALIZED_TEST(WeakArraySerializationInSnapshot) {
TEST(WeakArraySerializationInCodeCache) {
LocalContext context;
Isolate* isolate = CcTest::i_isolate();
- isolate->compilation_cache()->Disable();
+ isolate->compilation_cache()->DisableScriptAndEval();
v8::HandleScope scope(CcTest::isolate());
@@ -3929,7 +3937,8 @@ TEST(CachedCompileFunctionInContext) {
DisableAlwaysOpt();
LocalContext env;
Isolate* isolate = CcTest::i_isolate();
- isolate->compilation_cache()->Disable(); // Disable same-isolate code cache.
+ isolate->compilation_cache()
+ ->DisableScriptAndEval(); // Disable same-isolate code cache.
v8::HandleScope scope(CcTest::isolate());
diff --git a/deps/v8/test/cctest/test-smi-lexicographic-compare.cc b/deps/v8/test/cctest/test-smi-lexicographic-compare.cc
index 914444c634..7b3e600c14 100644
--- a/deps/v8/test/cctest/test-smi-lexicographic-compare.cc
+++ b/deps/v8/test/cctest/test-smi-lexicographic-compare.cc
@@ -14,11 +14,11 @@ namespace internal {
namespace {
-void AddSigned(std::set<Smi>& smis, int64_t x) { // NOLINT(runtime/references)
+void AddSigned(std::set<Smi>* smis, int64_t x) {
if (!Smi::IsValid(x)) return;
- smis.insert(Smi::FromInt(static_cast<int>(x)));
- smis.insert(Smi::FromInt(static_cast<int>(-x)));
+ smis->insert(Smi::FromInt(static_cast<int>(x)));
+ smis->insert(Smi::FromInt(static_cast<int>(-x)));
}
// Uses std::lexicographical_compare twice to convert the result to -1, 0 or 1.
@@ -58,14 +58,14 @@ TEST(TestSmiLexicographicCompare) {
for (int64_t xb = 1; xb <= Smi::kMaxValue; xb *= 10) {
for (int64_t xf = 0; xf <= 9; ++xf) {
for (int64_t xo = -1; xo <= 1; ++xo) {
- AddSigned(smis, xb * xf + xo);
+ AddSigned(&smis, xb * xf + xo);
}
}
}
for (int64_t yb = 1; yb <= Smi::kMaxValue; yb *= 2) {
for (int64_t yo = -2; yo <= 2; ++yo) {
- AddSigned(smis, yb + yo);
+ AddSigned(&smis, yb + yo);
}
}
diff --git a/deps/v8/test/cctest/test-strings.cc b/deps/v8/test/cctest/test-strings.cc
index 796f38a73b..f64c044a0c 100644
--- a/deps/v8/test/cctest/test-strings.cc
+++ b/deps/v8/test/cctest/test-strings.cc
@@ -1857,6 +1857,48 @@ GC_INSIDE_NEW_STRING_FROM_UTF8_SUB_STRING(
#undef GC_INSIDE_NEW_STRING_FROM_UTF8_SUB_STRING
+namespace {
+
+struct IndexData {
+ const char* string;
+ bool is_array_index;
+ uint32_t array_index;
+ bool is_integer_index;
+ size_t integer_index;
+};
+
+void TestString(i::Isolate* isolate, const IndexData& data) {
+ Handle<String> s = isolate->factory()->NewStringFromAsciiChecked(data.string);
+ if (data.is_array_index) {
+ uint32_t index;
+ CHECK(s->AsArrayIndex(&index));
+ CHECK_EQ(data.array_index, index);
+ // AsArrayIndex only forces hash computation for cacheable indices, so
+ // trigger hash computation for longer strings manually.
+ if (s->length() > String::kMaxCachedArrayIndexLength) s->Hash();
+ CHECK_EQ(0, s->hash_field() & String::kIsNotArrayIndexMask);
+ CHECK(s->HasHashCode());
+ }
+ if (data.is_integer_index) {
+ size_t index;
+ CHECK(s->AsIntegerIndex(&index));
+ CHECK_EQ(data.integer_index, index);
+ s->Hash();
+ CHECK_EQ(0, s->hash_field() & String::kIsNotIntegerIndexMask);
+ CHECK(s->HasHashCode());
+ }
+ if (!s->HasHashCode()) s->Hash();
+ CHECK(s->HasHashCode());
+ if (!data.is_array_index) {
+ CHECK_NE(0, s->hash_field() & String::kIsNotArrayIndexMask);
+ }
+ if (!data.is_integer_index) {
+ CHECK_NE(0, s->hash_field() & String::kIsNotIntegerIndexMask);
+ }
+}
+
+} // namespace
+
TEST(HashArrayIndexStrings) {
CcTest::InitializeVM();
LocalContext context;
@@ -1870,6 +1912,27 @@ TEST(HashArrayIndexStrings) {
CHECK_EQ(StringHasher::MakeArrayIndexHash(1 /* value */, 1 /* length */) >>
Name::kHashShift,
isolate->factory()->one_string()->Hash());
+
+ IndexData tests[] = {
+ {"", false, 0, false, 0},
+ {"123no", false, 0, false, 0},
+ {"12345", true, 12345, true, 12345},
+ {"12345678", true, 12345678, true, 12345678},
+ {"4294967294", true, 4294967294u, true, 4294967294u},
+#if V8_TARGET_ARCH_32_BIT
+ {"4294967295", false, 0, false, 0}, // Valid length but not index.
+ {"4294967296", false, 0, false, 0},
+ {"18446744073709551615", false, 0, false, 0},
+#else
+ {"4294967295", false, 0, true, 4294967295u},
+ {"4294967296", false, 0, true, 4294967296ull},
+ {"18446744073709551615", false, 0, true, 18446744073709551615ull},
+#endif
+ {"18446744073709551616", false, 0, false, 0}
+ };
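+ // Note: 4294967294 (2**32 - 2) is the largest valid array index, since
+ // 2**32 - 1 is a valid array *length* but not an index; on 64-bit targets
+ // larger values can still be valid integer indices while they fit in
+ // size_t, as the table above encodes.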
+ for (int i = 0, n = arraysize(tests); i < n; i++) {
+ TestString(isolate, tests[i]);
+ }
}
TEST(StringEquals) {
diff --git a/deps/v8/test/cctest/test-threads.cc b/deps/v8/test/cctest/test-threads.cc
index 2062724043..5c852d7232 100644
--- a/deps/v8/test/cctest/test-threads.cc
+++ b/deps/v8/test/cctest/test-threads.cc
@@ -75,7 +75,7 @@ TEST(ThreadIdValidation) {
ThreadIdValidationThread* prev =
i == kNThreads - 1 ? nullptr : threads[i + 1].get();
threads[i] =
- base::make_unique<ThreadIdValidationThread>(prev, refs, i, &semaphore);
+ std::make_unique<ThreadIdValidationThread>(prev, refs, i, &semaphore);
}
CHECK(threads[0]->Start());
for (int i = 0; i < kNThreads; i++) {
diff --git a/deps/v8/test/cctest/test-trace-event.cc b/deps/v8/test/cctest/test-trace-event.cc
index 7b3c215d69..0f4a699d8a 100644
--- a/deps/v8/test/cctest/test-trace-event.cc
+++ b/deps/v8/test/cctest/test-trace-event.cc
@@ -6,7 +6,6 @@
#include "src/init/v8.h"
-#include "src/base/template-utils.h"
#include "test/cctest/cctest.h"
#include "src/tracing/trace-event.h"
@@ -55,9 +54,8 @@ class MockTracingController : public v8::TracingController {
const uint64_t* arg_values,
std::unique_ptr<v8::ConvertableToTraceFormat>* arg_convertables,
unsigned int flags, int64_t timestamp) override {
- std::unique_ptr<MockTraceObject> to =
- v8::base::make_unique<MockTraceObject>(
- phase, std::string(name), id, bind_id, num_args, flags, timestamp);
+ std::unique_ptr<MockTraceObject> to = std::make_unique<MockTraceObject>(
+ phase, std::string(name), id, bind_id, num_args, flags, timestamp);
trace_objects_.push_back(std::move(to));
return 0;
}
diff --git a/deps/v8/test/cctest/test-typedarrays.cc b/deps/v8/test/cctest/test-typedarrays.cc
index fb4740cb92..5e715b75e0 100644
--- a/deps/v8/test/cctest/test-typedarrays.cc
+++ b/deps/v8/test/cctest/test-typedarrays.cc
@@ -7,6 +7,7 @@
#include "src/init/v8.h"
#include "test/cctest/cctest.h"
+#include "src/execution/protectors-inl.h"
#include "src/heap/heap.h"
#include "src/objects/objects-inl.h"
#include "src/objects/objects.h"
@@ -14,12 +15,11 @@
namespace v8 {
namespace internal {
-void TestArrayBufferViewContents(
- LocalContext& env, // NOLINT(runtime/references)
- bool should_use_buffer) {
+void TestArrayBufferViewContents(LocalContext* env, bool should_use_buffer) {
v8::Local<v8::Object> obj_a = v8::Local<v8::Object>::Cast(
- env->Global()
- ->Get(env->GetIsolate()->GetCurrentContext(), v8_str("a"))
+ (*env)
+ ->Global()
+ ->Get((*env)->GetIsolate()->GetCurrentContext(), v8_str("a"))
.ToLocalChecked());
CHECK(obj_a->IsArrayBufferView());
v8::Local<v8::ArrayBufferView> array_buffer_view =
@@ -43,7 +43,7 @@ TEST(CopyContentsTypedArray) {
"a[1] = 1;"
"a[2] = 2;"
"a[3] = 3;");
- TestArrayBufferViewContents(env, false);
+ TestArrayBufferViewContents(&env, false);
}
@@ -51,7 +51,7 @@ TEST(CopyContentsArray) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
CompileRun("var a = new Uint8Array([0, 1, 2, 3]);");
- TestArrayBufferViewContents(env, false);
+ TestArrayBufferViewContents(&env, false);
}
@@ -68,7 +68,7 @@ TEST(CopyContentsView) {
"c[4] = 2;"
"c[5] = 3;"
"var a = new DataView(b, 2);");
- TestArrayBufferViewContents(env, true);
+ TestArrayBufferViewContents(&env, true);
}
@@ -82,7 +82,7 @@ TEST(AllocateNotExternal) {
v8::ArrayBuffer::New(env->GetIsolate(), memory, 1024,
v8::ArrayBufferCreationMode::kInternalized);
CHECK(!buffer->IsExternal());
- CHECK_EQ(memory, buffer->GetContents().Data());
+ CHECK_EQ(memory, buffer->GetBackingStore()->Data());
}
void TestSpeciesProtector(char* code,
@@ -115,12 +115,12 @@ void TestSpeciesProtector(char* code,
v8::internal::Isolate* i_isolate =
reinterpret_cast<v8::internal::Isolate*>(isolate);
- CHECK(i_isolate->IsTypedArraySpeciesLookupChainIntact());
+ CHECK(Protectors::IsTypedArraySpeciesLookupChainIntact(i_isolate));
CompileRun(code);
if (invalidates_species_protector) {
- CHECK(!i_isolate->IsTypedArraySpeciesLookupChainIntact());
+ CHECK(!Protectors::IsTypedArraySpeciesLookupChainIntact(i_isolate));
} else {
- CHECK(i_isolate->IsTypedArraySpeciesLookupChainIntact());
+ CHECK(Protectors::IsTypedArraySpeciesLookupChainIntact(i_isolate));
}
v8::Local<v8::Value> my_typed_array = CompileRun("MyTypedArray");
diff --git a/deps/v8/test/cctest/test-unboxed-doubles.cc b/deps/v8/test/cctest/test-unboxed-doubles.cc
index 9cfc40d37d..ebeb05597e 100644
--- a/deps/v8/test/cctest/test-unboxed-doubles.cc
+++ b/deps/v8/test/cctest/test-unboxed-doubles.cc
@@ -78,8 +78,9 @@ static double GetDoubleFieldValue(JSObject obj, FieldIndex field_index) {
}
}
-void WriteToField(JSObject object, int descriptor, Object value) {
+void WriteToField(JSObject object, int index, Object value) {
DescriptorArray descriptors = object.map().instance_descriptors();
+ InternalIndex descriptor(index);
PropertyDetails details = descriptors.GetDetails(descriptor);
object.WriteToField(descriptor, details, value);
}
@@ -811,7 +812,7 @@ static Handle<LayoutDescriptor> TestLayoutDescriptorAppendIfFastOrUseFull(
Handle<Map> map;
// Now check layout descriptors of all intermediate maps.
for (int i = 0; i < number_of_descriptors; i++) {
- PropertyDetails details = descriptors->GetDetails(i);
+ PropertyDetails details = descriptors->GetDetails(InternalIndex(i));
map = maps[i];
LayoutDescriptor layout_desc = map->layout_descriptor();
@@ -962,7 +963,7 @@ TEST(Regress436816) {
CHECK(fake_object.IsHeapObject());
uint64_t boom_value = bit_cast<uint64_t>(fake_object);
- for (int i = 0; i < kPropsCount; i++) {
+ for (InternalIndex i : InternalIndex::Range(kPropsCount)) {
FieldIndex index = FieldIndex::ForDescriptor(*map, i);
CHECK(map->IsUnboxedDoubleField(index));
object->RawFastDoublePropertyAsBitsAtPut(index, boom_value);
@@ -1100,7 +1101,7 @@ TEST(DoScavenge) {
{
// Ensure the object is properly set up.
- FieldIndex field_index = FieldIndex::ForDescriptor(*map, 0);
+ FieldIndex field_index = FieldIndex::ForDescriptor(*map, InternalIndex(0));
CHECK(field_index.is_inobject() && field_index.is_double());
CHECK_EQ(FLAG_unbox_double_fields, map->IsUnboxedDoubleField(field_index));
CHECK_EQ(42.5, GetDoubleFieldValue(*obj, field_index));
@@ -1119,7 +1120,8 @@ TEST(DoScavenge) {
Address fake_object = temp->ptr() + kSystemPointerSize;
double boom_value = bit_cast<double>(fake_object);
- FieldIndex field_index = FieldIndex::ForDescriptor(obj->map(), 0);
+ FieldIndex field_index =
+ FieldIndex::ForDescriptor(obj->map(), InternalIndex(0));
auto boom_number = factory->NewHeapNumber(boom_value);
obj->FastPropertyAtPut(field_index, *boom_number);
@@ -1182,12 +1184,12 @@ TEST(DoScavengeWithIncrementalWriteBarrier) {
{
// Ensure the object is properly set up.
- FieldIndex field_index = FieldIndex::ForDescriptor(*map, 0);
+ FieldIndex field_index = FieldIndex::ForDescriptor(*map, InternalIndex(0));
CHECK(field_index.is_inobject() && field_index.is_double());
CHECK_EQ(FLAG_unbox_double_fields, map->IsUnboxedDoubleField(field_index));
CHECK_EQ(42.5, GetDoubleFieldValue(*obj, field_index));
- field_index = FieldIndex::ForDescriptor(*map, 1);
+ field_index = FieldIndex::ForDescriptor(*map, InternalIndex(1));
CHECK(field_index.is_inobject() && !field_index.is_double());
CHECK(!map->IsUnboxedDoubleField(field_index));
}
@@ -1225,7 +1227,7 @@ TEST(DoScavengeWithIncrementalWriteBarrier) {
// |obj_value| must be evacuated.
CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(*obj_value));
- FieldIndex field_index = FieldIndex::ForDescriptor(*map, 1);
+ FieldIndex field_index = FieldIndex::ForDescriptor(*map, InternalIndex(1));
CHECK_EQ(*obj_value, obj->RawFastPropertyAt(field_index));
}
@@ -1248,7 +1250,7 @@ static void TestLayoutDescriptorHelper(Isolate* isolate,
int end_offset = instance_size * 2;
int first_non_tagged_field_offset = end_offset;
- for (int i = 0; i < number_of_descriptors; i++) {
+ for (InternalIndex i : InternalIndex::Range(number_of_descriptors)) {
PropertyDetails details = descriptors->GetDetails(i);
if (details.location() != kField) continue;
FieldIndex index = FieldIndex::ForDescriptor(*map, i);
@@ -1430,9 +1432,9 @@ TEST(LayoutDescriptorSharing) {
CHECK(map2->layout_descriptor().IsConsistentWithMap(*map2, true));
}
-
static void TestWriteBarrier(Handle<Map> map, Handle<Map> new_map,
- int tagged_descriptor, int double_descriptor,
+ InternalIndex tagged_descriptor,
+ InternalIndex double_descriptor,
bool check_tagged_value = true) {
FLAG_stress_compaction = true;
FLAG_manual_evacuation_candidates_selection = true;
@@ -1491,10 +1493,9 @@ static void TestWriteBarrier(Handle<Map> map, Handle<Map> new_map,
CHECK_EQ(boom_value, obj->RawFastDoublePropertyAsBitsAt(double_field_index));
}
-
static void TestIncrementalWriteBarrier(Handle<Map> map, Handle<Map> new_map,
- int tagged_descriptor,
- int double_descriptor,
+ InternalIndex tagged_descriptor,
+ InternalIndex double_descriptor,
bool check_tagged_value = true) {
if (FLAG_never_compact || !FLAG_incremental_marking) return;
ManualGCScope manual_gc_scope;
@@ -1607,14 +1608,16 @@ static void TestWriteBarrierObjectShiftFieldsRight(
.ToHandleChecked();
// Shift fields right by turning constant property to a field.
- Handle<Map> new_map = Map::ReconfigureProperty(
- isolate, map, 0, kData, NONE, Representation::Tagged(), any_type);
+ Handle<Map> new_map =
+ Map::ReconfigureProperty(isolate, map, InternalIndex(0), kData, NONE,
+ Representation::Tagged(), any_type);
if (write_barrier_kind == OLD_TO_NEW_WRITE_BARRIER) {
- TestWriteBarrier(map, new_map, 2, 1);
+ TestWriteBarrier(map, new_map, InternalIndex(2), InternalIndex(1));
} else {
CHECK_EQ(OLD_TO_OLD_WRITE_BARRIER, write_barrier_kind);
- TestIncrementalWriteBarrier(map, new_map, 2, 1);
+ TestIncrementalWriteBarrier(map, new_map, InternalIndex(2),
+ InternalIndex(1));
}
}
diff --git a/deps/v8/test/cctest/torque/test-torque.cc b/deps/v8/test/cctest/torque/test-torque.cc
index 184a867946..5cf70f3374 100644
--- a/deps/v8/test/cctest/torque/test-torque.cc
+++ b/deps/v8/test/cctest/torque/test-torque.cc
@@ -26,7 +26,6 @@ namespace compiler {
namespace {
-using Label = CodeAssemblerLabel;
using Variable = CodeAssemblerVariable;
class TestTorqueAssembler : public CodeStubAssembler {
diff --git a/deps/v8/test/cctest/wasm/OWNERS b/deps/v8/test/cctest/wasm/OWNERS
index dc68b39733..16b08f3b3b 100644
--- a/deps/v8/test/cctest/wasm/OWNERS
+++ b/deps/v8/test/cctest/wasm/OWNERS
@@ -1,5 +1,5 @@
ahaas@chromium.org
-clemensh@chromium.org
+clemensb@chromium.org
titzer@chromium.org
# COMPONENT: Blink>JavaScript>WebAssembly
diff --git a/deps/v8/test/cctest/wasm/test-grow-memory.cc b/deps/v8/test/cctest/wasm/test-grow-memory.cc
new file mode 100644
index 0000000000..a188707cae
--- /dev/null
+++ b/deps/v8/test/cctest/wasm/test-grow-memory.cc
@@ -0,0 +1,131 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/wasm/wasm-objects-inl.h"
+#include "src/wasm/wasm-opcodes.h"
+
+#include "src/wasm/wasm-module-builder.h"
+#include "test/cctest/cctest.h"
+#include "test/cctest/manually-externalized-buffer.h"
+#include "test/common/wasm/flag-utils.h"
+#include "test/common/wasm/test-signatures.h"
+#include "test/common/wasm/wasm-macro-gen.h"
+#include "test/common/wasm/wasm-module-runner.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+namespace test_grow_memory {
+
+using testing::CompileAndInstantiateForTesting;
+using v8::internal::testing::ManuallyExternalizedBuffer;
+
+namespace {
+void ExportAsMain(WasmFunctionBuilder* f) {
+ f->builder()->AddExport(CStrVector("main"), f);
+}
+#define EMIT_CODE_WITH_END(f, code) \
+ do { \
+ f->EmitCode(code, sizeof(code)); \
+ f->Emit(kExprEnd); \
+ } while (false)
+
+void Cleanup(Isolate* isolate = CcTest::InitIsolateOnce()) {
+ // By sending a low memory notification, we will try hard to collect all
+ // garbage and will therefore also invoke all weak callbacks of actually
+ // unreachable persistent handles.
+ reinterpret_cast<v8::Isolate*>(isolate)->LowMemoryNotification();
+}
+} // namespace
+
+TEST(GrowMemDetaches) {
+ {
+ Isolate* isolate = CcTest::InitIsolateOnce();
+ HandleScope scope(isolate);
+ Handle<WasmMemoryObject> memory_object =
+ WasmMemoryObject::New(isolate, 16, 100, SharedFlag::kNotShared)
+ .ToHandleChecked();
+ Handle<JSArrayBuffer> buffer(memory_object->array_buffer(), isolate);
+ int32_t result = WasmMemoryObject::Grow(isolate, memory_object, 0);
+ CHECK_EQ(16, result);
+ CHECK_NE(*buffer, memory_object->array_buffer());
+ CHECK(buffer->was_detached());
+ }
+ Cleanup();
+}
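+
+// JS-level equivalent of the detach-on-grow behavior checked above
+// (illustrative sketch only, not part of the test):
+//   const mem = new WebAssembly.Memory({initial: 16, maximum: 100});
+//   const buf = mem.buffer;
+//   mem.grow(0);         // growing detaches the old buffer, even by 0 pages
+//   // now buf.byteLength === 0 and mem.buffer is a fresh ArrayBuffer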
+
+TEST(Externalized_GrowMemMemSize) {
+ {
+ Isolate* isolate = CcTest::InitIsolateOnce();
+ HandleScope scope(isolate);
+ Handle<WasmMemoryObject> memory_object =
+ WasmMemoryObject::New(isolate, 16, 100, SharedFlag::kNotShared)
+ .ToHandleChecked();
+ ManuallyExternalizedBuffer external(
+ handle(memory_object->array_buffer(), isolate));
+ int32_t result = WasmMemoryObject::Grow(isolate, memory_object, 0);
+ CHECK_EQ(16, result);
+ CHECK_NE(*external.buffer_, memory_object->array_buffer());
+ CHECK(external.buffer_->was_detached());
+ }
+ Cleanup();
+}
+
+TEST(Run_WasmModule_Buffer_Externalized_GrowMem) {
+ {
+ Isolate* isolate = CcTest::InitIsolateOnce();
+ HandleScope scope(isolate);
+ TestSignatures sigs;
+ v8::internal::AccountingAllocator allocator;
+ Zone zone(&allocator, ZONE_NAME);
+
+ WasmModuleBuilder* builder = new (&zone) WasmModuleBuilder(&zone);
+ WasmFunctionBuilder* f = builder->AddFunction(sigs.i_v());
+ ExportAsMain(f);
+ byte code[] = {WASM_GROW_MEMORY(WASM_I32V_1(6)), WASM_DROP,
+ WASM_MEMORY_SIZE};
+ EMIT_CODE_WITH_END(f, code);
+
+ ZoneBuffer buffer(&zone);
+ builder->WriteTo(&buffer);
+ testing::SetupIsolateForWasmModule(isolate);
+ ErrorThrower thrower(isolate, "Test");
+ const Handle<WasmInstanceObject> instance =
+ CompileAndInstantiateForTesting(
+ isolate, &thrower, ModuleWireBytes(buffer.begin(), buffer.end()))
+ .ToHandleChecked();
+ Handle<WasmMemoryObject> memory_object(instance->memory_object(), isolate);
+
+ // Fake the Embedder flow by externalizing the array buffer.
+ ManuallyExternalizedBuffer external1(
+ handle(memory_object->array_buffer(), isolate));
+
+ // Grow using the API.
+ uint32_t result = WasmMemoryObject::Grow(isolate, memory_object, 4);
+ CHECK_EQ(16, result);
+ CHECK(external1.buffer_->was_detached()); // growing always detaches
+ CHECK_EQ(0, external1.buffer_->byte_length());
+
+ CHECK_NE(*external1.buffer_, memory_object->array_buffer());
+
+ // Fake the Embedder flow by externalizing the array buffer.
+ ManuallyExternalizedBuffer external2(
+ handle(memory_object->array_buffer(), isolate));
+
+ // Grow using an internal WASM bytecode.
+ result = testing::RunWasmModuleForTesting(isolate, instance, 0, nullptr);
+ CHECK_EQ(26, result);
+ CHECK(external2.buffer_->was_detached()); // growing always detaches
+ CHECK_EQ(0, external2.buffer_->byte_length());
+ CHECK_NE(*external2.buffer_, memory_object->array_buffer());
+ }
+ Cleanup();
+}
+
+} // namespace test_grow_memory
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#undef EMIT_CODE_WITH_END
diff --git a/deps/v8/test/cctest/wasm/test-jump-table-assembler.cc b/deps/v8/test/cctest/wasm/test-jump-table-assembler.cc
index 556d74daef..d3aa75a64e 100644
--- a/deps/v8/test/cctest/wasm/test-jump-table-assembler.cc
+++ b/deps/v8/test/cctest/wasm/test-jump-table-assembler.cc
@@ -36,7 +36,7 @@ constexpr size_t kThunkBufferSize = AssemblerBase::kMinimalBufferSize;
#if V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_X64
constexpr uint32_t kAvailableBufferSlots =
- (kMaxWasmCodeMemory - kJumpTableSize) / kThunkBufferSize;
+ (kMaxWasmCodeSpaceSize - kJumpTableSize) / kThunkBufferSize;
constexpr uint32_t kBufferSlotStartOffset =
RoundUp<kThunkBufferSize>(kJumpTableSize);
#else
@@ -49,7 +49,7 @@ Address AllocateJumpTableThunk(
std::vector<std::unique_ptr<TestingAssemblerBuffer>>* thunk_buffers) {
#if V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_X64
// To guarantee that the branch range lies within the near-call range,
- // generate the thunk in the same (kMaxWasmCodeMemory-sized) buffer as the
+ // generate the thunk in the same (kMaxWasmCodeSpaceSize-sized) buffer as the
// jump_target itself.
//
// Allocate a slot that we haven't already used. This is necessary because
@@ -181,11 +181,13 @@ class JumpTablePatcher : public v8::base::Thread {
// Then, repeatedly patch the jump table to jump to one of the two thunks.
constexpr int kNumberOfPatchIterations = 64;
for (int i = 0; i < kNumberOfPatchIterations; ++i) {
- TRACE(" patcher %p patch slot " V8PRIxPTR_FMT " to thunk #%d\n", this,
- slot_address, i % 2);
+ TRACE(" patcher %p patch slot " V8PRIxPTR_FMT
+ " to thunk #%d (" V8PRIxPTR_FMT ")\n",
+ this, slot_address, i % 2, thunks_[i % 2]);
base::MutexGuard jump_table_guard(jump_table_mutex_);
JumpTableAssembler::PatchJumpTableSlot(
- slot_start_, slot_index_, thunks_[i % 2], WasmCode::kFlushICache);
+ slot_start_ + JumpTableAssembler::JumpSlotIndexToOffset(slot_index_),
+ kNullAddress, thunks_[i % 2]);
}
TRACE("Patcher %p is stopping ...\n", this);
}
@@ -219,11 +221,8 @@ TEST(JumpTablePatchingStress) {
// is not reliable enough to guarantee that we can always achieve this with
// separate allocations, so for Arm64 we generate all code in a single
// kMaxWasmCodeMemory-sized chunk.
- //
- // TODO(wasm): Currently {kMaxWasmCodeMemory} limits code sufficiently, so
- // that the jump table only supports {near_call} distances.
- STATIC_ASSERT(kMaxWasmCodeMemory >= kJumpTableSize);
- auto buffer = AllocateAssemblerBuffer(kMaxWasmCodeMemory);
+ STATIC_ASSERT(kMaxWasmCodeSpaceSize >= kJumpTableSize);
+ auto buffer = AllocateAssemblerBuffer(kMaxWasmCodeSpaceSize);
byte* thunk_slot_buffer = buffer->start() + kBufferSlotStartOffset;
#else
auto buffer = AllocateAssemblerBuffer(kJumpTableSize);
@@ -242,8 +241,9 @@ TEST(JumpTablePatchingStress) {
std::vector<std::unique_ptr<TestingAssemblerBuffer>> thunk_buffers;
// Patch the jump table slot to jump to itself. This will later be patched
// by the patchers.
- JumpTableAssembler::PatchJumpTableSlot(
- slot_start, slot, slot_start + slot_offset, WasmCode::kFlushICache);
+ Address slot_addr =
+ slot_start + JumpTableAssembler::JumpSlotIndexToOffset(slot);
+ JumpTableAssembler::PatchJumpTableSlot(slot_addr, kNullAddress, slot_addr);
// For each patcher, generate two thunks where this patcher can emit code
// which finally jumps back to {slot} in the jump table.
std::vector<Address> patcher_thunks;
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-64.cc b/deps/v8/test/cctest/wasm/test-run-wasm-64.cc
index 3f96f8720f..09d1eb7fda 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-64.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-64.cc
@@ -1502,7 +1502,7 @@ static void CompileCallIndirectMany(ExecutionTier tier, ValueType param) {
std::vector<byte> code;
for (byte p = 0; p < num_params; p++) {
- ADD_CODE(code, kExprGetLocal, p);
+ ADD_CODE(code, kExprLocalGet, p);
}
ADD_CODE(code, kExprI32Const, 0);
ADD_CODE(code, kExprCallIndirect, 1, TABLE_ZERO);
@@ -1563,7 +1563,7 @@ static void Run_WasmMixedCall_N(ExecutionTier execution_tier, int start) {
// Store the result in a local.
byte local_index = r.AllocateLocal(ValueTypes::ValueTypeFor(result));
- ADD_CODE(code, kExprSetLocal, local_index);
+ ADD_CODE(code, kExprLocalSet, local_index);
// Store the result in memory.
ADD_CODE(code,
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-bulk-memory.cc b/deps/v8/test/cctest/wasm/test-run-wasm-bulk-memory.cc
index e794c00ece..d2ac3434df 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-bulk-memory.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-bulk-memory.cc
@@ -13,11 +13,10 @@ namespace wasm {
namespace test_run_wasm_bulk_memory {
namespace {
-void CheckMemoryEquals(
- TestingModuleBuilder& builder, // NOLINT(runtime/references)
- size_t index, const std::vector<byte>& expected) {
- const byte* mem_start = builder.raw_mem_start<byte>();
- const byte* mem_end = builder.raw_mem_end<byte>();
+void CheckMemoryEquals(TestingModuleBuilder* builder, size_t index,
+ const std::vector<byte>& expected) {
+ const byte* mem_start = builder->raw_mem_start<byte>();
+ const byte* mem_end = builder->raw_mem_end<byte>();
size_t mem_size = mem_end - mem_start;
CHECK_LE(index, mem_size);
CHECK_LE(index + expected.size(), mem_size);
@@ -26,11 +25,10 @@ void CheckMemoryEquals(
}
}
-void CheckMemoryEqualsZero(
- TestingModuleBuilder& builder, // NOLINT(runtime/references)
- size_t index, size_t length) {
- const byte* mem_start = builder.raw_mem_start<byte>();
- const byte* mem_end = builder.raw_mem_end<byte>();
+void CheckMemoryEqualsZero(TestingModuleBuilder* builder, size_t index,
+ size_t length) {
+ const byte* mem_start = builder->raw_mem_start<byte>();
+ const byte* mem_end = builder->raw_mem_end<byte>();
size_t mem_size = mem_end - mem_start;
CHECK_LE(index, mem_size);
CHECK_LE(index + length, mem_size);
@@ -39,12 +37,11 @@ void CheckMemoryEqualsZero(
}
}
-void CheckMemoryEqualsFollowedByZeroes(
- TestingModuleBuilder& builder, // NOLINT(runtime/references)
- const std::vector<byte>& expected) {
+void CheckMemoryEqualsFollowedByZeroes(TestingModuleBuilder* builder,
+ const std::vector<byte>& expected) {
CheckMemoryEquals(builder, 0, expected);
CheckMemoryEqualsZero(builder, expected.size(),
- builder.mem_size() - expected.size());
+ builder->mem_size() - expected.size());
}
} // namespace
@@ -60,24 +57,24 @@ WASM_EXEC_TEST(MemoryInit) {
kExprI32Const, 0);
// All zeroes.
- CheckMemoryEqualsZero(r.builder(), 0, kWasmPageSize);
+ CheckMemoryEqualsZero(&r.builder(), 0, kWasmPageSize);
// Copy all bytes from data segment 0, to memory at [10, 20).
CHECK_EQ(0, r.Call(10, 0, 10));
CheckMemoryEqualsFollowedByZeroes(
- r.builder(),
+ &r.builder(),
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9});
// Copy bytes in range [5, 10) from data segment 0, to memory at [0, 5).
CHECK_EQ(0, r.Call(0, 5, 5));
CheckMemoryEqualsFollowedByZeroes(
- r.builder(),
+ &r.builder(),
{5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9});
// Copy 0 bytes does nothing.
CHECK_EQ(0, r.Call(10, 1, 0));
CheckMemoryEqualsFollowedByZeroes(
- r.builder(),
+ &r.builder(),
{5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9});
// Copy 0 at end of memory region or data segment is OK.
@@ -100,12 +97,12 @@ WASM_EXEC_TEST(MemoryInitOutOfBoundsData) {
// Write all values up to the out-of-bounds write.
CHECK_EQ(0xDEADBEEF, r.Call(kWasmPageSize - 5, 0, 6));
- CheckMemoryEquals(r.builder(), last_5_bytes, {0, 1, 2, 3, 4});
+ CheckMemoryEquals(&r.builder(), last_5_bytes, {0, 1, 2, 3, 4});
// Write all values up to the out-of-bounds read.
r.builder().BlankMemory();
CHECK_EQ(0xDEADBEEF, r.Call(0, 5, 6));
- CheckMemoryEqualsFollowedByZeroes(r.builder(), {5, 6, 7, 8, 9});
+ CheckMemoryEqualsFollowedByZeroes(&r.builder(), {5, 6, 7, 8, 9});
}
WASM_EXEC_TEST(MemoryInitOutOfBounds) {
@@ -155,13 +152,13 @@ WASM_EXEC_TEST(MemoryCopy) {
// Copy from [1, 8] to [10, 16].
CHECK_EQ(0, r.Call(10, 1, 8));
CheckMemoryEqualsFollowedByZeroes(
- r.builder(),
+ &r.builder(),
{0, 11, 22, 33, 44, 55, 66, 77, 0, 0, 11, 22, 33, 44, 55, 66, 77});
// Copy 0 bytes does nothing.
CHECK_EQ(0, r.Call(10, 2, 0));
CheckMemoryEqualsFollowedByZeroes(
- r.builder(),
+ &r.builder(),
{0, 11, 22, 33, 44, 55, 66, 77, 0, 0, 11, 22, 33, 44, 55, 66, 77});
// Copy 0 at end of memory region is OK.
@@ -184,12 +181,12 @@ WASM_EXEC_TEST(MemoryCopyOverlapping) {
// Copy from [0, 3] -> [2, 5]. The copy must not overwrite 30 before copying
// it (i.e. cannot copy forward in this case).
CHECK_EQ(0, r.Call(2, 0, 3));
- CheckMemoryEqualsFollowedByZeroes(r.builder(), {10, 20, 10, 20, 30});
+ CheckMemoryEqualsFollowedByZeroes(&r.builder(), {10, 20, 10, 20, 30});
// Copy from [2, 5] -> [0, 3]. The copy must not write the first 10 (i.e.
// cannot copy backward in this case).
CHECK_EQ(0, r.Call(0, 2, 3));
- CheckMemoryEqualsFollowedByZeroes(r.builder(), {10, 20, 30, 20, 30});
+ CheckMemoryEqualsFollowedByZeroes(&r.builder(), {10, 20, 30, 20, 30});
}
WASM_EXEC_TEST(MemoryCopyOutOfBoundsData) {
@@ -209,21 +206,21 @@ WASM_EXEC_TEST(MemoryCopyOutOfBoundsData) {
// Copy with source < destination. Copy would happen backwards,
// but the first byte to copy is out-of-bounds, so no data should be written.
CHECK_EQ(0xDEADBEEF, r.Call(last_5_bytes, 0, 6));
- CheckMemoryEquals(r.builder(), last_5_bytes, {0, 0, 0, 0, 0});
+ CheckMemoryEquals(&r.builder(), last_5_bytes, {0, 0, 0, 0, 0});
// Copy overlapping with destination < source. Copy will happen forwards, up
// to the out-of-bounds access.
r.builder().BlankMemory();
memcpy(mem + last_5_bytes, data, 5);
CHECK_EQ(0xDEADBEEF, r.Call(0, last_5_bytes, kWasmPageSize));
- CheckMemoryEquals(r.builder(), 0, {11, 22, 33, 44, 55});
+ CheckMemoryEquals(&r.builder(), 0, {11, 22, 33, 44, 55});
// Copy overlapping with source < destination. Copy would happen backwards,
// but the first byte to copy is out-of-bounds, so no data should be written.
r.builder().BlankMemory();
memcpy(mem, data, 5);
CHECK_EQ(0xDEADBEEF, r.Call(last_5_bytes, 0, kWasmPageSize));
- CheckMemoryEquals(r.builder(), last_5_bytes, {0, 0, 0, 0, 0});
+ CheckMemoryEquals(&r.builder(), last_5_bytes, {0, 0, 0, 0, 0});
}
WASM_EXEC_TEST(MemoryCopyOutOfBounds) {
@@ -265,15 +262,15 @@ WASM_EXEC_TEST(MemoryFill) {
WASM_MEMORY_FILL(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1), WASM_GET_LOCAL(2)),
kExprI32Const, 0);
CHECK_EQ(0, r.Call(1, 33, 5));
- CheckMemoryEqualsFollowedByZeroes(r.builder(), {0, 33, 33, 33, 33, 33});
+ CheckMemoryEqualsFollowedByZeroes(&r.builder(), {0, 33, 33, 33, 33, 33});
CHECK_EQ(0, r.Call(4, 66, 4));
- CheckMemoryEqualsFollowedByZeroes(r.builder(),
+ CheckMemoryEqualsFollowedByZeroes(&r.builder(),
{0, 33, 33, 33, 66, 66, 66, 66});
// Fill 0 bytes does nothing.
CHECK_EQ(0, r.Call(4, 66, 0));
- CheckMemoryEqualsFollowedByZeroes(r.builder(),
+ CheckMemoryEqualsFollowedByZeroes(&r.builder(),
{0, 33, 33, 33, 66, 66, 66, 66});
// Fill 0 at end of memory region is OK.
@@ -290,7 +287,7 @@ WASM_EXEC_TEST(MemoryFillValueWrapsToByte) {
kExprI32Const, 0);
CHECK_EQ(0, r.Call(0, 1000, 3));
const byte expected = 1000 & 255;
- CheckMemoryEqualsFollowedByZeroes(r.builder(),
+ CheckMemoryEqualsFollowedByZeroes(&r.builder(),
{expected, expected, expected});
}
@@ -304,7 +301,7 @@ WASM_EXEC_TEST(MemoryFillOutOfBoundsData) {
kExprI32Const, 0);
const byte v = 123;
CHECK_EQ(0xDEADBEEF, r.Call(kWasmPageSize - 5, v, 999));
- CheckMemoryEquals(r.builder(), kWasmPageSize - 6, {0, v, v, v, v, v});
+ CheckMemoryEquals(&r.builder(), kWasmPageSize - 6, {0, v, v, v, v, v});
}
WASM_EXEC_TEST(MemoryFillOutOfBounds) {
@@ -408,14 +405,13 @@ void CheckTable(Isolate* isolate, Handle<WasmTableObject> table, Args... args) {
template <typename WasmRunner, typename... Args>
void CheckTableCall(Isolate* isolate, Handle<WasmTableObject> table,
- WasmRunner& r, // NOLINT(runtime/references)
- uint32_t function_index, Args... args) {
+ WasmRunner* r, uint32_t function_index, Args... args) {
uint32_t args_length = static_cast<uint32_t>(sizeof...(args));
CHECK_EQ(table->current_length(), args_length);
double expected[] = {args...};
for (uint32_t i = 0; i < args_length; ++i) {
Handle<Object> buffer[] = {isolate->factory()->NewNumber(i)};
- r.CheckCallApplyViaJS(expected[i], function_index, buffer, 1);
+ r->CheckCallApplyViaJS(expected[i], function_index, buffer, 1);
}
}
} // namespace
@@ -462,7 +458,7 @@ void TestTableInitElems(ExecutionTier execution_tier, int table_index) {
isolate);
const double null = 0xDEADBEEF;
- CheckTableCall(isolate, table, r, call_index, null, null, null, null, null);
+ CheckTableCall(isolate, table, &r, call_index, null, null, null, null, null);
// 0 count is ok in bounds, and at end of regions.
r.CheckCallViaJS(0, 0, 0, 0);
@@ -471,19 +467,19 @@ void TestTableInitElems(ExecutionTier execution_tier, int table_index) {
// Test actual writes.
r.CheckCallViaJS(0, 0, 0, 1);
- CheckTableCall(isolate, table, r, call_index, 0, null, null, null, null);
+ CheckTableCall(isolate, table, &r, call_index, 0, null, null, null, null);
r.CheckCallViaJS(0, 0, 0, 2);
- CheckTableCall(isolate, table, r, call_index, 0, 1, null, null, null);
+ CheckTableCall(isolate, table, &r, call_index, 0, 1, null, null, null);
r.CheckCallViaJS(0, 0, 0, 3);
- CheckTableCall(isolate, table, r, call_index, 0, 1, 2, null, null);
+ CheckTableCall(isolate, table, &r, call_index, 0, 1, 2, null, null);
r.CheckCallViaJS(0, 3, 0, 2);
- CheckTableCall(isolate, table, r, call_index, 0, 1, 2, 0, 1);
+ CheckTableCall(isolate, table, &r, call_index, 0, 1, 2, 0, 1);
r.CheckCallViaJS(0, 3, 1, 2);
- CheckTableCall(isolate, table, r, call_index, 0, 1, 2, 1, 2);
+ CheckTableCall(isolate, table, &r, call_index, 0, 1, 2, 1, 2);
r.CheckCallViaJS(0, 3, 2, 2);
- CheckTableCall(isolate, table, r, call_index, 0, 1, 2, 2, 3);
+ CheckTableCall(isolate, table, &r, call_index, 0, 1, 2, 2, 3);
r.CheckCallViaJS(0, 3, 3, 2);
- CheckTableCall(isolate, table, r, call_index, 0, 1, 2, 3, 4);
+ CheckTableCall(isolate, table, &r, call_index, 0, 1, 2, 3, 4);
}
WASM_EXEC_TEST(TableInitElems0) { TestTableInitElems(execution_tier, 0); }
@@ -534,15 +530,15 @@ void TestTableInitOob(ExecutionTier execution_tier, int table_index) {
isolate);
const double null = 0xDEADBEEF;
- CheckTableCall(isolate, table, r, call_index, null, null, null, null, null);
+ CheckTableCall(isolate, table, &r, call_index, null, null, null, null, null);
// Write all values up to the out-of-bounds write.
r.CheckCallViaJS(0xDEADBEEF, 3, 0, 3);
- CheckTableCall(isolate, table, r, call_index, null, null, null, 0, 1);
+ CheckTableCall(isolate, table, &r, call_index, null, null, null, 0, 1);
// Write all values up to the out-of-bounds read.
r.CheckCallViaJS(0xDEADBEEF, 0, 3, 3);
- CheckTableCall(isolate, table, r, call_index, 3, 4, null, 0, 1);
+ CheckTableCall(isolate, table, &r, call_index, 3, 4, null, 0, 1);
// 0-count is never oob.
r.CheckCallViaJS(0, kTableSize + 1, 0, 0);
@@ -696,21 +692,21 @@ void TestTableCopyCalls(ExecutionTier execution_tier, int table_dst,
isolate);
if (table_dst == table_src) {
- CheckTableCall(isolate, table, r, call_index, 0, 1, 2, 3, 4);
+ CheckTableCall(isolate, table, &r, call_index, 0, 1, 2, 3, 4);
r.CheckCallViaJS(0, 0, 1, 1);
- CheckTableCall(isolate, table, r, call_index, 1, 1, 2, 3, 4);
+ CheckTableCall(isolate, table, &r, call_index, 1, 1, 2, 3, 4);
r.CheckCallViaJS(0, 0, 1, 2);
- CheckTableCall(isolate, table, r, call_index, 1, 2, 2, 3, 4);
+ CheckTableCall(isolate, table, &r, call_index, 1, 2, 2, 3, 4);
r.CheckCallViaJS(0, 3, 0, 2);
- CheckTableCall(isolate, table, r, call_index, 1, 2, 2, 1, 2);
+ CheckTableCall(isolate, table, &r, call_index, 1, 2, 2, 1, 2);
} else {
- CheckTableCall(isolate, table, r, call_index, 0, 1, 2, 3, 4);
+ CheckTableCall(isolate, table, &r, call_index, 0, 1, 2, 3, 4);
r.CheckCallViaJS(0, 0, 1, 1);
- CheckTableCall(isolate, table, r, call_index, 1, 1, 2, 3, 4);
+ CheckTableCall(isolate, table, &r, call_index, 1, 1, 2, 3, 4);
r.CheckCallViaJS(0, 0, 1, 2);
- CheckTableCall(isolate, table, r, call_index, 1, 2, 2, 3, 4);
+ CheckTableCall(isolate, table, &r, call_index, 1, 2, 2, 3, 4);
r.CheckCallViaJS(0, 3, 0, 2);
- CheckTableCall(isolate, table, r, call_index, 1, 2, 2, 0, 1);
+ CheckTableCall(isolate, table, &r, call_index, 1, 2, 2, 0, 1);
}
}
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-interpreter.cc b/deps/v8/test/cctest/wasm/test-run-wasm-interpreter.cc
index 4c1842b537..1b64135cb8 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-interpreter.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-interpreter.cc
@@ -278,7 +278,7 @@ TEST(Breakpoint_I32Add) {
static const int kNumBreakpoints = 3;
byte code[] = {WASM_I32_ADD(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1))};
std::unique_ptr<int[]> offsets =
- Find(code, sizeof(code), kNumBreakpoints, kExprGetLocal, kExprGetLocal,
+ Find(code, sizeof(code), kNumBreakpoints, kExprLocalGet, kExprLocalGet,
kExprI32Add);
WasmRunner<int32_t, uint32_t, uint32_t> r(ExecutionTier::kInterpreter);
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-module.cc b/deps/v8/test/cctest/wasm/test-run-wasm-module.cc
index 51d97650d4..5f70ab6c7b 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-module.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-module.cc
@@ -11,7 +11,6 @@
#include "src/utils/version.h"
#include "src/wasm/module-decoder.h"
#include "src/wasm/wasm-engine.h"
-#include "src/wasm/wasm-memory.h"
#include "src/wasm/wasm-module-builder.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-objects-inl.h"
@@ -943,154 +942,6 @@ TEST(MemoryWithOOBEmptyDataSegment) {
Cleanup();
}
-// Utility to free the allocated memory for a buffer that is manually
-// externalized in a test.
-struct ManuallyExternalizedBuffer {
- Isolate* isolate_;
- Handle<JSArrayBuffer> buffer_;
- void* allocation_base_;
- size_t allocation_length_;
- bool const should_free_;
-
- ManuallyExternalizedBuffer(JSArrayBuffer buffer, Isolate* isolate)
- : isolate_(isolate),
- buffer_(buffer, isolate),
- allocation_base_(buffer.allocation_base()),
- allocation_length_(buffer.allocation_length()),
- should_free_(!isolate_->wasm_engine()->memory_tracker()->IsWasmMemory(
- buffer.backing_store())) {
- if (!isolate_->wasm_engine()->memory_tracker()->IsWasmMemory(
- buffer.backing_store())) {
- v8::Utils::ToLocal(buffer_)->Externalize();
- }
- }
- ~ManuallyExternalizedBuffer() {
- if (should_free_) {
- buffer_->FreeBackingStoreFromMainThread();
- }
- }
-};
-
-TEST(Run_WasmModule_Buffer_Externalized_GrowMem) {
- {
- Isolate* isolate = CcTest::InitIsolateOnce();
- HandleScope scope(isolate);
- TestSignatures sigs;
- v8::internal::AccountingAllocator allocator;
- Zone zone(&allocator, ZONE_NAME);
-
- WasmModuleBuilder* builder = new (&zone) WasmModuleBuilder(&zone);
- WasmFunctionBuilder* f = builder->AddFunction(sigs.i_v());
- ExportAsMain(f);
- byte code[] = {WASM_GROW_MEMORY(WASM_I32V_1(6)), WASM_DROP,
- WASM_MEMORY_SIZE};
- EMIT_CODE_WITH_END(f, code);
-
- ZoneBuffer buffer(&zone);
- builder->WriteTo(&buffer);
- testing::SetupIsolateForWasmModule(isolate);
- ErrorThrower thrower(isolate, "Test");
- const Handle<WasmInstanceObject> instance =
- CompileAndInstantiateForTesting(
- isolate, &thrower, ModuleWireBytes(buffer.begin(), buffer.end()))
- .ToHandleChecked();
- Handle<WasmMemoryObject> memory_object(instance->memory_object(), isolate);
-
- // Fake the Embedder flow by externalizing the array buffer.
- ManuallyExternalizedBuffer buffer1(memory_object->array_buffer(), isolate);
-
- // Grow using the API.
- uint32_t result = WasmMemoryObject::Grow(isolate, memory_object, 4);
- CHECK_EQ(16, result);
- CHECK(buffer1.buffer_->was_detached()); // growing always detaches
- CHECK_EQ(0, buffer1.buffer_->byte_length());
-
- CHECK_NE(*buffer1.buffer_, memory_object->array_buffer());
-
- // Fake the Embedder flow by externalizing the array buffer.
- ManuallyExternalizedBuffer buffer2(memory_object->array_buffer(), isolate);
-
- // Grow using an internal WASM bytecode.
- result = testing::RunWasmModuleForTesting(isolate, instance, 0, nullptr);
- CHECK_EQ(26, result);
- CHECK(buffer2.buffer_->was_detached()); // growing always detaches
- CHECK_EQ(0, buffer2.buffer_->byte_length());
- CHECK_NE(*buffer2.buffer_, memory_object->array_buffer());
- }
- Cleanup();
-}
-
-TEST(Run_WasmModule_Buffer_Externalized_GrowMemMemSize) {
- {
- Isolate* isolate = CcTest::InitIsolateOnce();
- HandleScope scope(isolate);
- Handle<JSArrayBuffer> buffer;
- CHECK(wasm::NewArrayBuffer(isolate, 16 * kWasmPageSize).ToHandle(&buffer));
- Handle<WasmMemoryObject> mem_obj =
- WasmMemoryObject::New(isolate, buffer, 100);
- auto const contents = v8::Utils::ToLocal(buffer)->Externalize();
- int32_t result = WasmMemoryObject::Grow(isolate, mem_obj, 0);
- CHECK_EQ(16, result);
- constexpr bool is_wasm_memory = true;
- const JSArrayBuffer::Allocation allocation{contents.AllocationBase(),
- contents.AllocationLength(),
- contents.Data(), is_wasm_memory};
- JSArrayBuffer::FreeBackingStore(isolate, allocation);
- }
- Cleanup();
-}
-
-TEST(Run_WasmModule_Buffer_Externalized_Detach) {
- {
- // Regression test for
- // https://bugs.chromium.org/p/chromium/issues/detail?id=731046
- Isolate* isolate = CcTest::InitIsolateOnce();
- HandleScope scope(isolate);
- Handle<JSArrayBuffer> buffer;
- CHECK(wasm::NewArrayBuffer(isolate, 16 * kWasmPageSize).ToHandle(&buffer));
- auto const contents = v8::Utils::ToLocal(buffer)->Externalize();
- wasm::DetachMemoryBuffer(isolate, buffer, true);
- constexpr bool is_wasm_memory = true;
- const JSArrayBuffer::Allocation allocation{contents.AllocationBase(),
- contents.AllocationLength(),
- contents.Data(), is_wasm_memory};
- JSArrayBuffer::FreeBackingStore(isolate, allocation);
- }
- Cleanup();
-}
-
-TEST(Run_WasmModule_Buffer_Externalized_Regression_UseAfterFree) {
- // Regresion test for https://crbug.com/813876
- Isolate* isolate = CcTest::InitIsolateOnce();
- HandleScope scope(isolate);
- Handle<JSArrayBuffer> buffer;
- CHECK(wasm::NewArrayBuffer(isolate, 16 * kWasmPageSize).ToHandle(&buffer));
- Handle<WasmMemoryObject> mem = WasmMemoryObject::New(isolate, buffer, 128);
- auto contents = v8::Utils::ToLocal(buffer)->Externalize();
- WasmMemoryObject::Grow(isolate, mem, 0);
- constexpr bool is_wasm_memory = true;
- JSArrayBuffer::FreeBackingStore(
- isolate, JSArrayBuffer::Allocation(contents.AllocationBase(),
- contents.AllocationLength(),
- contents.Data(), is_wasm_memory));
- // Make sure we can write to the buffer without crashing
- uint32_t* int_buffer =
- reinterpret_cast<uint32_t*>(mem->array_buffer().backing_store());
- int_buffer[0] = 0;
-}
-
-#if V8_TARGET_ARCH_64_BIT
-TEST(Run_WasmModule_Reclaim_Memory) {
- // Make sure we can allocate memories without running out of address space.
- Isolate* isolate = CcTest::InitIsolateOnce();
- Handle<JSArrayBuffer> buffer;
- for (int i = 0; i < 256; ++i) {
- HandleScope scope(isolate);
- CHECK(NewArrayBuffer(isolate, kWasmPageSize).ToHandle(&buffer));
- }
-}
-#endif
-
TEST(AtomicOpDisassembly) {
{
EXPERIMENTAL_FLAG_SCOPE(threads);
@@ -1118,12 +969,15 @@ TEST(AtomicOpDisassembly) {
ErrorThrower thrower(isolate, "Test");
auto enabled_features = WasmFeaturesFromIsolate(isolate);
- MaybeHandle<WasmModuleObject> module_object =
- isolate->wasm_engine()->SyncCompile(
- isolate, enabled_features, &thrower,
- ModuleWireBytes(buffer.begin(), buffer.end()));
+ Handle<WasmModuleObject> module_object =
+ isolate->wasm_engine()
+ ->SyncCompile(isolate, enabled_features, &thrower,
+ ModuleWireBytes(buffer.begin(), buffer.end()))
+ .ToHandleChecked();
+ NativeModule* native_module = module_object->native_module();
+ ModuleWireBytes wire_bytes(native_module->wire_bytes());
- module_object.ToHandleChecked()->DisassembleFunction(0);
+ DisassembleWasmFunction(native_module->module(), wire_bytes, 0);
}
Cleanup();
}
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-simd.cc b/deps/v8/test/cctest/wasm/test-run-wasm-simd.cc
index b48321df40..d76c4c3643 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-simd.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-simd.cc
@@ -184,13 +184,20 @@ T UnsignedGreaterEqual(T a, T b) {
template <typename T>
T LogicalShiftLeft(T a, int shift) {
using UnsignedT = typename std::make_unsigned<T>::type;
- return static_cast<UnsignedT>(a) << shift;
+ return static_cast<UnsignedT>(a) << (shift % (sizeof(T) * 8));
}
template <typename T>
T LogicalShiftRight(T a, int shift) {
using UnsignedT = typename std::make_unsigned<T>::type;
- return static_cast<UnsignedT>(a) >> shift;
+ return static_cast<UnsignedT>(a) >> (shift % (sizeof(T) * 8));
+}
+
+// Define our own ArithmeticShiftRight instead of using the one from utils.h
+// because the shift amount needs to be taken modulo lane width.
+template <typename T>
+T ArithmeticShiftRight(T a, int shift) {
+ return a >> (shift % (sizeof(T) * 8));
}
template <typename T>
@@ -279,7 +286,7 @@ T Sqrt(T a) {
return std::sqrt(a);
}
-#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
+#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_IA32
// only used for F64x2 tests below
int64_t Equal(double a, double b) { return a == b ? -1 : 0; }
@@ -292,14 +299,106 @@ int64_t GreaterEqual(double a, double b) { return a >= b ? -1 : 0; }
int64_t Less(double a, double b) { return a < b ? -1 : 0; }
int64_t LessEqual(double a, double b) { return a <= b ? -1 : 0; }
+
+#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
+// Only used for qfma and qfms tests below.
+
+// FMOperation holds the params (a, b, c) for a Multiply-Add or
+// Multiply-Subtract operation, along with the expected result when the
+// operation is fused (rounded only once for the entire operation) and when it
+// is unfused (rounded after the multiply and again after the add/subtract).
+template <typename T>
+struct FMOperation {
+ const T a;
+ const T b;
+ const T c;
+ const T fused_result;
+ const T unfused_result;
+};
+
+// large_n is a large number that overflows T when multiplied by itself; this
+// makes it a useful constant for testing fused/unfused behavior.
+template <typename T>
+constexpr T large_n = T(0);
+
+template <>
+constexpr double large_n<double> = 1e200;
+
+template <>
+constexpr float large_n<float> = 1e20;
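
The fused/unfused divergence these constants provoke can be reproduced with std::fma, which rounds once, against a separate multiply and add, which round twice. A minimal sketch (plain C++, assuming IEEE-754 semantics and no fast-math contraction):

    #include <cmath>
    #include <cstdio>
    #include <limits>

    int main() {
      const float inf = std::numeric_limits<float>::infinity();
      const float big = 1e20f;  // big * big overflows float
      // Fused: the exact product 1e40 plus -inf is -inf, rounded once.
      float fused = std::fma(big, big, -inf);
      // Unfused: big * big rounds to +inf first, and -inf + inf is NaN.
      float unfused = -inf + big * big;
      std::printf("fused=%f unfused=%f\n", fused, unfused);
      return 0;
    }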
+
+// Fused Multiply-Add performs a + b * c.
+template <typename T>
+static constexpr FMOperation<T> qfma_array[] = {
+ {1.0f, 2.0f, 3.0f, 7.0f, 7.0f},
+ // fused: a + b * c = -inf + (positive overflow) = -inf
+ // unfused: a + b * c = -inf + inf = NaN
+ {-std::numeric_limits<T>::infinity(), large_n<T>, large_n<T>,
+ -std::numeric_limits<T>::infinity(), std::numeric_limits<T>::quiet_NaN()},
+ // fused: a + b * c = inf + (negative overflow) = inf
+ // unfused: a + b * c = inf + -inf = NaN
+ {std::numeric_limits<T>::infinity(), -large_n<T>, large_n<T>,
+ std::numeric_limits<T>::infinity(), std::numeric_limits<T>::quiet_NaN()},
+ // NaN
+ {std::numeric_limits<T>::quiet_NaN(), 2.0f, 3.0f,
+ std::numeric_limits<T>::quiet_NaN(), std::numeric_limits<T>::quiet_NaN()},
+ // -NaN
+ {-std::numeric_limits<T>::quiet_NaN(), 2.0f, 3.0f,
+ std::numeric_limits<T>::quiet_NaN(), std::numeric_limits<T>::quiet_NaN()}};
+
+template <typename T>
+static constexpr Vector<const FMOperation<T>> qfma_vector() {
+ return ArrayVector(qfma_array<T>);
+}
+
+// Fused Multiply-Subtract performs a - b * c.
+template <typename T>
+static constexpr FMOperation<T> qfms_array[]{
+ {1.0f, 2.0f, 3.0f, -5.0f, -5.0f},
+ // fused: a - b * c = inf - (positive overflow) = inf
+ // unfused: a - b * c = inf - inf = NaN
+ {std::numeric_limits<T>::infinity(), large_n<T>, large_n<T>,
+ std::numeric_limits<T>::infinity(), std::numeric_limits<T>::quiet_NaN()},
+ // fused: a - b * c = -inf - (negative overflow) = -inf
+ // unfused: a - b * c = -inf - -inf = NaN
+ {-std::numeric_limits<T>::infinity(), -large_n<T>, large_n<T>,
+ -std::numeric_limits<T>::infinity(), std::numeric_limits<T>::quiet_NaN()},
+ // NaN
+ {std::numeric_limits<T>::quiet_NaN(), 2.0f, 3.0f,
+ std::numeric_limits<T>::quiet_NaN(), std::numeric_limits<T>::quiet_NaN()},
+ // -NaN
+ {-std::numeric_limits<T>::quiet_NaN(), 2.0f, 3.0f,
+ std::numeric_limits<T>::quiet_NaN(), std::numeric_limits<T>::quiet_NaN()}};
+
+template <typename T>
+static constexpr Vector<const FMOperation<T>> qfms_vector() {
+ return ArrayVector(qfms_array<T>);
+}
+
+// Fused results are only expected when the FMA3 feature is available and the
+// code runs on TurboFan.
+bool ExpectFused(ExecutionTier tier) {
+#ifdef V8_TARGET_ARCH_X64
+ return CpuFeatures::IsSupported(FMA3) && (tier == ExecutionTier::kTurbofan);
+#else
+ return (tier == ExecutionTier::kTurbofan);
+#endif
+}
#endif // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
+#endif // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_IA32
} // namespace
-#define WASM_SIMD_CHECK_LANE(TYPE, value, LANE_TYPE, lane_value, lane_index) \
- WASM_IF(WASM_##LANE_TYPE##_NE(WASM_GET_LOCAL(lane_value), \
- WASM_SIMD_##TYPE##_EXTRACT_LANE( \
- lane_index, WASM_GET_LOCAL(value))), \
+#define WASM_SIMD_CHECK_LANE_S(TYPE, value, LANE_TYPE, lane_value, lane_index) \
+ WASM_IF(WASM_##LANE_TYPE##_NE(WASM_GET_LOCAL(lane_value), \
+ WASM_SIMD_##TYPE##_EXTRACT_LANE( \
+ lane_index, WASM_GET_LOCAL(value))), \
+ WASM_RETURN1(WASM_ZERO))
+
+// Unsigned extracts are only available for the I8x16 and I16x8 types.
+#define WASM_SIMD_CHECK_LANE_U(TYPE, value, LANE_TYPE, lane_value, lane_index) \
+ WASM_IF(WASM_##LANE_TYPE##_NE(WASM_GET_LOCAL(lane_value), \
+ WASM_SIMD_##TYPE##_EXTRACT_LANE_U( \
+ lane_index, WASM_GET_LOCAL(value))), \
WASM_RETURN1(WASM_ZERO))
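
The _S/_U pair reflects how a sub-word lane is widened to i32 on extraction: sign-extension versus zero-extension. A minimal sketch of the difference (plain C++; illustrative):

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint8_t lane = 0xFF;
      int32_t extract_s = static_cast<int8_t>(lane);  // -1, sign-extended
      int32_t extract_u = lane;                       // 255, zero-extended
      std::printf("%d %d\n", extract_s, extract_u);
      return 0;
    }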
#define TO_BYTE(val) static_cast<byte>(val)
@@ -338,13 +437,17 @@ int64_t LessEqual(double a, double b) { return a <= b ? -1 : 0; }
#define WASM_SIMD_I16x8_SPLAT(x) WASM_SIMD_SPLAT(I16x8, x)
#define WASM_SIMD_I16x8_EXTRACT_LANE(lane, x) \
- x, WASM_SIMD_OP(kExprI16x8ExtractLane), TO_BYTE(lane)
+ x, WASM_SIMD_OP(kExprI16x8ExtractLaneS), TO_BYTE(lane)
+#define WASM_SIMD_I16x8_EXTRACT_LANE_U(lane, x) \
+ x, WASM_SIMD_OP(kExprI16x8ExtractLaneU), TO_BYTE(lane)
#define WASM_SIMD_I16x8_REPLACE_LANE(lane, x, y) \
x, y, WASM_SIMD_OP(kExprI16x8ReplaceLane), TO_BYTE(lane)
#define WASM_SIMD_I8x16_SPLAT(x) WASM_SIMD_SPLAT(I8x16, x)
#define WASM_SIMD_I8x16_EXTRACT_LANE(lane, x) \
- x, WASM_SIMD_OP(kExprI8x16ExtractLane), TO_BYTE(lane)
+ x, WASM_SIMD_OP(kExprI8x16ExtractLaneS), TO_BYTE(lane)
+#define WASM_SIMD_I8x16_EXTRACT_LANE_U(lane, x) \
+ x, WASM_SIMD_OP(kExprI8x16ExtractLaneU), TO_BYTE(lane)
#define WASM_SIMD_I8x16_REPLACE_LANE(lane, x, y) \
x, y, WASM_SIMD_OP(kExprI8x16ReplaceLane), TO_BYTE(lane)
@@ -357,8 +460,17 @@ int64_t LessEqual(double a, double b) { return a <= b ? -1 : 0; }
#define WASM_SIMD_LOAD_MEM(index) \
index, WASM_SIMD_OP(kExprS128LoadMem), ZERO_ALIGNMENT, ZERO_OFFSET
+#define WASM_SIMD_LOAD_MEM_OFFSET(offset, index) \
+ index, WASM_SIMD_OP(kExprS128LoadMem), ZERO_ALIGNMENT, offset
#define WASM_SIMD_STORE_MEM(index, val) \
index, val, WASM_SIMD_OP(kExprS128StoreMem), ZERO_ALIGNMENT, ZERO_OFFSET
+#define WASM_SIMD_STORE_MEM_OFFSET(offset, index, val) \
+ index, val, WASM_SIMD_OP(kExprS128StoreMem), ZERO_ALIGNMENT, offset
+
+#define WASM_SIMD_F64x2_QFMA(a, b, c) a, b, c, WASM_SIMD_OP(kExprF64x2Qfma)
+#define WASM_SIMD_F64x2_QFMS(a, b, c) a, b, c, WASM_SIMD_OP(kExprF64x2Qfms)
+#define WASM_SIMD_F32x4_QFMA(a, b, c) a, b, c, WASM_SIMD_OP(kExprF32x4Qfma)
+#define WASM_SIMD_F32x4_QFMS(a, b, c) a, b, c, WASM_SIMD_OP(kExprF32x4Qfms)
// Runs tests of compiled code, using the interpreter as a reference.
#define WASM_SIMD_COMPILED_TEST(name) \
@@ -589,10 +701,15 @@ void RunF32x4UnOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
WASM_SIMD_TEST(F32x4Abs) {
RunF32x4UnOpTest(execution_tier, lower_simd, kExprF32x4Abs, std::abs);
}
+
WASM_SIMD_TEST(F32x4Neg) {
RunF32x4UnOpTest(execution_tier, lower_simd, kExprF32x4Neg, Negate);
}
+WASM_SIMD_TEST(F32x4Sqrt) {
+ RunF32x4UnOpTest(execution_tier, lower_simd, kExprF32x4Sqrt, Sqrt);
+}
+
WASM_SIMD_TEST(F32x4RecipApprox) {
RunF32x4UnOpTest(execution_tier, lower_simd, kExprF32x4RecipApprox,
base::Recip, false /* !exact */);
@@ -725,6 +842,57 @@ WASM_SIMD_TEST(F32x4Le) {
}
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
+WASM_SIMD_TEST_NO_LOWERING(F32x4Qfma) {
+ WasmRunner<int32_t, float, float, float> r(execution_tier, lower_simd);
+  // Set up a global to hold the output.
+ float* g = r.builder().AddGlobal<float>(kWasmS128);
+  // Build fn to splat test values, perform the qfma op, and write the result.
+ byte value1 = 0, value2 = 1, value3 = 2;
+ BUILD(r,
+ WASM_SET_GLOBAL(0, WASM_SIMD_F32x4_QFMA(
+ WASM_SIMD_F32x4_SPLAT(WASM_GET_LOCAL(value1)),
+ WASM_SIMD_F32x4_SPLAT(WASM_GET_LOCAL(value2)),
+ WASM_SIMD_F32x4_SPLAT(WASM_GET_LOCAL(value3)))),
+ WASM_ONE);
+
+ for (FMOperation<float> x : qfma_vector<float>()) {
+ r.Call(x.a, x.b, x.c);
+ float expected =
+ ExpectFused(execution_tier) ? x.fused_result : x.unfused_result;
+ for (int i = 0; i < 4; i++) {
+ float actual = ReadLittleEndianValue<float>(&g[i]);
+ CheckFloatResult(x.a, x.b, expected, actual, true /* exact */);
+ }
+ }
+}
+
+WASM_SIMD_TEST_NO_LOWERING(F32x4Qfms) {
+ WasmRunner<int32_t, float, float, float> r(execution_tier, lower_simd);
+  // Set up a global to hold the output.
+ float* g = r.builder().AddGlobal<float>(kWasmS128);
+  // Build fn to splat test values, perform the qfms op, and write the result.
+ byte value1 = 0, value2 = 1, value3 = 2;
+ BUILD(r,
+ WASM_SET_GLOBAL(0, WASM_SIMD_F32x4_QFMS(
+ WASM_SIMD_F32x4_SPLAT(WASM_GET_LOCAL(value1)),
+ WASM_SIMD_F32x4_SPLAT(WASM_GET_LOCAL(value2)),
+ WASM_SIMD_F32x4_SPLAT(WASM_GET_LOCAL(value3)))),
+ WASM_ONE);
+
+ for (FMOperation<float> x : qfms_vector<float>()) {
+ r.Call(x.a, x.b, x.c);
+ float expected =
+ ExpectFused(execution_tier) ? x.fused_result : x.unfused_result;
+ for (int i = 0; i < 4; i++) {
+ float actual = ReadLittleEndianValue<float>(&g[i]);
+ CheckFloatResult(x.a, x.b, expected, actual, true /* exact */);
+ }
+ }
+}
+#endif // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
+
+#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_IA32
+#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
WASM_SIMD_TEST_NO_LOWERING(I64x2Splat) {
WasmRunner<int32_t, int64_t> r(execution_tier, lower_simd);
// Set up a global to hold output vector.
@@ -803,7 +971,8 @@ WASM_SIMD_TEST_NO_LOWERING(I64x2Neg) {
void RunI64x2ShiftOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
WasmOpcode opcode, Int64ShiftOp expected_op) {
- for (int shift = 1; shift < 64; shift++) {
+  // Intentionally also test a shift by 64, which should be a no-op.
+ for (int shift = 1; shift <= 64; shift++) {
WasmRunner<int32_t, int64_t> r(execution_tier, lower_simd);
int64_t* g = r.builder().AddGlobal<int64_t>(kWasmS128);
byte value = 0;
@@ -918,6 +1087,7 @@ WASM_SIMD_TEST_NO_LOWERING(I64x2GeU) {
RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2GeU,
UnsignedGreaterEqual);
}
+#endif // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
WASM_SIMD_TEST_NO_LOWERING(F64x2Splat) {
WasmRunner<int32_t, double> r(execution_tier, lower_simd);
@@ -941,6 +1111,7 @@ WASM_SIMD_TEST_NO_LOWERING(F64x2Splat) {
}
}
+#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
WASM_SIMD_TEST_NO_LOWERING(F64x2ExtractLaneWithI64x2) {
WasmRunner<int64_t> r(execution_tier, lower_simd);
BUILD(r, WASM_IF_ELSE_L(
@@ -950,6 +1121,7 @@ WASM_SIMD_TEST_NO_LOWERING(F64x2ExtractLaneWithI64x2) {
WASM_I64V(1), WASM_I64V(0)));
CHECK_EQ(1, r.Call());
}
+#endif // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
WASM_SIMD_TEST_NO_LOWERING(F64x2ExtractLane) {
WasmRunner<double, double> r(execution_tier, lower_simd);
@@ -973,6 +1145,7 @@ WASM_SIMD_TEST_NO_LOWERING(F64x2ExtractLane) {
}
}
+#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
WASM_SIMD_TEST_NO_LOWERING(I64x2ExtractWithF64x2) {
WasmRunner<int64_t> r(execution_tier, lower_simd);
BUILD(r, WASM_IF_ELSE_L(
@@ -982,6 +1155,7 @@ WASM_SIMD_TEST_NO_LOWERING(I64x2ExtractWithF64x2) {
WASM_I64V(1), WASM_I64V(0)));
CHECK_EQ(1, r.Call());
}
+#endif // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
WASM_SIMD_TEST_NO_LOWERING(F64x2ReplaceLane) {
WasmRunner<int32_t> r(execution_tier, lower_simd);
@@ -1124,6 +1298,10 @@ WASM_SIMD_TEST_NO_LOWERING(F64x2Neg) {
RunF64x2UnOpTest(execution_tier, lower_simd, kExprF64x2Neg, Negate);
}
+WASM_SIMD_TEST_NO_LOWERING(F64x2Sqrt) {
+ RunF64x2UnOpTest(execution_tier, lower_simd, kExprF64x2Sqrt, Sqrt);
+}
+
void RunF64x2BinOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
WasmOpcode opcode, DoubleBinOp expected_op) {
WasmRunner<int32_t, double, double> r(execution_tier, lower_simd);
@@ -1249,12 +1427,14 @@ WASM_SIMD_TEST_NO_LOWERING(F64x2Max) {
RunF64x2BinOpTest(execution_tier, lower_simd, kExprF64x2Max, JSMax);
}
-#if V8_TARGET_ARCH_X64
+#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
WASM_SIMD_TEST_NO_LOWERING(I64x2Mul) {
RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2Mul,
base::MulWithWraparound);
}
+#endif // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
+#if V8_TARGET_ARCH_X64
WASM_SIMD_TEST_NO_LOWERING(I64x2MinS) {
RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2MinS, Minimum);
}
@@ -1273,7 +1453,57 @@ WASM_SIMD_TEST_NO_LOWERING(I64x2MaxU) {
UnsignedMaximum);
}
#endif // V8_TARGET_ARCH_X64
+
+#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
+WASM_SIMD_TEST_NO_LOWERING(F64x2Qfma) {
+ WasmRunner<int32_t, double, double, double> r(execution_tier, lower_simd);
+  // Set up a global to hold the output.
+ double* g = r.builder().AddGlobal<double>(kWasmS128);
+  // Build fn to splat test values, perform the qfma op, and write the result.
+ byte value1 = 0, value2 = 1, value3 = 2;
+ BUILD(r,
+ WASM_SET_GLOBAL(0, WASM_SIMD_F64x2_QFMA(
+ WASM_SIMD_F64x2_SPLAT(WASM_GET_LOCAL(value1)),
+ WASM_SIMD_F64x2_SPLAT(WASM_GET_LOCAL(value2)),
+ WASM_SIMD_F64x2_SPLAT(WASM_GET_LOCAL(value3)))),
+ WASM_ONE);
+
+ for (FMOperation<double> x : qfma_vector<double>()) {
+ r.Call(x.a, x.b, x.c);
+ double expected =
+ ExpectFused(execution_tier) ? x.fused_result : x.unfused_result;
+ for (int i = 0; i < 2; i++) {
+ double actual = ReadLittleEndianValue<double>(&g[i]);
+ CheckDoubleResult(x.a, x.b, expected, actual, true /* exact */);
+ }
+ }
+}
+
+WASM_SIMD_TEST_NO_LOWERING(F64x2Qfms) {
+ WasmRunner<int32_t, double, double, double> r(execution_tier, lower_simd);
+  // Set up a global to hold the output.
+ double* g = r.builder().AddGlobal<double>(kWasmS128);
+  // Build fn to splat test values, perform the qfms op, and write the result.
+ byte value1 = 0, value2 = 1, value3 = 2;
+ BUILD(r,
+ WASM_SET_GLOBAL(0, WASM_SIMD_F64x2_QFMS(
+ WASM_SIMD_F64x2_SPLAT(WASM_GET_LOCAL(value1)),
+ WASM_SIMD_F64x2_SPLAT(WASM_GET_LOCAL(value2)),
+ WASM_SIMD_F64x2_SPLAT(WASM_GET_LOCAL(value3)))),
+ WASM_ONE);
+
+ for (FMOperation<double> x : qfms_vector<double>()) {
+ r.Call(x.a, x.b, x.c);
+ double expected =
+ ExpectFused(execution_tier) ? x.fused_result : x.unfused_result;
+ for (int i = 0; i < 2; i++) {
+ double actual = ReadLittleEndianValue<double>(&g[i]);
+ CheckDoubleResult(x.a, x.b, expected, actual, true /* exact */);
+ }
+ }
+}
#endif // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
+#endif // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_IA32
WASM_SIMD_TEST(I32x4Splat) {
WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
@@ -1652,7 +1882,8 @@ WASM_SIMD_TEST(I32x4GeU) {
void RunI32x4ShiftOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
WasmOpcode opcode, Int32ShiftOp expected_op) {
- for (int shift = 1; shift < 32; shift++) {
+  // Intentionally also test a shift by 32, which should be a no-op.
+ for (int shift = 1; shift <= 32; shift++) {
WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
int32_t* g = r.builder().AddGlobal<int32_t>(kWasmS128);
byte value = 0;
@@ -1902,7 +2133,8 @@ WASM_SIMD_TEST(I16x8LeU) {
void RunI16x8ShiftOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
WasmOpcode opcode, Int16ShiftOp expected_op) {
- for (int shift = 1; shift < 16; shift++) {
+  // Intentionally also test a shift by 16, which should be a no-op.
+ for (int shift = 1; shift <= 16; shift++) {
WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
int16_t* g = r.builder().AddGlobal<int16_t>(kWasmS128);
byte value = 0;
@@ -1917,7 +2149,7 @@ void RunI16x8ShiftOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
FOR_INT16_INPUTS(x) {
r.Call(x);
- float expected = expected_op(x, shift);
+ int16_t expected = expected_op(x, shift);
for (int i = 0; i < 8; i++) {
CHECK_EQ(expected, ReadLittleEndianValue<int16_t>(&g[i]));
}
@@ -2118,7 +2350,8 @@ WASM_SIMD_TEST(I8x16Mul) {
void RunI8x16ShiftOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
WasmOpcode opcode, Int8ShiftOp expected_op) {
- for (int shift = 1; shift < 8; shift++) {
+  // Intentionally also test a shift by 8, which should be a no-op.
+ for (int shift = 1; shift <= 8; shift++) {
WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
int8_t* g = r.builder().AddGlobal<int8_t>(kWasmS128);
byte value = 0;
@@ -2184,10 +2417,10 @@ WASM_SIMD_TEST_NO_LOWERING(I8x16ShrU) {
format, WASM_GET_LOCAL(src1), WASM_GET_LOCAL(src2), \
WASM_SIMD_BINOP(kExprI##format##Ne, WASM_GET_LOCAL(mask), \
WASM_GET_LOCAL(zero)))), \
- WASM_SIMD_CHECK_LANE(I##format, mask, I32, val2, 0), \
- WASM_SIMD_CHECK_LANE(I##format, mask, I32, val1, 1), \
- WASM_SIMD_CHECK_LANE(I##format, mask, I32, val1, 2), \
- WASM_SIMD_CHECK_LANE(I##format, mask, I32, val2, 3), WASM_ONE); \
+ WASM_SIMD_CHECK_LANE_S(I##format, mask, I32, val2, 0), \
+ WASM_SIMD_CHECK_LANE_S(I##format, mask, I32, val1, 1), \
+ WASM_SIMD_CHECK_LANE_S(I##format, mask, I32, val1, 2), \
+ WASM_SIMD_CHECK_LANE_S(I##format, mask, I32, val2, 3), WASM_ONE); \
\
CHECK_EQ(1, r.Call(0x12, 0x34)); \
}
@@ -2222,10 +2455,10 @@ WASM_SIMD_SELECT_TEST(8x16)
WASM_SET_LOCAL(mask, WASM_SIMD_SELECT(format, WASM_GET_LOCAL(src1), \
WASM_GET_LOCAL(src2), \
WASM_GET_LOCAL(mask))), \
- WASM_SIMD_CHECK_LANE(I##format, mask, I32, val2, 0), \
- WASM_SIMD_CHECK_LANE(I##format, mask, I32, combined, 1), \
- WASM_SIMD_CHECK_LANE(I##format, mask, I32, combined, 2), \
- WASM_SIMD_CHECK_LANE(I##format, mask, I32, val2, 3), WASM_ONE); \
+ WASM_SIMD_CHECK_LANE_S(I##format, mask, I32, val2, 0), \
+ WASM_SIMD_CHECK_LANE_S(I##format, mask, I32, combined, 1), \
+ WASM_SIMD_CHECK_LANE_S(I##format, mask, I32, combined, 2), \
+ WASM_SIMD_CHECK_LANE_S(I##format, mask, I32, val2, 3), WASM_ONE); \
\
CHECK_EQ(1, r.Call(0x12, 0x34, 0x32)); \
}
@@ -2454,6 +2687,62 @@ WASM_SIMD_TEST(S8x16Concat) {
}
}
+#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM64
+struct SwizzleTestArgs {
+ const Shuffle input;
+ const Shuffle indices;
+ const Shuffle expected;
+};
+
+static constexpr SwizzleTestArgs swizzle_test_args[] = {
+ {{15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0},
+ {15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0},
+ {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}},
+ {{15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0},
+ {15, 0, 14, 1, 13, 2, 12, 3, 11, 4, 10, 5, 9, 6, 8, 7},
+ {0, 15, 1, 14, 2, 13, 3, 12, 4, 11, 5, 10, 6, 9, 7, 8}},
+ {{15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0},
+ {0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30},
+ {15, 13, 11, 9, 7, 5, 3, 1, 0, 0, 0, 0, 0, 0, 0, 0}},
+ // all indices are out of range
+ {{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15},
+ {16, 17, 18, 19, 20, 124, 125, 126, 127, -1, -2, -3, -4, -5, -6, -7},
+ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}};
+
+static constexpr Vector<const SwizzleTestArgs> swizzle_test_vector =
+ ArrayVector(swizzle_test_args);
+
+WASM_SIMD_TEST(S8x16Swizzle) {
+  // RunBinaryLaneOpTest sets up the two globals to be consecutive integers,
+  // [0-15] and [16-31]. Using [0-15] as the indices would not sufficiently
+  // test swizzle, since the expected result is a no-op; using [16-31] would
+  // result in all 0s.
+ WasmRunner<int32_t> r(execution_tier, lower_simd);
+ static const int kElems = kSimd128Size / sizeof(uint8_t);
+ uint8_t* dst = r.builder().AddGlobal<uint8_t>(kWasmS128);
+ uint8_t* src0 = r.builder().AddGlobal<uint8_t>(kWasmS128);
+ uint8_t* src1 = r.builder().AddGlobal<uint8_t>(kWasmS128);
+ BUILD(
+ r,
+ WASM_SET_GLOBAL(0, WASM_SIMD_BINOP(kExprS8x16Swizzle, WASM_GET_GLOBAL(1),
+ WASM_GET_GLOBAL(2))),
+ WASM_ONE);
+
+ for (SwizzleTestArgs si : swizzle_test_vector) {
+ for (int i = 0; i < kElems; i++) {
+ WriteLittleEndianValue<uint8_t>(&src0[i], si.input[i]);
+ WriteLittleEndianValue<uint8_t>(&src1[i], si.indices[i]);
+ }
+
+ CHECK_EQ(1, r.Call());
+
+ for (int i = 0; i < kElems; i++) {
+ CHECK_EQ(ReadLittleEndianValue<uint8_t>(&dst[i]), si.expected[i]);
+ }
+ }
+}
+#endif // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM64
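
The semantics the table above encodes can be stated as a scalar reference: each output lane selects input[indices[i]], and any out-of-range index yields 0 instead of trapping. A sketch (plain C++; the helper name is illustrative):

    #include <array>
    #include <cstdint>

    using Lanes = std::array<uint8_t, 16>;

    Lanes SwizzleRef(const Lanes& input, const Lanes& indices) {
      Lanes result{};
      for (size_t i = 0; i < 16; ++i) {
        // Indices >= 16 (including bytes that wrapped from negative values)
        // select 0 rather than trapping.
        result[i] = indices[i] < 16 ? input[indices[i]] : 0;
      }
      return result;
    }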
+
// Combine 3 shuffles a, b, and c by applying both a and b and then applying c
// to those two results.
Shuffle Combine(const Shuffle& a, const Shuffle& b, const Shuffle& c) {
@@ -2487,7 +2776,7 @@ void AppendShuffle(const Shuffle& shuffle, std::vector<byte>* buffer) {
for (size_t i = 0; i < kSimd128Size; ++i) buffer->push_back((shuffle[i]));
}
-void BuildShuffle(std::vector<Shuffle>& shuffles, // NOLINT(runtime/references)
+void BuildShuffle(const std::vector<Shuffle>& shuffles,
std::vector<byte>* buffer) {
// Perform the leaf shuffles on globals 0 and 1.
size_t row_index = (shuffles.size() - 1) / 2;
@@ -2504,7 +2793,7 @@ void BuildShuffle(std::vector<Shuffle>& shuffles, // NOLINT(runtime/references)
}
row_index /= 2;
} while (row_index != 0);
- byte epilog[] = {kExprSetGlobal, static_cast<byte>(0), WASM_ONE};
+ byte epilog[] = {kExprGlobalSet, static_cast<byte>(0), WASM_ONE};
for (size_t j = 0; j < arraysize(epilog); ++j) buffer->push_back(epilog[j]);
}
@@ -2895,11 +3184,34 @@ WASM_SIMD_TEST(SimdLoadStoreLoad) {
r.builder().AddMemoryElems<int32_t>(kWasmPageSize / sizeof(int32_t));
// Load memory, store it, then reload it and extract the first lane. Use a
// non-zero offset into the memory of 1 lane (4 bytes) to test indexing.
- BUILD(r, WASM_SIMD_STORE_MEM(WASM_I32V(4), WASM_SIMD_LOAD_MEM(WASM_I32V(4))),
- WASM_SIMD_I32x4_EXTRACT_LANE(0, WASM_SIMD_LOAD_MEM(WASM_I32V(4))));
+ BUILD(r, WASM_SIMD_STORE_MEM(WASM_I32V(8), WASM_SIMD_LOAD_MEM(WASM_I32V(4))),
+ WASM_SIMD_I32x4_EXTRACT_LANE(0, WASM_SIMD_LOAD_MEM(WASM_I32V(8))));
+
+ FOR_INT32_INPUTS(i) {
+ int32_t expected = i;
+ r.builder().WriteMemory(&memory[1], expected);
+ CHECK_EQ(expected, r.Call());
+ }
+}
+
+WASM_SIMD_TEST(SimdLoadStoreLoadMemargOffset) {
+ WasmRunner<int32_t> r(execution_tier, lower_simd);
+ int32_t* memory =
+ r.builder().AddMemoryElems<int32_t>(kWasmPageSize / sizeof(int32_t));
+ constexpr byte offset_1 = 4;
+ constexpr byte offset_2 = 8;
+  // Load from memory at offset_1, store to offset_2, load from offset_2, and
+  // extract the first lane. We use non-zero memarg offsets to test offset
+  // decoding.
+ BUILD(
+ r,
+ WASM_SIMD_STORE_MEM_OFFSET(
+ offset_2, WASM_ZERO, WASM_SIMD_LOAD_MEM_OFFSET(offset_1, WASM_ZERO)),
+ WASM_SIMD_I32x4_EXTRACT_LANE(
+ 0, WASM_SIMD_LOAD_MEM_OFFSET(offset_2, WASM_ZERO)));
FOR_INT32_INPUTS(i) {
int32_t expected = i;
+    // Index 1 of memory (int32_t) occupies bytes [4, 8).
r.builder().WriteMemory(&memory[1], expected);
CHECK_EQ(expected, r.Call());
}
@@ -3040,8 +3352,48 @@ WASM_SIMD_TEST_NO_LOWERING(I16x8GtUMixed) {
UnsignedGreater);
}
+#define WASM_EXTRACT_I16x8_TEST(Sign, Type) \
+ WASM_SIMD_TEST(I16X8ExtractLane##Sign) { \
+ WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd); \
+ byte int_val = r.AllocateLocal(kWasmI32); \
+ byte simd_val = r.AllocateLocal(kWasmS128); \
+ BUILD(r, \
+ WASM_SET_LOCAL(simd_val, \
+ WASM_SIMD_I16x8_SPLAT(WASM_GET_LOCAL(int_val))), \
+ WASM_SIMD_CHECK_LANE_U(I16x8, simd_val, I32, int_val, 0), \
+ WASM_SIMD_CHECK_LANE_U(I16x8, simd_val, I32, int_val, 2), \
+ WASM_SIMD_CHECK_LANE_U(I16x8, simd_val, I32, int_val, 4), \
+ WASM_SIMD_CHECK_LANE_U(I16x8, simd_val, I32, int_val, 6), WASM_ONE); \
+ FOR_##Type##_INPUTS(x) { CHECK_EQ(1, r.Call(x)); } \
+ }
+WASM_EXTRACT_I16x8_TEST(S, UINT16) WASM_EXTRACT_I16x8_TEST(I, INT16)
+#undef WASM_EXTRACT_I16x8_TEST
+
+#define WASM_EXTRACT_I8x16_TEST(Sign, Type) \
+ WASM_SIMD_TEST(I8x16ExtractLane##Sign) { \
+ WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd); \
+ byte int_val = r.AllocateLocal(kWasmI32); \
+ byte simd_val = r.AllocateLocal(kWasmS128); \
+ BUILD(r, \
+ WASM_SET_LOCAL(simd_val, \
+ WASM_SIMD_I8x16_SPLAT(WASM_GET_LOCAL(int_val))), \
+ WASM_SIMD_CHECK_LANE_U(I8x16, simd_val, I32, int_val, 1), \
+ WASM_SIMD_CHECK_LANE_U(I8x16, simd_val, I32, int_val, 3), \
+ WASM_SIMD_CHECK_LANE_U(I8x16, simd_val, I32, int_val, 5), \
+ WASM_SIMD_CHECK_LANE_U(I8x16, simd_val, I32, int_val, 7), \
+ WASM_SIMD_CHECK_LANE_U(I8x16, simd_val, I32, int_val, 9), \
+ WASM_SIMD_CHECK_LANE_U(I8x16, simd_val, I32, int_val, 10), \
+ WASM_SIMD_CHECK_LANE_U(I8x16, simd_val, I32, int_val, 11), \
+ WASM_SIMD_CHECK_LANE_U(I8x16, simd_val, I32, int_val, 13), \
+ WASM_ONE); \
+ FOR_##Type##_INPUTS(x) { CHECK_EQ(1, r.Call(x)); } \
+ }
+ WASM_EXTRACT_I8x16_TEST(S, UINT8) WASM_EXTRACT_I8x16_TEST(I, INT8)
+#undef WASM_EXTRACT_I8x16_TEST
+
#undef WASM_SIMD_TEST
-#undef WASM_SIMD_CHECK_LANE
+#undef WASM_SIMD_CHECK_LANE_S
+#undef WASM_SIMD_CHECK_LANE_U
#undef TO_BYTE
#undef WASM_SIMD_OP
#undef WASM_SIMD_SPLAT
@@ -3064,13 +3416,17 @@ WASM_SIMD_TEST_NO_LOWERING(I16x8GtUMixed) {
#undef WASM_SIMD_I32x4_REPLACE_LANE
#undef WASM_SIMD_I16x8_SPLAT
#undef WASM_SIMD_I16x8_EXTRACT_LANE
+#undef WASM_SIMD_I16x8_EXTRACT_LANE_U
#undef WASM_SIMD_I16x8_REPLACE_LANE
#undef WASM_SIMD_I8x16_SPLAT
#undef WASM_SIMD_I8x16_EXTRACT_LANE
+#undef WASM_SIMD_I8x16_EXTRACT_LANE_U
#undef WASM_SIMD_I8x16_REPLACE_LANE
#undef WASM_SIMD_S8x16_SHUFFLE_OP
#undef WASM_SIMD_LOAD_MEM
+#undef WASM_SIMD_LOAD_MEM_OFFSET
#undef WASM_SIMD_STORE_MEM
+#undef WASM_SIMD_STORE_MEM_OFFSET
#undef WASM_SIMD_SELECT_TEST
#undef WASM_SIMD_NON_CANONICAL_SELECT_TEST
#undef WASM_SIMD_COMPILED_TEST
@@ -3078,6 +3434,10 @@ WASM_SIMD_TEST_NO_LOWERING(I16x8GtUMixed) {
#undef WASM_SIMD_TEST_NO_LOWERING
#undef WASM_SIMD_ANYTRUE_TEST
#undef WASM_SIMD_ALLTRUE_TEST
+#undef WASM_SIMD_F64x2_QFMA
+#undef WASM_SIMD_F64x2_QFMS
+#undef WASM_SIMD_F32x4_QFMA
+#undef WASM_SIMD_F32x4_QFMS
} // namespace test_run_wasm_simd
} // namespace wasm
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm.cc b/deps/v8/test/cctest/wasm/test-run-wasm.cc
index 26df61ceb8..aa6195b8b3 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm.cc
@@ -49,8 +49,8 @@ WASM_EXEC_TEST(Int32Const_many) {
WASM_EXEC_TEST(GraphTrimming) {
// This WebAssembly code requires graph trimming in the TurboFan compiler.
WasmRunner<int32_t, int32_t> r(execution_tier);
- BUILD(r, kExprGetLocal, 0, kExprGetLocal, 0, kExprGetLocal, 0, kExprI32RemS,
- kExprI32Eq, kExprGetLocal, 0, kExprI32DivS, kExprUnreachable);
+ BUILD(r, kExprLocalGet, 0, kExprLocalGet, 0, kExprLocalGet, 0, kExprI32RemS,
+ kExprI32Eq, kExprLocalGet, 0, kExprI32DivS, kExprUnreachable);
r.Call(1);
}
@@ -1810,18 +1810,18 @@ WASM_EXEC_TEST(CheckMachIntsZero) {
BUILD(r, // --
/**/ kExprLoop, kLocalVoid, // --
- /* */ kExprGetLocal, 0, // --
+ /* */ kExprLocalGet, 0, // --
/* */ kExprIf, kLocalVoid, // --
- /* */ kExprGetLocal, 0, // --
+ /* */ kExprLocalGet, 0, // --
/* */ kExprI32LoadMem, 0, 0, // --
/* */ kExprIf, kLocalVoid, // --
/* */ kExprI32Const, 127, // --
/* */ kExprReturn, // --
/* */ kExprEnd, // --
- /* */ kExprGetLocal, 0, // --
+ /* */ kExprLocalGet, 0, // --
/* */ kExprI32Const, 4, // --
/* */ kExprI32Sub, // --
- /* */ kExprTeeLocal, 0, // --
+ /* */ kExprLocalTee, 0, // --
/* */ kExprBr, DEPTH_0, // --
/* */ kExprEnd, // --
/**/ kExprEnd, // --
@@ -2012,16 +2012,16 @@ static void TestBuildGraphForSimpleExpression(WasmOpcode opcode) {
FunctionSig* sig = WasmOpcodes::Signature(opcode);
if (sig->parameter_count() == 1) {
- byte code[] = {WASM_NO_LOCALS, kExprGetLocal, 0, static_cast<byte>(opcode),
+ byte code[] = {WASM_NO_LOCALS, kExprLocalGet, 0, static_cast<byte>(opcode),
WASM_END};
TestBuildingGraph(&zone, &jsgraph, nullptr, sig, nullptr, code,
code + arraysize(code));
} else {
CHECK_EQ(2, sig->parameter_count());
byte code[] = {WASM_NO_LOCALS,
- kExprGetLocal,
+ kExprLocalGet,
0,
- kExprGetLocal,
+ kExprLocalGet,
1,
static_cast<byte>(opcode),
WASM_END};
@@ -2667,7 +2667,7 @@ static void Run_WasmMixedCall_N(ExecutionTier execution_tier, int start) {
// Store the result in a local.
byte local_index = r.AllocateLocal(ValueTypes::ValueTypeFor(result));
- ADD_CODE(code, kExprSetLocal, local_index);
+ ADD_CODE(code, kExprLocalSet, local_index);
// Store the result in memory.
ADD_CODE(code,
@@ -2761,10 +2761,11 @@ void RunMultiReturnSelect(ExecutionTier execution_tier, const T* inputs) {
WASM_GET_LOCAL(3)),
WASM_DROP);
} else {
- BUILD(r, WASM_CALL_FUNCTION(r1.function_index(), WASM_GET_LOCAL(0),
- WASM_GET_LOCAL(1), WASM_GET_LOCAL(2),
- WASM_GET_LOCAL(3)),
- kExprSetLocal, 0, WASM_DROP, WASM_GET_LOCAL(0));
+ BUILD(r,
+ WASM_CALL_FUNCTION(r1.function_index(), WASM_GET_LOCAL(0),
+ WASM_GET_LOCAL(1), WASM_GET_LOCAL(2),
+ WASM_GET_LOCAL(3)),
+ kExprLocalSet, 0, WASM_DROP, WASM_GET_LOCAL(0));
}
T expected = inputs[k == 0 ? i : j];
@@ -3330,7 +3331,7 @@ static void CompileCallIndirectMany(ExecutionTier tier, ValueType param) {
std::vector<byte> code;
for (byte p = 0; p < num_params; ++p) {
- ADD_CODE(code, kExprGetLocal, p);
+ ADD_CODE(code, kExprLocalGet, p);
}
ADD_CODE(code, kExprI32Const, 0);
ADD_CODE(code, kExprCallIndirect, 1, TABLE_ZERO);
diff --git a/deps/v8/test/cctest/wasm/test-streaming-compilation.cc b/deps/v8/test/cctest/wasm/test-streaming-compilation.cc
index 795fa30e72..f9089b7821 100644
--- a/deps/v8/test/cctest/wasm/test-streaming-compilation.cc
+++ b/deps/v8/test/cctest/wasm/test-streaming-compilation.cc
@@ -194,17 +194,17 @@ ZoneBuffer GetValidModuleBytes(Zone* zone) {
WasmModuleBuilder builder(zone);
{
WasmFunctionBuilder* f = builder.AddFunction(sigs.i_iii());
- uint8_t code[] = {kExprGetLocal, 0, kExprEnd};
+ uint8_t code[] = {kExprLocalGet, 0, kExprEnd};
f->EmitCode(code, arraysize(code));
}
{
WasmFunctionBuilder* f = builder.AddFunction(sigs.i_iii());
- uint8_t code[] = {kExprGetLocal, 1, kExprEnd};
+ uint8_t code[] = {kExprLocalGet, 1, kExprEnd};
f->EmitCode(code, arraysize(code));
}
{
WasmFunctionBuilder* f = builder.AddFunction(sigs.i_iii());
- uint8_t code[] = {kExprGetLocal, 2, kExprEnd};
+ uint8_t code[] = {kExprLocalGet, 2, kExprEnd};
f->EmitCode(code, arraysize(code));
}
builder.WriteTo(&buffer);
@@ -317,17 +317,17 @@ ZoneBuffer GetModuleWithInvalidSection(Zone* zone) {
WasmInitExpr(WasmInitExpr::kGlobalIndex, 12));
{
WasmFunctionBuilder* f = builder.AddFunction(sigs.i_iii());
- uint8_t code[] = {kExprGetLocal, 0, kExprEnd};
+ uint8_t code[] = {kExprLocalGet, 0, kExprEnd};
f->EmitCode(code, arraysize(code));
}
{
WasmFunctionBuilder* f = builder.AddFunction(sigs.i_iii());
- uint8_t code[] = {kExprGetLocal, 1, kExprEnd};
+ uint8_t code[] = {kExprLocalGet, 1, kExprEnd};
f->EmitCode(code, arraysize(code));
}
{
WasmFunctionBuilder* f = builder.AddFunction(sigs.i_iii());
- uint8_t code[] = {kExprGetLocal, 2, kExprEnd};
+ uint8_t code[] = {kExprLocalGet, 2, kExprEnd};
f->EmitCode(code, arraysize(code));
}
builder.WriteTo(&buffer);
@@ -442,7 +442,7 @@ STREAM_TEST(TestErrorInCodeSectionDetectedByModuleDecoder) {
uint8_t code[] = {
U32V_1(4), // body size
U32V_1(0), // locals count
- kExprGetLocal, 0, kExprEnd // body
+ kExprLocalGet, 0, kExprEnd // body
};
const uint8_t bytes[] = {
@@ -481,7 +481,7 @@ STREAM_TEST(TestErrorInCodeSectionDetectedByStreamingDecoder) {
uint8_t code[] = {
U32V_1(26), // !!! invalid body size !!!
U32V_1(0), // locals count
- kExprGetLocal, 0, kExprEnd // body
+ kExprLocalGet, 0, kExprEnd // body
};
const uint8_t bytes[] = {
@@ -520,7 +520,7 @@ STREAM_TEST(TestErrorInCodeSectionDetectedByCompiler) {
uint8_t code[] = {
U32V_1(4), // !!! invalid body size !!!
U32V_1(0), // locals count
- kExprGetLocal, 0, kExprEnd // body
+ kExprLocalGet, 0, kExprEnd // body
};
uint8_t invalid_code[] = {
@@ -679,7 +679,7 @@ STREAM_TEST(TestAbortAfterFunctionGotCompiled1) {
uint8_t code[] = {
U32V_1(4), // !!! invalid body size !!!
U32V_1(0), // locals count
- kExprGetLocal, 0, kExprEnd // body
+ kExprLocalGet, 0, kExprEnd // body
};
const uint8_t bytes[] = {
@@ -713,7 +713,7 @@ STREAM_TEST(TestAbortAfterFunctionGotCompiled2) {
uint8_t code[] = {
U32V_1(4), // !!! invalid body size !!!
U32V_1(0), // locals count
- kExprGetLocal, 0, kExprEnd // body
+ kExprLocalGet, 0, kExprEnd // body
};
const uint8_t bytes[] = {
@@ -745,7 +745,7 @@ STREAM_TEST(TestAbortAfterCodeSection1) {
uint8_t code[] = {
U32V_1(4), // body size
U32V_1(0), // locals count
- kExprGetLocal, 0, kExprEnd // body
+ kExprLocalGet, 0, kExprEnd // body
};
const uint8_t bytes[] = {
@@ -781,7 +781,7 @@ STREAM_TEST(TestAbortAfterCodeSection2) {
uint8_t code[] = {
U32V_1(4), // body size
U32V_1(0), // locals count
- kExprGetLocal, 0, kExprEnd // body
+ kExprLocalGet, 0, kExprEnd // body
};
const uint8_t bytes[] = {
@@ -815,7 +815,7 @@ STREAM_TEST(TestAbortAfterCompilationError1) {
uint8_t code[] = {
U32V_1(4), // !!! invalid body size !!!
U32V_1(0), // locals count
- kExprGetLocal, 0, kExprEnd // body
+ kExprLocalGet, 0, kExprEnd // body
};
uint8_t invalid_code[] = {
@@ -857,7 +857,7 @@ STREAM_TEST(TestAbortAfterCompilationError2) {
uint8_t code[] = {
U32V_1(4), // !!! invalid body size !!!
U32V_1(0), // locals count
- kExprGetLocal, 0, kExprEnd // body
+ kExprLocalGet, 0, kExprEnd // body
};
uint8_t invalid_code[] = {
@@ -934,7 +934,7 @@ STREAM_TEST(TestModuleWithMultipleFunctions) {
uint8_t code[] = {
U32V_1(4), // body size
U32V_1(0), // locals count
- kExprGetLocal, 0, kExprEnd // body
+ kExprLocalGet, 0, kExprEnd // body
};
const uint8_t bytes[] = {
@@ -970,7 +970,7 @@ STREAM_TEST(TestModuleWithDataSection) {
uint8_t code[] = {
U32V_1(4), // body size
U32V_1(0), // locals count
- kExprGetLocal, 0, kExprEnd // body
+ kExprLocalGet, 0, kExprEnd // body
};
const uint8_t bytes[] = {
@@ -1016,7 +1016,7 @@ STREAM_TEST(TestModuleWithImportedFunction) {
builder.AddImport(ArrayVector("Test"), sigs.i_iii());
{
WasmFunctionBuilder* f = builder.AddFunction(sigs.i_iii());
- uint8_t code[] = {kExprGetLocal, 0, kExprEnd};
+ uint8_t code[] = {kExprLocalGet, 0, kExprEnd};
f->EmitCode(code, arraysize(code));
}
builder.WriteTo(&buffer);
@@ -1047,7 +1047,7 @@ STREAM_TEST(TestModuleWithErrorAfterDataSection) {
U32V_1(1), // functions count
U32V_1(4), // body size
U32V_1(0), // locals count
- kExprGetLocal, // some code
+ kExprLocalGet, // some code
0, // some code
kExprEnd, // some code
kDataSectionCode, // section code
@@ -1133,7 +1133,7 @@ STREAM_TEST(TestSetModuleCompiledCallback) {
uint8_t code[] = {
U32V_1(4), // body size
U32V_1(0), // locals count
- kExprGetLocal, 0, kExprEnd // body
+ kExprLocalGet, 0, kExprEnd // body
};
const uint8_t bytes[] = {
diff --git a/deps/v8/test/cctest/wasm/test-wasm-breakpoints.cc b/deps/v8/test/cctest/wasm/test-wasm-breakpoints.cc
index e287b1139e..798e1d46da 100644
--- a/deps/v8/test/cctest/wasm/test-wasm-breakpoints.cc
+++ b/deps/v8/test/cctest/wasm/test-wasm-breakpoints.cc
@@ -22,10 +22,11 @@ namespace wasm {
namespace {
void CheckLocations(
- WasmModuleObject module_object, debug::Location start, debug::Location end,
+ NativeModule* native_module, debug::Location start, debug::Location end,
std::initializer_list<debug::Location> expected_locations_init) {
std::vector<debug::BreakLocation> locations;
- bool success = module_object.GetPossibleBreakpoints(start, end, &locations);
+ bool success = WasmModuleObject::GetPossibleBreakpoints(native_module, start,
+ end, &locations);
CHECK(success);
printf("got %d locations: ", static_cast<int>(locations.size()));
@@ -45,10 +46,11 @@ void CheckLocations(
}
}
-void CheckLocationsFail(WasmModuleObject module_object, debug::Location start,
+void CheckLocationsFail(NativeModule* native_module, debug::Location start,
debug::Location end) {
std::vector<debug::BreakLocation> locations;
- bool success = module_object.GetPossibleBreakpoints(start, end, &locations);
+ bool success = WasmModuleObject::GetPossibleBreakpoints(native_module, start,
+ end, &locations);
CHECK(!success);
}
@@ -63,8 +65,12 @@ class BreakHandler : public debug::DebugDelegate {
struct BreakPoint {
int position;
Action action;
+ std::function<void(void)> pre_action;
BreakPoint(int position, Action action)
- : position(position), action(action) {}
+ : position(position), action(action), pre_action([]() {}) {}
+ BreakPoint(int position, Action action,
+ std::function<void(void)> pre_action)
+ : position(position), action(action), pre_action(pre_action) {}
};
explicit BreakHandler(Isolate* isolate,
@@ -96,6 +102,7 @@ class BreakHandler : public debug::DebugDelegate {
auto summ = FrameSummary::GetTop(frame_it.frame()).AsWasmInterpreted();
CHECK_EQ(expected_breaks_[count_].position, summ.byte_offset());
+ expected_breaks_[count_].pre_action();
Action next_action = expected_breaks_[count_].action;
switch (next_action) {
case Continue:
@@ -112,22 +119,21 @@ class BreakHandler : public debug::DebugDelegate {
}
};
-void SetBreakpoint(WasmRunnerBase& runner, // NOLINT(runtime/references)
- int function_index, int byte_offset,
- int expected_set_byte_offset = -1) {
+Handle<BreakPoint> SetBreakpoint(WasmRunnerBase* runner, int function_index,
+ int byte_offset,
+ int expected_set_byte_offset = -1) {
int func_offset =
- runner.builder().GetFunctionAt(function_index)->code.offset();
+ runner->builder().GetFunctionAt(function_index)->code.offset();
int code_offset = func_offset + byte_offset;
if (expected_set_byte_offset == -1) expected_set_byte_offset = byte_offset;
- Handle<WasmInstanceObject> instance = runner.builder().instance_object();
- Handle<WasmModuleObject> module_object(instance->module_object(),
- runner.main_isolate());
+ Handle<WasmInstanceObject> instance = runner->builder().instance_object();
+ Handle<Script> script(instance->module_object().script(),
+ runner->main_isolate());
static int break_index = 0;
Handle<BreakPoint> break_point =
- runner.main_isolate()->factory()->NewBreakPoint(
- break_index++, runner.main_isolate()->factory()->empty_string());
- CHECK(WasmModuleObject::SetBreakPoint(module_object, &code_offset,
- break_point));
+ runner->main_isolate()->factory()->NewBreakPoint(
+ break_index++, runner->main_isolate()->factory()->empty_string());
+ CHECK(WasmModuleObject::SetBreakPoint(script, &code_offset, break_point));
int set_byte_offset = code_offset - func_offset;
CHECK_EQ(expected_set_byte_offset, set_byte_offset);
// Also set breakpoint on the debug info of the instance directly, since the
@@ -135,6 +141,24 @@ void SetBreakpoint(WasmRunnerBase& runner, // NOLINT(runtime/references)
Handle<WasmDebugInfo> debug_info =
WasmInstanceObject::GetOrCreateDebugInfo(instance);
WasmDebugInfo::SetBreakpoint(debug_info, function_index, set_byte_offset);
+
+ return break_point;
+}
+
+void ClearBreakpoint(WasmRunnerBase* runner, int function_index,
+ int byte_offset, Handle<BreakPoint> break_point) {
+ int func_offset =
+ runner->builder().GetFunctionAt(function_index)->code.offset();
+ int code_offset = func_offset + byte_offset;
+ Handle<WasmInstanceObject> instance = runner->builder().instance_object();
+ Handle<Script> script(instance->module_object().script(),
+ runner->main_isolate());
+ CHECK(WasmModuleObject::ClearBreakPoint(script, code_offset, break_point));
+  // Also clear the breakpoint on the debug info of the instance directly,
+  // since the instance chain is not set up properly in tests.
+ Handle<WasmDebugInfo> debug_info =
+ WasmInstanceObject::GetOrCreateDebugInfo(instance);
+ WasmDebugInfo::ClearBreakpoint(debug_info, function_index, byte_offset);
}
// Wrapper with operator<<.
@@ -247,25 +271,25 @@ WASM_COMPILED_EXEC_TEST(WasmCollectPossibleBreakpoints) {
BUILD(runner, WASM_NOP, WASM_I32_ADD(WASM_ZERO, WASM_ONE));
WasmInstanceObject instance = *runner.builder().instance_object();
- WasmModuleObject module_object = instance.module_object();
+ NativeModule* native_module = instance.module_object().native_module();
std::vector<debug::Location> locations;
// Check all locations for function 0.
- CheckLocations(module_object, {0, 0}, {1, 0},
+ CheckLocations(native_module, {0, 0}, {1, 0},
{{0, 1}, {0, 2}, {0, 4}, {0, 6}, {0, 7}});
// Check a range ending at an instruction.
- CheckLocations(module_object, {0, 2}, {0, 4}, {{0, 2}});
+ CheckLocations(native_module, {0, 2}, {0, 4}, {{0, 2}});
// Check a range ending one behind an instruction.
- CheckLocations(module_object, {0, 2}, {0, 5}, {{0, 2}, {0, 4}});
+ CheckLocations(native_module, {0, 2}, {0, 5}, {{0, 2}, {0, 4}});
// Check a range starting at an instruction.
- CheckLocations(module_object, {0, 7}, {0, 8}, {{0, 7}});
+ CheckLocations(native_module, {0, 7}, {0, 8}, {{0, 7}});
// Check from an instruction to beginning of next function.
- CheckLocations(module_object, {0, 7}, {1, 0}, {{0, 7}});
+ CheckLocations(native_module, {0, 7}, {1, 0}, {{0, 7}});
// Check from end of one function (no valid instruction position) to beginning
// of next function. Must be empty, but not fail.
- CheckLocations(module_object, {0, 8}, {1, 0}, {});
+ CheckLocations(native_module, {0, 8}, {1, 0}, {});
// Check from one after the end of the function. Must fail.
- CheckLocationsFail(module_object, {0, 9}, {1, 0});
+ CheckLocationsFail(native_module, {0, 9}, {1, 0});
}
WASM_COMPILED_EXEC_TEST(WasmSimpleBreak) {
@@ -276,7 +300,7 @@ WASM_COMPILED_EXEC_TEST(WasmSimpleBreak) {
Handle<JSFunction> main_fun_wrapper =
runner.builder().WrapCode(runner.function_index());
- SetBreakpoint(runner, runner.function_index(), 4, 4);
+ SetBreakpoint(&runner, runner.function_index(), 4, 4);
BreakHandler count_breaks(isolate, {{4, BreakHandler::Continue}});
@@ -298,7 +322,7 @@ WASM_COMPILED_EXEC_TEST(WasmSimpleStepping) {
runner.builder().WrapCode(runner.function_index());
// Set breakpoint at the first I32Const.
- SetBreakpoint(runner, runner.function_index(), 1, 1);
+ SetBreakpoint(&runner, runner.function_index(), 1, 1);
BreakHandler count_breaks(isolate,
{
@@ -340,12 +364,12 @@ WASM_COMPILED_EXEC_TEST(WasmStepInAndOut) {
Handle<JSFunction> main_fun_wrapper =
runner.builder().WrapCode(f2.function_index());
- // Set first breakpoint on the GetLocal (offset 19) before the Call.
- SetBreakpoint(runner, f2.function_index(), 19, 19);
+ // Set first breakpoint on the LocalGet (offset 19) before the Call.
+ SetBreakpoint(&runner, f2.function_index(), 19, 19);
BreakHandler count_breaks(isolate,
{
- {19, BreakHandler::StepIn}, // GetLocal
+ {19, BreakHandler::StepIn}, // LocalGet
{21, BreakHandler::StepIn}, // Call
{1, BreakHandler::StepOut}, // in f2
{23, BreakHandler::Continue} // After Call
@@ -377,7 +401,7 @@ WASM_COMPILED_EXEC_TEST(WasmGetLocalsAndStack) {
// Set breakpoint at the first instruction (7 bytes for local decls: num
// entries + 3x<count, type>).
- SetBreakpoint(runner, runner.function_index(), 7, 7);
+ SetBreakpoint(&runner, runner.function_index(), 7, 7);
CollectValuesBreakHandler break_handler(
isolate,
@@ -401,6 +425,104 @@ WASM_COMPILED_EXEC_TEST(WasmGetLocalsAndStack) {
CHECK(!Execution::Call(isolate, main_fun_wrapper, global, 1, args).is_null());
}
+WASM_COMPILED_EXEC_TEST(WasmRemoveBreakPoint) {
+ WasmRunner<int> runner(execution_tier);
+ Isolate* isolate = runner.main_isolate();
+
+ BUILD(runner, WASM_NOP, WASM_NOP, WASM_NOP, WASM_NOP, WASM_NOP,
+ WASM_I32V_1(14));
+
+ Handle<JSFunction> main_fun_wrapper =
+ runner.builder().WrapCode(runner.function_index());
+
+ SetBreakpoint(&runner, runner.function_index(), 1, 1);
+ SetBreakpoint(&runner, runner.function_index(), 2, 2);
+ Handle<BreakPoint> to_delete =
+ SetBreakpoint(&runner, runner.function_index(), 3, 3);
+ SetBreakpoint(&runner, runner.function_index(), 4, 4);
+
+ BreakHandler count_breaks(isolate, {{1, BreakHandler::Continue},
+ {2, BreakHandler::Continue,
+ [&runner, &to_delete]() {
+ ClearBreakpoint(
+ &runner, runner.function_index(),
+ 3, to_delete);
+ }},
+ {4, BreakHandler::Continue}});
+
+ Handle<Object> global(isolate->context().global_object(), isolate);
+ MaybeHandle<Object> retval =
+ Execution::Call(isolate, main_fun_wrapper, global, 0, nullptr);
+ CHECK(!retval.is_null());
+ int result;
+ CHECK(retval.ToHandleChecked()->ToInt32(&result));
+ CHECK_EQ(14, result);
+}
+
+WASM_COMPILED_EXEC_TEST(WasmRemoveLastBreakPoint) {
+ WasmRunner<int> runner(execution_tier);
+ Isolate* isolate = runner.main_isolate();
+
+ BUILD(runner, WASM_NOP, WASM_NOP, WASM_NOP, WASM_NOP, WASM_NOP,
+ WASM_I32V_1(14));
+
+ Handle<JSFunction> main_fun_wrapper =
+ runner.builder().WrapCode(runner.function_index());
+
+ SetBreakpoint(&runner, runner.function_index(), 1, 1);
+ SetBreakpoint(&runner, runner.function_index(), 2, 2);
+ Handle<BreakPoint> to_delete =
+ SetBreakpoint(&runner, runner.function_index(), 3, 3);
+
+ BreakHandler count_breaks(
+ isolate, {{1, BreakHandler::Continue},
+ {2, BreakHandler::Continue, [&runner, &to_delete]() {
+ ClearBreakpoint(&runner, runner.function_index(), 3,
+ to_delete);
+ }}});
+
+ Handle<Object> global(isolate->context().global_object(), isolate);
+ MaybeHandle<Object> retval =
+ Execution::Call(isolate, main_fun_wrapper, global, 0, nullptr);
+ CHECK(!retval.is_null());
+ int result;
+ CHECK(retval.ToHandleChecked()->ToInt32(&result));
+ CHECK_EQ(14, result);
+}
+
+WASM_COMPILED_EXEC_TEST(WasmRemoveAllBreakPoint) {
+ WasmRunner<int> runner(execution_tier);
+ Isolate* isolate = runner.main_isolate();
+
+ BUILD(runner, WASM_NOP, WASM_NOP, WASM_NOP, WASM_NOP, WASM_NOP,
+ WASM_I32V_1(14));
+
+ Handle<JSFunction> main_fun_wrapper =
+ runner.builder().WrapCode(runner.function_index());
+
+ Handle<BreakPoint> bp1 =
+ SetBreakpoint(&runner, runner.function_index(), 1, 1);
+ Handle<BreakPoint> bp2 =
+ SetBreakpoint(&runner, runner.function_index(), 2, 2);
+ Handle<BreakPoint> bp3 =
+ SetBreakpoint(&runner, runner.function_index(), 3, 3);
+
+ BreakHandler count_breaks(
+ isolate, {{1, BreakHandler::Continue, [&runner, &bp1, &bp2, &bp3]() {
+ ClearBreakpoint(&runner, runner.function_index(), 1, bp1);
+ ClearBreakpoint(&runner, runner.function_index(), 3, bp3);
+ ClearBreakpoint(&runner, runner.function_index(), 2, bp2);
+ }}});
+
+ Handle<Object> global(isolate->context().global_object(), isolate);
+ MaybeHandle<Object> retval =
+ Execution::Call(isolate, main_fun_wrapper, global, 0, nullptr);
+ CHECK(!retval.is_null());
+ int result;
+ CHECK(retval.ToHandleChecked()->ToInt32(&result));
+ CHECK_EQ(14, result);
+}
+
} // namespace wasm
} // namespace internal
} // namespace v8
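
The three tests added above exercise removing Wasm breakpoints from inside a break handler, including removing the very next breakpoint before it is hit. A minimal, self-contained sketch of that callback-driven removal pattern follows; BreakpointRegistry and all names in it are hypothetical stand-ins, not V8 API.

    #include <functional>
    #include <iostream>
    #include <map>

    class BreakpointRegistry {
     public:
      using Callback = std::function<void(BreakpointRegistry*)>;

      void Set(int offset, Callback cb) { breakpoints_[offset] = std::move(cb); }
      void Clear(int offset) { breakpoints_.erase(offset); }

      // Simulates executing instructions at offsets 0..limit-1, pausing at
      // breakpoints. The callback is copied before it runs, so it stays valid
      // even if it clears entries from the map.
      void Run(int limit) {
        for (int offset = 0; offset < limit; ++offset) {
          auto it = breakpoints_.find(offset);
          if (it == breakpoints_.end()) continue;
          Callback cb = it->second;  // copy before possible removal
          std::cout << "break at " << offset << "\n";
          if (cb) cb(this);
        }
      }

     private:
      std::map<int, Callback> breakpoints_;
    };

    int main() {
      BreakpointRegistry registry;
      registry.Set(1, nullptr);
      // Mirrors WasmRemoveBreakPoint: the handler at offset 2 deletes the
      // breakpoint at offset 3, so execution next stops at offset 4.
      registry.Set(2, [](BreakpointRegistry* r) { r->Clear(3); });
      registry.Set(3, nullptr);
      registry.Set(4, nullptr);
      registry.Run(6);  // prints: break at 1, break at 2, break at 4
    }
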
diff --git a/deps/v8/test/cctest/wasm/test-wasm-import-wrapper-cache.cc b/deps/v8/test/cctest/wasm/test-wasm-import-wrapper-cache.cc
index 299c039698..15267215e1 100644
--- a/deps/v8/test/cctest/wasm/test-wasm-import-wrapper-cache.cc
+++ b/deps/v8/test/cctest/wasm/test-wasm-import-wrapper-cache.cc
@@ -22,10 +22,8 @@ std::shared_ptr<NativeModule> NewModule(Isolate* isolate) {
std::shared_ptr<WasmModule> module(new WasmModule);
bool can_request_more = false;
size_t size = 16384;
- auto native_module = isolate->wasm_engine()->NewNativeModule(
+ return isolate->wasm_engine()->NewNativeModule(
isolate, kAllWasmFeatures, size, can_request_more, std::move(module));
- native_module->SetRuntimeStubs(isolate);
- return native_module;
}
TEST(CacheHit) {
diff --git a/deps/v8/test/cctest/wasm/test-wasm-interpreter-entry.cc b/deps/v8/test/cctest/wasm/test-wasm-interpreter-entry.cc
index 736475ff55..75e927fafe 100644
--- a/deps/v8/test/cctest/wasm/test-wasm-interpreter-entry.cc
+++ b/deps/v8/test/cctest/wasm/test-wasm-interpreter-entry.cc
@@ -32,28 +32,27 @@ namespace {
template <typename T>
class ArgPassingHelper {
public:
- ArgPassingHelper(
- WasmRunnerBase& runner, // NOLINT(runtime/references)
- WasmFunctionCompiler& inner_compiler, // NOLINT(runtime/references)
- std::initializer_list<uint8_t> bytes_inner_function,
- std::initializer_list<uint8_t> bytes_outer_function,
- const T& expected_lambda)
- : isolate_(runner.main_isolate()),
+ ArgPassingHelper(WasmRunnerBase* runner, WasmFunctionCompiler* inner_compiler,
+ std::initializer_list<uint8_t> bytes_inner_function,
+ std::initializer_list<uint8_t> bytes_outer_function,
+ const T& expected_lambda)
+ : isolate_(runner->main_isolate()),
expected_lambda_(expected_lambda),
debug_info_(WasmInstanceObject::GetOrCreateDebugInfo(
- runner.builder().instance_object())) {
+ runner->builder().instance_object())) {
std::vector<uint8_t> inner_code{bytes_inner_function};
- inner_compiler.Build(inner_code.data(),
- inner_code.data() + inner_code.size());
+ inner_compiler->Build(inner_code.data(),
+ inner_code.data() + inner_code.size());
std::vector<uint8_t> outer_code{bytes_outer_function};
- runner.Build(outer_code.data(), outer_code.data() + outer_code.size());
+ runner->Build(outer_code.data(), outer_code.data() + outer_code.size());
- int funcs_to_redict[] = {static_cast<int>(inner_compiler.function_index())};
- runner.builder().SetExecutable();
+ int funcs_to_redict[] = {
+ static_cast<int>(inner_compiler->function_index())};
+ runner->builder().SetExecutable();
WasmDebugInfo::RedirectToInterpreter(debug_info_,
ArrayVector(funcs_to_redict));
- main_fun_wrapper_ = runner.builder().WrapCode(runner.function_index());
+ main_fun_wrapper_ = runner->builder().WrapCode(runner->function_index());
}
template <typename... Args>
@@ -82,8 +81,7 @@ class ArgPassingHelper {
template <typename T>
static ArgPassingHelper<T> GetHelper(
- WasmRunnerBase& runner, // NOLINT(runtime/references)
- WasmFunctionCompiler& inner_compiler, // NOLINT(runtime/references)
+ WasmRunnerBase* runner, WasmFunctionCompiler* inner_compiler,
std::initializer_list<uint8_t> bytes_inner_function,
std::initializer_list<uint8_t> bytes_outer_function,
const T& expected_lambda) {
@@ -99,7 +97,7 @@ TEST(TestArgumentPassing_int32) {
WasmFunctionCompiler& f2 = runner.NewFunction<int32_t, int32_t>();
auto helper = GetHelper(
- runner, f2,
+ &runner, &f2,
{// Return 2*<0> + 1.
WASM_I32_ADD(WASM_I32_MUL(WASM_I32V_1(2), WASM_GET_LOCAL(0)), WASM_ONE)},
{// Call f2 with param <0>.
@@ -117,7 +115,7 @@ TEST(TestArgumentPassing_double_int64) {
WasmFunctionCompiler& f2 = runner.NewFunction<double, int64_t>();
auto helper = GetHelper(
- runner, f2,
+ &runner, &f2,
{// Return (double)<0>.
WASM_F64_SCONVERT_I64(WASM_GET_LOCAL(0))},
{// Call f2 with param (<0> | (<1> << 32)).
@@ -150,7 +148,7 @@ TEST(TestArgumentPassing_int64_double) {
WasmFunctionCompiler& f2 = runner.NewFunction<int64_t, double>();
auto helper = GetHelper(
- runner, f2,
+ &runner, &f2,
{// Return (int64_t)<0>.
WASM_I64_SCONVERT_F64(WASM_GET_LOCAL(0))},
{// Call f2 with param <0>, convert returned value back to double.
@@ -169,7 +167,7 @@ TEST(TestArgumentPassing_float_double) {
WasmFunctionCompiler& f2 = runner.NewFunction<double, float>();
auto helper = GetHelper(
- runner, f2,
+ &runner, &f2,
{// Return 2*(double)<0> + 1.
WASM_F64_ADD(
WASM_F64_MUL(WASM_F64(2), WASM_F64_CONVERT_F32(WASM_GET_LOCAL(0))),
@@ -186,7 +184,7 @@ TEST(TestArgumentPassing_double_double) {
WasmRunner<double, double, double> runner(ExecutionTier::kTurbofan);
WasmFunctionCompiler& f2 = runner.NewFunction<double, double, double>();
- auto helper = GetHelper(runner, f2,
+ auto helper = GetHelper(&runner, &f2,
{// Return <0> + <1>.
WASM_F64_ADD(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1))},
{// Call f2 with params <0>, <1>.
@@ -208,7 +206,7 @@ TEST(TestArgumentPassing_AllTypes) {
runner.NewFunction<double, int32_t, int64_t, float, double>();
auto helper = GetHelper(
- runner, f2,
+ &runner, &f2,
{
// Convert all arguments to double, add them and return the sum.
WASM_F64_ADD( // <0+1+2> + <3>
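
The GetHelper/ArgPassingHelper changes above follow the style rule behind the removed NOLINT(runtime/references) markers: parameters a function mutates are passed by pointer, so the mutation is visible at the call site (GetHelper(&runner, &f2, ...)). A tiny illustration of the convention, with purely illustrative names:

    #include <iostream>
    #include <string>

    // Taking std::string& would hide the side effect at the call site;
    // taking a pointer forces the caller to write the telltale '&'.
    void AppendExclamation(std::string* s) { s->append("!"); }

    int main() {
      std::string text = "compile";
      AppendExclamation(&text);   // the '&' signals that text may change
      std::cout << text << "\n";  // prints: compile!
    }
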
diff --git a/deps/v8/test/cctest/wasm/test-wasm-serialization.cc b/deps/v8/test/cctest/wasm/test-wasm-serialization.cc
index 1ff2a899ad..c6486650ef 100644
--- a/deps/v8/test/cctest/wasm/test-wasm-serialization.cc
+++ b/deps/v8/test/cctest/wasm/test-wasm-serialization.cc
@@ -11,7 +11,6 @@
#include "src/utils/version.h"
#include "src/wasm/module-decoder.h"
#include "src/wasm/wasm-engine.h"
-#include "src/wasm/wasm-memory.h"
#include "src/wasm/wasm-module-builder.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-objects-inl.h"
@@ -272,9 +271,8 @@ TEST(BlockWasmCodeGenAtDeserialization) {
Cleanup();
}
-namespace {
-
-void TestTransferrableWasmModules(bool should_share) {
+UNINITIALIZED_TEST(CompiledWasmModulesTransfer) {
+ FlagScope<bool> flag_scope_engine(&FLAG_wasm_shared_engine, true);
i::wasm::WasmEngine::InitializeOncePerProcess();
v8::internal::AccountingAllocator allocator;
Zone zone(&allocator, ZONE_NAME);
@@ -285,7 +283,7 @@ void TestTransferrableWasmModules(bool should_share) {
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
v8::Isolate* from_isolate = v8::Isolate::New(create_params);
- std::vector<v8::WasmModuleObject::TransferrableModule> store;
+ std::vector<v8::CompiledWasmModule> store;
std::shared_ptr<NativeModule> original_native_module;
{
v8::HandleScope scope(from_isolate);
@@ -293,7 +291,7 @@ void TestTransferrableWasmModules(bool should_share) {
Isolate* from_i_isolate = reinterpret_cast<Isolate*>(from_isolate);
testing::SetupIsolateForWasmModule(from_i_isolate);
- ErrorThrower thrower(from_i_isolate, "TestTransferrableWasmModules");
+ ErrorThrower thrower(from_i_isolate, "TestCompiledWasmModulesTransfer");
auto enabled_features = WasmFeaturesFromIsolate(from_i_isolate);
MaybeHandle<WasmModuleObject> maybe_module_object =
from_i_isolate->wasm_engine()->SyncCompile(
@@ -304,7 +302,7 @@ void TestTransferrableWasmModules(bool should_share) {
v8::Local<v8::WasmModuleObject> v8_module =
v8::Local<v8::WasmModuleObject>::Cast(
v8::Utils::ToLocal(Handle<JSObject>::cast(module_object)));
- store.push_back(v8_module->GetTransferrableModule());
+ store.push_back(v8_module->GetCompiledModule());
original_native_module = module_object->shared_native_module();
}
@@ -315,14 +313,13 @@ void TestTransferrableWasmModules(bool should_share) {
LocalContext env(to_isolate);
v8::MaybeLocal<v8::WasmModuleObject> transferred_module =
- v8::WasmModuleObject::FromTransferrableModule(to_isolate, store[0]);
+ v8::WasmModuleObject::FromCompiledModule(to_isolate, store[0]);
CHECK(!transferred_module.IsEmpty());
Handle<WasmModuleObject> module_object = Handle<WasmModuleObject>::cast(
v8::Utils::OpenHandle(*transferred_module.ToLocalChecked()));
std::shared_ptr<NativeModule> transferred_native_module =
module_object->shared_native_module();
- bool is_sharing = (original_native_module == transferred_native_module);
- CHECK_EQ(should_share, is_sharing);
+ CHECK_EQ(original_native_module, transferred_native_module);
}
to_isolate->Dispose();
}
@@ -330,19 +327,6 @@ void TestTransferrableWasmModules(bool should_share) {
from_isolate->Dispose();
}
-} // namespace
-
-UNINITIALIZED_TEST(TransferrableWasmModulesCloned) {
- FlagScope<bool> flag_scope_code(&FLAG_wasm_shared_code, false);
- TestTransferrableWasmModules(false);
-}
-
-UNINITIALIZED_TEST(TransferrableWasmModulesShared) {
- FlagScope<bool> flag_scope_engine(&FLAG_wasm_shared_engine, true);
- FlagScope<bool> flag_scope_code(&FLAG_wasm_shared_code, true);
- TestTransferrableWasmModules(true);
-}
-
#undef EMIT_CODE_WITH_END
} // namespace test_wasm_serialization
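
Note that the rewritten test asserts identity, not structural equality: with a shared engine there is exactly one NativeModule, so the module transferred into the second isolate is the same object the first isolate compiled. A generic sketch of that invariant, with std::shared_ptr standing in for the shared_native_module() handle:

    #include <cassert>
    #include <memory>

    struct NativeModuleStub { int id = 42; };  // illustrative stand-in

    int main() {
      auto original = std::make_shared<NativeModuleStub>();
      std::shared_ptr<NativeModuleStub> transferred = original;  // shared, not copied
      assert(original == transferred);       // same object, not an equal clone
      assert(original.use_count() == 2);     // two owners of one module
    }
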
diff --git a/deps/v8/test/cctest/wasm/test-wasm-shared-engine.cc b/deps/v8/test/cctest/wasm/test-wasm-shared-engine.cc
index b5bacf57d4..2d6e930397 100644
--- a/deps/v8/test/cctest/wasm/test-wasm-shared-engine.cc
+++ b/deps/v8/test/cctest/wasm/test-wasm-shared-engine.cc
@@ -27,7 +27,7 @@ namespace test_wasm_shared_engine {
class SharedEngine {
public:
explicit SharedEngine(size_t max_committed = kMaxWasmCodeMemory)
- : wasm_engine_(base::make_unique<WasmEngine>()) {}
+ : wasm_engine_(std::make_unique<WasmEngine>()) {}
~SharedEngine() {
// Ensure no remaining uses exist.
CHECK(wasm_engine_.unique());
@@ -112,19 +112,19 @@ class SharedEngineIsolate {
class SharedEngineThread : public v8::base::Thread {
public:
SharedEngineThread(SharedEngine* engine,
- std::function<void(SharedEngineIsolate&)> callback)
+ std::function<void(SharedEngineIsolate*)> callback)
: Thread(Options("SharedEngineThread")),
engine_(engine),
callback_(callback) {}
void Run() override {
SharedEngineIsolate isolate(engine_);
- callback_(isolate);
+ callback_(&isolate);
}
private:
SharedEngine* engine_;
- std::function<void(SharedEngineIsolate&)> callback_;
+ std::function<void(SharedEngineIsolate*)> callback_;
};
namespace {
@@ -159,43 +159,39 @@ class MockInstantiationResolver : public InstantiationResultResolver {
class MockCompilationResolver : public CompilationResultResolver {
public:
- MockCompilationResolver(
- SharedEngineIsolate& isolate, // NOLINT(runtime/references)
- Handle<Object>* out_instance)
+ MockCompilationResolver(SharedEngineIsolate* isolate,
+ Handle<Object>* out_instance)
: isolate_(isolate), out_instance_(out_instance) {}
void OnCompilationSucceeded(Handle<WasmModuleObject> result) override {
- isolate_.isolate()->wasm_engine()->AsyncInstantiate(
- isolate_.isolate(),
- base::make_unique<MockInstantiationResolver>(out_instance_), result,
- {});
+ isolate_->isolate()->wasm_engine()->AsyncInstantiate(
+ isolate_->isolate(),
+ std::make_unique<MockInstantiationResolver>(out_instance_), result, {});
}
void OnCompilationFailed(Handle<Object> error_reason) override {
UNREACHABLE();
}
private:
- SharedEngineIsolate& isolate_;
+ SharedEngineIsolate* isolate_;
Handle<Object>* out_instance_;
};
-void PumpMessageLoop(
- SharedEngineIsolate& isolate) { // NOLINT(runtime/references)
+void PumpMessageLoop(SharedEngineIsolate* isolate) {
v8::platform::PumpMessageLoop(i::V8::GetCurrentPlatform(),
- isolate.v8_isolate(),
+ isolate->v8_isolate(),
platform::MessageLoopBehavior::kWaitForWork);
- isolate.isolate()->default_microtask_queue()->RunMicrotasks(
- isolate.isolate());
+ isolate->isolate()->default_microtask_queue()->RunMicrotasks(
+ isolate->isolate());
}
Handle<WasmInstanceObject> CompileAndInstantiateAsync(
- SharedEngineIsolate& isolate, // NOLINT(runtime/references)
- ZoneBuffer* buffer) {
- Handle<Object> maybe_instance = handle(Smi::kZero, isolate.isolate());
- auto enabled_features = WasmFeaturesFromIsolate(isolate.isolate());
+ SharedEngineIsolate* isolate, ZoneBuffer* buffer) {
+ Handle<Object> maybe_instance = handle(Smi::kZero, isolate->isolate());
+ auto enabled_features = WasmFeaturesFromIsolate(isolate->isolate());
constexpr const char* kAPIMethodName = "Test.CompileAndInstantiateAsync";
- isolate.isolate()->wasm_engine()->AsyncCompile(
- isolate.isolate(), enabled_features,
- base::make_unique<MockCompilationResolver>(isolate, &maybe_instance),
+ isolate->isolate()->wasm_engine()->AsyncCompile(
+ isolate->isolate(), enabled_features,
+ std::make_unique<MockCompilationResolver>(isolate, &maybe_instance),
ModuleWireBytes(buffer->begin(), buffer->end()), true, kAPIMethodName);
while (!maybe_instance->IsWasmInstanceObject()) PumpMessageLoop(isolate);
Handle<WasmInstanceObject> instance =
@@ -261,17 +257,19 @@ TEST(SharedEngineRunImported) {
TEST(SharedEngineRunThreadedBuildingSync) {
SharedEngine engine;
- SharedEngineThread thread1(&engine, [](SharedEngineIsolate& isolate) {
- HandleScope scope(isolate.isolate());
- ZoneBuffer* buffer = BuildReturnConstantModule(isolate.zone(), 23);
- Handle<WasmInstanceObject> instance = isolate.CompileAndInstantiate(buffer);
- CHECK_EQ(23, isolate.Run(instance));
+ SharedEngineThread thread1(&engine, [](SharedEngineIsolate* isolate) {
+ HandleScope scope(isolate->isolate());
+ ZoneBuffer* buffer = BuildReturnConstantModule(isolate->zone(), 23);
+ Handle<WasmInstanceObject> instance =
+ isolate->CompileAndInstantiate(buffer);
+ CHECK_EQ(23, isolate->Run(instance));
});
- SharedEngineThread thread2(&engine, [](SharedEngineIsolate& isolate) {
- HandleScope scope(isolate.isolate());
- ZoneBuffer* buffer = BuildReturnConstantModule(isolate.zone(), 42);
- Handle<WasmInstanceObject> instance = isolate.CompileAndInstantiate(buffer);
- CHECK_EQ(42, isolate.Run(instance));
+ SharedEngineThread thread2(&engine, [](SharedEngineIsolate* isolate) {
+ HandleScope scope(isolate->isolate());
+ ZoneBuffer* buffer = BuildReturnConstantModule(isolate->zone(), 42);
+ Handle<WasmInstanceObject> instance =
+ isolate->CompileAndInstantiate(buffer);
+ CHECK_EQ(42, isolate->Run(instance));
});
CHECK(thread1.Start());
CHECK(thread2.Start());
@@ -281,19 +279,19 @@ TEST(SharedEngineRunThreadedBuildingSync) {
TEST(SharedEngineRunThreadedBuildingAsync) {
SharedEngine engine;
- SharedEngineThread thread1(&engine, [](SharedEngineIsolate& isolate) {
- HandleScope scope(isolate.isolate());
- ZoneBuffer* buffer = BuildReturnConstantModule(isolate.zone(), 23);
+ SharedEngineThread thread1(&engine, [](SharedEngineIsolate* isolate) {
+ HandleScope scope(isolate->isolate());
+ ZoneBuffer* buffer = BuildReturnConstantModule(isolate->zone(), 23);
Handle<WasmInstanceObject> instance =
CompileAndInstantiateAsync(isolate, buffer);
- CHECK_EQ(23, isolate.Run(instance));
+ CHECK_EQ(23, isolate->Run(instance));
});
- SharedEngineThread thread2(&engine, [](SharedEngineIsolate& isolate) {
- HandleScope scope(isolate.isolate());
- ZoneBuffer* buffer = BuildReturnConstantModule(isolate.zone(), 42);
+ SharedEngineThread thread2(&engine, [](SharedEngineIsolate* isolate) {
+ HandleScope scope(isolate->isolate());
+ ZoneBuffer* buffer = BuildReturnConstantModule(isolate->zone(), 42);
Handle<WasmInstanceObject> instance =
CompileAndInstantiateAsync(isolate, buffer);
- CHECK_EQ(42, isolate.Run(instance));
+ CHECK_EQ(42, isolate->Run(instance));
});
CHECK(thread1.Start());
CHECK(thread2.Start());
@@ -311,15 +309,15 @@ TEST(SharedEngineRunThreadedExecution) {
Handle<WasmInstanceObject> instance = isolate.CompileAndInstantiate(buffer);
module = isolate.ExportInstance(instance);
}
- SharedEngineThread thread1(&engine, [module](SharedEngineIsolate& isolate) {
- HandleScope scope(isolate.isolate());
- Handle<WasmInstanceObject> instance = isolate.ImportInstance(module);
- CHECK_EQ(23, isolate.Run(instance));
+ SharedEngineThread thread1(&engine, [module](SharedEngineIsolate* isolate) {
+ HandleScope scope(isolate->isolate());
+ Handle<WasmInstanceObject> instance = isolate->ImportInstance(module);
+ CHECK_EQ(23, isolate->Run(instance));
});
- SharedEngineThread thread2(&engine, [module](SharedEngineIsolate& isolate) {
- HandleScope scope(isolate.isolate());
- Handle<WasmInstanceObject> instance = isolate.ImportInstance(module);
- CHECK_EQ(23, isolate.Run(instance));
+ SharedEngineThread thread2(&engine, [module](SharedEngineIsolate* isolate) {
+ HandleScope scope(isolate->isolate());
+ Handle<WasmInstanceObject> instance = isolate->ImportInstance(module);
+ CHECK_EQ(23, isolate->Run(instance));
});
CHECK(thread1.Start());
CHECK(thread2.Start());
@@ -340,23 +338,23 @@ TEST(SharedEngineRunThreadedTierUp) {
constexpr int kNumberOfThreads = 5;
std::list<SharedEngineThread> threads;
for (int i = 0; i < kNumberOfThreads; ++i) {
- threads.emplace_back(&engine, [module](SharedEngineIsolate& isolate) {
+ threads.emplace_back(&engine, [module](SharedEngineIsolate* isolate) {
constexpr int kNumberOfIterations = 100;
- HandleScope scope(isolate.isolate());
- Handle<WasmInstanceObject> instance = isolate.ImportInstance(module);
+ HandleScope scope(isolate->isolate());
+ Handle<WasmInstanceObject> instance = isolate->ImportInstance(module);
for (int j = 0; j < kNumberOfIterations; ++j) {
- CHECK_EQ(23, isolate.Run(instance));
+ CHECK_EQ(23, isolate->Run(instance));
}
});
}
- threads.emplace_back(&engine, [module](SharedEngineIsolate& isolate) {
- HandleScope scope(isolate.isolate());
- Handle<WasmInstanceObject> instance = isolate.ImportInstance(module);
+ threads.emplace_back(&engine, [module](SharedEngineIsolate* isolate) {
+ HandleScope scope(isolate->isolate());
+ Handle<WasmInstanceObject> instance = isolate->ImportInstance(module);
WasmFeatures detected = kNoWasmFeatures;
WasmCompilationUnit::CompileWasmFunction(
- isolate.isolate(), module.get(), &detected,
+ isolate->isolate(), module.get(), &detected,
&module->module()->functions[0], ExecutionTier::kTurbofan);
- CHECK_EQ(23, isolate.Run(instance));
+ CHECK_EQ(23, isolate->Run(instance));
});
for (auto& thread : threads) CHECK(thread.Start());
for (auto& thread : threads) thread.Join();
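
The SharedEngineThread refactor above changes the callback type from std::function<void(SharedEngineIsolate&)> to std::function<void(SharedEngineIsolate*)>, with each thread constructing its own isolate and handing the callback a pointer to it. A self-contained sketch of that shape (compile with -pthread; IsolateStub is hypothetical):

    #include <functional>
    #include <iostream>
    #include <thread>

    struct IsolateStub { int Run() { return 23; } };

    void RunOnThread(std::function<void(IsolateStub*)> callback) {
      std::thread t([cb = std::move(callback)]() {
        IsolateStub isolate;  // one fresh isolate per thread, as in the tests
        cb(&isolate);
      });
      t.join();
    }

    int main() {
      RunOnThread([](IsolateStub* isolate) {
        std::cout << isolate->Run() << "\n";  // prints: 23
      });
    }
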
diff --git a/deps/v8/test/cctest/wasm/wasm-run-utils.cc b/deps/v8/test/cctest/wasm/wasm-run-utils.cc
index 528d71f53c..09d64e5d97 100644
--- a/deps/v8/test/cctest/wasm/wasm-run-utils.cc
+++ b/deps/v8/test/cctest/wasm/wasm-run-utils.cc
@@ -10,7 +10,6 @@
#include "src/wasm/graph-builder-interface.h"
#include "src/wasm/module-compiler.h"
#include "src/wasm/wasm-import-wrapper-cache.h"
-#include "src/wasm/wasm-memory.h"
#include "src/wasm/wasm-objects-inl.h"
namespace v8 {
@@ -75,29 +74,23 @@ byte* TestingModuleBuilder::AddMemory(uint32_t size, SharedFlag shared) {
CHECK_NULL(mem_start_);
CHECK_EQ(0, mem_size_);
DCHECK(!instance_object_->has_memory_object());
- DCHECK_IMPLIES(test_module_->origin == kWasmOrigin,
- size % kWasmPageSize == 0);
+ uint32_t initial_pages = RoundUp(size, kWasmPageSize) / kWasmPageSize;
+ uint32_t maximum_pages = (test_module_->maximum_pages != 0)
+ ? test_module_->maximum_pages
+ : initial_pages;
test_module_->has_memory = true;
- uint32_t max_size =
- (test_module_->maximum_pages != 0) ? test_module_->maximum_pages : size;
- uint32_t alloc_size = RoundUp(size, kWasmPageSize);
- Handle<JSArrayBuffer> new_buffer;
- if (shared == SharedFlag::kShared) {
- CHECK(NewSharedArrayBuffer(isolate_, alloc_size, max_size)
- .ToHandle(&new_buffer));
- } else {
- CHECK(NewArrayBuffer(isolate_, alloc_size).ToHandle(&new_buffer));
- }
- CHECK(!new_buffer.is_null());
- mem_start_ = reinterpret_cast<byte*>(new_buffer->backing_store());
- mem_size_ = size;
- CHECK(size == 0 || mem_start_);
- memset(mem_start_, 0, size);
// Create the WasmMemoryObject.
Handle<WasmMemoryObject> memory_object =
- WasmMemoryObject::New(isolate_, new_buffer, max_size);
+ WasmMemoryObject::New(isolate_, initial_pages, maximum_pages, shared)
+ .ToHandleChecked();
instance_object_->set_memory_object(*memory_object);
+
+ mem_start_ =
+ reinterpret_cast<byte*>(memory_object->array_buffer().backing_store());
+ mem_size_ = size;
+ CHECK(size == 0 || mem_start_);
+
WasmMemoryObject::AddInstance(isolate_, memory_object, instance_object_);
// TODO(wasm): Delete the following two lines when test-run-wasm will use a
// multiple of kPageSize as memory size. At the moment, the effect of these
@@ -328,7 +321,6 @@ Handle<WasmInstanceObject> TestingModuleBuilder::InitInstanceObject() {
auto native_module = isolate_->wasm_engine()->NewNativeModule(
isolate_, enabled_features_, test_module_);
native_module->SetWireBytes(OwnedVector<const uint8_t>());
- native_module->SetRuntimeStubs(isolate_);
Handle<WasmModuleObject> module_object =
WasmModuleObject::New(isolate_, std::move(native_module), script);
@@ -487,7 +479,7 @@ Handle<Code> WasmFunctionWrapper::GetWrapperCode() {
CodeTracer::Scope tracing_scope(isolate->GetCodeTracer());
OFStream os(tracing_scope.file());
- code->Disassemble("wasm wrapper", os);
+ code->Disassemble("wasm wrapper", os, isolate);
}
#endif
}
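
The AddMemory rewrite above stops allocating a JSArrayBuffer by hand and instead derives page counts for WasmMemoryObject::New: the byte size is rounded up to whole 64 KiB wasm pages, and the maximum defaults to the initial size when the module declares none. A self-contained sketch of that arithmetic:

    #include <cstdint>
    #include <iostream>

    constexpr uint32_t kWasmPageSize = 64 * 1024;  // 0x10000

    uint32_t InitialPages(uint32_t size_in_bytes) {
      // Equivalent to RoundUp(size, kWasmPageSize) / kWasmPageSize.
      return (size_in_bytes + kWasmPageSize - 1) / kWasmPageSize;
    }

    int main() {
      uint32_t declared_maximum_pages = 0;      // 0 stands for "not declared"
      uint32_t initial = InitialPages(100000);  // 100000 bytes -> 2 pages
      uint32_t maximum =
          declared_maximum_pages != 0 ? declared_maximum_pages : initial;
      std::cout << initial << " " << maximum << "\n";  // prints: 2 2
    }
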
diff --git a/deps/v8/test/common/assembler-tester.h b/deps/v8/test/common/assembler-tester.h
index 4c3d8ff618..17e376ef60 100644
--- a/deps/v8/test/common/assembler-tester.h
+++ b/deps/v8/test/common/assembler-tester.h
@@ -5,6 +5,8 @@
#ifndef V8_TEST_COMMON_ASSEMBLER_TESTER_H_
#define V8_TEST_COMMON_ASSEMBLER_TESTER_H_
+#include <memory>
+
#include "src/codegen/assembler.h"
#include "src/codegen/code-desc.h"
@@ -74,7 +76,7 @@ class TestingAssemblerBuffer : public AssemblerBuffer {
static inline std::unique_ptr<TestingAssemblerBuffer> AllocateAssemblerBuffer(
size_t requested = v8::internal::AssemblerBase::kMinimalBufferSize,
void* address = nullptr) {
- return base::make_unique<TestingAssemblerBuffer>(requested, address);
+ return std::make_unique<TestingAssemblerBuffer>(requested, address);
}
} // namespace internal
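
The base::make_unique to std::make_unique swaps throughout this patch rely only on standard C++14, which is why each touched header now adds an explicit <memory> include. A minimal illustration:

    #include <cstddef>
    #include <iostream>
    #include <memory>

    struct Buffer {
      explicit Buffer(size_t n) : size(n) {}
      size_t size;
    };

    int main() {
      // Exception-safe construction with no raw 'new'.
      std::unique_ptr<Buffer> buf = std::make_unique<Buffer>(4096);
      std::cout << buf->size << "\n";  // prints: 4096
    }
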
diff --git a/deps/v8/test/common/wasm/wasm-macro-gen.h b/deps/v8/test/common/wasm/wasm-macro-gen.h
index ed20641c65..d007fbd002 100644
--- a/deps/v8/test/common/wasm/wasm-macro-gen.h
+++ b/deps/v8/test/common/wasm/wasm-macro-gen.h
@@ -27,12 +27,14 @@
#define ACTIVE_NO_INDEX 0
#define PASSIVE 1
#define ACTIVE_WITH_INDEX 2
+#define PASSIVE_WITH_ELEMENTS 5
+#define ACTIVE_WITH_ELEMENTS 6
// The table index field in an element segment was repurposed as a flags field.
// To specify a table index, we have to set the flag value to 2, followed by
// the table index.
-#define TABLE_INDEX0 U32V_1(ACTIVE_NO_INDEX)
-#define TABLE_INDEX(v) U32V_1(ACTIVE_WITH_INDEX), U32V_1(v)
+#define TABLE_INDEX0 static_cast<byte>(ACTIVE_NO_INDEX)
+#define TABLE_INDEX(v) static_cast<byte>(ACTIVE_WITH_INDEX), U32V_1(v)
#define ZERO_ALIGNMENT 0
#define ZERO_OFFSET 0
@@ -361,13 +363,13 @@ inline WasmOpcode LoadStoreOpcodeOf(MachineType type, bool store) {
#define WASM_REF_FUNC(val) kExprRefFunc, val
#define WASM_REF_IS_NULL(val) val, kExprRefIsNull
-#define WASM_GET_LOCAL(index) kExprGetLocal, static_cast<byte>(index)
-#define WASM_SET_LOCAL(index, val) val, kExprSetLocal, static_cast<byte>(index)
-#define WASM_TEE_LOCAL(index, val) val, kExprTeeLocal, static_cast<byte>(index)
+#define WASM_GET_LOCAL(index) kExprLocalGet, static_cast<byte>(index)
+#define WASM_SET_LOCAL(index, val) val, kExprLocalSet, static_cast<byte>(index)
+#define WASM_TEE_LOCAL(index, val) val, kExprLocalTee, static_cast<byte>(index)
#define WASM_DROP kExprDrop
-#define WASM_GET_GLOBAL(index) kExprGetGlobal, static_cast<byte>(index)
+#define WASM_GET_GLOBAL(index) kExprGlobalGet, static_cast<byte>(index)
#define WASM_SET_GLOBAL(index, val) \
- val, kExprSetGlobal, static_cast<byte>(index)
+ val, kExprGlobalSet, static_cast<byte>(index)
#define WASM_TABLE_GET(table_index, index) \
index, kExprTableGet, static_cast<byte>(table_index)
#define WASM_TABLE_SET(table_index, index, val) \
@@ -442,15 +444,15 @@ inline WasmOpcode LoadStoreOpcodeOf(MachineType type, bool store) {
kExprLoop, kLocalVoid, x, kExprIf, kLocalVoid, y, kExprBr, DEPTH_1, \
kExprEnd, kExprEnd
#define WASM_INC_LOCAL(index) \
- kExprGetLocal, static_cast<byte>(index), kExprI32Const, 1, kExprI32Add, \
- kExprTeeLocal, static_cast<byte>(index)
+ kExprLocalGet, static_cast<byte>(index), kExprI32Const, 1, kExprI32Add, \
+ kExprLocalTee, static_cast<byte>(index)
#define WASM_INC_LOCAL_BYV(index, count) \
- kExprGetLocal, static_cast<byte>(index), kExprI32Const, \
- static_cast<byte>(count), kExprI32Add, kExprTeeLocal, \
+ kExprLocalGet, static_cast<byte>(index), kExprI32Const, \
+ static_cast<byte>(count), kExprI32Add, kExprLocalTee, \
static_cast<byte>(index)
#define WASM_INC_LOCAL_BY(index, count) \
- kExprGetLocal, static_cast<byte>(index), kExprI32Const, \
- static_cast<byte>(count), kExprI32Add, kExprSetLocal, \
+ kExprLocalGet, static_cast<byte>(index), kExprI32Const, \
+ static_cast<byte>(count), kExprI32Add, kExprLocalSet, \
static_cast<byte>(index)
#define WASM_UNOP(opcode, x) x, static_cast<byte>(opcode)
#define WASM_BINOP(opcode, x, y) x, y, static_cast<byte>(opcode)
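
The renames above track the current wasm spec mnemonics (local.get, local.set, local.tee, global.get, global.set): only the C++ enum names change, not the encoded bytes. A sketch emitting the two-byte body for local.get 0, with the opcode values restated from the wasm spec rather than taken from this patch:

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    constexpr uint8_t kExprLocalGet = 0x20;   // was kExprGetLocal
    constexpr uint8_t kExprLocalSet = 0x21;   // was kExprSetLocal
    constexpr uint8_t kExprLocalTee = 0x22;   // was kExprTeeLocal
    constexpr uint8_t kExprGlobalGet = 0x23;  // was kExprGetGlobal
    constexpr uint8_t kExprGlobalSet = 0x24;  // was kExprSetGlobal

    int main() {
      std::vector<uint8_t> body = {kExprLocalGet, 0x00};  // local.get 0
      for (uint8_t b : body) std::printf("0x%02x ", b);
      std::printf("\n");  // prints: 0x20 0x00
    }
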
diff --git a/deps/v8/test/debugger/debug/debug-evaluate-shadowed-context-3.js b/deps/v8/test/debugger/debug/debug-evaluate-shadowed-context-3.js
new file mode 100644
index 0000000000..2a41109565
--- /dev/null
+++ b/deps/v8/test/debugger/debug/debug-evaluate-shadowed-context-3.js
@@ -0,0 +1,39 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Test that debug-evaluate properly shadows stack-allocated variables.
+
+Debug = debug.Debug
+
+let exception = null;
+function listener(event, exec_state, event_data, data) {
+ if (event != Debug.DebugEvent.Break) return;
+ try {
+ assertEquals(2, exec_state.frame(0).evaluate("b").value());
+ assertEquals(3, exec_state.frame(0).evaluate("c").value())
+ assertThrows(() => exec_state.frame(0).evaluate("a").value());
+ } catch (e) {
+ exception = e;
+ print(e + e.stack);
+ }
+}
+
+Debug.setListener(listener);
+
+(function f() {
+ let a = 1;
+ let b = 2;
+ let c = 3;
+ () => a + c; // a and c are context-allocated
+ return function g() {
+ let a = 2; // a is stack-allocated
+ return function h() {
+ b; // b is allocated onto f's context.
+ debugger;
+ }
+ }
+})()()();
+
+Debug.setListener(null);
+assertNull(exception);
diff --git a/deps/v8/test/debugger/debug/es6/generators-relocation.js b/deps/v8/test/debugger/debug/es6/generators-relocation.js
index 78413fde6e..13ebb01d0f 100644
--- a/deps/v8/test/debugger/debug/es6/generators-relocation.js
+++ b/deps/v8/test/debugger/debug/es6/generators-relocation.js
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+// Flags: --expose-gc
var Debug = debug.Debug;
@@ -28,6 +29,8 @@ function RunTest(formals_and_body, args, value1, value2) {
// function and relocation of the suspended generator activation.
Debug.setListener(listener);
+ gc();
+
// Add a breakpoint on line 3 (the second yield).
var bp = Debug.setBreakPoint(gen, 3);
diff --git a/deps/v8/test/debugger/debugger.status b/deps/v8/test/debugger/debugger.status
index 503e5e7145..85e4cec3ee 100644
--- a/deps/v8/test/debugger/debugger.status
+++ b/deps/v8/test/debugger/debugger.status
@@ -11,6 +11,9 @@
# not work, but we expect it to not crash.
'debug/debug-step-turbofan': [PASS, FAIL],
+ # BUG (v8:9721)
+ 'debug/es6/generators-relocation': [FAIL],
+
# Issue 3641: The new 'then' semantics suppress some exceptions.
# These tests may be changed or removed when 'chain' is deprecated.
'debug/es6/debug-promises/reject-with-throw-in-reject': [FAIL],
@@ -136,4 +139,10 @@
'*': [SKIP],
}], # variant == jitless and not embedded_builtins
+##############################################################################
+['variant == turboprop', {
+ # Deopts differently than TurboFan.
+ 'debug/debug-optimize': [SKIP],
+}], # variant == turboprop
+
]
diff --git a/deps/v8/test/debugger/regress/regress-9482.js b/deps/v8/test/debugger/regress/regress-9482.js
new file mode 100644
index 0000000000..e07c660a08
--- /dev/null
+++ b/deps/v8/test/debugger/regress/regress-9482.js
@@ -0,0 +1,32 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+Debug = debug.Debug
+var exception = null;
+
+function listener(event, exec_state, event_data, data) {
+ try {
+ if (event == Debug.DebugEvent.Break) {
+ assertEquals("n", exec_state.frame(0).evaluate("n").value());
+ assertEquals("m", exec_state.frame(0).evaluate("m").value());
+ }
+ } catch(e) {
+ exception = e;
+ print(e, e.stack);
+ }
+};
+
+Debug.setListener(listener);
+
+(function foo () {
+ var n = "n";
+ var m = "m";
+ (function bar() {
+ assertEquals("m", eval("m")); // force context-allocation.
+ debugger;
+ })();
+})();
+
+assertNull(exception);
+Debug.setListener(null);
diff --git a/deps/v8/test/fuzzer/fuzzer-support.cc b/deps/v8/test/fuzzer/fuzzer-support.cc
index 5d6861dd62..6235ea5f82 100644
--- a/deps/v8/test/fuzzer/fuzzer-support.cc
+++ b/deps/v8/test/fuzzer/fuzzer-support.cc
@@ -63,7 +63,7 @@ std::unique_ptr<FuzzerSupport> FuzzerSupport::fuzzer_support_;
void FuzzerSupport::InitializeFuzzerSupport(int* argc, char*** argv) {
DCHECK_NULL(FuzzerSupport::fuzzer_support_);
FuzzerSupport::fuzzer_support_ =
- v8::base::make_unique<v8_fuzzer::FuzzerSupport>(argc, argv);
+ std::make_unique<v8_fuzzer::FuzzerSupport>(argc, argv);
}
// static
diff --git a/deps/v8/test/fuzzer/fuzzer-support.h b/deps/v8/test/fuzzer/fuzzer-support.h
index 229c8c6b49..7b967073b5 100644
--- a/deps/v8/test/fuzzer/fuzzer-support.h
+++ b/deps/v8/test/fuzzer/fuzzer-support.h
@@ -5,6 +5,8 @@
#ifndef TEST_FUZZER_FUZZER_SUPPORT_H_
#define TEST_FUZZER_FUZZER_SUPPORT_H_
+#include <memory>
+
#include "include/libplatform/libplatform.h"
#include "include/v8.h"
diff --git a/deps/v8/test/fuzzer/wasm-compile.cc b/deps/v8/test/fuzzer/wasm-compile.cc
index 29f2ebb02d..10e41e1039 100644
--- a/deps/v8/test/fuzzer/wasm-compile.cc
+++ b/deps/v8/test/fuzzer/wasm-compile.cc
@@ -322,7 +322,7 @@ class WasmGenerator {
return Generate<wanted_type>(data);
}
- if (opcode != kExprGetLocal) Generate(local.type, data);
+ if (opcode != kExprLocalGet) Generate(local.type, data);
builder_->EmitWithU32V(opcode, local.index);
if (wanted_type != kWasmStmt && local.type != wanted_type) {
Convert(local.type, wanted_type);
@@ -332,14 +332,14 @@ class WasmGenerator {
template <ValueType wanted_type>
void get_local(DataRange* data) {
static_assert(wanted_type != kWasmStmt, "illegal type");
- local_op<wanted_type>(data, kExprGetLocal);
+ local_op<wanted_type>(data, kExprLocalGet);
}
- void set_local(DataRange* data) { local_op<kWasmStmt>(data, kExprSetLocal); }
+ void set_local(DataRange* data) { local_op<kWasmStmt>(data, kExprLocalSet); }
template <ValueType wanted_type>
void tee_local(DataRange* data) {
- local_op<wanted_type>(data, kExprTeeLocal);
+ local_op<wanted_type>(data, kExprLocalTee);
}
template <size_t num_bytes>
@@ -377,7 +377,7 @@ class WasmGenerator {
}
if (is_set) Generate(global.type, data);
- builder_->EmitWithU32V(is_set ? kExprSetGlobal : kExprGetGlobal,
+ builder_->EmitWithU32V(is_set ? kExprGlobalSet : kExprGlobalGet,
global.index);
if (!is_set && global.type != wanted_type) {
Convert(global.type, wanted_type);
@@ -465,7 +465,7 @@ class WasmGenerator {
template <ValueType T1, ValueType T2, ValueType... Ts>
void Generate(DataRange* data) {
- // TODO(clemensh): Implement a more even split.
+ // TODO(clemensb): Implement a more even split.
auto first_data = data->split();
Generate<T1>(&first_data);
Generate<T2, Ts...>(data);
diff --git a/deps/v8/test/fuzzer/wasm-fuzzer-common.h b/deps/v8/test/fuzzer/wasm-fuzzer-common.h
index 8ab802a702..bca9a2c433 100644
--- a/deps/v8/test/fuzzer/wasm-fuzzer-common.h
+++ b/deps/v8/test/fuzzer/wasm-fuzzer-common.h
@@ -7,6 +7,7 @@
#include <stddef.h>
#include <stdint.h>
+#include <memory>
#include "src/wasm/module-decoder.h"
#include "src/wasm/wasm-interpreter.h"
diff --git a/deps/v8/test/inspector/cpu-profiler/console-profile-wasm.js b/deps/v8/test/inspector/cpu-profiler/console-profile-wasm.js
index dc96406d4a..0541ce02bb 100644
--- a/deps/v8/test/inspector/cpu-profiler/console-profile-wasm.js
+++ b/deps/v8/test/inspector/cpu-profiler/console-profile-wasm.js
@@ -11,14 +11,14 @@ utils.load('test/mjsunit/wasm/wasm-module-builder.js');
var builder = new WasmModuleBuilder();
builder.addFunction('fib', kSig_i_i)
.addBody([
- kExprGetLocal, 0,
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
+ kExprLocalGet, 0,
kExprI32Const, 2,
kExprI32LeS, // i < 2 ?
kExprBrIf, 0, // --> return i
kExprI32Const, 1, kExprI32Sub, // i - 1
kExprCallFunction, 0, // fib(i - 1)
- kExprGetLocal, 0, kExprI32Const, 2, kExprI32Sub, // i - 2
+ kExprLocalGet, 0, kExprI32Const, 2, kExprI32Sub, // i - 2
kExprCallFunction, 0, // fib(i - 2)
kExprI32Add
])
diff --git a/deps/v8/test/inspector/debugger/class-fields-scopes-expected.txt b/deps/v8/test/inspector/debugger/class-fields-scopes-expected.txt
index 4be4e96efe..28708d7950 100644
--- a/deps/v8/test/inspector/debugger/class-fields-scopes-expected.txt
+++ b/deps/v8/test/inspector/debugger/class-fields-scopes-expected.txt
@@ -110,26 +110,6 @@ Running test: testScopesPaused
}
[1] : {
endLocation : {
- columnNumber : 3
- lineNumber : 15
- scriptId : <scriptId>
- }
- name : run
- object : {
- className : Object
- description : Object
- objectId : <objectId>
- type : object
- }
- startLocation : {
- columnNumber : 9
- lineNumber : 11
- scriptId : <scriptId>
- }
- type : block
- }
- [2] : {
- endLocation : {
columnNumber : 1
lineNumber : 19
scriptId : <scriptId>
@@ -148,7 +128,7 @@ Running test: testScopesPaused
}
type : local
}
- [3] : {
+ [2] : {
object : {
className : global
description : global
diff --git a/deps/v8/test/inspector/debugger/pause-on-async-call-expected.txt b/deps/v8/test/inspector/debugger/pause-on-async-call-expected.txt
index 012ab4b0b9..4fb08f6319 100644
--- a/deps/v8/test/inspector/debugger/pause-on-async-call-expected.txt
+++ b/deps/v8/test/inspector/debugger/pause-on-async-call-expected.txt
@@ -30,13 +30,6 @@ paused at:
paused at:
debugger;
- Promise.resolve().#then(v => v * 2);
-}
-
-asyncCallStackTraceId is set
-
-paused at:
- debugger;
Promise.resolve().then(v => v #* 2);
}
@@ -54,13 +47,6 @@ paused at:
paused at:
debugger;
- p.#then(v => v * 2);
- resolveCallback();
-
-asyncCallStackTraceId is set
-
-paused at:
- debugger;
p.then(v => v #* 2);
resolveCallback();
@@ -78,13 +64,6 @@ paused at:
paused at:
debugger;
- Promise.resolve().#then(v => v * 2);
- Promise.resolve().then(v => v * 4);
-
-asyncCallStackTraceId is set
-
-paused at:
- debugger;
Promise.resolve().then(v => v #* 2);
Promise.resolve().then(v => v * 4);
@@ -107,13 +86,6 @@ paused at:
paused at:
Promise.resolve().then(v => v * 2);
- Promise.resolve().#then(v => v * 4);
-}
-
-asyncCallStackTraceId is set
-
-paused at:
- Promise.resolve().then(v => v * 2);
Promise.resolve().then(v => v #* 4);
}
@@ -130,13 +102,6 @@ paused at:
debugger;
paused at:
- debugger;
- Promise.resolve().#then(v => v * 2);
- debugger;
-
-asyncCallStackTraceId is set
-
-paused at:
Promise.resolve().then(v => v * 2);
#debugger;
Promise.resolve().then(v => v * 4);
@@ -148,13 +113,6 @@ paused at:
paused at:
debugger;
- Promise.resolve().#then(v => v * 4);
-}
-
-asyncCallStackTraceId is set
-
-paused at:
- debugger;
Promise.resolve().then(v => v #* 4);
}
@@ -172,13 +130,6 @@ paused at:
paused at:
debugger;
- Promise.all([ Promise.resolve(), Promise.resolve() ]).#then(v => v * 2);
-}
-
-asyncCallStackTraceId is set
-
-paused at:
- debugger;
Promise.all([ Promise.resolve(), Promise.resolve() ]).then(v => v #* 2);
}
@@ -196,13 +147,6 @@ paused at:
paused at:
debugger;
- createPromise().#then(v => v * 2);
-}
-
-asyncCallStackTraceId is set
-
-paused at:
- debugger;
createPromise().then(v => v #* 2);
}
@@ -220,13 +164,6 @@ paused at:
paused at:
debugger;
- createPromise().#then(v => v * 2);
-}
-
-asyncCallStackTraceId is set
-
-paused at:
- debugger;
createPromise().then(v => v #* 2);
}
@@ -273,13 +210,6 @@ paused at:
paused at:
- await foo();
- foo().#then(boo);
-
-
-asyncCallStackTraceId is set
-
-paused at:
function boo() {
#}
}
diff --git a/deps/v8/test/inspector/debugger/pause-on-async-call-set-timeout.js b/deps/v8/test/inspector/debugger/pause-on-async-call-set-timeout.js
index be26bc11de..716d860f08 100644
--- a/deps/v8/test/inspector/debugger/pause-on-async-call-set-timeout.js
+++ b/deps/v8/test/inspector/debugger/pause-on-async-call-set-timeout.js
@@ -12,9 +12,6 @@ InspectorTest.runAsyncTestSuite([
Protocol.Debugger.stepOver();
await waitPauseAndDumpLocation();
Protocol.Debugger.stepInto({breakOnAsyncCall: true});
- let parentStackTraceId = await waitPauseAndDumpLocation();
- Protocol.Debugger.pauseOnAsyncCall({parentStackTraceId});
- Protocol.Debugger.resume();
await waitPauseAndDumpLocation();
await Protocol.Debugger.resume();
},
@@ -26,9 +23,6 @@ InspectorTest.runAsyncTestSuite([
Protocol.Debugger.stepOver();
await waitPauseAndDumpLocation();
Protocol.Debugger.stepInto({breakOnAsyncCall: true});
- let parentStackTraceId = await waitPauseAndDumpLocation();
- Protocol.Debugger.pauseOnAsyncCall({parentStackTraceId});
- await Protocol.Debugger.resume();
await waitPauseAndDumpLocation();
await Protocol.Debugger.resume();
await waitPauseAndDumpLocation();
@@ -43,9 +37,6 @@ InspectorTest.runAsyncTestSuite([
Protocol.Debugger.stepOver();
await waitPauseAndDumpLocation();
Protocol.Debugger.stepInto({breakOnAsyncCall: true});
- let parentStackTraceId = await waitPauseAndDumpLocation();
- Protocol.Debugger.pauseOnAsyncCall({parentStackTraceId});
- Protocol.Debugger.resume();
await waitPauseAndDumpLocation();
await Protocol.Debugger.resume();
await InspectorTest.waitForPendingTasks();
@@ -57,9 +48,6 @@ InspectorTest.runAsyncTestSuite([
Protocol.Debugger.stepOver();
await waitPauseAndDumpLocation();
Protocol.Debugger.stepInto({breakOnAsyncCall: true});
- let parentStackTraceId = await waitPauseAndDumpLocation();
- Protocol.Debugger.pauseOnAsyncCall({parentStackTraceId});
- Protocol.Debugger.resume();
await waitPauseAndDumpLocation();
await Protocol.Debugger.resume();
},
@@ -69,20 +57,14 @@ InspectorTest.runAsyncTestSuite([
Protocol.Runtime.evaluate({expression: 'setTimeout(() => 42, 0)'});
await waitPauseAndDumpLocation();
Protocol.Debugger.stepInto({breakOnAsyncCall: true});
- let parentStackTraceId = await waitPauseAndDumpLocation();
- Protocol.Debugger.pauseOnAsyncCall({parentStackTraceId});
- Protocol.Debugger.resume();
await waitPauseAndDumpLocation();
await Protocol.Debugger.resume();
}
]);
async function waitPauseAndDumpLocation() {
- var {params: {callFrames, asyncCallStackTraceId}} =
+ var {params: {callFrames}} =
await Protocol.Debugger.oncePaused();
- if (!asyncCallStackTraceId) {
- InspectorTest.log('paused at:');
- await session.logSourceLocation(callFrames[0].location);
- }
- return asyncCallStackTraceId;
+ InspectorTest.log('paused at:');
+ await session.logSourceLocation(callFrames[0].location);
}
diff --git a/deps/v8/test/inspector/debugger/pause-on-async-call.js b/deps/v8/test/inspector/debugger/pause-on-async-call.js
index 24e7fc6647..c6104a1ae2 100644
--- a/deps/v8/test/inspector/debugger/pause-on-async-call.js
+++ b/deps/v8/test/inspector/debugger/pause-on-async-call.js
@@ -90,9 +90,6 @@ InspectorTest.runAsyncTestSuite([
Protocol.Debugger.stepOver();
await waitPauseAndDumpLocation();
Protocol.Debugger.stepInto({breakOnAsyncCall: true});
- let parentStackTraceId = await waitPauseAndDumpLocation();
- Protocol.Debugger.pauseOnAsyncCall({parentStackTraceId});
- Protocol.Debugger.resume();
await waitPauseAndDumpLocation();
await Protocol.Debugger.resume();
},
@@ -103,9 +100,6 @@ InspectorTest.runAsyncTestSuite([
Protocol.Debugger.stepOver();
await waitPauseAndDumpLocation();
Protocol.Debugger.stepInto({breakOnAsyncCall: true});
- let parentStackTraceId = await waitPauseAndDumpLocation();
- Protocol.Debugger.pauseOnAsyncCall({parentStackTraceId});
- Protocol.Debugger.resume();
await waitPauseAndDumpLocation();
await Protocol.Debugger.resume();
},
@@ -116,9 +110,6 @@ InspectorTest.runAsyncTestSuite([
Protocol.Debugger.stepInto();
await waitPauseAndDumpLocation();
Protocol.Debugger.stepInto({breakOnAsyncCall: true});
- let parentStackTraceId = await waitPauseAndDumpLocation();
- Protocol.Debugger.pauseOnAsyncCall({parentStackTraceId});
- Protocol.Debugger.resume();
await waitPauseAndDumpLocation();
await Protocol.Debugger.resume();
},
@@ -131,9 +122,6 @@ InspectorTest.runAsyncTestSuite([
Protocol.Debugger.stepOver();
await waitPauseAndDumpLocation();
Protocol.Debugger.stepInto({breakOnAsyncCall: true});
- let parentStackTraceId = await waitPauseAndDumpLocation();
- Protocol.Debugger.pauseOnAsyncCall({parentStackTraceId});
- Protocol.Debugger.resume();
await waitPauseAndDumpLocation();
await Protocol.Debugger.resume();
},
@@ -144,16 +132,10 @@ InspectorTest.runAsyncTestSuite([
Protocol.Debugger.stepOver();
await waitPauseAndDumpLocation();
Protocol.Debugger.stepInto({breakOnAsyncCall: true});
- let parentStackTraceId = await waitPauseAndDumpLocation();
- Protocol.Debugger.pauseOnAsyncCall({parentStackTraceId});
- Protocol.Debugger.resume();
await waitPauseAndDumpLocation();
Protocol.Debugger.stepOver();
await waitPauseAndDumpLocation();
Protocol.Debugger.stepInto({breakOnAsyncCall: true});
- parentStackTraceId = await waitPauseAndDumpLocation();
- Protocol.Debugger.pauseOnAsyncCall({parentStackTraceId});
- Protocol.Debugger.resume();
await waitPauseAndDumpLocation();
await Protocol.Debugger.resume();
},
@@ -164,9 +146,6 @@ InspectorTest.runAsyncTestSuite([
Protocol.Debugger.stepOver();
await waitPauseAndDumpLocation();
Protocol.Debugger.stepInto({breakOnAsyncCall: true});
- let parentStackTraceId = await waitPauseAndDumpLocation();
- Protocol.Debugger.pauseOnAsyncCall({parentStackTraceId});
- Protocol.Debugger.resume();
await waitPauseAndDumpLocation();
await Protocol.Debugger.resume();
},
@@ -178,9 +157,6 @@ InspectorTest.runAsyncTestSuite([
await waitPauseAndDumpLocation();
await Protocol.Debugger.setBlackboxPatterns({patterns: ['framework\.js'] });
Protocol.Debugger.stepInto({breakOnAsyncCall: true});
- let parentStackTraceId = await waitPauseAndDumpLocation();
- Protocol.Debugger.pauseOnAsyncCall({parentStackTraceId});
- Protocol.Debugger.resume();
await waitPauseAndDumpLocation();
await Protocol.Debugger.resume();
},
@@ -192,9 +168,6 @@ InspectorTest.runAsyncTestSuite([
await waitPauseAndDumpLocation();
await Protocol.Debugger.setBlackboxPatterns({patterns: ['framework\.js']});
Protocol.Debugger.stepInto({breakOnAsyncCall: true});
- let parentStackTraceId = await waitPauseAndDumpLocation();
- Protocol.Debugger.pauseOnAsyncCall({parentStackTraceId});
- Protocol.Debugger.resume();
await waitPauseAndDumpLocation();
await Protocol.Debugger.resume();
},
@@ -205,17 +178,11 @@ InspectorTest.runAsyncTestSuite([
Protocol.Debugger.stepOver();
await waitPauseAndDumpLocation();
Protocol.Debugger.stepInto({breakOnAsyncCall: true});
- let parentStackTraceId = await waitPauseAndDumpLocation();
- if (parentStackTraceId)
- InspectorTest.log(
- 'ERROR: we should not report parent stack trace id on async call');
+ await waitPauseAndDumpLocation();
Protocol.Debugger.stepOut();
await waitPauseAndDumpLocation();
Protocol.Debugger.stepInto({breakOnAsyncCall: true});
- parentStackTraceId = await waitPauseAndDumpLocation();
- if (parentStackTraceId)
- InspectorTest.log(
- 'ERROR: we should not report parent stack trace id on async call');
+ await waitPauseAndDumpLocation();
Protocol.Debugger.stepOut();
await waitPauseAndDumpLocation();
Protocol.Debugger.stepInto({breakOnAsyncCall: true});
@@ -223,9 +190,6 @@ InspectorTest.runAsyncTestSuite([
Protocol.Debugger.stepOut();
await waitPauseAndDumpLocation();
Protocol.Debugger.stepInto({breakOnAsyncCall: true});
- parentStackTraceId = await waitPauseAndDumpLocation();
- Protocol.Debugger.pauseOnAsyncCall({parentStackTraceId});
- Protocol.Debugger.resume();
await waitPauseAndDumpLocation();
await Protocol.Debugger.resume();
@@ -233,12 +197,8 @@ InspectorTest.runAsyncTestSuite([
]);
async function waitPauseAndDumpLocation() {
- var {params: {callFrames, asyncCallStackTraceId}} =
+ var {params: {callFrames}} =
await Protocol.Debugger.oncePaused();
InspectorTest.log('paused at:');
await session.logSourceLocation(callFrames[0].location);
- if (asyncCallStackTraceId) {
- InspectorTest.log('asyncCallStackTraceId is set\n');
- }
- return asyncCallStackTraceId;
}
diff --git a/deps/v8/test/inspector/debugger/step-into-break-on-async-call-expected.txt b/deps/v8/test/inspector/debugger/step-into-break-on-async-call-expected.txt
index 5f61c89c1a..72f896567f 100644
--- a/deps/v8/test/inspector/debugger/step-into-break-on-async-call-expected.txt
+++ b/deps/v8/test/inspector/debugger/step-into-break-on-async-call-expected.txt
@@ -1,13 +1,7 @@
Test for Debugger.stepInto with breakOnAsyncCall.
Running test: testSetTimeout
-(anonymous) (test.js:0:0)
-asyncCallStackTraceId is set
(anonymous) (test.js:0:17)
-asyncCallStackTraceId is empty
Running test: testPromiseThen
-(anonymous) (test.js:0:2)
-asyncCallStackTraceId is set
(anonymous) (test.js:0:13)
-asyncCallStackTraceId is empty
diff --git a/deps/v8/test/inspector/debugger/step-into-break-on-async-call.js b/deps/v8/test/inspector/debugger/step-into-break-on-async-call.js
index d47df9f7eb..417f4a3936 100644
--- a/deps/v8/test/inspector/debugger/step-into-break-on-async-call.js
+++ b/deps/v8/test/inspector/debugger/step-into-break-on-async-call.js
@@ -17,21 +17,8 @@ InspectorTest.runAsyncTestSuite([
});
await pausedPromise;
Protocol.Debugger.stepInto({breakOnAsyncCall: true});
- let {params: {callFrames, asyncCallStackTraceId}} =
- await Protocol.Debugger.oncePaused();
+ let {params: {callFrames}} = await Protocol.Debugger.oncePaused();
session.logCallFrames(callFrames);
- if (asyncCallStackTraceId) {
- InspectorTest.log('asyncCallStackTraceId is set');
- }
- Protocol.Debugger.pauseOnAsyncCall(
- {parentStackTraceId: asyncCallStackTraceId});
- pausedPromise = Protocol.Debugger.oncePaused();
- Protocol.Debugger.resume();
- ({params: {callFrames, asyncCallStackTraceId}} = await pausedPromise);
- session.logCallFrames(callFrames);
- if (!asyncCallStackTraceId) {
- InspectorTest.log('asyncCallStackTraceId is empty');
- }
await Protocol.Debugger.disable();
},
@@ -45,21 +32,8 @@ InspectorTest.runAsyncTestSuite([
Protocol.Runtime.evaluate({expression: 'p.then(() => 42)//# sourceURL=test.js'});
await pausedPromise;
Protocol.Debugger.stepInto({breakOnAsyncCall: true});
- let {params: {callFrames, asyncCallStackTraceId}} =
- await Protocol.Debugger.oncePaused();
- session.logCallFrames(callFrames);
- if (asyncCallStackTraceId) {
- InspectorTest.log('asyncCallStackTraceId is set');
- }
- Protocol.Debugger.pauseOnAsyncCall(
- {parentStackTraceId: asyncCallStackTraceId});
- pausedPromise = Protocol.Debugger.oncePaused();
- Protocol.Debugger.resume();
- ({params: {callFrames, asyncCallStackTraceId}} = await pausedPromise);
+ let {params: {callFrames}} = await Protocol.Debugger.oncePaused();
session.logCallFrames(callFrames);
- if (!asyncCallStackTraceId) {
- InspectorTest.log('asyncCallStackTraceId is empty');
- }
await Protocol.Debugger.disable();
}
]);
diff --git a/deps/v8/test/inspector/debugger/step-into-external-async-task-expected.txt b/deps/v8/test/inspector/debugger/step-into-external-async-task-expected.txt
index 8bf702b355..ce82054f06 100644
--- a/deps/v8/test/inspector/debugger/step-into-external-async-task-expected.txt
+++ b/deps/v8/test/inspector/debugger/step-into-external-async-task-expected.txt
@@ -2,7 +2,6 @@ Test for step-into remote async task
Setup debugger agents..
Pause before stack trace is captured..
Run stepInto with breakOnAsyncCall flag
-Call pauseOnAsyncCall
Trigger external async task on another context group
Dump stack trace
boo (target.js:1:22)
diff --git a/deps/v8/test/inspector/debugger/step-into-external-async-task-same-context-expected.txt b/deps/v8/test/inspector/debugger/step-into-external-async-task-same-context-expected.txt
index 4b2fba9856..ba93facf00 100644
--- a/deps/v8/test/inspector/debugger/step-into-external-async-task-same-context-expected.txt
+++ b/deps/v8/test/inspector/debugger/step-into-external-async-task-same-context-expected.txt
@@ -2,7 +2,6 @@ Test for step-into remote async task.
Setup debugger agents..
Pause before stack trace is captured..
Run stepInto with breakOnAsyncCall flag
-Call pauseOnAsyncCall
Trigger external async task on another context group
Dump stack trace
boo (target.js:1:22)
diff --git a/deps/v8/test/inspector/debugger/step-into-external-async-task-same-context.js b/deps/v8/test/inspector/debugger/step-into-external-async-task-same-context.js
index fec786422e..132c0690f1 100644
--- a/deps/v8/test/inspector/debugger/step-into-external-async-task-same-context.js
+++ b/deps/v8/test/inspector/debugger/step-into-external-async-task-same-context.js
@@ -42,13 +42,6 @@ session.setupScriptMap();
InspectorTest.log('Run stepInto with breakOnAsyncCall flag');
Protocol.Debugger.stepInto({breakOnAsyncCall: true});
- let {params: {asyncCallStackTraceId}} = await Protocol.Debugger.oncePaused();
-
- InspectorTest.log('Call pauseOnAsyncCall');
- Protocol.Debugger.pauseOnAsyncCall({
- parentStackTraceId: asyncCallStackTraceId,
- });
- Protocol.Debugger.resume();
InspectorTest.log('Trigger external async task on another context group');
let stackTraceId = (await evaluatePromise).result.result.value;
diff --git a/deps/v8/test/inspector/debugger/step-into-external-async-task.js b/deps/v8/test/inspector/debugger/step-into-external-async-task.js
index b0d55c950d..59b78c1630 100644
--- a/deps/v8/test/inspector/debugger/step-into-external-async-task.js
+++ b/deps/v8/test/inspector/debugger/step-into-external-async-task.js
@@ -62,13 +62,6 @@ session2.setupScriptMap();
InspectorTest.log('Run stepInto with breakOnAsyncCall flag');
Protocol1.Debugger.stepInto({breakOnAsyncCall: true});
- let {params: {asyncCallStackTraceId}} = await Protocol1.Debugger.oncePaused();
-
- InspectorTest.log('Call pauseOnAsyncCall');
- Protocol2.Debugger.pauseOnAsyncCall({
- parentStackTraceId: asyncCallStackTraceId,
- });
- Protocol1.Debugger.resume();
InspectorTest.log('Trigger external async task on another context group');
let stackTraceId = (await evaluatePromise).result.result.value;
diff --git a/deps/v8/test/inspector/debugger/wasm-anyref-global.js b/deps/v8/test/inspector/debugger/wasm-anyref-global.js
index d4c88ac694..d9f63d2d20 100644
--- a/deps/v8/test/inspector/debugger/wasm-anyref-global.js
+++ b/deps/v8/test/inspector/debugger/wasm-anyref-global.js
@@ -15,7 +15,7 @@ let {session, contextGroup, Protocol} =
builder.addImportedGlobal('m', 'global', kWasmAnyRef, false);
builder.addFunction('func', kSig_v_v)
.addBody([
- kExprGetGlobal, 0, //
+ kExprGlobalGet, 0, //
kExprDrop, //
])
.exportAs('main');
diff --git a/deps/v8/test/inspector/debugger/wasm-clone-module-expected.txt b/deps/v8/test/inspector/debugger/wasm-clone-module-expected.txt
index fba9bb4cf2..d07f035f78 100644
--- a/deps/v8/test/inspector/debugger/wasm-clone-module-expected.txt
+++ b/deps/v8/test/inspector/debugger/wasm-clone-module-expected.txt
@@ -1,5 +1,5 @@
Tests that cloning a module notifies the debugger
-Got URL: wasm://wasm/wasm-cae8f226/wasm-cae8f226-0
-Got URL: wasm://wasm/wasm-cae8f226/wasm-cae8f226-0
-Got URL: wasm://wasm/wasm-cae8f226/wasm-cae8f226-0
+Got URL: wasm://wasm/wasm-95d1e44e/wasm-95d1e44e-0
+Got URL: wasm://wasm/wasm-95d1e44e/wasm-95d1e44e-0
+Got URL: wasm://wasm/wasm-95d1e44e/wasm-95d1e44e-0
Done!
diff --git a/deps/v8/test/inspector/debugger/wasm-get-breakable-locations-expected.txt b/deps/v8/test/inspector/debugger/wasm-get-breakable-locations-expected.txt
index 8fec6bc2df..519d77911b 100644
--- a/deps/v8/test/inspector/debugger/wasm-get-breakable-locations-expected.txt
+++ b/deps/v8/test/inspector/debugger/wasm-get-breakable-locations-expected.txt
@@ -2,9 +2,9 @@ Tests breakable locations in wasm
Running testFunction...
Script nr 0 parsed. URL: v8://test/setup
Script nr 1 parsed. URL: v8://test/runTestFunction
-Script nr 2 parsed. URL: wasm://wasm/wasm-354ada0e/wasm-354ada0e-0
+Script nr 2 parsed. URL: wasm://wasm/wasm-6a95b41e/wasm-6a95b41e-0
This is a wasm script (nr 0).
-Script nr 3 parsed. URL: wasm://wasm/wasm-354ada0e/wasm-354ada0e-1
+Script nr 3 parsed. URL: wasm://wasm/wasm-6a95b41e/wasm-6a95b41e-1
This is a wasm script (nr 1).
Querying breakable locations for all wasm scripts now...
Requesting all breakable locations in wasm script 0
@@ -38,51 +38,51 @@ Requesting breakable locations in lines [4,6)
[0] 4:6 || >call 0
[1] 5:4 || >end
Setting a breakpoint on each breakable location...
-Setting at wasm://wasm/wasm-354ada0e/wasm-354ada0e-0:2:2
+Setting at wasm://wasm/wasm-6a95b41e/wasm-6a95b41e-0:2:2
Success!
-Setting at wasm://wasm/wasm-354ada0e/wasm-354ada0e-0:3:2
+Setting at wasm://wasm/wasm-6a95b41e/wasm-6a95b41e-0:3:2
Success!
-Setting at wasm://wasm/wasm-354ada0e/wasm-354ada0e-0:4:2
+Setting at wasm://wasm/wasm-6a95b41e/wasm-6a95b41e-0:4:2
Success!
-Setting at wasm://wasm/wasm-354ada0e/wasm-354ada0e-0:5:0
+Setting at wasm://wasm/wasm-6a95b41e/wasm-6a95b41e-0:5:0
Success!
-Setting at wasm://wasm/wasm-354ada0e/wasm-354ada0e-1:1:2
+Setting at wasm://wasm/wasm-6a95b41e/wasm-6a95b41e-1:1:2
Success!
-Setting at wasm://wasm/wasm-354ada0e/wasm-354ada0e-1:2:2
+Setting at wasm://wasm/wasm-6a95b41e/wasm-6a95b41e-1:2:2
Success!
-Setting at wasm://wasm/wasm-354ada0e/wasm-354ada0e-1:3:4
+Setting at wasm://wasm/wasm-6a95b41e/wasm-6a95b41e-1:3:4
Success!
-Setting at wasm://wasm/wasm-354ada0e/wasm-354ada0e-1:4:6
+Setting at wasm://wasm/wasm-6a95b41e/wasm-6a95b41e-1:4:6
Success!
-Setting at wasm://wasm/wasm-354ada0e/wasm-354ada0e-1:5:4
+Setting at wasm://wasm/wasm-6a95b41e/wasm-6a95b41e-1:5:4
Success!
-Setting at wasm://wasm/wasm-354ada0e/wasm-354ada0e-1:6:2
+Setting at wasm://wasm/wasm-6a95b41e/wasm-6a95b41e-1:6:2
Success!
-Setting at wasm://wasm/wasm-354ada0e/wasm-354ada0e-1:7:0
+Setting at wasm://wasm/wasm-6a95b41e/wasm-6a95b41e-1:7:0
Success!
Running wasm code...
Missing breakpoints: 11
Script nr 4 parsed. URL: v8://test/runWasm
-Stopped at wasm://wasm/wasm-354ada0e/wasm-354ada0e-1:1:2
+Stopped at wasm://wasm/wasm-6a95b41e/wasm-6a95b41e-1:1:2
Missing breakpoints: 10
-Stopped at wasm://wasm/wasm-354ada0e/wasm-354ada0e-1:2:2
+Stopped at wasm://wasm/wasm-6a95b41e/wasm-6a95b41e-1:2:2
Missing breakpoints: 9
-Stopped at wasm://wasm/wasm-354ada0e/wasm-354ada0e-1:3:4
+Stopped at wasm://wasm/wasm-6a95b41e/wasm-6a95b41e-1:3:4
Missing breakpoints: 8
-Stopped at wasm://wasm/wasm-354ada0e/wasm-354ada0e-1:4:6
+Stopped at wasm://wasm/wasm-6a95b41e/wasm-6a95b41e-1:4:6
Missing breakpoints: 7
-Stopped at wasm://wasm/wasm-354ada0e/wasm-354ada0e-0:2:2
+Stopped at wasm://wasm/wasm-6a95b41e/wasm-6a95b41e-0:2:2
Missing breakpoints: 6
-Stopped at wasm://wasm/wasm-354ada0e/wasm-354ada0e-0:3:2
+Stopped at wasm://wasm/wasm-6a95b41e/wasm-6a95b41e-0:3:2
Missing breakpoints: 5
-Stopped at wasm://wasm/wasm-354ada0e/wasm-354ada0e-0:4:2
+Stopped at wasm://wasm/wasm-6a95b41e/wasm-6a95b41e-0:4:2
Missing breakpoints: 4
-Stopped at wasm://wasm/wasm-354ada0e/wasm-354ada0e-0:5:0
+Stopped at wasm://wasm/wasm-6a95b41e/wasm-6a95b41e-0:5:0
Missing breakpoints: 3
-Stopped at wasm://wasm/wasm-354ada0e/wasm-354ada0e-1:5:4
+Stopped at wasm://wasm/wasm-6a95b41e/wasm-6a95b41e-1:5:4
Missing breakpoints: 2
-Stopped at wasm://wasm/wasm-354ada0e/wasm-354ada0e-1:6:2
+Stopped at wasm://wasm/wasm-6a95b41e/wasm-6a95b41e-1:6:2
Missing breakpoints: 1
-Stopped at wasm://wasm/wasm-354ada0e/wasm-354ada0e-1:7:0
+Stopped at wasm://wasm/wasm-6a95b41e/wasm-6a95b41e-1:7:0
Missing breakpoints: 0
Finished!
diff --git a/deps/v8/test/inspector/debugger/wasm-get-breakable-locations.js b/deps/v8/test/inspector/debugger/wasm-get-breakable-locations.js
index f0c20b3955..62f50749f3 100644
--- a/deps/v8/test/inspector/debugger/wasm-get-breakable-locations.js
+++ b/deps/v8/test/inspector/debugger/wasm-get-breakable-locations.js
@@ -16,12 +16,12 @@ var func_idx = builder.addFunction('helper', kSig_v_v)
.addBody([
kExprNop,
kExprI32Const, 12,
- kExprSetLocal, 0,
+ kExprLocalSet, 0,
]).index;
builder.addFunction('main', kSig_v_i)
.addBody([
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
kExprIf, kWasmStmt,
kExprBlock, kWasmStmt,
kExprCallFunction, func_idx,
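
The expectations for this test come from querying the protocol for breakable locations and then setting a breakpoint at each. A minimal sketch of that query, assuming a connected `Protocol` session as used throughout these tests (the helper name is hypothetical):

async function dumpBreakableLocations(Protocol, scriptId) {
  // Omitting `end` requests all locations up to the end of the script.
  const {result: {locations}} = await Protocol.Debugger.getPossibleBreakpoints(
      {start: {scriptId, lineNumber: 0, columnNumber: 0}});
  for (const {lineNumber, columnNumber} of locations) {
    InspectorTest.log(`${lineNumber}:${columnNumber}`);
  }
}
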
diff --git a/deps/v8/test/inspector/debugger/wasm-reset-context-group.js b/deps/v8/test/inspector/debugger/wasm-reset-context-group.js
index c1353da86b..a9a096d65b 100644
--- a/deps/v8/test/inspector/debugger/wasm-reset-context-group.js
+++ b/deps/v8/test/inspector/debugger/wasm-reset-context-group.js
@@ -12,7 +12,7 @@ var builder = new WasmModuleBuilder();
builder.addFunction('wasm_func', kSig_i_i)
.addBody([
// clang-format off
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
kExprI32Const, 1,
kExprI32Sub,
// clang-format on
diff --git a/deps/v8/test/inspector/debugger/wasm-scope-info.js b/deps/v8/test/inspector/debugger/wasm-scope-info.js
index 116b0ce146..2a5e2961ac 100644
--- a/deps/v8/test/inspector/debugger/wasm-scope-info.js
+++ b/deps/v8/test/inspector/debugger/wasm-scope-info.js
@@ -41,21 +41,21 @@ async function instantiateWasm() {
['i32Arg', undefined, 'i64_local', 'unicode☼f64'])
.addBody([
// Set param 0 to 11.
- kExprI32Const, 11, kExprSetLocal, 0,
+ kExprI32Const, 11, kExprLocalSet, 0,
// Set local 1 to 47.
- kExprI32Const, 47, kExprSetLocal, 1,
+ kExprI32Const, 47, kExprLocalSet, 1,
// Set local 2 to 0x7FFFFFFFFFFFFFFF (max i64).
kExprI64Const, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
- kExprSetLocal, 2,
+ kExprLocalSet, 2,
// Set local 2 to 0x8000000000000000 (min i64).
kExprI64Const, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x7f,
- kExprSetLocal, 2,
+ kExprLocalSet, 2,
// Set local 3 to 1/7.
kExprI32Const, 1, kExprF64UConvertI32, kExprI32Const, 7,
- kExprF64UConvertI32, kExprF64Div, kExprSetLocal, 3,
+ kExprF64UConvertI32, kExprF64Div, kExprLocalSet, 3,
// Set global 0 to 15
- kExprI32Const, 15, kExprSetGlobal, 0,
+ kExprI32Const, 15, kExprGlobalSet, 0,
])
.exportAs('main');
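
The i64 constants above are spelled out as signed LEB128 bytes: max i64 encodes to nine 0xff bytes plus 0x00, and min i64 to nine 0x80 bytes plus 0x7f. A sketch of the encoding for reference (hypothetical helper, not part of the harness):

// Signed LEB128: 7 payload bits per byte, high bit set means "continue".
function encodeSLEB128(value) {  // value is a BigInt
  const bytes = [];
  while (true) {
    const byte = Number(value & 0x7fn);
    value >>= 7n;  // BigInt >> is arithmetic, so the sign is preserved
    const signBit = byte & 0x40;
    if ((value === 0n && !signBit) || (value === -1n && signBit)) {
      bytes.push(byte);  // final byte: no continuation bit
      return bytes;
    }
    bytes.push(byte | 0x80);
  }
}
// encodeSLEB128(2n ** 63n - 1n) -> nine 0xff bytes, then 0x00
// encodeSLEB128(-(2n ** 63n))   -> nine 0x80 bytes, then 0x7f
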
diff --git a/deps/v8/test/inspector/debugger/wasm-scripts-expected.txt b/deps/v8/test/inspector/debugger/wasm-scripts-expected.txt
index 0afcc861c4..210292c858 100644
--- a/deps/v8/test/inspector/debugger/wasm-scripts-expected.txt
+++ b/deps/v8/test/inspector/debugger/wasm-scripts-expected.txt
@@ -1,16 +1,23 @@
Tests how wasm scripts are reported
-Check that inspector gets two wasm scripts at module creation time.
-Script #0 parsed. URL: v8://test/testFunction
-Script #1 parsed. URL:
-Script #2 parsed. URL: v8://test/runTestRunction
-Script #3 parsed. URL: wasm://wasm/wasm-7b04570e/wasm-7b04570e-0
-Script #4 parsed. URL: wasm://wasm/wasm-7b04570e/wasm-7b04570e-1
-Source for wasm://wasm/wasm-7b04570e/wasm-7b04570e-0:
+Check that each inspector gets two wasm scripts at module creation time.
+Session #1: Script #0 parsed. URL: wasm://wasm/wasm-f608ae1e/wasm-f608ae1e-0. Source map URL:
+Session #1: Script #1 parsed. URL: wasm://wasm/wasm-f608ae1e/wasm-f608ae1e-1. Source map URL:
+Session #2: Script #0 parsed. URL: wasm://wasm/wasm-f608ae1e/wasm-f608ae1e-0. Source map URL:
+Session #2: Script #1 parsed. URL: wasm://wasm/wasm-f608ae1e/wasm-f608ae1e-1. Source map URL:
+Session #1: Script #2 parsed. URL: wasm://wasm/wasm-74f86b7e. Source map URL: wasm://dwarf
+Session #2: Script #2 parsed. URL: wasm://wasm/wasm-74f86b7e. Source map URL: wasm://dwarf
+Session #1: Script #3 parsed. URL: wasm://wasm/wasm-3754e3fe. Source map URL: abc
+Session #2: Script #3 parsed. URL: wasm://wasm/wasm-3754e3fe. Source map URL: abc
+Session #1: Script #4 parsed. URL: wasm://wasm/wasm-2bd2e40e. Source map URL: abc
+Session #2: Script #4 parsed. URL: wasm://wasm/wasm-2bd2e40e. Source map URL: abc
+Session #1: Script #5 parsed. URL: wasm://wasm/wasm-f568e726. Source map URL: abc
+Session #2: Script #5 parsed. URL: wasm://wasm/wasm-f568e726. Source map URL: abc
+Session #1: Source for wasm://wasm/wasm-f608ae1e/wasm-f608ae1e-0:
func $nopFunction
nop
end
-Source for wasm://wasm/wasm-7b04570e/wasm-7b04570e-1:
+Session #1: Source for wasm://wasm/wasm-f608ae1e/wasm-f608ae1e-1:
func $main
block
i32.const 2
@@ -18,3 +25,48 @@ func $main
end
end
+Session #2: Source for wasm://wasm/wasm-f608ae1e/wasm-f608ae1e-0:
+func $nopFunction
+ nop
+end
+
+Session #2: Source for wasm://wasm/wasm-f608ae1e/wasm-f608ae1e-1:
+func $main
+ block
+ i32.const 2
+ drop
+ end
+end
+
+Session #1: Source for wasm://wasm/wasm-74f86b7e:
+Raw: 00 61 73 6d 01 00 00 00 01 07 02 60 00 00 60 00 00 03 03 02 00 01 07 08 01 04 6d 61 69 6e 00 01 0a 0e 02 03 00 01 0b 08 00 02 40 41 02 1a 0b 0b 00 11 0b 2e 64 65 62 75 67 5f 69 6e 66 6f 01 02 03 04 05 00 1b 04 6e 61 6d 65 01 14 02 00 0b 6e 6f 70 46 75 6e 63 74 69 6f 6e 01 04 6d 61 69 6e
+Imports: []
+Exports: [main: function]
+Session #2: Source for wasm://wasm/wasm-74f86b7e:
+Raw: 00 61 73 6d 01 00 00 00 01 07 02 60 00 00 60 00 00 03 03 02 00 01 07 08 01 04 6d 61 69 6e 00 01 0a 0e 02 03 00 01 0b 08 00 02 40 41 02 1a 0b 0b 00 11 0b 2e 64 65 62 75 67 5f 69 6e 66 6f 01 02 03 04 05 00 1b 04 6e 61 6d 65 01 14 02 00 0b 6e 6f 70 46 75 6e 63 74 69 6f 6e 01 04 6d 61 69 6e
+Imports: []
+Exports: [main: function]
+Session #1: Source for wasm://wasm/wasm-3754e3fe:
+Raw: 00 61 73 6d 01 00 00 00 01 07 02 60 00 00 60 00 00 03 03 02 00 01 07 08 01 04 6d 61 69 6e 00 01 0a 0e 02 03 00 01 0b 08 00 02 40 41 02 1a 0b 0b 00 15 10 73 6f 75 72 63 65 4d 61 70 70 69 6e 67 55 52 4c 03 61 62 63 00 1b 04 6e 61 6d 65 01 14 02 00 0b 6e 6f 70 46 75 6e 63 74 69 6f 6e 01 04 6d 61 69 6e
+Imports: []
+Exports: [main: function]
+Session #2: Source for wasm://wasm/wasm-3754e3fe:
+Raw: 00 61 73 6d 01 00 00 00 01 07 02 60 00 00 60 00 00 03 03 02 00 01 07 08 01 04 6d 61 69 6e 00 01 0a 0e 02 03 00 01 0b 08 00 02 40 41 02 1a 0b 0b 00 15 10 73 6f 75 72 63 65 4d 61 70 70 69 6e 67 55 52 4c 03 61 62 63 00 1b 04 6e 61 6d 65 01 14 02 00 0b 6e 6f 70 46 75 6e 63 74 69 6f 6e 01 04 6d 61 69 6e
+Imports: []
+Exports: [main: function]
+Session #1: Source for wasm://wasm/wasm-2bd2e40e:
+Raw: 00 61 73 6d 01 00 00 00 01 07 02 60 00 00 60 00 00 03 03 02 00 01 07 08 01 04 6d 61 69 6e 00 01 0a 0e 02 03 00 01 0b 08 00 02 40 41 02 1a 0b 0b 00 11 0b 2e 64 65 62 75 67 5f 69 6e 66 6f 01 02 03 04 05 00 15 10 73 6f 75 72 63 65 4d 61 70 70 69 6e 67 55 52 4c 03 61 62 63 00 1b 04 6e 61 6d 65 01 14 02 00 0b 6e 6f 70 46 75 6e 63 74 69 6f 6e 01 04 6d 61 69 6e
+Imports: []
+Exports: [main: function]
+Session #2: Source for wasm://wasm/wasm-2bd2e40e:
+Raw: 00 61 73 6d 01 00 00 00 01 07 02 60 00 00 60 00 00 03 03 02 00 01 07 08 01 04 6d 61 69 6e 00 01 0a 0e 02 03 00 01 0b 08 00 02 40 41 02 1a 0b 0b 00 11 0b 2e 64 65 62 75 67 5f 69 6e 66 6f 01 02 03 04 05 00 15 10 73 6f 75 72 63 65 4d 61 70 70 69 6e 67 55 52 4c 03 61 62 63 00 1b 04 6e 61 6d 65 01 14 02 00 0b 6e 6f 70 46 75 6e 63 74 69 6f 6e 01 04 6d 61 69 6e
+Imports: []
+Exports: [main: function]
+Session #1: Source for wasm://wasm/wasm-f568e726:
+Raw: 00 61 73 6d 01 00 00 00 01 07 02 60 00 00 60 00 00 03 03 02 00 01 07 08 01 04 6d 61 69 6e 00 01 0a 0e 02 03 00 01 0b 08 00 02 40 41 02 1a 0b 0b 00 15 10 73 6f 75 72 63 65 4d 61 70 70 69 6e 67 55 52 4c 03 61 62 63 00 11 0b 2e 64 65 62 75 67 5f 69 6e 66 6f 01 02 03 04 05 00 1b 04 6e 61 6d 65 01 14 02 00 0b 6e 6f 70 46 75 6e 63 74 69 6f 6e 01 04 6d 61 69 6e
+Imports: []
+Exports: [main: function]
+Session #2: Source for wasm://wasm/wasm-f568e726:
+Raw: 00 61 73 6d 01 00 00 00 01 07 02 60 00 00 60 00 00 03 03 02 00 01 07 08 01 04 6d 61 69 6e 00 01 0a 0e 02 03 00 01 0b 08 00 02 40 41 02 1a 0b 0b 00 15 10 73 6f 75 72 63 65 4d 61 70 70 69 6e 67 55 52 4c 03 61 62 63 00 11 0b 2e 64 65 62 75 67 5f 69 6e 66 6f 01 02 03 04 05 00 1b 04 6e 61 6d 65 01 14 02 00 0b 6e 6f 70 46 75 6e 63 74 69 6f 6e 01 04 6d 61 69 6e
+Imports: []
+Exports: [main: function]
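
The raw dumps above can be read directly against the wasm custom-section layout: a 0x00 section id, a LEB128 payload size, a LEB128 name length, the name bytes, then the content (for sourceMappingURL, the test's value array itself starts with the URL's string length, hence the 03 before 61 62 63). A sketch matching those dumps (helper name is illustrative; single-byte LEB128 sizes are assumed, which holds for these payloads):

function customSection(name, content) {
  const nameBytes = Array.from(name, c => c.charCodeAt(0));
  const payload = [nameBytes.length, ...nameBytes, ...content];
  return [0x00, payload.length, ...payload];
}
// customSection('sourceMappingURL', [3, 0x61, 0x62, 0x63])
//   -> 00 15 10 73 6f 75 72 63 65 ... 4c 03 61 62 63, as in the dumps above.
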
diff --git a/deps/v8/test/inspector/debugger/wasm-scripts.js b/deps/v8/test/inspector/debugger/wasm-scripts.js
index 04e5ec88c1..72d886b0e5 100644
--- a/deps/v8/test/inspector/debugger/wasm-scripts.js
+++ b/deps/v8/test/inspector/debugger/wasm-scripts.js
@@ -4,68 +4,140 @@
// Flags: --expose-wasm
-let {session, contextGroup, Protocol} = InspectorTest.start('Tests how wasm scripts are reported');
+InspectorTest.log("Tests how wasm scripts are reported");
-utils.load('test/mjsunit/wasm/wasm-module-builder.js');
+let contextGroup = new InspectorTest.ContextGroup();
+let sessions = [
+ // Main session.
+ trackScripts(),
+  // Extra session to verify that all inspectors get the same messages.
+ // See https://bugs.chromium.org/p/v8/issues/detail?id=9725.
+ trackScripts(),
+];
-// Add two empty functions. Both should be registered as individual scripts at
-// module creation time.
-var builder = new WasmModuleBuilder();
-builder.addFunction('nopFunction', kSig_v_v).addBody([kExprNop]);
-builder.addFunction('main', kSig_v_v)
- .addBody([kExprBlock, kWasmStmt, kExprI32Const, 2, kExprDrop, kExprEnd])
- .exportAs('main');
-var module_bytes = builder.toArray();
+utils.load('test/mjsunit/wasm/wasm-module-builder.js');
-function testFunction(bytes) {
- var buffer = new ArrayBuffer(bytes.length);
- var view = new Uint8Array(buffer);
- for (var i = 0; i < bytes.length; i++) {
- view[i] = bytes[i] | 0;
+// Create module with given custom sections.
+function createModule(...customSections) {
+ var builder = new WasmModuleBuilder();
+ builder.addFunction('nopFunction', kSig_v_v).addBody([kExprNop]);
+ builder.addFunction('main', kSig_v_v)
+ .addBody([kExprBlock, kWasmStmt, kExprI32Const, 2, kExprDrop, kExprEnd])
+ .exportAs('main');
+ for (var { name, value } of customSections) {
+ builder.addCustomSection(name, value);
}
+ return builder.toArray();
+}
+function testFunction(bytes) {
// Compilation triggers registration of wasm scripts.
- new WebAssembly.Module(buffer);
+ new WebAssembly.Module(new Uint8Array(bytes));
}
contextGroup.addScript(testFunction.toString(), 0, 0, 'v8://test/testFunction');
-contextGroup.addScript('var module_bytes = ' + JSON.stringify(module_bytes));
-Protocol.Debugger.enable();
-Protocol.Debugger.onScriptParsed(handleScriptParsed);
InspectorTest.log(
- 'Check that inspector gets two wasm scripts at module creation time.');
-Protocol.Runtime
+ 'Check that each inspector gets two wasm scripts at module creation time.');
+
+// Sample .debug_info section.
+// Content doesn't matter, as we don't try to parse it in V8,
+// but should be non-empty to check that we're skipping it correctly.
+const dwarfSection = { name: '.debug_info', value: [1, 2, 3, 4, 5] };
+
+// Sample sourceMappingURL section set to "abc".
+const sourceMapSection = { name: 'sourceMappingURL', value: [3, 97, 98, 99] };
+
+sessions[0].Protocol.Runtime
.evaluate({
- 'expression': '//# sourceURL=v8://test/runTestRunction\n' +
- 'testFunction(module_bytes)'
+ 'expression': `//# sourceURL=v8://test/runTestRunction
+
+ // no debug info
+ testFunction([${createModule()}]);
+
+ // DWARF
+ testFunction([${createModule(dwarfSection)}]);
+
+ // Source map
+ testFunction([${createModule(sourceMapSection)}]);
+
+ // DWARF + source map
+ testFunction([${createModule(dwarfSection, sourceMapSection)}]);
+
+ // Source map + DWARF (different order)
+ testFunction([${createModule(sourceMapSection, dwarfSection)}]);
+ `
})
- .then(checkFinished);
+ .then(() => (
+    // At this point all scripts have been parsed.
+ // Stop tracking and wait for script sources in each session.
+ Promise.all(sessions.map(session => session.getScripts()))
+ ))
+ .catch(err => {
+ InspectorTest.log(err.stack);
+ })
+ .then(() => InspectorTest.completeTest());
+
+function decodeBase64(base64) {
+ const LOOKUP = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/';
+
+ const paddingLength = base64.match(/=*$/)[0].length;
+ const bytesLength = base64.length * 0.75 - paddingLength;
-var num_scripts = 0;
-var missing_sources = 0;
+ let bytes = new Uint8Array(bytesLength);
+
+ for (let i = 0, p = 0; i < base64.length; i += 4, p += 3) {
+ let bits = 0;
+ for (let j = 0; j < 4; j++) {
+ bits <<= 6;
+ const c = base64[i + j];
+ if (c !== '=') bits |= LOOKUP.indexOf(c);
+ }
+ for (let j = p + 2; j >= p; j--) {
+ if (j < bytesLength) bytes[j] = bits;
+ bits >>= 8;
+ }
+ }
-function checkFinished() {
- if (missing_sources == 0)
- InspectorTest.completeTest();
+ return bytes;
}
-function handleScriptParsed(messageObject)
-{
- var url = messageObject.params.url;
- InspectorTest.log("Script #" + num_scripts + " parsed. URL: " + url);
- ++num_scripts;
-
- if (url.startsWith("wasm://")) {
- ++missing_sources;
- function dumpScriptSource(message) {
- InspectorTest.log("Source for " + url + ":");
- InspectorTest.log(message.result.scriptSource);
- --missing_sources;
+function trackScripts(debuggerParams) {
+ let {id: sessionId, Protocol} = contextGroup.connect();
+ let scripts = [];
+
+ Protocol.Debugger.enable(debuggerParams);
+ Protocol.Debugger.onScriptParsed(handleScriptParsed);
+
+ async function loadScript({url, scriptId, sourceMapURL}) {
+ InspectorTest.log(`Session #${sessionId}: Script #${scripts.length} parsed. URL: ${url}. Source map URL: ${sourceMapURL}`);
+ let scriptSource;
+ if (sourceMapURL) {
+ let {result: {bytecode}} = await Protocol.Debugger.getWasmBytecode({scriptId});
+      // Binary value is represented as base64 in JSON; decode it.
+ bytecode = decodeBase64(bytecode);
+ // Check that it can be parsed back to a WebAssembly module.
+ let module = new WebAssembly.Module(bytecode);
+ scriptSource = `
+Raw: ${Array.from(bytecode, b => ('0' + b.toString(16)).slice(-2)).join(' ')}
+Imports: [${WebAssembly.Module.imports(module).map(i => `${i.name}: ${i.kind} from "${i.module}"`).join(', ')}]
+Exports: [${WebAssembly.Module.exports(module).map(e => `${e.name}: ${e.kind}`).join(', ')}]
+ `.trim();
+ } else {
+ ({result: {scriptSource}} = await Protocol.Debugger.getScriptSource({scriptId}));
}
+ InspectorTest.log(`Session #${sessionId}: Source for ${url}:`);
+ InspectorTest.log(scriptSource);
+ }
- Protocol.Debugger.getScriptSource({scriptId: messageObject.params.scriptId})
- .then(dumpScriptSource.bind(null))
- .then(checkFinished);
+ function handleScriptParsed({params}) {
+ if (params.url.startsWith("wasm://")) {
+ scripts.push(loadScript(params));
+ }
}
+
+ return {
+ Protocol,
+ getScripts: () => Promise.all(scripts),
+ };
}
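
For modules that carry a source map or DWARF section, the reworked test above fetches the raw module over the protocol rather than a disassembly; the bytes travel as base64 inside JSON, hence the inline decoder. The round trip in isolation, reusing the test's decodeBase64 (WebAssembly.validate returns a boolean rather than throwing):

async function fetchAndCheckBytecode(Protocol, scriptId) {
  const {result: {bytecode}} = await Protocol.Debugger.getWasmBytecode({scriptId});
  const bytes = decodeBase64(bytecode);
  InspectorTest.log(`valid module: ${WebAssembly.validate(bytes)}`);
  return bytes;
}
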
diff --git a/deps/v8/test/inspector/debugger/wasm-set-breakpoint-expected.txt b/deps/v8/test/inspector/debugger/wasm-set-breakpoint-expected.txt
index 406d39dd95..29ee78c65a 100644
--- a/deps/v8/test/inspector/debugger/wasm-set-breakpoint-expected.txt
+++ b/deps/v8/test/inspector/debugger/wasm-set-breakpoint-expected.txt
@@ -1,13 +1,13 @@
Tests stepping through wasm scripts.
Instantiating.
Waiting for two wasm scripts (ignoring first non-wasm script).
-Source of script wasm://wasm/wasm-0c10a5fe/wasm-0c10a5fe-0:
+Source of script wasm://wasm/wasm-18214bfe/wasm-18214bfe-0:
1: func $wasm_A
2: nop
3: nop
4: end
-Source of script wasm://wasm/wasm-0c10a5fe/wasm-0c10a5fe-1:
+Source of script wasm://wasm/wasm-18214bfe/wasm-18214bfe-1:
1: func $wasm_B (param i32)
2: loop
3: local.get 0
@@ -22,12 +22,12 @@ Source of script wasm://wasm/wasm-0c10a5fe/wasm-0c10a5fe-1:
12: end
13: end
-Setting breakpoint on line 8 on script wasm://wasm/wasm-0c10a5fe/wasm-0c10a5fe-1
-Setting breakpoint on line 7 on script wasm://wasm/wasm-0c10a5fe/wasm-0c10a5fe-1
-Setting breakpoint on line 6 on script wasm://wasm/wasm-0c10a5fe/wasm-0c10a5fe-1
-Setting breakpoint on line 5 on script wasm://wasm/wasm-0c10a5fe/wasm-0c10a5fe-1
-Setting breakpoint on line 3 on script wasm://wasm/wasm-0c10a5fe/wasm-0c10a5fe-1
-Setting breakpoint on line 4 on script wasm://wasm/wasm-0c10a5fe/wasm-0c10a5fe-1
+Setting breakpoint on line 8 on script wasm://wasm/wasm-18214bfe/wasm-18214bfe-1
+Setting breakpoint on line 7 on script wasm://wasm/wasm-18214bfe/wasm-18214bfe-1
+Setting breakpoint on line 6 on script wasm://wasm/wasm-18214bfe/wasm-18214bfe-1
+Setting breakpoint on line 5 on script wasm://wasm/wasm-18214bfe/wasm-18214bfe-1
+Setting breakpoint on line 3 on script wasm://wasm/wasm-18214bfe/wasm-18214bfe-1
+Setting breakpoint on line 4 on script wasm://wasm/wasm-18214bfe/wasm-18214bfe-1
Calling main(4)
Breaking on line 3
Breaking on line 4
diff --git a/deps/v8/test/inspector/debugger/wasm-set-breakpoint.js b/deps/v8/test/inspector/debugger/wasm-set-breakpoint.js
index a9b676f8a7..1696a0f59c 100644
--- a/deps/v8/test/inspector/debugger/wasm-set-breakpoint.js
+++ b/deps/v8/test/inspector/debugger/wasm-set-breakpoint.js
@@ -17,12 +17,12 @@ builder.addFunction('wasm_B', kSig_v_i)
.addBody([
// clang-format off
kExprLoop, kWasmStmt, // while
- kExprGetLocal, 0, // -
+ kExprLocalGet, 0, // -
kExprIf, kWasmStmt, // if <param0> != 0
- kExprGetLocal, 0, // -
+ kExprLocalGet, 0, // -
kExprI32Const, 1, // -
kExprI32Sub, // -
- kExprSetLocal, 0, // decrease <param0>
+ kExprLocalSet, 0, // decrease <param0>
kExprCallFunction, func_a_idx, // -
kExprBr, 1, // continue
kExprEnd, // -
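
The "Setting breakpoint on line N" lines in the expectations above correspond to plain Debugger.setBreakpoint calls against the wasm script; a failed command comes back with `error` instead of `result`, which is what the harness's failure checks rely on. A minimal sketch (hypothetical helper):

async function setBreakpoints(Protocol, scriptId, lines) {
  for (const lineNumber of lines) {
    const msg = await Protocol.Debugger.setBreakpoint(
        {location: {scriptId, lineNumber}});
    InspectorTest.log(msg.result ? 'Success!' : 'Failed!');
  }
}
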
diff --git a/deps/v8/test/inspector/debugger/wasm-stepping-expected.txt b/deps/v8/test/inspector/debugger/wasm-stepping-expected.txt
index c951dce4ba..4a1fd58f2b 100644
--- a/deps/v8/test/inspector/debugger/wasm-stepping-expected.txt
+++ b/deps/v8/test/inspector/debugger/wasm-stepping-expected.txt
@@ -3,10 +3,10 @@ Installing code an global variable.
Calling instantiate function.
Waiting for two wasm scripts to be parsed.
Ignoring script with url v8://test/callInstantiate
-Got wasm script: wasm://wasm/wasm-0c10a5fe/wasm-0c10a5fe-0
-Requesting source for wasm://wasm/wasm-0c10a5fe/wasm-0c10a5fe-0...
-Got wasm script: wasm://wasm/wasm-0c10a5fe/wasm-0c10a5fe-1
-Requesting source for wasm://wasm/wasm-0c10a5fe/wasm-0c10a5fe-1...
+Got wasm script: wasm://wasm/wasm-18214bfe/wasm-18214bfe-0
+Requesting source for wasm://wasm/wasm-18214bfe/wasm-18214bfe-0...
+Got wasm script: wasm://wasm/wasm-18214bfe/wasm-18214bfe-1
+Requesting source for wasm://wasm/wasm-18214bfe/wasm-18214bfe-1...
func $wasm_A
nop
nop
@@ -26,13 +26,13 @@ func $wasm_B (param i32)
end
end
-Setting breakpoint on line 7 (on the setlocal before the call), url wasm://wasm/wasm-0c10a5fe/wasm-0c10a5fe-1
+Setting breakpoint on line 7 (on the setlocal before the call), url wasm://wasm/wasm-18214bfe/wasm-18214bfe-1
{
columnNumber : 6
lineNumber : 7
scriptId : <scriptId>
}
-Paused at wasm://wasm/wasm-0c10a5fe/wasm-0c10a5fe-1:7:6: >local.set 0
+Paused at wasm://wasm/wasm-18214bfe/wasm-18214bfe-1:7:6: >local.set 0
at wasm_B (7:6):
- scope (global):
-- skipped
@@ -43,7 +43,7 @@ at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepInto called
-Paused at wasm://wasm/wasm-0c10a5fe/wasm-0c10a5fe-1:8:6: >call 0
+Paused at wasm://wasm/wasm-18214bfe/wasm-18214bfe-1:8:6: >call 0
at wasm_B (8:6):
- scope (global):
-- skipped
@@ -54,7 +54,7 @@ at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepInto called
-Paused at wasm://wasm/wasm-0c10a5fe/wasm-0c10a5fe-0:1:2: >nop
+Paused at wasm://wasm/wasm-18214bfe/wasm-18214bfe-0:1:2: >nop
at wasm_A (1:2):
- scope (global):
-- skipped
@@ -70,7 +70,7 @@ at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepOver called
-Paused at wasm://wasm/wasm-0c10a5fe/wasm-0c10a5fe-0:2:2: >nop
+Paused at wasm://wasm/wasm-18214bfe/wasm-18214bfe-0:2:2: >nop
at wasm_A (2:2):
- scope (global):
-- skipped
@@ -86,7 +86,7 @@ at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepOut called
-Paused at wasm://wasm/wasm-0c10a5fe/wasm-0c10a5fe-1:9:6: >br 1
+Paused at wasm://wasm/wasm-18214bfe/wasm-18214bfe-1:9:6: >br 1
at wasm_B (9:6):
- scope (global):
-- skipped
@@ -97,7 +97,7 @@ at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepOut called
-Paused at wasm://wasm/wasm-0c10a5fe/wasm-0c10a5fe-1:7:6: >local.set 0
+Paused at wasm://wasm/wasm-18214bfe/wasm-18214bfe-1:7:6: >local.set 0
at wasm_B (7:6):
- scope (global):
-- skipped
@@ -108,7 +108,7 @@ at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepOver called
-Paused at wasm://wasm/wasm-0c10a5fe/wasm-0c10a5fe-1:8:6: >call 0
+Paused at wasm://wasm/wasm-18214bfe/wasm-18214bfe-1:8:6: >call 0
at wasm_B (8:6):
- scope (global):
-- skipped
@@ -119,7 +119,7 @@ at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepOver called
-Paused at wasm://wasm/wasm-0c10a5fe/wasm-0c10a5fe-1:9:6: >br 1
+Paused at wasm://wasm/wasm-18214bfe/wasm-18214bfe-1:9:6: >br 1
at wasm_B (9:6):
- scope (global):
-- skipped
@@ -130,7 +130,7 @@ at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.resume called
-Paused at wasm://wasm/wasm-0c10a5fe/wasm-0c10a5fe-1:7:6: >local.set 0
+Paused at wasm://wasm/wasm-18214bfe/wasm-18214bfe-1:7:6: >local.set 0
at wasm_B (7:6):
- scope (global):
-- skipped
@@ -141,7 +141,7 @@ at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepInto called
-Paused at wasm://wasm/wasm-0c10a5fe/wasm-0c10a5fe-1:8:6: >call 0
+Paused at wasm://wasm/wasm-18214bfe/wasm-18214bfe-1:8:6: >call 0
at wasm_B (8:6):
- scope (global):
-- skipped
@@ -152,7 +152,7 @@ at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepInto called
-Paused at wasm://wasm/wasm-0c10a5fe/wasm-0c10a5fe-0:1:2: >nop
+Paused at wasm://wasm/wasm-18214bfe/wasm-18214bfe-0:1:2: >nop
at wasm_A (1:2):
- scope (global):
-- skipped
@@ -168,7 +168,7 @@ at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepOut called
-Paused at wasm://wasm/wasm-0c10a5fe/wasm-0c10a5fe-1:9:6: >br 1
+Paused at wasm://wasm/wasm-18214bfe/wasm-18214bfe-1:9:6: >br 1
at wasm_B (9:6):
- scope (global):
-- skipped
@@ -179,7 +179,7 @@ at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepInto called
-Paused at wasm://wasm/wasm-0c10a5fe/wasm-0c10a5fe-1:1:2: >loop
+Paused at wasm://wasm/wasm-18214bfe/wasm-18214bfe-1:1:2: >loop
at wasm_B (1:2):
- scope (global):
-- skipped
@@ -190,7 +190,7 @@ at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepInto called
-Paused at wasm://wasm/wasm-0c10a5fe/wasm-0c10a5fe-1:2:4: >local.get 0
+Paused at wasm://wasm/wasm-18214bfe/wasm-18214bfe-1:2:4: >local.get 0
at wasm_B (2:4):
- scope (global):
-- skipped
@@ -201,7 +201,7 @@ at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepInto called
-Paused at wasm://wasm/wasm-0c10a5fe/wasm-0c10a5fe-1:3:4: >if
+Paused at wasm://wasm/wasm-18214bfe/wasm-18214bfe-1:3:4: >if
at wasm_B (3:4):
- scope (global):
-- skipped
@@ -212,7 +212,7 @@ at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepInto called
-Paused at wasm://wasm/wasm-0c10a5fe/wasm-0c10a5fe-1:4:6: >local.get 0
+Paused at wasm://wasm/wasm-18214bfe/wasm-18214bfe-1:4:6: >local.get 0
at wasm_B (4:6):
- scope (global):
-- skipped
@@ -223,7 +223,7 @@ at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepInto called
-Paused at wasm://wasm/wasm-0c10a5fe/wasm-0c10a5fe-1:5:6: >i32.const 1
+Paused at wasm://wasm/wasm-18214bfe/wasm-18214bfe-1:5:6: >i32.const 1
at wasm_B (5:6):
- scope (global):
-- skipped
@@ -234,7 +234,7 @@ at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepInto called
-Paused at wasm://wasm/wasm-0c10a5fe/wasm-0c10a5fe-1:6:6: >i32.sub
+Paused at wasm://wasm/wasm-18214bfe/wasm-18214bfe-1:6:6: >i32.sub
at wasm_B (6:6):
- scope (global):
-- skipped
@@ -245,7 +245,7 @@ at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepInto called
-Paused at wasm://wasm/wasm-0c10a5fe/wasm-0c10a5fe-1:7:6: >local.set 0
+Paused at wasm://wasm/wasm-18214bfe/wasm-18214bfe-1:7:6: >local.set 0
at wasm_B (7:6):
- scope (global):
-- skipped
@@ -256,7 +256,7 @@ at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepInto called
-Paused at wasm://wasm/wasm-0c10a5fe/wasm-0c10a5fe-1:8:6: >call 0
+Paused at wasm://wasm/wasm-18214bfe/wasm-18214bfe-1:8:6: >call 0
at wasm_B (8:6):
- scope (global):
-- skipped
@@ -267,7 +267,7 @@ at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepInto called
-Paused at wasm://wasm/wasm-0c10a5fe/wasm-0c10a5fe-0:1:2: >nop
+Paused at wasm://wasm/wasm-18214bfe/wasm-18214bfe-0:1:2: >nop
at wasm_A (1:2):
- scope (global):
-- skipped
@@ -283,7 +283,7 @@ at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepInto called
-Paused at wasm://wasm/wasm-0c10a5fe/wasm-0c10a5fe-0:2:2: >nop
+Paused at wasm://wasm/wasm-18214bfe/wasm-18214bfe-0:2:2: >nop
at wasm_A (2:2):
- scope (global):
-- skipped
@@ -299,7 +299,7 @@ at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepInto called
-Paused at wasm://wasm/wasm-0c10a5fe/wasm-0c10a5fe-0:3:0: >end
+Paused at wasm://wasm/wasm-18214bfe/wasm-18214bfe-0:3:0: >end
at wasm_A (3:0):
- scope (global):
-- skipped
@@ -315,7 +315,7 @@ at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepInto called
-Paused at wasm://wasm/wasm-0c10a5fe/wasm-0c10a5fe-1:9:6: >br 1
+Paused at wasm://wasm/wasm-18214bfe/wasm-18214bfe-1:9:6: >br 1
at wasm_B (9:6):
- scope (global):
-- skipped
diff --git a/deps/v8/test/inspector/debugger/wasm-stepping-with-source-map-expected.txt b/deps/v8/test/inspector/debugger/wasm-stepping-with-source-map-expected.txt
index 34f9cf1df9..325049fffb 100644
--- a/deps/v8/test/inspector/debugger/wasm-stepping-with-source-map-expected.txt
+++ b/deps/v8/test/inspector/debugger/wasm-stepping-with-source-map-expected.txt
@@ -1,16 +1,16 @@
Tests stepping through wasm scripts with source maps
Installing code an global variable and instantiate.
-Got wasm script: wasm-9b4bf87e
+Got wasm script: wasm://wasm/wasm-3697f0fe
Script sourceMapURL: abc
-Requesting source for wasm-9b4bf87e...
+Requesting source for wasm://wasm/wasm-3697f0fe...
Source retrieved without error: true
-Setting breakpoint on offset 54 (on the setlocal before the call), url wasm-9b4bf87e
+Setting breakpoint on offset 54 (on the setlocal before the call), url wasm://wasm/wasm-3697f0fe
{
columnNumber : 54
lineNumber : 0
scriptId : <scriptId>
}
-Paused at wasm-9b4bf87e:0:54
+Paused at wasm://wasm/wasm-3697f0fe:0:54
at wasm_B (0:54):
- scope (global):
-- skipped
@@ -21,7 +21,7 @@ at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepInto called
-Paused at wasm-9b4bf87e:0:56
+Paused at wasm://wasm/wasm-3697f0fe:0:56
at wasm_B (0:56):
- scope (global):
-- skipped
@@ -32,7 +32,7 @@ at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepInto called
-Paused at wasm-9b4bf87e:0:38
+Paused at wasm://wasm/wasm-3697f0fe:0:38
at wasm_A (0:38):
- scope (global):
-- skipped
@@ -48,7 +48,7 @@ at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepOver called
-Paused at wasm-9b4bf87e:0:39
+Paused at wasm://wasm/wasm-3697f0fe:0:39
at wasm_A (0:39):
- scope (global):
-- skipped
@@ -64,7 +64,7 @@ at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepOut called
-Paused at wasm-9b4bf87e:0:58
+Paused at wasm://wasm/wasm-3697f0fe:0:58
at wasm_B (0:58):
- scope (global):
-- skipped
@@ -75,7 +75,7 @@ at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepOut called
-Paused at wasm-9b4bf87e:0:54
+Paused at wasm://wasm/wasm-3697f0fe:0:54
at wasm_B (0:54):
- scope (global):
-- skipped
@@ -86,7 +86,7 @@ at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepOver called
-Paused at wasm-9b4bf87e:0:56
+Paused at wasm://wasm/wasm-3697f0fe:0:56
at wasm_B (0:56):
- scope (global):
-- skipped
@@ -97,7 +97,7 @@ at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepOver called
-Paused at wasm-9b4bf87e:0:58
+Paused at wasm://wasm/wasm-3697f0fe:0:58
at wasm_B (0:58):
- scope (global):
-- skipped
@@ -108,7 +108,7 @@ at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.resume called
-Paused at wasm-9b4bf87e:0:54
+Paused at wasm://wasm/wasm-3697f0fe:0:54
at wasm_B (0:54):
- scope (global):
-- skipped
@@ -119,7 +119,7 @@ at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepInto called
-Paused at wasm-9b4bf87e:0:56
+Paused at wasm://wasm/wasm-3697f0fe:0:56
at wasm_B (0:56):
- scope (global):
-- skipped
@@ -130,7 +130,7 @@ at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepInto called
-Paused at wasm-9b4bf87e:0:38
+Paused at wasm://wasm/wasm-3697f0fe:0:38
at wasm_A (0:38):
- scope (global):
-- skipped
@@ -146,7 +146,7 @@ at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepOut called
-Paused at wasm-9b4bf87e:0:58
+Paused at wasm://wasm/wasm-3697f0fe:0:58
at wasm_B (0:58):
- scope (global):
-- skipped
@@ -157,7 +157,7 @@ at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepInto called
-Paused at wasm-9b4bf87e:0:43
+Paused at wasm://wasm/wasm-3697f0fe:0:43
at wasm_B (0:43):
- scope (global):
-- skipped
@@ -168,7 +168,7 @@ at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepInto called
-Paused at wasm-9b4bf87e:0:45
+Paused at wasm://wasm/wasm-3697f0fe:0:45
at wasm_B (0:45):
- scope (global):
-- skipped
@@ -179,7 +179,7 @@ at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepInto called
-Paused at wasm-9b4bf87e:0:47
+Paused at wasm://wasm/wasm-3697f0fe:0:47
at wasm_B (0:47):
- scope (global):
-- skipped
@@ -190,7 +190,7 @@ at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepInto called
-Paused at wasm-9b4bf87e:0:49
+Paused at wasm://wasm/wasm-3697f0fe:0:49
at wasm_B (0:49):
- scope (global):
-- skipped
@@ -201,7 +201,7 @@ at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepInto called
-Paused at wasm-9b4bf87e:0:51
+Paused at wasm://wasm/wasm-3697f0fe:0:51
at wasm_B (0:51):
- scope (global):
-- skipped
@@ -212,7 +212,7 @@ at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepInto called
-Paused at wasm-9b4bf87e:0:53
+Paused at wasm://wasm/wasm-3697f0fe:0:53
at wasm_B (0:53):
- scope (global):
-- skipped
@@ -223,7 +223,7 @@ at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepInto called
-Paused at wasm-9b4bf87e:0:54
+Paused at wasm://wasm/wasm-3697f0fe:0:54
at wasm_B (0:54):
- scope (global):
-- skipped
@@ -234,7 +234,7 @@ at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepInto called
-Paused at wasm-9b4bf87e:0:56
+Paused at wasm://wasm/wasm-3697f0fe:0:56
at wasm_B (0:56):
- scope (global):
-- skipped
@@ -245,7 +245,7 @@ at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepInto called
-Paused at wasm-9b4bf87e:0:38
+Paused at wasm://wasm/wasm-3697f0fe:0:38
at wasm_A (0:38):
- scope (global):
-- skipped
@@ -261,7 +261,7 @@ at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepInto called
-Paused at wasm-9b4bf87e:0:39
+Paused at wasm://wasm/wasm-3697f0fe:0:39
at wasm_A (0:39):
- scope (global):
-- skipped
@@ -277,7 +277,7 @@ at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepInto called
-Paused at wasm-9b4bf87e:0:40
+Paused at wasm://wasm/wasm-3697f0fe:0:40
at wasm_A (0:40):
- scope (global):
-- skipped
@@ -293,7 +293,7 @@ at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepInto called
-Paused at wasm-9b4bf87e:0:58
+Paused at wasm://wasm/wasm-3697f0fe:0:58
at wasm_B (0:58):
- scope (global):
-- skipped
diff --git a/deps/v8/test/inspector/debugger/wasm-stepping-with-source-map.js b/deps/v8/test/inspector/debugger/wasm-stepping-with-source-map.js
index 1c9ec95577..57b2fd581c 100644
--- a/deps/v8/test/inspector/debugger/wasm-stepping-with-source-map.js
+++ b/deps/v8/test/inspector/debugger/wasm-stepping-with-source-map.js
@@ -17,12 +17,12 @@ builder.addFunction('wasm_B', kSig_v_i)
.addBody([
// clang-format off
kExprLoop, kWasmStmt, // while
- kExprGetLocal, 0, // -
+ kExprLocalGet, 0, // -
kExprIf, kWasmStmt, // if <param0> != 0
- kExprGetLocal, 0, // -
+ kExprLocalGet, 0, // -
kExprI32Const, 1, // -
kExprI32Sub, // -
- kExprSetLocal, 0, // decrease <param0>
+ kExprLocalSet, 0, // decrease <param0>
kExprCallFunction, func_a_idx, // -
kExprBr, 1, // continue
kExprEnd, // -
diff --git a/deps/v8/test/inspector/debugger/wasm-stepping.js b/deps/v8/test/inspector/debugger/wasm-stepping.js
index 0fda6b73be..01d83b6696 100644
--- a/deps/v8/test/inspector/debugger/wasm-stepping.js
+++ b/deps/v8/test/inspector/debugger/wasm-stepping.js
@@ -6,9 +6,9 @@ let {session, contextGroup, Protocol} = InspectorTest.start('Tests stepping thro
utils.load('test/mjsunit/wasm/wasm-module-builder.js');
-var builder = new WasmModuleBuilder();
+let builder = new WasmModuleBuilder();
-var func_a_idx =
+let func_a_idx =
builder.addFunction('wasm_A', kSig_v_v).addBody([kExprNop, kExprNop]).index;
// wasm_B calls wasm_A <param0> times.
@@ -16,12 +16,12 @@ builder.addFunction('wasm_B', kSig_v_i)
.addBody([
// clang-format off
kExprLoop, kWasmStmt, // while
- kExprGetLocal, 0, // -
+ kExprLocalGet, 0, // -
kExprIf, kWasmStmt, // if <param0> != 0
- kExprGetLocal, 0, // -
+ kExprLocalGet, 0, // -
kExprI32Const, 1, // -
kExprI32Sub, // -
- kExprSetLocal, 0, // decrease <param0>
+ kExprLocalSet, 0, // decrease <param0>
kExprCallFunction, func_a_idx, // -
kExprBr, 1, // continue
kExprEnd, // -
@@ -30,26 +30,26 @@ builder.addFunction('wasm_B', kSig_v_i)
])
.exportAs('main');
-var module_bytes = builder.toArray();
+let module_bytes = builder.toArray();
function instantiate(bytes) {
- var buffer = new ArrayBuffer(bytes.length);
- var view = new Uint8Array(buffer);
- for (var i = 0; i < bytes.length; ++i) {
+ let buffer = new ArrayBuffer(bytes.length);
+ let view = new Uint8Array(buffer);
+ for (let i = 0; i < bytes.length; ++i) {
view[i] = bytes[i] | 0;
}
- var module = new WebAssembly.Module(buffer);
+ let module = new WebAssembly.Module(buffer);
// Set global variable.
instance = new WebAssembly.Instance(module);
}
-var evalWithUrl = (code, url) => Protocol.Runtime.evaluate(
+let evalWithUrl = (code, url) => Protocol.Runtime.evaluate(
{'expression': code + '\n//# sourceURL=v8://test/' + url});
Protocol.Debugger.onPaused(handlePaused);
-var wasm_B_scriptId;
-var step_actions = [
+let wasm_B_scriptId;
+let step_actions = [
'stepInto', // == stepOver, to call instruction
'stepInto', // into call to wasm_A
'stepOver', // over first nop
@@ -69,38 +69,33 @@ var step_actions = [
// then just resume.
'resume'
];
-for (var action of step_actions) {
+for (let action of step_actions) {
InspectorTest.logProtocolCommandCalls('Debugger.' + action)
}
-var sources = {};
-var urls = {};
-var afterTwoSourcesCallback;
-
-Protocol.Debugger.enable()
- .then(() => InspectorTest.log('Installing code an global variable.'))
- .then(
- () => evalWithUrl('var instance;\n' + instantiate.toString(), 'setup'))
- .then(() => InspectorTest.log('Calling instantiate function.'))
- .then(
- () =>
- (evalWithUrl(
- 'instantiate(' + JSON.stringify(module_bytes) + ')',
- 'callInstantiate'),
- 0))
- .then(waitForTwoWasmScripts)
- .then(
- () => InspectorTest.log(
- 'Setting breakpoint on line 7 (on the setlocal before the call), url ' +
- urls[wasm_B_scriptId]))
- .then(
- () => Protocol.Debugger.setBreakpoint(
- {'location': {'scriptId': wasm_B_scriptId, 'lineNumber': 7}}))
- .then(printFailure)
- .then(msg => InspectorTest.logMessage(msg.result.actualLocation))
- .then(() => evalWithUrl('instance.exports.main(4)', 'runWasm'))
- .then(() => InspectorTest.log('exports.main returned!'))
- .then(() => InspectorTest.log('Finished!'))
- .then(InspectorTest.completeTest);
+let sources = {};
+let urls = {};
+let afterTwoSourcesCallback;
+
+(async function Test() {
+ await Protocol.Debugger.enable();
+ InspectorTest.log('Installing code an global variable.');
+ await evalWithUrl('var instance;\n' + instantiate.toString(), 'setup');
+ InspectorTest.log('Calling instantiate function.');
+ evalWithUrl(
+ 'instantiate(' + JSON.stringify(module_bytes) + ')', 'callInstantiate');
+ await waitForTwoWasmScripts();
+ InspectorTest.log(
+ 'Setting breakpoint on line 7 (on the setlocal before the call), url ' +
+ urls[wasm_B_scriptId]);
+ let msg = await Protocol.Debugger.setBreakpoint(
+ {'location': {'scriptId': wasm_B_scriptId, 'lineNumber': 7}});
+ printFailure(msg);
+ InspectorTest.logMessage(msg.result.actualLocation);
+ await evalWithUrl('instance.exports.main(4)', 'runWasm');
+ InspectorTest.log('exports.main returned!');
+ InspectorTest.log('Finished!');
+ InspectorTest.completeTest();
+})();
function printFailure(message) {
if (!message.result) {
@@ -109,41 +104,37 @@ function printFailure(message) {
return message;
}
-function waitForTwoWasmScripts() {
- var num = 0;
+async function waitForTwoWasmScripts() {
+ let num = 0;
InspectorTest.log('Waiting for two wasm scripts to be parsed.');
- var promise = new Promise(fulfill => gotBothSources = fulfill);
- function waitForMore() {
- if (num == 2) return promise;
- Protocol.Debugger.onceScriptParsed()
- .then(handleNewScript)
- .then(waitForMore);
+ let source_promises = [];
+ async function getWasmSource(scriptId) {
+ let msg = await Protocol.Debugger.getScriptSource({scriptId: scriptId});
+ printFailure(msg);
+ InspectorTest.log(msg.result.scriptSource);
+ sources[scriptId] = msg.result.scriptSource;
}
- function handleNewScript(msg) {
- var url = msg.params.url;
+ while (num < 2) {
+ let msg = await Protocol.Debugger.onceScriptParsed();
+ let url = msg.params.url;
if (!url.startsWith('wasm://')) {
InspectorTest.log('Ignoring script with url ' + url);
- return;
+ continue;
}
num += 1;
- var scriptId = msg.params.scriptId;
+ let scriptId = msg.params.scriptId;
urls[scriptId] = url;
InspectorTest.log('Got wasm script: ' + url);
if (url.substr(-2) == '-1') wasm_B_scriptId = scriptId;
InspectorTest.log('Requesting source for ' + url + '...');
- Protocol.Debugger.getScriptSource({scriptId: scriptId})
- .then(printFailure)
- .then(msg => sources[scriptId] = msg.result.scriptSource)
- .then(InspectorTest.log)
- .then(() => Object.keys(sources).length == 2 ? gotBothSources() : 0);
+ source_promises.push(getWasmSource(scriptId));
}
- waitForMore();
- return promise;
+ await Promise.all(source_promises);
}
function printPauseLocation(scriptId, lineNr, columnNr) {
- var lines = sources[scriptId].split('\n');
- var line = '<illegal line number>';
+ let lines = sources[scriptId].split('\n');
+ let line = '<illegal line number>';
if (lineNr < lines.length) {
line = lines[lineNr];
if (columnNr < line.length) {
@@ -157,7 +148,7 @@ function printPauseLocation(scriptId, lineNr, columnNr) {
async function getValueString(value) {
if (value.type == 'object') {
- var msg = await Protocol.Runtime.callFunctionOn({
+ let msg = await Protocol.Runtime.callFunctionOn({
objectId: value.objectId,
functionDeclaration: 'function () { return JSON.stringify(this); }'
});
@@ -169,24 +160,24 @@ async function getValueString(value) {
async function dumpProperties(message) {
printFailure(message);
- for (var value of message.result.result) {
- var value_str = await getValueString(value.value);
+ for (let value of message.result.result) {
+ let value_str = await getValueString(value.value);
InspectorTest.log(' ' + value.name + ': ' + value_str);
}
}
async function dumpScopeChainsOnPause(message) {
- for (var frame of message.params.callFrames) {
- var functionName = frame.functionName || '(anonymous)';
- var lineNumber = frame.location ? frame.location.lineNumber : frame.lineNumber;
- var columnNumber = frame.location ? frame.location.columnNumber : frame.columnNumber;
+ for (let frame of message.params.callFrames) {
+ let functionName = frame.functionName || '(anonymous)';
+ let lineNumber = frame.location ? frame.location.lineNumber : frame.lineNumber;
+ let columnNumber = frame.location ? frame.location.columnNumber : frame.columnNumber;
InspectorTest.log(`at ${functionName} (${lineNumber}:${columnNumber}):`);
- for (var scope of frame.scopeChain) {
+ for (let scope of frame.scopeChain) {
InspectorTest.logObject(' - scope (' + scope.type + '):');
if (scope.type == 'global') {
InspectorTest.logObject(' -- skipped');
} else {
- var properties = await Protocol.Runtime.getProperties(
+ let properties = await Protocol.Runtime.getProperties(
{'objectId': scope.object.objectId});
await dumpProperties(properties);
}
@@ -194,9 +185,10 @@ async function dumpScopeChainsOnPause(message) {
}
}
-function handlePaused(msg) {
- var loc = msg.params.callFrames[0].location;
+async function handlePaused(msg) {
+ let loc = msg.params.callFrames[0].location;
printPauseLocation(loc.scriptId, loc.lineNumber, loc.columnNumber);
- dumpScopeChainsOnPause(msg)
- .then(Protocol.Debugger[step_actions.shift() || 'resume']);
+ await dumpScopeChainsOnPause(msg);
+ let action = step_actions.shift() || 'resume';
+ await Protocol.Debugger[action]();
}
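
The refactor above replaces a recursive then()-chain with a straight-line async function. The same pattern in isolation: loop over onceScriptParsed until enough wasm scripts have arrived, kicking off per-script work without awaiting it so the fetches run concurrently (hypothetical helper):

async function collectWasmScripts(Protocol, count, onScript) {
  const pending = [];
  while (pending.length < count) {
    const msg = await Protocol.Debugger.onceScriptParsed();
    if (!msg.params.url.startsWith('wasm://')) continue;  // skip JS scripts
    pending.push(onScript(msg.params));  // don't await yet; run concurrently
  }
  await Promise.all(pending);
}
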
diff --git a/deps/v8/test/inspector/inspector-test.cc b/deps/v8/test/inspector/inspector-test.cc
index 0a4cd48e92..0f39bc9417 100644
--- a/deps/v8/test/inspector/inspector-test.cc
+++ b/deps/v8/test/inspector/inspector-test.cc
@@ -968,8 +968,8 @@ class InspectorExtension : public IsolateData::SetupGlobalTask {
data->StoreCurrentStackTrace(description_view);
v8::Local<v8::ArrayBuffer> buffer =
v8::ArrayBuffer::New(isolate, sizeof(id));
- *static_cast<v8_inspector::V8StackTraceId*>(buffer->GetContents().Data()) =
- id;
+ *static_cast<v8_inspector::V8StackTraceId*>(
+ buffer->GetBackingStore()->Data()) = id;
args.GetReturnValue().Set(buffer);
}
@@ -983,7 +983,7 @@ class InspectorExtension : public IsolateData::SetupGlobalTask {
IsolateData* data = IsolateData::FromContext(context);
v8_inspector::V8StackTraceId* id =
static_cast<v8_inspector::V8StackTraceId*>(
- args[0].As<v8::ArrayBuffer>()->GetContents().Data());
+ args[0].As<v8::ArrayBuffer>()->GetBackingStore()->Data());
data->ExternalAsyncTaskStarted(*id);
}
@@ -997,7 +997,7 @@ class InspectorExtension : public IsolateData::SetupGlobalTask {
IsolateData* data = IsolateData::FromContext(context);
v8_inspector::V8StackTraceId* id =
static_cast<v8_inspector::V8StackTraceId*>(
- args[0].As<v8::ArrayBuffer>()->GetContents().Data());
+ args[0].As<v8::ArrayBuffer>()->GetBackingStore()->Data());
data->ExternalAsyncTaskFinished(*id);
}
diff --git a/deps/v8/test/inspector/isolate-data.cc b/deps/v8/test/inspector/isolate-data.cc
index ae41264072..8011007e34 100644
--- a/deps/v8/test/inspector/isolate-data.cc
+++ b/deps/v8/test/inspector/isolate-data.cc
@@ -61,8 +61,9 @@ IsolateData::IsolateData(TaskRunner* task_runner,
: task_runner_(task_runner),
setup_global_tasks_(std::move(setup_global_tasks)) {
v8::Isolate::CreateParams params;
- params.array_buffer_allocator =
- v8::ArrayBuffer::Allocator::NewDefaultAllocator();
+ array_buffer_allocator_.reset(
+ v8::ArrayBuffer::Allocator::NewDefaultAllocator());
+ params.array_buffer_allocator = array_buffer_allocator_.get();
params.snapshot_blob = startup_data;
params.only_terminate_in_safe_scope = true;
isolate_.reset(v8::Isolate::New(params));
diff --git a/deps/v8/test/inspector/isolate-data.h b/deps/v8/test/inspector/isolate-data.h
index d569ab11e0..fc15c3b5f3 100644
--- a/deps/v8/test/inspector/isolate-data.h
+++ b/deps/v8/test/inspector/isolate-data.h
@@ -6,6 +6,7 @@
#define V8_TEST_INSPECTOR_PROTOCOL_ISOLATE_DATA_H_
#include <map>
+#include <memory>
#include "include/v8-inspector.h"
#include "include/v8-platform.h"
@@ -129,6 +130,7 @@ class IsolateData : public v8_inspector::V8InspectorClient {
TaskRunner* task_runner_;
SetupGlobalTasks setup_global_tasks_;
+ std::unique_ptr<v8::ArrayBuffer::Allocator> array_buffer_allocator_;
std::unique_ptr<v8::Isolate, IsolateDeleter> isolate_;
std::unique_ptr<v8_inspector::V8Inspector> inspector_;
int last_context_group_id_ = 0;
diff --git a/deps/v8/test/inspector/runtime/evaluate-with-disable-breaks-expected.txt b/deps/v8/test/inspector/runtime/evaluate-with-disable-breaks-expected.txt
new file mode 100644
index 0000000000..a6be5b9121
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/evaluate-with-disable-breaks-expected.txt
@@ -0,0 +1,72 @@
+Tests that Runtime.evaluate can run with breaks disabled.
+Test disableBreaks: false
+paused
+{
+ id : <messageId>
+ result : {
+ result : {
+ type : undefined
+ }
+ }
+}
+Test disableBreaks: true
+{
+ id : <messageId>
+ result : {
+ result : {
+ type : undefined
+ }
+ }
+}
+Test calling out with disableBreaks: false
+paused
+{
+ id : <messageId>
+ result : {
+ result : {
+ type : undefined
+ }
+ }
+}
+Test calling out with disableBreaks: true
+{
+ id : <messageId>
+ result : {
+ result : {
+ type : undefined
+ }
+ }
+}
+Test Debugger.pause with disableBreaks: false
+{
+ id : <messageId>
+ result : {
+ }
+}
+paused
+{
+ id : <messageId>
+ result : {
+ result : {
+ description : 1
+ type : number
+ value : 1
+ }
+ }
+}
+Test Debugger.pause with disableBreaks: true
+{
+ id : <messageId>
+ result : {
+ }
+}
+{
+ id : <messageId>
+ result : {
+ result : {
+ description : 1
+ type : number
+ value : 1
+ }
+ }
+}
diff --git a/deps/v8/test/inspector/runtime/evaluate-with-disable-breaks.js b/deps/v8/test/inspector/runtime/evaluate-with-disable-breaks.js
new file mode 100644
index 0000000000..43ddc5278c
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/evaluate-with-disable-breaks.js
@@ -0,0 +1,60 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let {session, contextGroup, Protocol} = InspectorTest.start("Tests that Runtime.evaluate can run with breaks disabled.");
+
+session.setupScriptMap();
+contextGroup.addScript(`
+ function f() {
+ debugger;
+ } //# sourceURL=test.js`);
+Protocol.Runtime.enable();
+Protocol.Debugger.enable();
+
+Protocol.Debugger.onPaused(message => {
+ InspectorTest.log("paused");
+ Protocol.Debugger.resume();
+});
+
+(async function() {
+ InspectorTest.log("Test disableBreaks: false");
+ InspectorTest.logMessage(await Protocol.Runtime.evaluate({
+ expression: "debugger;",
+ disableBreaks: false
+ }));
+
+ InspectorTest.log("Test disableBreaks: true");
+ InspectorTest.logMessage(await Protocol.Runtime.evaluate({
+ expression: "debugger;",
+ disableBreaks: true
+ }));
+
+ InspectorTest.log("Test calling out with disableBreaks: false");
+ InspectorTest.logMessage(await Protocol.Runtime.evaluate({
+ expression: "f();",
+ disableBreaks: false
+ }));
+
+ InspectorTest.log("Test calling out with disableBreaks: true");
+ InspectorTest.logMessage(await Protocol.Runtime.evaluate({
+ expression: "f();",
+ disableBreaks: true
+ }));
+
+ InspectorTest.log("Test Debugger.pause with disableBreaks: false");
+ InspectorTest.logMessage(await Protocol.Debugger.pause());
+ InspectorTest.logMessage(await Protocol.Runtime.evaluate({
+ expression: "1",
+ disableBreaks: false
+ }));
+
+ InspectorTest.log("Test Debugger.pause with disableBreaks: true");
+ InspectorTest.logMessage(await Protocol.Debugger.pause());
+ InspectorTest.logMessage(await Protocol.Runtime.evaluate({
+ expression: "1",
+ disableBreaks: true
+ }));
+
+ InspectorTest.completeTest();
+})();
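
The new disableBreaks flag makes Runtime.evaluate ignore break reasons (debugger statements, a pending Debugger.pause) for the duration of the evaluation, which is what the paired expectation file checks: the "paused" lines appear only in the disableBreaks: false cases. Usage in isolation, assuming a connected session:

async function evaluateWithoutBreaks(Protocol, expression) {
  // With disableBreaks: true, a "debugger;" statement does not pause and
  // a pending Debugger.pause is not honored during the evaluation.
  return Protocol.Runtime.evaluate({expression, disableBreaks: true});
}
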
diff --git a/deps/v8/test/inspector/task-runner.h b/deps/v8/test/inspector/task-runner.h
index 41a5729571..afc3c39ab2 100644
--- a/deps/v8/test/inspector/task-runner.h
+++ b/deps/v8/test/inspector/task-runner.h
@@ -6,6 +6,7 @@
#define V8_TEST_INSPECTOR_PROTOCOL_TASK_RUNNER_H_
#include <map>
+#include <memory>
#include "include/v8-inspector.h"
#include "include/v8-platform.h"
diff --git a/deps/v8/test/intl/assert.js b/deps/v8/test/intl/assert.js
index a6367a8cf2..ae1646cc02 100644
--- a/deps/v8/test/intl/assert.js
+++ b/deps/v8/test/intl/assert.js
@@ -157,7 +157,7 @@ function assertThrows(code, type_opt, cause_opt) {
assertInstanceof(e, type_opt);
}
if (arguments.length >= 3) {
- assertEquals(cause_opt, e.type, 'thrown exception type mismatch');
+ assertEquals(cause_opt, e.message, 'thrown exception type mismatch');
}
// Success.
return;
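
This one-character-class change is a bug fix rather than churn: standard Error objects carry no `type` property, so the old check compared `cause_opt` against undefined; comparing against `message` makes the third argument meaningful. For example, with the fix:

// Passes only if a RangeError with exactly this message is thrown.
assertThrows(() => { throw new RangeError('bad unit'); }, RangeError, 'bad unit');
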
diff --git a/deps/v8/test/intl/bigint/tolocalestring.js b/deps/v8/test/intl/bigint/tolocalestring.js
index d0b6792ea8..449d644546 100644
--- a/deps/v8/test/intl/bigint/tolocalestring.js
+++ b/deps/v8/test/intl/bigint/tolocalestring.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-intl-bigint
-
var locales = [
"en", // "1,234,567,890,123,456"
"de", // "1.234.567.890.123.456"
diff --git a/deps/v8/test/intl/date-format/check-calendar.js b/deps/v8/test/intl/date-format/check-calendar.js
index b6c7c58ea3..f9e5565f60 100644
--- a/deps/v8/test/intl/date-format/check-calendar.js
+++ b/deps/v8/test/intl/date-format/check-calendar.js
@@ -7,6 +7,15 @@
let invalidCalendar = [
"invalid",
"abce",
+ "abc-defghi",
+];
+
+let illFormedCalendar = [
+ "",
+ "i",
+ "ij",
+ "abcdefghi",
+ "abc-ab",
];
// https://www.unicode.org/repos/cldr/tags/latest/common/bcp47/calendar.xml
@@ -36,8 +45,17 @@ let locales = [
"ar",
];
-
invalidCalendar.forEach(function(calendar) {
+ locales.forEach(function(base) {
+ var df;
+ assertDoesNotThrow(() => df = new Intl.DateTimeFormat([base], {calendar}));
+ assertEquals(
+ (new Intl.DateTimeFormat([base])).resolvedOptions().calendar,
+ df.resolvedOptions().calendar);
+ });
+});
+
+illFormedCalendar.forEach(function(calendar) {
assertThrows(
() => new Intl.DateTimeFormat(["en"], {calendar}),
RangeError);
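
The split above (mirrored in check-numbering-system.js below) encodes the ECMA-402 distinction between invalid and ill-formed: a calendar value that is syntactically a valid Unicode extension type (subtags of 3-8 alphanumerics joined by "-") but unknown now falls back to the locale's default, while a syntactically ill-formed one still throws a RangeError. The well-formedness test, roughly:

// Matches the test data above: 'abc-defghi' is well-formed (falls back);
// 'abc-ab' and 'abcdefghi' are ill-formed (throw RangeError).
const isWellFormedUnicodeType =
    s => /^[a-z0-9]{3,8}(-[a-z0-9]{3,8})*$/i.test(s);
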
diff --git a/deps/v8/test/intl/date-format/check-numbering-system.js b/deps/v8/test/intl/date-format/check-numbering-system.js
index 0bb71c5358..aa6ac8728e 100644
--- a/deps/v8/test/intl/date-format/check-numbering-system.js
+++ b/deps/v8/test/intl/date-format/check-numbering-system.js
@@ -10,6 +10,15 @@ let invalidNumberingSystem = [
"finance",
"native",
"traditio",
+ "abc-defghi",
+];
+
+let illFormedNumberingSystem = [
+ "",
+ "i",
+ "ij",
+ "abcdefghi",
+ "abc-ab",
];
// https://tc39.github.io/ecma402/#table-numbering-system-digits
@@ -43,13 +52,22 @@ let locales = [
"ar",
];
-
invalidNumberingSystem.forEach(function(numberingSystem) {
+ locales.forEach(function(base) {
+ var df;
+ assertDoesNotThrow(
+ () => df = new Intl.DateTimeFormat([base], {numberingSystem}));
+ assertEquals(
+ (new Intl.DateTimeFormat([base])).resolvedOptions().numberingSystem,
+ df.resolvedOptions().numberingSystem);
+ });
+});
+
+illFormedNumberingSystem.forEach(function(numberingSystem) {
assertThrows(
() => new Intl.DateTimeFormat(["en"], {numberingSystem}),
RangeError);
-}
-);
+});
let value = new Date();
validNumberingSystem.forEach(function(numberingSystem) {
diff --git a/deps/v8/test/intl/date-format/constructor-calendar-numberingSytem-order.js b/deps/v8/test/intl/date-format/constructor-calendar-numberingSytem-order.js
new file mode 100644
index 0000000000..75b4a456d4
--- /dev/null
+++ b/deps/v8/test/intl/date-format/constructor-calendar-numberingSytem-order.js
@@ -0,0 +1,35 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-intl-add-calendar-numbering-system
+const actual = [];
+
+const options = {
+ get localeMatcher() {
+ actual.push("localeMatcher");
+ return undefined;
+ },
+ get calendar() {
+ actual.push("calendar");
+ return undefined;
+ },
+ get numberingSystem() {
+ actual.push("numberingSystem");
+ return undefined;
+ },
+ get hour12() {
+ actual.push("hour12");
+ return undefined;
+ },
+};
+
+const expected = [
+ "localeMatcher",
+ "calendar",
+ "numberingSystem",
+ "hour12"
+];
+
+let df = new Intl.DateTimeFormat(undefined, options);
+assertEquals(actual.join(":"), expected.join(":"));
diff --git a/deps/v8/test/intl/date-format/constructor-date-style-order.js b/deps/v8/test/intl/date-format/constructor-date-style-order.js
index 8e601b48d3..b148d8eea7 100644
--- a/deps/v8/test/intl/date-format/constructor-date-style-order.js
+++ b/deps/v8/test/intl/date-format/constructor-date-style-order.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-intl-datetime-style
-
// Throws only once during construction.
// Check for all getters to prevent regression.
// Preserve the order of getter initialization.
diff --git a/deps/v8/test/intl/date-format/constructor-date-time-style-order.js b/deps/v8/test/intl/date-format/constructor-date-time-style-order.js
index d4d114662f..8152b17618 100644
--- a/deps/v8/test/intl/date-format/constructor-date-time-style-order.js
+++ b/deps/v8/test/intl/date-format/constructor-date-time-style-order.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-intl-datetime-style
-
// Throws only once during construction.
// Check for all getters to prevent regression.
// Preserve the order of getter initialization.
diff --git a/deps/v8/test/intl/date-format/constructor-date-time-style.js b/deps/v8/test/intl/date-format/constructor-date-time-style.js
index f4bc40b396..477c5c5a01 100644
--- a/deps/v8/test/intl/date-format/constructor-date-time-style.js
+++ b/deps/v8/test/intl/date-format/constructor-date-time-style.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-intl-datetime-style
-
var validStyle = ["full", "long", "medium", "short", undefined];
var invalidStyle = ["narrow", "numeric"];
diff --git a/deps/v8/test/intl/date-format/constructor-no-style-order.js b/deps/v8/test/intl/date-format/constructor-no-style-order.js
index bd4bc4cc37..a671968367 100644
--- a/deps/v8/test/intl/date-format/constructor-no-style-order.js
+++ b/deps/v8/test/intl/date-format/constructor-no-style-order.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-intl-datetime-style
-
// Throws only once during construction.
// Check for all getters to prevent regression.
// Preserve the order of getter initialization.
diff --git a/deps/v8/test/intl/date-format/constructor-time-style-order.js b/deps/v8/test/intl/date-format/constructor-time-style-order.js
index d35f21a196..b3c8850dde 100644
--- a/deps/v8/test/intl/date-format/constructor-time-style-order.js
+++ b/deps/v8/test/intl/date-format/constructor-time-style-order.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-intl-datetime-style
-
// Throws only once during construction.
// Check for all getters to prevent regression.
// Preserve the order of getter initialization.
diff --git a/deps/v8/test/intl/date-format/en-format-range-to-parts.js b/deps/v8/test/intl/date-format/en-format-range-to-parts.js
index c2421812f8..9d9b2b8193 100644
--- a/deps/v8/test/intl/date-format/en-format-range-to-parts.js
+++ b/deps/v8/test/intl/date-format/en-format-range-to-parts.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-intl-date-format-range
-
const date1 = new Date("2019-01-03T03:20");
const date2 = new Date("2019-01-05T19:33");
const date3 = new Date("2019-01-05T22:57");
diff --git a/deps/v8/test/intl/date-format/format-range-to-parts.js b/deps/v8/test/intl/date-format/format-range-to-parts.js
index b2eac1765c..3ffd61ba7e 100644
--- a/deps/v8/test/intl/date-format/format-range-to-parts.js
+++ b/deps/v8/test/intl/date-format/format-range-to-parts.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-intl-date-format-range
-
let descriptor = Object.getOwnPropertyDescriptor(
Intl.DateTimeFormat.prototype, "formatRangeToParts");
assertTrue(descriptor.writable);
diff --git a/deps/v8/test/intl/date-format/format-range.js b/deps/v8/test/intl/date-format/format-range.js
index f00f228b07..066e53e1bd 100644
--- a/deps/v8/test/intl/date-format/format-range.js
+++ b/deps/v8/test/intl/date-format/format-range.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-intl-date-format-range
-
let descriptor = Object.getOwnPropertyDescriptor(
Intl.DateTimeFormat.prototype, "formatRange");
assertTrue(descriptor.writable);
diff --git a/deps/v8/test/intl/date-format/format-with-extensions.js b/deps/v8/test/intl/date-format/format-with-extensions.js
index b9eb0fcb1e..9aa37e1f32 100644
--- a/deps/v8/test/intl/date-format/format-with-extensions.js
+++ b/deps/v8/test/intl/date-format/format-with-extensions.js
@@ -20,18 +20,9 @@ function checkFormat(locale, options, expected) {
assertEquals(expected.formatted, formatted);
}
-// Even though the calendar is Chinese, the best pattern search for formatting
-// should be done in the base locale (i.e. en or en-GB instead of
-// en-u-ca-chinese or en-GB-u-ca-chinese). Otherwise, {year: 'numeric'} would
-// results in '35 (wu-su)' where 'wu-su' is the designation for year 35 in the
-// 60-year cycle. See https://github.com/tc39/ecma402/issues/225 .
[
["en", "gregory", "latn", "2018"],
["en-GB", "gregory", "latn", "2018"],
- ["en-u-ca-chinese", "chinese", "latn", "35"],
- ["en-GB-u-ca-chinese", "chinese", "latn", "35"],
- ["en-u-ca-chinese-nu-deva", "chinese", "deva", "३५"],
- ["en-GB-u-ca-chinese-nu-deva", "chinese", "deva", "३५"],
].forEach(function(entry) {
checkFormat(entry[0], {year: 'numeric'},
{ cal: entry[1],
@@ -48,9 +39,6 @@ const enGBTypes = ["day", "literal", "month", "literal", "year"];
["en", "gregory", "latn", "6/21/2018", enUSTypes],
["en-GB", "gregory", "latn", "21/06/2018", enGBTypes],
["en-u-nu-deva", "gregory", "deva", "६/२१/२०१८", enUSTypes],
- ["en-u-ca-chinese", "chinese", "latn", "5/8/35", enUSTypes],
- ["en-GB-u-ca-chinese", "chinese", "latn", "08/05/35", enGBTypes],
- ["en-u-ca-chinese-nu-deva", "chinese", "deva", "५/८/३५", enUSTypes],
].forEach(function(entry) {
checkFormat(entry[0], {},
{ cal: entry[1],
diff --git a/deps/v8/test/intl/date-format/property-override-date-style.js b/deps/v8/test/intl/date-format/property-override-date-style.js
index 542ef5eb27..c7965a41c5 100644
--- a/deps/v8/test/intl/date-format/property-override-date-style.js
+++ b/deps/v8/test/intl/date-format/property-override-date-style.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-intl-datetime-style
-
// Checks for security holes introduced by Object.property overrides.
// For example:
// Object.defineProperty(Array.prototype, 'locale', {
@@ -34,8 +32,6 @@ var expectedProperties = [
'calendar',
'numberingSystem',
'timeZone',
- 'hourCycle',
- 'hour12',
'dateStyle',
];
diff --git a/deps/v8/test/intl/date-format/property-override-date-time-style.js b/deps/v8/test/intl/date-format/property-override-date-time-style.js
index 8977be2967..6cb7af171f 100644
--- a/deps/v8/test/intl/date-format/property-override-date-time-style.js
+++ b/deps/v8/test/intl/date-format/property-override-date-time-style.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-intl-datetime-style
-
// Checks for security holes introduced by Object.property overrides.
// For example:
// Object.defineProperty(Array.prototype, 'locale', {
diff --git a/deps/v8/test/intl/date-format/property-override-time-style.js b/deps/v8/test/intl/date-format/property-override-time-style.js
index ab8fa22d0b..50cb6e26f1 100644
--- a/deps/v8/test/intl/date-format/property-override-time-style.js
+++ b/deps/v8/test/intl/date-format/property-override-time-style.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-intl-datetime-style
-
// Checks for security holes introduced by Object.property overrides.
// For example:
// Object.defineProperty(Array.prototype, 'locale', {
diff --git a/deps/v8/test/intl/date-format/related-year.js b/deps/v8/test/intl/date-format/related-year.js
new file mode 100644
index 0000000000..a3d9e9dcb4
--- /dev/null
+++ b/deps/v8/test/intl/date-format/related-year.js
@@ -0,0 +1,14 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-intl-other-calendars
+// Test that it outputs relatedYear and yearName parts.
+
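+// In the Chinese calendar, Gregorian 2019 corresponds to the sexagenary
+// year 己亥, so formatToParts yields relatedYear, yearName and a literal 年.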
+let df = new Intl.DateTimeFormat("zh-u-ca-chinese", {year: "numeric"})
+let date = new Date(2019, 5, 1);
+assertEquals("2019己亥年", df.format(date));
+assertEquals([{type: "relatedYear", value: "2019"},
+ {type: "yearName", value: "己亥"},
+ {type: "literal", value: "年"}],
+ df.formatToParts(date));
diff --git a/deps/v8/test/intl/general/case-mapping.js b/deps/v8/test/intl/general/case-mapping.js
index 79d1624821..606af09c44 100644
--- a/deps/v8/test/intl/general/case-mapping.js
+++ b/deps/v8/test/intl/general/case-mapping.js
@@ -125,9 +125,6 @@ assertEquals("abci\u0307", "aBcI\u0307".toLowerCase());
// Anything other than 'tr' and 'az' behave like root for U+0307.
assertEquals("abci\u0307", "aBcI\u0307".toLocaleLowerCase("fil"));
assertEquals("abci\u0307", "aBcI\u0307".toLocaleLowerCase("zh-Hant-TW"));
-assertEquals("abci\u0307", "aBcI\u0307".toLocaleLowerCase("i-klingon"));
-assertEquals("abci\u0307", "aBcI\u0307".toLocaleLowerCase("i-enochian"));
-assertEquals("abci\u0307", "aBcI\u0307".toLocaleLowerCase("x-foobar"));
// Up to 8 chars are allowed for the primary language tag in BCP 47.
assertEquals("abci\u0307", "aBcI\u0307".toLocaleLowerCase("longlang"));
diff --git a/deps/v8/test/intl/general/grandfathered_tags_without_preferred_value.js b/deps/v8/test/intl/general/grandfathered_tags_without_preferred_value.js
index 808e50d208..16bf369601 100644
--- a/deps/v8/test/intl/general/grandfathered_tags_without_preferred_value.js
+++ b/deps/v8/test/intl/general/grandfathered_tags_without_preferred_value.js
@@ -8,18 +8,8 @@
// v8 works around that ICU issue.
// See https://www.iana.org/assignments/language-subtag-registry/language-subtag-registry .
["cel-gaulish", "cel-gaulish"],
- ["i-default", "i-default"],
- ["i-mingo", "i-mingo"],
- ["i-enochian", "i-enochian"],
- ["zh-min", "zh-min"],
// Matching should be case-insensitive.
- ["I-default", "i-default"],
- ["i-DEFAULT", "i-default"],
- ["I-DEFAULT", "i-default"],
- ["i-DEfauLT", "i-default"],
- ["zh-Min", "zh-min"],
- ["Zh-min", "zh-min"],
].forEach(([inputLocale, expectedLocale]) => {
const canonicalLocales = Intl.getCanonicalLocales(inputLocale);
assertEquals(canonicalLocales.length, 1);
diff --git a/deps/v8/test/intl/general/language_tags_with_preferred_values.js b/deps/v8/test/intl/general/language_tags_with_preferred_values.js
index 4f2fbbfb2e..462bcfb8f7 100644
--- a/deps/v8/test/intl/general/language_tags_with_preferred_values.js
+++ b/deps/v8/test/intl/general/language_tags_with_preferred_values.js
@@ -9,8 +9,6 @@
// Matching should be case-insensitive.
["sgn-De", "gsg"],
- ["sgn-BE-FR", "sfb"],
- ["Sgn-bE-Fr", "sfb"],
// deprecated region tag
["und-Latn-dd", "und-Latn-DE"],
diff --git a/deps/v8/test/intl/general/supported-locales-of.js b/deps/v8/test/intl/general/supported-locales-of.js
index eb5c426f07..84984c1d25 100644
--- a/deps/v8/test/intl/general/supported-locales-of.js
+++ b/deps/v8/test/intl/general/supported-locales-of.js
@@ -83,16 +83,15 @@ for (const service of services) {
privateuseLocale = service.supportedLocalesOf("en-US-x-twain");
assertEquals("en-US-x-twain", privateuseLocale[0]);
- privateuseLocale2 = service.supportedLocalesOf("x-twain");
- assertEquals(undefined, privateuseLocale2[0]);
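+  // A privateuse-only tag such as "x-twain" is no longer a structurally
+  // valid language tag, so supportedLocalesOf throws instead of filtering.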
+ assertThrows(() => service.supportedLocalesOf("x-twain"), RangeError);
+
if (service != Intl.PluralRules) {
grandfatheredLocale = service.supportedLocalesOf("art-lojban");
assertEquals(undefined, grandfatheredLocale[0]);
}
- grandfatheredLocale2 = service.supportedLocalesOf("i-pwn");
- assertEquals(undefined, grandfatheredLocale2[0]);
+ assertThrows(() => service.supportedLocalesOf("x-pwn"), RangeError);
unicodeInPrivateuseLocale = service.supportedLocalesOf(
"en-US-x-u-co-phonebk"
diff --git a/deps/v8/test/intl/intl.status b/deps/v8/test/intl/intl.status
index ba54743d67..669f5d93d5 100644
--- a/deps/v8/test/intl/intl.status
+++ b/deps/v8/test/intl/intl.status
@@ -36,9 +36,6 @@
# https://code.google.com/p/v8/issues/detail?id=9312
'regress-9312': [FAIL],
-
- # Slow tests.
- 'regress-903566': [PASS, SLOW],
}], # ALWAYS
['variant == no_wasm_traps', {
@@ -72,12 +69,6 @@
}], # 'system == android'
##############################################################################
-['variant == stress', {
- # Too slow.
- 'regress-903566': [SKIP],
-}], # 'variant == stress'
-
-##############################################################################
['variant == jitless and not embedded_builtins', {
'*': [SKIP],
}], # variant == jitless and not embedded_builtins
diff --git a/deps/v8/test/intl/list-format/resolved-options.js b/deps/v8/test/intl/list-format/resolved-options.js
index 42687990f9..a2cfff8606 100644
--- a/deps/v8/test/intl/list-format/resolved-options.js
+++ b/deps/v8/test/intl/list-format/resolved-options.js
@@ -144,7 +144,3 @@ assertEquals(
assertEquals(
'ar',
(new Intl.ListFormat(['xyz', 'ar'])).resolvedOptions().locale);
-
-assertEquals(
- 'ar',
- (new Intl.ListFormat(['i-default', 'ar'])).resolvedOptions().locale);
diff --git a/deps/v8/test/intl/number-format/check-numbering-system.js b/deps/v8/test/intl/number-format/check-numbering-system.js
index cd7884b8dc..c9df98a671 100644
--- a/deps/v8/test/intl/number-format/check-numbering-system.js
+++ b/deps/v8/test/intl/number-format/check-numbering-system.js
@@ -10,6 +10,15 @@ let invalidNumberingSystem = [
"finance",
"native",
"traditio",
+ "abc-defghi",
+];
+
+let illFormedNumberingSystem = [
+ "",
+ "i",
+ "ij",
+ "abcdefghi",
+ "abc-ab",
];
// https://tc39.github.io/ecma402/#table-numbering-system-digits
@@ -45,11 +54,21 @@ let locales = [
invalidNumberingSystem.forEach(function(numberingSystem) {
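+  // Well-formed but invalid values no longer throw; they fall back to the
+  // locale's default numbering system.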
+ locales.forEach(function(base) {
+ var df;
+ assertDoesNotThrow(
+ () => df = new Intl.NumberFormat([base], {numberingSystem}));
+ assertEquals(
+ (new Intl.NumberFormat([base])).resolvedOptions().numberingSystem,
+ df.resolvedOptions().numberingSystem);
+ });
+});
+
+illFormedNumberingSystem.forEach(function(numberingSystem) {
assertThrows(
() => new Intl.NumberFormat(["en"], {numberingSystem}),
RangeError);
-}
-);
+});
let value = 1234567.89;
validNumberingSystem.forEach(function(numberingSystem) {
diff --git a/deps/v8/test/intl/number-format/constructor-numberingSytem-order.js b/deps/v8/test/intl/number-format/constructor-numberingSytem-order.js
new file mode 100644
index 0000000000..8c284967ff
--- /dev/null
+++ b/deps/v8/test/intl/number-format/constructor-numberingSytem-order.js
@@ -0,0 +1,30 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-intl-add-calendar-numbering-system
+const actual = [];
+
+const options = {
+ get localeMatcher() {
+ actual.push("localeMatcher");
+ return undefined;
+ },
+ get numberingSystem() {
+ actual.push("numberingSystem");
+ return undefined;
+ },
+ get style() {
+ actual.push("style");
+ return undefined;
+ },
+};
+
+const expected = [
+ "localeMatcher",
+ "numberingSystem",
+ "style"
+];
+
+let nf = new Intl.NumberFormat(undefined, options);
+assertEquals(actual.join(":"), expected.join(":"));
diff --git a/deps/v8/test/intl/number-format/property-override.js b/deps/v8/test/intl/number-format/property-override.js
deleted file mode 100644
index 590b1c2e4b..0000000000
--- a/deps/v8/test/intl/number-format/property-override.js
+++ /dev/null
@@ -1,80 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Checks for security holes introduced by Object.property overrides.
-// For example:
-// Object.defineProperty(Array.prototype, 'locale', {
-// set: function(value) {
-// throw new Error('blah');
-// },
-// configurable: true,
-// enumerable: false
-// });
-//
-// would throw in case of (JS) x.locale = 'us' or (C++) x->Set('locale', 'us').
-//
-// Update both number-format.js and number-format.cc so they have the same
-// list of properties.
-
-// Flags: --noharmony-intl-numberformat-unified
-
-// First get supported properties.
-var properties = [];
-// Some properties are optional and won't show up in resolvedOptions if
-// they were not requested - currency, currencyDisplay,
-// minimumSignificantDigits and maximumSignificantDigits - so we request them.
-var options = Intl.NumberFormat(
- undefined, {style: 'currency', currency: 'USD', currencyDisplay: 'name',
- minimumSignificantDigits: 1, maximumSignificantDigits: 5}).
- resolvedOptions();
-for (var prop in options) {
- if (options.hasOwnProperty(prop)) {
- properties.push(prop);
- }
-}
-
-var expectedProperties = [
- 'style', 'locale', 'numberingSystem',
- 'currency', 'currencyDisplay', 'useGrouping',
- 'minimumIntegerDigits', 'minimumFractionDigits',
- 'maximumFractionDigits', 'minimumSignificantDigits',
- 'maximumSignificantDigits'
-];
-
-assertEquals(expectedProperties.length, properties.length);
-
-properties.forEach(function(prop) {
- assertFalse(expectedProperties.indexOf(prop) === -1);
-});
-
-taintProperties(properties);
-
-var locale = Intl.NumberFormat(undefined,
- {currency: 'USD', currencyDisplay: 'name',
- minimumIntegerDigits: 2,
- numberingSystem: 'latn'}).
- resolvedOptions().locale;
diff --git a/deps/v8/test/intl/number-format/unified/compact-display.js b/deps/v8/test/intl/number-format/unified/compact-display.js
index 228a2b6259..666cab86f4 100644
--- a/deps/v8/test/intl/number-format/unified/compact-display.js
+++ b/deps/v8/test/intl/number-format/unified/compact-display.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-intl-numberformat-unified
-
const testData = [
["short"],
["long"],
diff --git a/deps/v8/test/intl/number-format/unified/constructor-order.js b/deps/v8/test/intl/number-format/unified/constructor-order.js
index be716371f5..f32f0b1696 100644
--- a/deps/v8/test/intl/number-format/unified/constructor-order.js
+++ b/deps/v8/test/intl/number-format/unified/constructor-order.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-intl-numberformat-unified
// Similar to constructor-order.js but also consider the new options
// in https://tc39-transfer.github.io/proposal-unified-intl-numberformat/
diff --git a/deps/v8/test/intl/number-format/unified/currency-display.js b/deps/v8/test/intl/number-format/unified/currency-display.js
index effd026778..3c407f27cb 100644
--- a/deps/v8/test/intl/number-format/unified/currency-display.js
+++ b/deps/v8/test/intl/number-format/unified/currency-display.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-intl-numberformat-unified
-
// Test defaults
let nf = new Intl.NumberFormat();
assertEquals(undefined, nf.resolvedOptions().currencyDisplay);
diff --git a/deps/v8/test/intl/number-format/unified/currency-sign.js b/deps/v8/test/intl/number-format/unified/currency-sign.js
index 3f2941a8e9..9bfecf8c9b 100644
--- a/deps/v8/test/intl/number-format/unified/currency-sign.js
+++ b/deps/v8/test/intl/number-format/unified/currency-sign.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-intl-numberformat-unified
-
// Test default.
let nf = new Intl.NumberFormat();
assertEquals(undefined, nf.resolvedOptions().currencySign);
diff --git a/deps/v8/test/intl/number-format/unified/no-compact-display.js b/deps/v8/test/intl/number-format/unified/no-compact-display.js
index 95611e90fc..36d75722ea 100644
--- a/deps/v8/test/intl/number-format/unified/no-compact-display.js
+++ b/deps/v8/test/intl/number-format/unified/no-compact-display.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-intl-numberformat-unified
-
// Except when the notation is "compact", the resolvedOptions().compactDisplay
// should be undefined.
//
diff --git a/deps/v8/test/intl/number-format/unified/notation-engineering-formatToParts.js b/deps/v8/test/intl/number-format/unified/notation-engineering-formatToParts.js
index 280771b2e7..da91f4ba96 100644
--- a/deps/v8/test/intl/number-format/unified/notation-engineering-formatToParts.js
+++ b/deps/v8/test/intl/number-format/unified/notation-engineering-formatToParts.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-intl-numberformat-unified
-
// Test notation: "engineering" with formatToParts.
const nf = Intl.NumberFormat("en", {notation: "engineering"});
diff --git a/deps/v8/test/intl/number-format/unified/notation-scientific-formatToParts.js b/deps/v8/test/intl/number-format/unified/notation-scientific-formatToParts.js
index 9ffd5f8709..ade723dbe3 100644
--- a/deps/v8/test/intl/number-format/unified/notation-scientific-formatToParts.js
+++ b/deps/v8/test/intl/number-format/unified/notation-scientific-formatToParts.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-intl-numberformat-unified
-
// Test notation: "scientific" with formatToParts.
const nf = Intl.NumberFormat("en", {notation: "scientific"});
diff --git a/deps/v8/test/intl/number-format/unified/notation.js b/deps/v8/test/intl/number-format/unified/notation.js
index b26ee01f5c..3711644f52 100644
--- a/deps/v8/test/intl/number-format/unified/notation.js
+++ b/deps/v8/test/intl/number-format/unified/notation.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-intl-numberformat-unified
-
// Test defaults.
let nf = new Intl.NumberFormat();
diff --git a/deps/v8/test/intl/number-format/unified/percent.js b/deps/v8/test/intl/number-format/unified/percent.js
index 9918210ec7..c4de0f7fec 100644
--- a/deps/v8/test/intl/number-format/unified/percent.js
+++ b/deps/v8/test/intl/number-format/unified/percent.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-intl-numberformat-unified
-//
// Test the handling of "percent" w/ "unit"
let nf1 = new Intl.NumberFormat("en-US", {
diff --git a/deps/v8/test/intl/number-format/unified/sign-display.js b/deps/v8/test/intl/number-format/unified/sign-display.js
index c71f57e67c..18b74c9f0b 100644
--- a/deps/v8/test/intl/number-format/unified/sign-display.js
+++ b/deps/v8/test/intl/number-format/unified/sign-display.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-intl-numberformat-unified
-
// Test default.
let nf = new Intl.NumberFormat();
assertEquals("auto", nf.resolvedOptions().signDisplay);
diff --git a/deps/v8/test/intl/number-format/unified/style-unit.js b/deps/v8/test/intl/number-format/unified/style-unit.js
index 72eb0a782d..757c0093c2 100644
--- a/deps/v8/test/intl/number-format/unified/style-unit.js
+++ b/deps/v8/test/intl/number-format/unified/style-unit.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-intl-numberformat-unified
-
// Test default.
let nf = new Intl.NumberFormat();
diff --git a/deps/v8/test/intl/number-format/unified/unit-display.js b/deps/v8/test/intl/number-format/unified/unit-display.js
index d4d814d70e..d451fda324 100644
--- a/deps/v8/test/intl/number-format/unified/unit-display.js
+++ b/deps/v8/test/intl/number-format/unified/unit-display.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-intl-numberformat-unified
-
// Test default.
let nf = new Intl.NumberFormat();
assertEquals(undefined, nf.resolvedOptions().unitDisplay);
diff --git a/deps/v8/test/intl/regress-1003748.js b/deps/v8/test/intl/regress-1003748.js
new file mode 100644
index 0000000000..89ef559e16
--- /dev/null
+++ b/deps/v8/test/intl/regress-1003748.js
@@ -0,0 +1,18 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let usd = new Intl.NumberFormat('en',
+ { style: 'currency', currency: 'USD' }).resolvedOptions();
+assertEquals(2, usd.maximumFractionDigits);
+assertEquals(2, usd.minimumFractionDigits);
+
+let jpy = new Intl.NumberFormat('en',
+ { style: 'currency', currency: 'JPY' }).resolvedOptions();
+assertEquals(0, jpy.maximumFractionDigits);
+assertEquals(0, jpy.minimumFractionDigits);
+
+let krw = new Intl.NumberFormat('en',
+ { style: 'currency', currency: 'KRW' }).resolvedOptions();
+assertEquals(0, krw.maximumFractionDigits);
+assertEquals(0, krw.minimumFractionDigits);
diff --git a/deps/v8/test/intl/regress-1012579.js b/deps/v8/test/intl/regress-1012579.js
new file mode 100644
index 0000000000..9051a0004c
--- /dev/null
+++ b/deps/v8/test/intl/regress-1012579.js
@@ -0,0 +1,6 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let longLocale = 'de-u-cf-cu-em-kk-kr-ks-kv-lb-lw-ms-nu-rg-sd-ss-tz';
+rtf = new Intl.RelativeTimeFormat(longLocale);
diff --git a/deps/v8/test/intl/regress-8725514.js b/deps/v8/test/intl/regress-8725514.js
index 82f884a093..f1bf92b7cc 100644
--- a/deps/v8/test/intl/regress-8725514.js
+++ b/deps/v8/test/intl/regress-8725514.js
@@ -6,5 +6,5 @@ Object.prototype.__defineGetter__('x', function () {
return -2147483648;
});
-var f = ["x-u-foo"];
+var f = ["en-US"];
Intl.NumberFormat(f);
diff --git a/deps/v8/test/intl/regress-903566.js b/deps/v8/test/intl/regress-903566.js
index 9346fa63a8..65f604f452 100644
--- a/deps/v8/test/intl/regress-903566.js
+++ b/deps/v8/test/intl/regress-903566.js
@@ -13,7 +13,9 @@ let arr = ["a","b","c"];
// Test under no HasHoleyElements();
assertFalse(%HasHoleyElements(arr));
assertDoesNotThrow(()=>(new Intl.ListFormat()).format(arr));
-for (var i = 0; i < 10000; i++) {
+// ICU uses bubblesort, so keep the array reasonably small (as of mid-2019:
+// 100 entries -> 1ms, 1,000 entries -> 64ms, 10,000 entries -> 5s).
+for (var i = 0; i < 100; i++) {
arr.push("xx");
}
assertFalse(%HasHoleyElements(arr));
diff --git a/deps/v8/test/intl/regress-9356.js b/deps/v8/test/intl/regress-9356.js
new file mode 100644
index 0000000000..a355aa1b9e
--- /dev/null
+++ b/deps/v8/test/intl/regress-9356.js
@@ -0,0 +1,14 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+assertFalse(/ſ/i.test('ſ'.toUpperCase()));
+assertFalse(/ſ/i.test('ſ'.toUpperCase()[0]));
+assertTrue(/ſ/i.test('ſ'));
+assertTrue(/ſ/i.test('ſ'[0]));
+assertFalse(/ſ/i.test('s'.toUpperCase()));
+assertFalse(/ſ/i.test('s'.toUpperCase()[0]));
+assertFalse(/ſ/i.test('S'.toUpperCase()));
+assertFalse(/ſ/i.test('S'.toUpperCase()[0]));
+assertFalse(/ſ/i.test('S'));
+assertFalse(/ſ/i.test('S'[0]));
diff --git a/deps/v8/test/intl/regress-9408.js b/deps/v8/test/intl/regress-9408.js
index 88883981f3..d5cfb11345 100644
--- a/deps/v8/test/intl/regress-9408.js
+++ b/deps/v8/test/intl/regress-9408.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-intl-numberformat-unified
// Test precision of compact-rounding
let compact = {notation: "compact"};
diff --git a/deps/v8/test/intl/regress-9464.js b/deps/v8/test/intl/regress-9464.js
new file mode 100644
index 0000000000..fc51d632f9
--- /dev/null
+++ b/deps/v8/test/intl/regress-9464.js
@@ -0,0 +1,51 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Number, BigInt and Intl.NumberFormat
+assertThrows(
+ "new Intl.NumberFormat('en', { style: 'unit', unit: 'son'});",
+ RangeError,
+ "Invalid unit argument for Intl.NumberFormat() 'son'");
+
+assertThrows(
+ "123n.toLocaleString('en', { style: 'unit', unit: 'son'});",
+ RangeError,
+ "Invalid unit argument for BigInt.prototype.toLocaleString() 'son'");
+
+assertThrows(
+ "Math.PI.toLocaleString('en', { style: 'unit', unit: 'son'});",
+ RangeError,
+ "Invalid unit argument for Number.prototype.toLocaleString() 'son'");
+
+// String and Intl.Collator
+assertThrows(
+ "new Intl.Collator('en', { usage: 'mom'});",
+ RangeError,
+ "Value mom out of range for Intl.Collator options property usage");
+
+assertThrows(
+ "'abc'.localeCompare('efg', 'en', { usage: 'mom'});",
+ RangeError,
+ "Value mom out of range for String.prototype.localeCompare options property usage");
+
+// Date and Intl.DateTimeFormat
+assertThrows(
+ "new Intl.DateTimeFormat('en', { hour: 'dad'});",
+ RangeError,
+ "Value dad out of range for Intl.DateTimeFormat options property hour");
+
+assertThrows(
+ "(new Date).toLocaleDateString('en', { hour: 'dad'});",
+ RangeError,
+ "Value dad out of range for Date.prototype.toLocaleDateString options property hour");
+
+assertThrows(
+ "(new Date).toLocaleString('en', { hour: 'dad'});",
+ RangeError,
+ "Value dad out of range for Date.prototype.toLocaleString options property hour");
+
+assertThrows(
+ "(new Date).toLocaleTimeString('en', { hour: 'dad'});",
+ RangeError,
+ "Value dad out of range for Date.prototype.toLocaleTimeString options property hour");
diff --git a/deps/v8/test/intl/regress-9475.js b/deps/v8/test/intl/regress-9475.js
index 3549ef8f38..68e2fdd7d7 100644
--- a/deps/v8/test/intl/regress-9475.js
+++ b/deps/v8/test/intl/regress-9475.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-intl-numberformat-unified
// Test format of all valid units won't throw exception.
let validList = [
diff --git a/deps/v8/test/intl/regress-9513.js b/deps/v8/test/intl/regress-9513.js
index e23b5cf77e..fc50df2418 100644
--- a/deps/v8/test/intl/regress-9513.js
+++ b/deps/v8/test/intl/regress-9513.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-intl-numberformat-unified
// Test Infinity, -Infinity, NaN won't crash with any notation in formatToParts.
let validNotations = [
diff --git a/deps/v8/test/intl/regress-9731.js b/deps/v8/test/intl/regress-9731.js
new file mode 100644
index 0000000000..3cabade5a1
--- /dev/null
+++ b/deps/v8/test/intl/regress-9731.js
@@ -0,0 +1,15 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+assertFalse(/k/i.test('\u212A'));
+assertTrue(/k/i.test('K'));
+assertTrue(/k/i.test('k'));
+
+assertFalse(/K/i.test('\u212A'));
+assertTrue(/K/i.test('K'));
+assertTrue(/K/i.test('k'));
+
+assertTrue(/\u212A/i.test('\u212A'));
+assertFalse(/\u212A/i.test('k'));
+assertFalse(/\u212A/i.test('K'));
diff --git a/deps/v8/test/intl/regress-9747.js b/deps/v8/test/intl/regress-9747.js
new file mode 100644
index 0000000000..8b51ddc275
--- /dev/null
+++ b/deps/v8/test/intl/regress-9747.js
@@ -0,0 +1,50 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let lf = new Intl.ListFormat("en");
+
+// Test normal array
+assertDoesNotThrow(() => lf.format(['a','b','c']));
+assertThrows("lf.format(['a','b',3])", TypeError, "Iterable yielded 3 which is not a string");
+
+// Test sparse array
+let sparse = ['a','b'];
+sparse[10] = 'c';
+assertThrows("lf.format(sparse)", TypeError, "Iterable yielded undefined which is not a string");
+
+// Test an iterable that yields only Strings.
+let iterable_of_strings = {
+ [Symbol.iterator]() {
+ return this;
+ },
+ count: 0,
+ next() {
+ if (this.count++ < 4) {
+ return {done: false, value: String(this.count)};
+ }
+ return {done:true}
+ }
+};
+assertDoesNotThrow(() => lf.format(iterable_of_strings));
+
+// Test that an iterable yielding a non-String throws a TypeError.
+let iterable_of_strings_and_number = {
+ [Symbol.iterator]() {
+ return this;
+ },
+ count: 0,
+ next() {
+ this.count++;
+ if (this.count == 3) {
+ return {done:false, value: 3};
+ }
+ if (this.count < 5) {
+ return {done: false, value: String(this.count)};
+ }
+ return {done:true}
+ }
+};
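+// The format call must stop consuming the iterator as soon as it hits the
+// non-String value, so count stays at 3.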
+assertThrows("lf.format(iterable_of_strings_and_number)",
+ TypeError, "Iterable yielded 3 which is not a string");
+assertEquals(3, iterable_of_strings_and_number.count);
diff --git a/deps/v8/test/intl/regress-9786.js b/deps/v8/test/intl/regress-9786.js
new file mode 100644
index 0000000000..e922dba5f7
--- /dev/null
+++ b/deps/v8/test/intl/regress-9786.js
@@ -0,0 +1,21 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-intl-add-calendar-numbering-system
+
+// Well-formed but invalid calendar should not throw RangeError.
+var calendar = "abc";
+var len = 3;
+var expected = new Intl.DateTimeFormat("en").resolvedOptions().calendar;
+var df;
+
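+// Grow the calendar string by one character per iteration; once a subtag
+// reaches the 8-character limit, append "-ab" to start a fresh subtag so
+// the value stays well-formed.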
+for (var i = 3; i < 20; i++, len++, calendar += "a") {
+ assertDoesNotThrow(() => df = new Intl.DateTimeFormat("en", {calendar}),
+ "Well-formed calendar should not throw");
+ assertEquals(expected, df.resolvedOptions().calendar);
+ if (len == 8) {
+ calendar += "-ab";
+ len = 2;
+ }
+}
diff --git a/deps/v8/test/intl/regress-9787.js b/deps/v8/test/intl/regress-9787.js
new file mode 100644
index 0000000000..576c0f7d64
--- /dev/null
+++ b/deps/v8/test/intl/regress-9787.js
@@ -0,0 +1,10 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-intl-add-calendar-numbering-system
+
+// An ill-formed calendar should throw a RangeError, even when it names a
+// valid calendar ("gregorian" exceeds the 8-character subtag limit).
+assertThrows(
+ 'new Intl.DateTimeFormat("en", {calendar: "gregorian"})',
+ RangeError);
diff --git a/deps/v8/test/intl/regress-9788.js b/deps/v8/test/intl/regress-9788.js
new file mode 100644
index 0000000000..7a3679584f
--- /dev/null
+++ b/deps/v8/test/intl/regress-9788.js
@@ -0,0 +1,29 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-intl-add-calendar-numbering-system
+
+// Well-formed but invalid numberingSystem should not throw RangeError.
+var numberingSystem = "abc";
+var len = 3;
+
+const intlClasses = [
+ Intl.DateTimeFormat,
+ Intl.NumberFormat,
+ Intl.RelativeTimeFormat
+];
+
+intlClasses.forEach(function(cls) {
+ var expected = new cls("en").resolvedOptions().numberingSystem;
+ var obj;
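+  // Same growth pattern as regress-9786.js: cap each subtag at 8 characters
+  // so the numberingSystem stays well-formed.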
+ for (var i = 3; i < 20; i++, len++, numberingSystem += "a") {
+ assertDoesNotThrow(() => obj = new cls("en", {numberingSystem}),
+ "Well-formed numberingSystem should not throw");
+ assertEquals(expected, obj.resolvedOptions().numberingSystem);
+ if (len == 8) {
+ numberingSystem += "-ab";
+ len = 2;
+ }
+ }
+});
diff --git a/deps/v8/test/intl/regress-9812.js b/deps/v8/test/intl/regress-9812.js
new file mode 100644
index 0000000000..c85f9e65c1
--- /dev/null
+++ b/deps/v8/test/intl/regress-9812.js
@@ -0,0 +1,65 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+const locales = [
+ "cs",
+ "cs-CZ",
+ "en-001",
+ "en-150",
+ "en-TV",
+ "es-419",
+ "es-AR",
+ "fil",
+ "fr-CA",
+ "id",
+ "in",
+ "lt",
+ "nl",
+ "pl",
+ "pt-PT",
+ "sr-ME",
+ "sv",
+ "uk",
+ "vi",
+];
+
+const calendars = [
+ // Calendars we know have issues
+ "islamic",
+ "islamic-civil",
+ "islamic-tbla",
+ "islamic-umalqura",
+ "ethiopic-amete-alem",
+ "islamicc",
+ "ethioaa",
+ "islamic-rgsa",
+
+ // Other calendars
+ "gregory",
+ "japanese",
+ "buddhist",
+ "roc",
+ "persian",
+ "islamic",
+ "hebrew",
+ "chinese",
+ "indian",
+ "coptic",
+ "ethiopic",
+ "iso8601",
+ "dangi",
+ "chinese",
+];
+
+let d1 = new Date(2019, 3, 4);
+let d2 = new Date(2019, 5, 6);
+
+calendars.forEach(function(calendar) {
+ locales.forEach(function(baseLocale) {
+ let locale = `${baseLocale}-u-ca-${calendar}`;
+ assertDoesNotThrow(
+ () => (new Intl.DateTimeFormat(locale)).formatRange(d1, d2),
+      `Using Intl.DateTimeFormat formatRange with ${locale} should not throw`);
+ })
+})
diff --git a/deps/v8/test/intl/regress-9849.js b/deps/v8/test/intl/regress-9849.js
new file mode 100644
index 0000000000..0b406a0381
--- /dev/null
+++ b/deps/v8/test/intl/regress-9849.js
@@ -0,0 +1,15 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let d = new Date(271733878);
+d.toLocaleString('en-u-nu-arab');
+d.toLocaleString('en-u-nu-arab', {dateStyle : 'full', timeStyle : 'full'});
+d.toLocaleString('en-u-nu-roman');
+d.toLocaleString('en-u-nu-roman', {dateStyle : 'full', timeStyle : 'full'});
+d.toLocaleString('sr-u-nu-roman');
+d.toLocaleString('sr-u-nu-roman', {dateStyle : 'full', timeStyle : 'full'});
+d.toLocaleString('sr-Cyrl-u-nu-roman');
+d.toLocaleString('sr-Cyrl-u-nu-roman', {dateStyle : 'full', timeStyle : 'full'});
+d.toLocaleString('zh-u-nu-roman', {dateStyle : 'full', timeStyle : 'full'});
+d.toLocaleString('ja-u-nu-cyrl', {dateStyle : 'full', timeStyle : 'full'});
diff --git a/deps/v8/test/intl/regress-992694.js b/deps/v8/test/intl/regress-992694.js
new file mode 100644
index 0000000000..0729636989
--- /dev/null
+++ b/deps/v8/test/intl/regress-992694.js
@@ -0,0 +1,12 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Make sure the "hu" locale formats number grouping correctly.
+
+let number = 123456.789;
+let expected = "123 456,79 Ft";
+assertEquals(expected,
+ (new Intl.NumberFormat('hu', { style: 'currency', currency: 'HUF'}).format(number)));
+assertEquals(expected,
+ (new Intl.NumberFormat('hu-HU', { style: 'currency', currency: 'HUF' }).format(number)));
diff --git a/deps/v8/test/intl/regress-997401.js b/deps/v8/test/intl/regress-997401.js
new file mode 100644
index 0000000000..4c46acff7d
--- /dev/null
+++ b/deps/v8/test/intl/regress-997401.js
@@ -0,0 +1,7 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Test that a very long locale string does not crash.
+let dtf = new Intl.DateTimeFormat(
+ 'de-u-cu-eur-em-default-hc-h23-ks-level1-lb-strict-lw-normal-ms-metric-nu-latn-rg-atzzzz-sd-atat1-ss-none-tz-atvie-va-posix');
diff --git a/deps/v8/test/intl/relative-time-format/check-numbering-system.js b/deps/v8/test/intl/relative-time-format/check-numbering-system.js
new file mode 100644
index 0000000000..91f4b3aee0
--- /dev/null
+++ b/deps/v8/test/intl/relative-time-format/check-numbering-system.js
@@ -0,0 +1,85 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let invalidNumberingSystem = [
+ "invalid",
+ "abce",
+ "finance",
+ "native",
+ "traditio",
+ "abc-defghi",
+];
+
+let illFormedNumberingSystem = [
+ "",
+ "i",
+ "ij",
+ "abcdefghi",
+ "abc-ab",
+];
+
+// https://tc39.github.io/ecma402/#table-numbering-system-digits
+let validNumberingSystem= [
+ "arab",
+ "arabext",
+ "bali",
+ "beng",
+ "deva",
+ "fullwide",
+ "gujr",
+ "guru",
+ "hanidec",
+ "khmr",
+ "knda",
+ "laoo",
+ "latn",
+ "limb",
+ "mlym",
+ "mong",
+ "mymr",
+ "orya",
+ "tamldec",
+ "telu",
+ "thai",
+ "tibt",
+];
+
+let locales = [
+ "en",
+ "ar",
+];
+
+
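+// A well-formed but invalid numberingSystem must not throw; it falls back
+// to the locale's default numbering system.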
+invalidNumberingSystem.forEach(function(numberingSystem) {
+ locales.forEach(function(base) {
+ var df;
+ assertDoesNotThrow(
+ () => df = new Intl.RelativeTimeFormat([base], {numberingSystem}));
+ assertEquals(
+ (new Intl.RelativeTimeFormat([base])).resolvedOptions().numberingSystem,
+ df.resolvedOptions().numberingSystem);
+ });
+});
+
+illFormedNumberingSystem.forEach(function(numberingSystem) {
+ assertThrows(
+ () => new Intl.RelativeTimeFormat(["en"], {numberingSystem}),
+ RangeError);
+});
+
+let value = 1234567.89;
+validNumberingSystem.forEach(function(numberingSystem) {
+ locales.forEach(function(base) {
+ let l = base + "-u-nu-" + numberingSystem;
+ let nf = new Intl.RelativeTimeFormat([base], {numberingSystem});
+ assertEquals(l, nf.resolvedOptions().locale);
+ assertEquals(numberingSystem, nf.resolvedOptions().numberingSystem);
+
+    // Test that the formatting result is the same as passing the numbering
+    // system via -u-nu- in the locale.
+ let nf2 = new Intl.RelativeTimeFormat([l]);
+ assertEquals(nf2.format(value, "day"), nf.format(value, "day"));
+ });
+}
+);
diff --git a/deps/v8/test/intl/relative-time-format/resolved-options.js b/deps/v8/test/intl/relative-time-format/resolved-options.js
index 1caa4f86c9..53648320c9 100644
--- a/deps/v8/test/intl/relative-time-format/resolved-options.js
+++ b/deps/v8/test/intl/relative-time-format/resolved-options.js
@@ -156,7 +156,3 @@ assertEquals(
assertThrows(() =>
Intl.RelativeTimeFormat.prototype.resolvedOptions.call(receiver), TypeError);
}
-
-assertEquals(
- 'ar',
- (new Intl.RelativeTimeFormat(['i-default', 'ar'])).resolvedOptions().locale);
diff --git a/deps/v8/test/js-perf-test/BytecodeHandlers/LdaGlobal.js b/deps/v8/test/js-perf-test/BytecodeHandlers/LdaGlobal.js
new file mode 100644
index 0000000000..a986f5f18f
--- /dev/null
+++ b/deps/v8/test/js-perf-test/BytecodeHandlers/LdaGlobal.js
@@ -0,0 +1,60 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function addBenchmark(name, test) {
+ new BenchmarkSuite(name, [1000],
+ [
+ new Benchmark(name, false, false, 0, test)
+ ]);
+}
+
+addBenchmark('LoadGlobal', ldaGlobal);
+addBenchmark('LoadGlobalInsideTypeof', ldaGlobalInsideTypeof);
+
+var g_var = 10;
+
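+// Each loop iteration performs 100 plain global loads, exercising the
+// LdaGlobal bytecode handler.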
+function ldaGlobal() {
+ for (var i = 0; i < 1000; ++i) {
+ g_var; g_var; g_var; g_var; g_var; g_var; g_var; g_var; g_var; g_var;
+ g_var; g_var; g_var; g_var; g_var; g_var; g_var; g_var; g_var; g_var;
+ g_var; g_var; g_var; g_var; g_var; g_var; g_var; g_var; g_var; g_var;
+ g_var; g_var; g_var; g_var; g_var; g_var; g_var; g_var; g_var; g_var;
+ g_var; g_var; g_var; g_var; g_var; g_var; g_var; g_var; g_var; g_var;
+ g_var; g_var; g_var; g_var; g_var; g_var; g_var; g_var; g_var; g_var;
+ g_var; g_var; g_var; g_var; g_var; g_var; g_var; g_var; g_var; g_var;
+ g_var; g_var; g_var; g_var; g_var; g_var; g_var; g_var; g_var; g_var;
+ g_var; g_var; g_var; g_var; g_var; g_var; g_var; g_var; g_var; g_var;
+ g_var; g_var; g_var; g_var; g_var; g_var; g_var; g_var; g_var; g_var;
+ }
+}
+
+function ldaGlobalInsideTypeof() {
+ for (var i = 0; i < 1000; ++i) {
+ typeof(g_var); typeof(g_var); typeof(g_var); typeof(g_var);
+ typeof(g_var); typeof(g_var); typeof(g_var); typeof(g_var);
+ typeof(g_var); typeof(g_var); typeof(g_var); typeof(g_var);
+ typeof(g_var); typeof(g_var); typeof(g_var); typeof(g_var);
+ typeof(g_var); typeof(g_var); typeof(g_var); typeof(g_var);
+ typeof(g_var); typeof(g_var); typeof(g_var); typeof(g_var);
+ typeof(g_var); typeof(g_var); typeof(g_var); typeof(g_var);
+ typeof(g_var); typeof(g_var); typeof(g_var); typeof(g_var);
+ typeof(g_var); typeof(g_var); typeof(g_var); typeof(g_var);
+ typeof(g_var); typeof(g_var); typeof(g_var); typeof(g_var);
+ typeof(g_var); typeof(g_var); typeof(g_var); typeof(g_var);
+ typeof(g_var); typeof(g_var); typeof(g_var); typeof(g_var);
+ typeof(g_var); typeof(g_var); typeof(g_var); typeof(g_var);
+ typeof(g_var); typeof(g_var); typeof(g_var); typeof(g_var);
+ typeof(g_var); typeof(g_var); typeof(g_var); typeof(g_var);
+ typeof(g_var); typeof(g_var); typeof(g_var); typeof(g_var);
+ typeof(g_var); typeof(g_var); typeof(g_var); typeof(g_var);
+ typeof(g_var); typeof(g_var); typeof(g_var); typeof(g_var);
+ typeof(g_var); typeof(g_var); typeof(g_var); typeof(g_var);
+ typeof(g_var); typeof(g_var); typeof(g_var); typeof(g_var);
+ typeof(g_var); typeof(g_var); typeof(g_var); typeof(g_var);
+ typeof(g_var); typeof(g_var); typeof(g_var); typeof(g_var);
+ typeof(g_var); typeof(g_var); typeof(g_var); typeof(g_var);
+ typeof(g_var); typeof(g_var); typeof(g_var); typeof(g_var);
+ typeof(g_var); typeof(g_var); typeof(g_var); typeof(g_var);
+ }
+}
diff --git a/deps/v8/test/js-perf-test/JSTests3.json b/deps/v8/test/js-perf-test/JSTests3.json
index 598e9fc6d1..e2b669614c 100644
--- a/deps/v8/test/js-perf-test/JSTests3.json
+++ b/deps/v8/test/js-perf-test/JSTests3.json
@@ -371,6 +371,17 @@
{"name": "Object-Lookup-Index-Number"},
{"name": "Object-Lookup-Index-String"}
]
+ },
+ {
+ "name": "LdaGlobal",
+ "main": "run.js",
+ "resources": [ "LdaGlobal.js" ],
+ "test_flags": [ "LdaGlobal" ],
+ "results_regexp": "^%s\\-BytecodeHandler\\(Score\\): (.+)$",
+ "tests": [
+ {"name": "LoadGlobal"},
+ {"name": "LoadGlobalInsideTypeof"}
+ ]
}
]
},
diff --git a/deps/v8/test/message/fail/modules-import-top-level-await-fail-1.mjs b/deps/v8/test/message/fail/modules-import-top-level-await-fail-1.mjs
new file mode 100644
index 0000000000..3a00ba67ae
--- /dev/null
+++ b/deps/v8/test/message/fail/modules-import-top-level-await-fail-1.mjs
@@ -0,0 +1,9 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// MODULE
+//
+// Flags: --harmony-top-level-await
+
+import "modules-skip-1-top-level-await-fail.mjs"
diff --git a/deps/v8/test/message/fail/modules-import-top-level-await-fail-1.out b/deps/v8/test/message/fail/modules-import-top-level-await-fail-1.out
new file mode 100644
index 0000000000..2b2cb407a4
--- /dev/null
+++ b/deps/v8/test/message/fail/modules-import-top-level-await-fail-1.out
@@ -0,0 +1,3 @@
+*modules-skip-1-top-level-await-fail.mjs:7: ReferenceError: x is not defined
+await x;
+^
diff --git a/deps/v8/test/message/fail/modules-import-top-level-await-fail-2.mjs b/deps/v8/test/message/fail/modules-import-top-level-await-fail-2.mjs
new file mode 100644
index 0000000000..c0bc4c22aa
--- /dev/null
+++ b/deps/v8/test/message/fail/modules-import-top-level-await-fail-2.mjs
@@ -0,0 +1,9 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// MODULE
+//
+// Flags: --harmony-top-level-await
+
+import "modules-skip-2-top-level-await-fail.mjs"
diff --git a/deps/v8/test/message/fail/modules-import-top-level-await-fail-2.out b/deps/v8/test/message/fail/modules-import-top-level-await-fail-2.out
new file mode 100644
index 0000000000..208d53e1d2
--- /dev/null
+++ b/deps/v8/test/message/fail/modules-import-top-level-await-fail-2.out
@@ -0,0 +1,3 @@
+*modules-skip-2-top-level-await-fail.mjs:7: ReferenceError: ththsths is not defined
+ththsths
+^
diff --git a/deps/v8/test/message/fail/modules-skip-1-top-level-await-fail.mjs b/deps/v8/test/message/fail/modules-skip-1-top-level-await-fail.mjs
new file mode 100644
index 0000000000..0642ddf366
--- /dev/null
+++ b/deps/v8/test/message/fail/modules-skip-1-top-level-await-fail.mjs
@@ -0,0 +1,7 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// MODULE
+
+await x;
diff --git a/deps/v8/test/message/fail/modules-skip-2-top-level-await-fail.mjs b/deps/v8/test/message/fail/modules-skip-2-top-level-await-fail.mjs
new file mode 100644
index 0000000000..19edc2c546
--- /dev/null
+++ b/deps/v8/test/message/fail/modules-skip-2-top-level-await-fail.mjs
@@ -0,0 +1,7 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+import "modules-skip-3-top-level-await-fail.mjs"
+
+ththsths
diff --git a/deps/v8/test/message/fail/modules-skip-3-top-level-await-fail.mjs b/deps/v8/test/message/fail/modules-skip-3-top-level-await-fail.mjs
new file mode 100644
index 0000000000..caf3431b7f
--- /dev/null
+++ b/deps/v8/test/message/fail/modules-skip-3-top-level-await-fail.mjs
@@ -0,0 +1,5 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+await 42;
diff --git a/deps/v8/test/message/message.status b/deps/v8/test/message/message.status
index 25c87b5e5c..45f0347b4c 100644
--- a/deps/v8/test/message/message.status
+++ b/deps/v8/test/message/message.status
@@ -38,7 +38,7 @@
}], # ALWAYS
# Liftoff is currently only sufficiently implemented on x64 and ia32.
-# TODO(clemensh): Implement on all other platforms (crbug.com/v8/6600).
+# TODO(clemensb): Implement on all other platforms (crbug.com/v8/6600).
['arch != x64 and arch != ia32', {
'wasm-trace-memory-liftoff': [SKIP],
}], # arch != x64 and arch != ia32
diff --git a/deps/v8/test/message/wasm-trace-memory-interpreted.js b/deps/v8/test/message/wasm-trace-memory-interpreted.js
index fdac585b39..401707f581 100644
--- a/deps/v8/test/message/wasm-trace-memory-interpreted.js
+++ b/deps/v8/test/message/wasm-trace-memory-interpreted.js
@@ -3,5 +3,6 @@
// found in the LICENSE file.
// Flags: --no-stress-opt --expose-wasm --trace-wasm-memory --wasm-interpret-all
+// Flags: --experimental-wasm-simd
load("test/message/wasm-trace-memory.js");
diff --git a/deps/v8/test/message/wasm-trace-memory-interpreted.out b/deps/v8/test/message/wasm-trace-memory-interpreted.out
index 248d67e827..e390f10fe3 100644
--- a/deps/v8/test/message/wasm-trace-memory-interpreted.out
+++ b/deps/v8/test/message/wasm-trace-memory-interpreted.out
@@ -7,3 +7,5 @@ interpreter func: 2+0x3 load from 00000002 val: f32:68169720922112.00000
interpreter func: 4+0x5 store to 00000004 val: i8:171 / ab
interpreter func: 0+0x3 load from 00000002 val: i32:1454047232 / 56ab0000
interpreter func: 2+0x3 load from 00000002 val: f32:94008244174848.000000 / 56ab0000
+interpreter func: 6+0x7 store to 00000004 val: s128:48879 48879 48879 48879 / 0000beef 0000beef 0000beef 0000beef
+interpreter func: 5+0x3 load from 00000002 val: s128:-1091633152 -1091633152 -1091633152 -1091633152 / beef0000 beef0000 beef0000 beef0000
diff --git a/deps/v8/test/message/wasm-trace-memory-liftoff.js b/deps/v8/test/message/wasm-trace-memory-liftoff.js
index a23eca4a0f..b50d33dbe8 100644
--- a/deps/v8/test/message/wasm-trace-memory-liftoff.js
+++ b/deps/v8/test/message/wasm-trace-memory-liftoff.js
@@ -3,6 +3,8 @@
// found in the LICENSE file.
// Flags: --no-stress-opt --trace-wasm-memory --liftoff --no-future
-// Flags: --no-wasm-tier-up
+// Flags: --no-wasm-tier-up --experimental-wasm-simd
+// Liftoff does not support SIMD128, so the s128 load and store traces come
+// from the TurboFan tier rather than Liftoff.
load("test/message/wasm-trace-memory.js");
diff --git a/deps/v8/test/message/wasm-trace-memory-liftoff.out b/deps/v8/test/message/wasm-trace-memory-liftoff.out
index 31fdefde3d..cdd8e765d9 100644
--- a/deps/v8/test/message/wasm-trace-memory-liftoff.out
+++ b/deps/v8/test/message/wasm-trace-memory-liftoff.out
@@ -7,3 +7,5 @@ liftoff func: 2+0x3 load from 00000002 val: f32:68169720922112.00000
liftoff func: 4+0x5 store to 00000004 val: i8:171 / ab
liftoff func: 0+0x3 load from 00000002 val: i32:1454047232 / 56ab0000
liftoff func: 2+0x3 load from 00000002 val: f32:94008244174848.000000 / 56ab0000
+turbofan func: 6+0x7 store to 00000004 val: s128:48879 48879 48879 48879 / 0000beef 0000beef 0000beef 0000beef
+turbofan func: 5+0x3 load from 00000002 val: s128:-1091633152 -1091633152 -1091633152 -1091633152 / beef0000 beef0000 beef0000 beef0000
diff --git a/deps/v8/test/message/wasm-trace-memory.js b/deps/v8/test/message/wasm-trace-memory.js
index 23425f4ddb..de2f1159e9 100644
--- a/deps/v8/test/message/wasm-trace-memory.js
+++ b/deps/v8/test/message/wasm-trace-memory.js
@@ -3,26 +3,33 @@
// found in the LICENSE file.
// Flags: --no-stress-opt --trace-wasm-memory --no-liftoff --no-future
-// Flags: --no-wasm-tier-up
+// Flags: --no-wasm-tier-up --experimental-wasm-simd
load("test/mjsunit/wasm/wasm-module-builder.js");
var builder = new WasmModuleBuilder();
builder.addMemory(1);
builder.addFunction('load', kSig_v_i)
- .addBody([kExprGetLocal, 0, kExprI32LoadMem, 0, 0, kExprDrop])
+ .addBody([kExprLocalGet, 0, kExprI32LoadMem, 0, 0, kExprDrop])
.exportFunc();
builder.addFunction('load8', kSig_v_i)
- .addBody([kExprGetLocal, 0, kExprI32LoadMem8U, 0, 0, kExprDrop])
+ .addBody([kExprLocalGet, 0, kExprI32LoadMem8U, 0, 0, kExprDrop])
.exportFunc();
builder.addFunction('loadf', kSig_v_i)
- .addBody([kExprGetLocal, 0, kExprF32LoadMem, 0, 0, kExprDrop])
+ .addBody([kExprLocalGet, 0, kExprF32LoadMem, 0, 0, kExprDrop])
.exportFunc();
builder.addFunction('store', kSig_v_ii)
- .addBody([kExprGetLocal, 0, kExprGetLocal, 1, kExprI32StoreMem, 0, 0])
+ .addBody([kExprLocalGet, 0, kExprLocalGet, 1, kExprI32StoreMem, 0, 0])
.exportFunc();
builder.addFunction('store8', kSig_v_ii)
- .addBody([kExprGetLocal, 0, kExprGetLocal, 1, kExprI32StoreMem8, 0, 0])
+ .addBody([kExprLocalGet, 0, kExprLocalGet, 1, kExprI32StoreMem8, 0, 0])
+ .exportFunc();
+builder.addFunction('load128', kSig_v_i)
+ .addBody([kExprLocalGet, 0, kSimdPrefix, kExprS128LoadMem, 0, 0, kExprDrop])
+ .exportFunc();
+// SIMD is not exposed to JS, so use splat to construct a s128 value.
+builder.addFunction('store128', kSig_v_ii)
+ .addBody([kExprLocalGet, 0, kExprLocalGet, 1, kSimdPrefix, kExprI32x4Splat, kSimdPrefix, kExprS128StoreMem, 0, 0])
.exportFunc();
var module = builder.instantiate();
@@ -35,3 +42,5 @@ module.exports.loadf(2);
module.exports.store8(4, 0xab);
module.exports.load(2);
module.exports.loadf(2);
+module.exports.store128(4, 0xbeef);
+module.exports.load128(2);
diff --git a/deps/v8/test/message/wasm-trace-memory.out b/deps/v8/test/message/wasm-trace-memory.out
index bc6b1b64ec..f41bc30fee 100644
--- a/deps/v8/test/message/wasm-trace-memory.out
+++ b/deps/v8/test/message/wasm-trace-memory.out
@@ -7,3 +7,5 @@ turbofan func: 2+0x3 load from 00000002 val: f32:68169720922112.00000
turbofan func: 4+0x5 store to 00000004 val: i8:171 / ab
turbofan func: 0+0x3 load from 00000002 val: i32:1454047232 / 56ab0000
turbofan func: 2+0x3 load from 00000002 val: f32:94008244174848.000000 / 56ab0000
+turbofan func: 6+0x7 store to 00000004 val: s128:48879 48879 48879 48879 / 0000beef 0000beef 0000beef 0000beef
+turbofan func: 5+0x3 load from 00000002 val: s128:-1091633152 -1091633152 -1091633152 -1091633152 / beef0000 beef0000 beef0000 beef0000
diff --git a/deps/v8/test/mjsunit/asm/regress-1013920.js b/deps/v8/test/mjsunit/asm/regress-1013920.js
new file mode 100644
index 0000000000..f7a2e57d1d
--- /dev/null
+++ b/deps/v8/test/mjsunit/asm/regress-1013920.js
@@ -0,0 +1,17 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function asm(stdlib, foreign, heap) {
+ "use asm";
+ var heap32 = new stdlib.Uint32Array(heap);
+ function f() { return 0; }
+ return {f : f};
+}
+
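+// Reflect.construct creates a SharedArrayBuffer whose prototype comes from
+// ArrayBuffer's constructor (the new.target); instantiating the asm.js
+// module against it must not crash.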
+var heap = Reflect.construct(
+ SharedArrayBuffer,
+ [1024 * 1024],
+ ArrayBuffer.prototype.constructor);
+
+asm(this, {}, heap);
diff --git a/deps/v8/test/mjsunit/bit-not.js b/deps/v8/test/mjsunit/bit-not.js
index d0316a71ea..4ef1b7c2e3 100644
--- a/deps/v8/test/mjsunit/bit-not.js
+++ b/deps/v8/test/mjsunit/bit-not.js
@@ -62,16 +62,3 @@ testBitNot(0x80000000 - 0.12345, "float6");
testBitNot("0", "string0");
testBitNot("2.3", "string2.3");
testBitNot("-9.4", "string-9.4");
-
-
-// Try to test that we can deal with allocation failures in
-// the fast path and just use the slow path instead.
-function TryToGC() {
- var x = 0x40000000;
- // Put in an eval to foil Crankshaft.
- eval("");
- for (var i = 0; i < 1000000; i++) {
- assertEquals(~0x40000000, ~x);
- }
-}
-TryToGC();
diff --git a/deps/v8/test/mjsunit/code-coverage-block-async.js b/deps/v8/test/mjsunit/code-coverage-block-async.js
new file mode 100644
index 0000000000..111be213b6
--- /dev/null
+++ b/deps/v8/test/mjsunit/code-coverage-block-async.js
@@ -0,0 +1,122 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --no-always-opt --no-stress-flush-bytecode
+// Flags: --no-stress-incremental-marking
+// Files: test/mjsunit/code-coverage-utils.js
+
+%DebugToggleBlockCoverage(true);
+
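+// Each TestCoverage case pairs a source string (whose comments mark column
+// offsets) with the expected block-coverage ranges and invocation counts.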
+TestCoverage(
+"await expressions",
+`
+async function f() { // 0000
+ await 42; // 0050
+ await 42; // 0100
+}; // 0150
+f(); // 0200
+%PerformMicrotaskCheckpoint(); // 0250
+`,
+[{"start":0,"end":299,"count":1},
+ {"start":0,"end":151,"count":1}]
+);
+
+TestCoverage(
+"for-await-of statements",
+`
+!async function() { // 0000
+ for await (var x of [0,1,2,3]) { // 0050
+ nop(); // 0100
+ } // 0150
+}(); // 0200
+%PerformMicrotaskCheckpoint(); // 0250
+`,
+[{"start":0,"end":299,"count":1},
+ {"start":1,"end":201,"count":1},
+ {"start":83,"end":153,"count":4}]
+);
+
+TestCoverage(
+"https://crbug.com/981313",
+`
+class Foo { // 0000
+ async timeout() { // 0000
+ return new Promise( // 0100
+ (r) => setTimeout(r, 10)); // 0000
+ } // 0200
+} // 0000
+new Foo().timeout(); // 0300
+`,
+[ {"start":0, "end":349, "count":1},
+ {"start":52, "end":203, "count":1},
+ {"start":158,"end":182, "count":1}]);
+
+TestCoverage(
+ "test async generator coverage",
+`
+class Foo { // 0000
+ async *timeout() { // 0000
+ return new Promise( // 0100
+ (r) => setTimeout(r, 10)); // 0000
+ } // 0200
+} // 0000
+new Foo().timeout(); // 0300
+`,
+ [ {"start":0, "end":349, "count":1},
+ {"start":52, "end":203, "count":1},
+ {"start":158,"end":182, "count":0}]);
+
+TestCoverage(
+ "test async generator coverage with next call",
+`
+class Foo { // 0000
+ async *timeout() { // 0000
+ return new Promise( // 0100
+ (r) => setTimeout(r, 10)); // 0000
+ } // 0200
+} // 0000
+new Foo().timeout().next(); // 0300
+`,
+ [ {"start":0, "end":349, "count":1},
+ {"start":52, "end":203, "count":1},
+ {"start":158,"end":182, "count":1}]);
+
+TestCoverage(
+ "test two consecutive returns",
+`
+class Foo { // 0000
+ timeout() { // 0000
+ return new Promise( // 0100
+ (r) => setTimeout(r, 10)); // 0000
+ return new Promise( // 0200
+ (r) => setTimeout(r, 10)); // 0000
+ } // 0300
+} // 0000
+new Foo().timeout(); // 0400
+`,
+[ {"start":0,"end":449,"count":1},
+ {"start":52,"end":303,"count":1},
+ {"start":184,"end":302,"count":0},
+ {"start":158,"end":182,"count":1}] );
+
+
+TestCoverage(
+ "test async generator with two consecutive returns",
+`
+class Foo { // 0000
+ async *timeout() { // 0000
+ return new Promise( // 0100
+ (r) => setTimeout(r, 10)); // 0000
+ return new Promise( // 0200
+ (r) => setTimeout(r, 10)); // 0000
+ } // 0300
+} // 0000
+new Foo().timeout().next(); // 0400
+`,
+[ {"start":0,"end":449,"count":1},
+ {"start":52,"end":303,"count":1},
+ {"start":184,"end":302,"count":0},
+ {"start":158,"end":182,"count":1}] );
+
+%DebugToggleBlockCoverage(false);
diff --git a/deps/v8/test/mjsunit/code-coverage-block.js b/deps/v8/test/mjsunit/code-coverage-block.js
index a7bad5bf11..6cf81bcce0 100644
--- a/deps/v8/test/mjsunit/code-coverage-block.js
+++ b/deps/v8/test/mjsunit/code-coverage-block.js
@@ -206,21 +206,6 @@ TestCoverage(
);
TestCoverage(
-"for-await-of statements",
-`
-!async function() { // 0000
- for await (var x of [0,1,2,3]) { // 0050
- nop(); // 0100
- } // 0150
-}(); // 0200
-%PerformMicrotaskCheckpoint(); // 0250
-`,
-[{"start":0,"end":299,"count":1},
- {"start":1,"end":201,"count":1},
- {"start":83,"end":153,"count":4}]
-);
-
-TestCoverage(
"while and do-while statements",
`
function g() {} // 0000
@@ -659,20 +644,6 @@ try { // 0200
);
TestCoverage(
-"await expressions",
-`
-async function f() { // 0000
- await 42; // 0050
- await 42; // 0100
-}; // 0150
-f(); // 0200
-%PerformMicrotaskCheckpoint(); // 0250
-`,
-[{"start":0,"end":299,"count":1},
- {"start":0,"end":151,"count":1}]
-);
-
-TestCoverage(
"LogicalOrExpression assignment",
`
const a = true || 99 // 0000
@@ -1097,4 +1068,19 @@ f(43); // 0450
{"start":204,"end":226,"count":1}]
);
+TestCoverage(
+"https://crbug.com/v8/9857",
+`function foo() {}`,
+[{"start":0,"end":17,"count":1},
+ {"start":0,"end":17,"count":0}]
+);
+
+TestCoverage(
+"https://crbug.com/v8/9857",
+`function foo() {function bar() {}}; foo()`,
+[{"start":0,"end":41,"count":1},
+ {"start":0,"end":34,"count":1},
+ {"start":16,"end":33,"count":0}]
+);
+
%DebugToggleBlockCoverage(false);
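
A note on the expectation format used throughout these coverage tests: each entry is a pair of character offsets into the source string plus the number of times that span executed. Reading the second crbug.com/v8/9857 case above against its 41-character source makes the encoding concrete (offsets reconstructed by hand, so treat them as illustrative):

    // Source: function foo() {function bar() {}}; foo()
    // {"start":0,  "end":41, "count":1}  -> the whole script ran once
    // {"start":0,  "end":34, "count":1}  -> foo (offsets 0-33) was called once
    // {"start":16, "end":33, "count":0}  -> bar was declared but never invoked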
diff --git a/deps/v8/test/mjsunit/compiler/concurrent-inlining-1.js b/deps/v8/test/mjsunit/compiler/concurrent-inlining-1.js
new file mode 100644
index 0000000000..9cbdbc863f
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/concurrent-inlining-1.js
@@ -0,0 +1,26 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+// This test ensures that we manage to serialize the global.gaga function for
+// compilation and therefore are able to inline it. Since the call feedback in
+// bar is megamorphic, this relies on recording the correct accumulator hint for
+// the named load of obj.gaga while serializing bar (in turn while serializing
+// foo).
+
+const global = this;
+global.gaga = function gaga() { return true; };
+
+function bar(obj) { return obj.gaga(); };
+function foo() { return %TurbofanStaticAssert(bar(global)); }
+
+%PrepareFunctionForOptimization(foo);
+%PrepareFunctionForOptimization(bar);
+%PrepareFunctionForOptimization(global.gaga);
+
+bar({gaga() {}});
+foo();
+%OptimizeFunctionOnNextCall(foo);
+foo();
diff --git a/deps/v8/test/mjsunit/compiler/concurrent-inlining-2.js b/deps/v8/test/mjsunit/compiler/concurrent-inlining-2.js
new file mode 100644
index 0000000000..e3e63d195c
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/concurrent-inlining-2.js
@@ -0,0 +1,26 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+// This test ensures that we manage to serialize the global.gaga function for
+// compilation and therefore are able to inline it. Since the call feedback in
+// bar is megamorphic, this relies on recording the correct accumulator hint for
+// the named load of obj.gaga while serializing bar (in turn while serializing
+// foo).
+
+const global = this;
+global.gaga = function gaga() { return true; };
+
+function bar(obj) { return obj.gaga(); }
+function foo(obj) { obj.gaga; %TurbofanStaticAssert(bar(obj)); }
+
+%PrepareFunctionForOptimization(foo);
+%PrepareFunctionForOptimization(bar);
+%PrepareFunctionForOptimization(global.gaga);
+
+bar({gaga() {}});
+foo(global);
+%OptimizeFunctionOnNextCall(foo);
+foo(global);
diff --git a/deps/v8/test/mjsunit/compiler/promise-constructor.js b/deps/v8/test/mjsunit/compiler/promise-constructor.js
index ab2d720755..27deeda9d5 100644
--- a/deps/v8/test/mjsunit/compiler/promise-constructor.js
+++ b/deps/v8/test/mjsunit/compiler/promise-constructor.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --experimental-inline-promise-constructor
+// Flags: --allow-natives-syntax
// We have to patch mjsunit because normal assertion failures just throw
// exceptions which are swallowed in a then clause.
diff --git a/deps/v8/test/mjsunit/compiler/regress-9017.js b/deps/v8/test/mjsunit/compiler/regress-9017.js
index 7cbd4e0178..c484e177c6 100644
--- a/deps/v8/test/mjsunit/compiler/regress-9017.js
+++ b/deps/v8/test/mjsunit/compiler/regress-9017.js
@@ -3,6 +3,8 @@
// found in the LICENSE file.
// Flags: --allow-natives-syntax --noturbo-inlining --noturbo-verify-allocation
+// This test invokes optimization manually, so stress modes are not needed:
+// Flags: --nostress-opt --noalways-opt
// Ensure that very large stack frames can be used successfully.
// The flag --noturbo-verify-allocation is to make this run a little faster; it
diff --git a/deps/v8/test/mjsunit/d8/d8-performance-measure-memory.js b/deps/v8/test/mjsunit/d8/d8-performance-measure-memory.js
new file mode 100644
index 0000000000..baf6479fff
--- /dev/null
+++ b/deps/v8/test/mjsunit/d8/d8-performance-measure-memory.js
@@ -0,0 +1,47 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Test the performance.measureMemory() function of d8. This test only makes
+// sense with d8.
+
+load('test/mjsunit/mjsunit.js');
+
+function assertLessThanOrEqual(a, b) {
+ assertTrue(a <= b, `Expected ${a} <= ${b}`);
+}
+
+function checkMeasureMemoryResult(result) {
+ assertTrue('total' in result);
+ assertTrue('jsMemoryEstimate' in result.total);
+ assertTrue('jsMemoryRange' in result.total);
+ assertEquals('number', typeof result.total.jsMemoryEstimate);
+ assertEquals(2, result.total.jsMemoryRange.length);
+ assertEquals('number', typeof result.total.jsMemoryRange[0]);
+ assertEquals('number', typeof result.total.jsMemoryRange[1]);
+ assertLessThanOrEqual(result.total.jsMemoryRange[0],
+ result.total.jsMemoryRange[1]);
+ assertLessThanOrEqual(result.total.jsMemoryRange[0],
+ result.total.jsMemoryEstimate);
+ assertLessThanOrEqual(result.total.jsMemoryEstimate,
+ result.total.jsMemoryRange[1]);
+}
+
+if (this.performance && performance.measureMemory) {
+ assertPromiseResult((async () => {
+ let result = await performance.measureMemory();
+ checkMeasureMemoryResult(result);
+ })());
+
+ assertPromiseResult((async () => {
+ let result = await performance.measureMemory({detailed: false});
+ checkMeasureMemoryResult(result);
+ })());
+
+ assertPromiseResult((async () => {
+ let result = await performance.measureMemory({detailed: true});
+ // TODO(ulan): Also check the detailed results once measureMemory
+ // supports them.
+ checkMeasureMemoryResult(result);
+ })());
+}
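
Taken together, the assertions in checkMeasureMemoryResult pin down the shape of the object d8 resolves the promise with; rendered with made-up numbers it is:

    ({
      total: {
        jsMemoryEstimate: 1500000,          // a number; the test does not pin down a unit
        jsMemoryRange: [1000000, 2000000],  // [lower, upper], with lower <= estimate <= upper
      }
    });

The detailed:true variant is accepted but, as the TODO notes, its extra output is not yet checked.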
diff --git a/deps/v8/test/mjsunit/es6/array-iterator-detached.js b/deps/v8/test/mjsunit/es6/array-iterator-detached.js
index f385039b4d..4e4f664373 100644
--- a/deps/v8/test/mjsunit/es6/array-iterator-detached.js
+++ b/deps/v8/test/mjsunit/es6/array-iterator-detached.js
@@ -17,7 +17,7 @@ function Baseline() {
%NeverOptimizeFunction(Baseline);
assertThrows(Baseline, TypeError,
- "Cannot perform Array Iterator.prototype.next on a neutered ArrayBuffer");
+ "Cannot perform Array Iterator.prototype.next on a detached ArrayBuffer");
function Turbo(count = 10000) {
let array = Array(10000);
@@ -45,4 +45,4 @@ Turbo(10);
%OptimizeFunctionOnNextCall(Turbo);
assertThrows(Turbo, TypeError,
- "Cannot perform Array Iterator.prototype.next on a neutered ArrayBuffer");
+ "Cannot perform Array Iterator.prototype.next on a detached ArrayBuffer");
diff --git a/deps/v8/test/mjsunit/es6/block-conflicts-sloppy.js b/deps/v8/test/mjsunit/es6/block-conflicts-sloppy.js
index b2ebfce6c9..8d896b93a2 100644
--- a/deps/v8/test/mjsunit/es6/block-conflicts-sloppy.js
+++ b/deps/v8/test/mjsunit/es6/block-conflicts-sloppy.js
@@ -3,6 +3,8 @@
// found in the LICENSE file.
// Test for conflicting variable bindings.
+// Stress-testing this test is very slow and provides no useful coverage.
+// Flags: --nostress-opt --noalways-opt
function CheckException(e) {
var string = e.toString();
diff --git a/deps/v8/test/mjsunit/es6/block-const-assign.js b/deps/v8/test/mjsunit/es6/block-const-assign.js
index 541dc0d97b..5700d69d04 100644
--- a/deps/v8/test/mjsunit/es6/block-const-assign.js
+++ b/deps/v8/test/mjsunit/es6/block-const-assign.js
@@ -29,6 +29,9 @@
 // when using an immutable binding in an assignment or with
// prefix/postfix decrement/increment operators.
+// Optimization stress is not useful for early syntax errors.
+// Flags: --nostress-opt --noalways-opt
+
"use strict";
const decls = [
@@ -135,7 +138,8 @@ let usecontexts = [
function Test(program, error) {
program = "'use strict'; " + program;
try {
- print(program, " // throw " + error.name);
+ // If you need to debug this test, enable the following line:
+ // print(program, " // throw " + error.name);
eval(program);
} catch (e) {
assertInstanceof(e, error);
diff --git a/deps/v8/test/mjsunit/es6/iterator-eager-deopt.js b/deps/v8/test/mjsunit/es6/iterator-eager-deopt.js
new file mode 100644
index 0000000000..fe004c8c6d
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/iterator-eager-deopt.js
@@ -0,0 +1,69 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// The GetIterator bytecode is used to implement a part of the iterator
+// protocol (https://tc39.es/ecma262/#sec-getiterator). Here, the
+// bytecode performs multiple operations, including some that have side effects
+// and may deoptimize eagerly or lazily.
+// This test ensures that eager deoptimization is handled correctly.
+
+// Flags: --allow-natives-syntax --no-always-opt
+
+var getIteratorCount = 0;
+var iteratorCount = 0;
+var iteratorAfterEagerDeoptCount = 0;
+
+function foo(obj) {
+ // The following for-of loop uses the iterator protocol to iterate
+  // over 'obj'.
+  // The GetIterator bytecode involves 3 steps:
+ // 1. method = GetMethod(obj, @@iterator)
+ // 2. iterator = Call(method, obj)
+ // 3. if(!IsJSReceiver(iterator)) throw SymbolIteratorInvalid.
+ for(var x of obj){}
+}
+
+// This iterator gets inlined when the 'foo' function is JIT-compiled for
+// the first time.
+var iterator = function() {
+ iteratorCount++;
+ return {
+ next: function() {
+ return { done: true };
+ }
+ }
+}
+
+var iteratorAfterEagerDeopt = function() {
+ iteratorAfterEagerDeoptCount++;
+ return {
+ next: function() {
+ return { done: true };
+ }
+ }
+}
+
+// Here, the retrieval of the function at @@iterator has a side effect (it
+// increments 'getIteratorCount'). Changing the value of 'iterator' in the
+// JIT-compiled 'foo' causes deoptimization after the count is incremented.
+// The deopt cannot resume at the beginning of the bytecode because that
+// would increment the count again.
+let y = { get [Symbol.iterator] () {
+ getIteratorCount++;
+ return iterator;
+ }
+ };
+
+%PrepareFunctionForOptimization(foo);
+foo(y);
+foo(y);
+%OptimizeFunctionOnNextCall(foo);
+
+// Change the value of 'iterator' to trigger eager deoptimization of 'foo'.
+iterator = iteratorAfterEagerDeopt;
+foo(y);
+assertUnoptimized(foo);
+assertEquals(getIteratorCount, 3);
+assertEquals(iteratorCount, 2);
+assertEquals(iteratorAfterEagerDeoptCount, 1);
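
The three numbered steps that these iterator tests keep referring to are ordinary JavaScript operations, so the deopt bookkeeping is easiest to follow against a plain-JS rendering of what the GetIterator bytecode computes (names illustrative; the real GetMethod additionally tolerates undefined and null):

    function getIterator(obj) {
      const method = obj[Symbol.iterator];  // 1. GetMethod(obj, @@iterator) -- may run a getter
      const iterator = method.call(obj);    // 2. Call(method, obj)
      if (Object(iterator) !== iterator) {  // 3. reject non-receivers
        throw new TypeError('Result of the Symbol.iterator method is not an object');
      }
      return iterator;
    }

In the eager-deopt test above, step 1 is what runs the side-effecting getter; resuming the deoptimized frame after step 1 instead of at the start of the bytecode is exactly what the final assertions check: the getter ran three times, but the original iterator only twice, plus one call to the replacement.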
diff --git a/deps/v8/test/mjsunit/es6/iterator-invalid-receiver-opt.js b/deps/v8/test/mjsunit/es6/iterator-invalid-receiver-opt.js
new file mode 100644
index 0000000000..fac416b5b5
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/iterator-invalid-receiver-opt.js
@@ -0,0 +1,51 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// The GetIterator bytecode is used to implement a part of the iterator
+// protocol (https://tc39.es/ecma262/#sec-getiterator).
+// Here, the call to the @@iterator property returns an invalid JS receiver.
+// This test ensures that the optimized version of the GetIterator bytecode
+// incorporates an exception-handling mechanism without deoptimizing.
+
+// Flags: --allow-natives-syntax --opt
+
+var iteratorCount = 0;
+var exceptionCount = 0;
+
+function foo(obj) {
+ // The following for-of loop uses the iterator protocol to iterate
+  // over 'obj'.
+  // The GetIterator bytecode involves 3 steps:
+ // 1. method = GetMethod(obj, @@iterator)
+ // 2. iterator = Call(method, obj)
+ // 3. if(!IsJSReceiver(iterator)) throw SymbolIteratorInvalid.
+ try{
+ for(let a of obj){
+ assertUnreachable();
+ }
+ } catch(e){
+ exceptionCount++;
+ }
+}
+
+// This iterator returns '3', which is not a valid JSReceiver.
+var iterator = function() {
+ iteratorCount++;
+ return 3;
+}
+
+let y = {
+ get [Symbol.iterator]() {
+ return iterator;
+ }
+};
+
+%PrepareFunctionForOptimization(foo);
+foo(y);
+foo(y);
+%OptimizeFunctionOnNextCall(foo);
+foo(y);
+assertOptimized(foo);
+assertEquals(iteratorCount, 3);
+assertEquals(exceptionCount, 3);
diff --git a/deps/v8/test/mjsunit/es6/iterator-lazy-deopt.js b/deps/v8/test/mjsunit/es6/iterator-lazy-deopt.js
new file mode 100644
index 0000000000..f2b39a208d
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/iterator-lazy-deopt.js
@@ -0,0 +1,71 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// The GetIterator bytecode is used to implement a part of the iterator
+// protocol (https://tc39.es/ecma262/#sec-getiterator). Here, the
+// bytecode performs multiple operations, including some that have side effects
+// and may deoptimize eagerly or lazily.
+// This test ensures that lazy deoptimization is handled correctly.
+
+// Flags: --allow-natives-syntax --no-always-opt
+
+var triggerLazyDeopt = false;
+var iteratorCount = 0;
+var iteratorAfterLazyDeoptCount = 0;
+var getIteratorCount = 0;
+
+function foo(obj) {
+ // The following for-of loop uses the iterator protocol to iterate
+  // over 'obj'.
+  // The GetIterator bytecode involves 3 steps:
+ // 1. method = GetMethod(obj, @@iterator)
+ // 2. iterator = Call(method, obj)
+ // 3. if(!IsJSReceiver(iterator)) throw SymbolIteratorInvalid.
+ for(var x of obj){}
+}
+
+// This iterator gets inlined when the 'foo' function is JIT-compiled for
+// the first time.
+var iterator = function() {
+ iteratorCount++;
+ return {
+ next: function() {
+ return { done: true };
+ }
+ }
+}
+
+var iteratorAfterLazyDeopt = function() {
+ iteratorAfterLazyDeoptCount++;
+ return {
+ next: function() {
+ return { done: true };
+ }
+ }
+}
+// Here, the retrieval of the function at @@iterator has a side effect (it
+// increments 'getIteratorCount'). The lazy deoptimization is triggered by
+// setting 'triggerLazyDeopt' to true after the count is incremented.
+// The deopt cannot resume at the beginning of the bytecode because that
+// would increment the count again.
+let y = { get [Symbol.iterator] () {
+ getIteratorCount++;
+ if(triggerLazyDeopt) {
+ %DeoptimizeFunction(foo);
+    iterator = iteratorAfterLazyDeopt;
+ }
+ return iterator;
+ }
+ };
+
+%PrepareFunctionForOptimization(foo);
+foo(y);
+foo(y);
+%OptimizeFunctionOnNextCall(foo);
+triggerLazyDeopt = true;
+foo(y);
+assertUnoptimized(foo);
+assertEquals(getIteratorCount, 3);
+assertEquals(iteratorCount, 2);
+assertEquals(iteratorAfterLazyDeoptCount, 1);
diff --git a/deps/v8/test/mjsunit/es6/large-classes-properties.js b/deps/v8/test/mjsunit/es6/large-classes-properties.js
index fe3fb13b8f..c725d8376e 100644
--- a/deps/v8/test/mjsunit/es6/large-classes-properties.js
+++ b/deps/v8/test/mjsunit/es6/large-classes-properties.js
@@ -3,6 +3,8 @@
// found in the LICENSE file.
// Flags: --allow-natives-syntax
+// This gets very slow with stress flags, and triggers optimization anyway:
+// Flags: --nostress-opt --noalways-opt
(function testLargeClassesProperties(){
 // This is to test for dictionary mode when there are more than
diff --git a/deps/v8/test/mjsunit/global-proxy-globalThis.js b/deps/v8/test/mjsunit/global-proxy-globalThis.js
new file mode 100644
index 0000000000..3b53d74792
--- /dev/null
+++ b/deps/v8/test/mjsunit/global-proxy-globalThis.js
@@ -0,0 +1,91 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+{
+ const realm = Realm.createAllowCrossRealmAccess();
+ const foo = Realm.eval(realm, "function foo() { return globalThis.foo }; foo");
+
+ %PrepareFunctionForOptimization(foo);
+ assertSame(foo(), foo);
+ %OptimizeFunctionOnNextCall(foo);
+ assertSame(foo(), foo);
+}
+
+// detachGlobal, old map
+{
+ const realm = Realm.createAllowCrossRealmAccess();
+ const foo = Realm.eval(realm, "function foo() { return globalThis.foo }; foo");
+
+ %PrepareFunctionForOptimization(foo);
+ assertSame(foo(), foo);
+ Realm.detachGlobal(realm);
+ %OptimizeFunctionOnNextCall(foo);
+ assertThrows(foo);
+}
+
+// navigate, old map
+{
+ const realm = Realm.createAllowCrossRealmAccess();
+ const foo = Realm.eval(realm, "function foo() { return globalThis.foo }; foo");
+
+ %PrepareFunctionForOptimization(foo);
+ assertSame(foo(), foo);
+ Realm.navigate(realm);
+ %OptimizeFunctionOnNextCall(foo);
+ assertThrows(foo);
+}
+
+// detachGlobal, new map
+{
+ const realm = Realm.createAllowCrossRealmAccess();
+ const foo = Realm.eval(realm, "function foo() { return globalThis.foo }; foo");
+
+ assertSame(foo(), foo);
+ Realm.detachGlobal(realm);
+ %PrepareFunctionForOptimization(foo);
+ assertThrows(foo);
+ %OptimizeFunctionOnNextCall(foo);
+ assertThrows(foo);
+}
+
+// navigate, new map
+{
+ const realm = Realm.createAllowCrossRealmAccess();
+ const foo = Realm.eval(realm, "function foo() { return globalThis.foo }; foo");
+
+ assertSame(foo(), foo);
+ Realm.navigate(realm);
+ %PrepareFunctionForOptimization(foo);
+ assertThrows(foo);
+ %OptimizeFunctionOnNextCall(foo);
+ assertThrows(foo);
+}
+
+// detachGlobal, old and new map
+{
+ const realm = Realm.createAllowCrossRealmAccess();
+ const foo = Realm.eval(realm, "function foo() { return globalThis.foo }; foo");
+
+ %PrepareFunctionForOptimization(foo);
+ assertSame(foo(), foo);
+ Realm.detachGlobal(realm);
+ assertThrows(foo);
+ %OptimizeFunctionOnNextCall(foo);
+ assertThrows(foo);
+}
+
+// navigate, old and new map
+{
+ const realm = Realm.createAllowCrossRealmAccess();
+ const foo = Realm.eval(realm, "function foo() { return globalThis.foo }; foo");
+
+ %PrepareFunctionForOptimization(foo);
+ assertSame(foo(), foo);
+ Realm.navigate(realm);
+ assertThrows(foo);
+ %OptimizeFunctionOnNextCall(foo);
+ assertThrows(foo);
+}
diff --git a/deps/v8/test/mjsunit/global-proxy-this.js b/deps/v8/test/mjsunit/global-proxy-this.js
new file mode 100644
index 0000000000..32e7868650
--- /dev/null
+++ b/deps/v8/test/mjsunit/global-proxy-this.js
@@ -0,0 +1,91 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+{
+ const realm = Realm.createAllowCrossRealmAccess();
+ const foo = Realm.eval(realm, "function foo() { return this.foo }; foo");
+
+ %PrepareFunctionForOptimization(foo);
+ assertSame(foo(), foo);
+ %OptimizeFunctionOnNextCall(foo);
+ assertSame(foo(), foo);
+}
+
+// detachGlobal, old map
+{
+ const realm = Realm.createAllowCrossRealmAccess();
+ const foo = Realm.eval(realm, "function foo() { return this.foo }; foo");
+
+ %PrepareFunctionForOptimization(foo);
+ assertSame(foo(), foo);
+ Realm.detachGlobal(realm);
+ %OptimizeFunctionOnNextCall(foo);
+ assertThrows(foo);
+}
+
+// navigate, old map
+{
+ const realm = Realm.createAllowCrossRealmAccess();
+ const foo = Realm.eval(realm, "function foo() { return this.foo }; foo");
+
+ %PrepareFunctionForOptimization(foo);
+ assertSame(foo(), foo);
+ Realm.navigate(realm);
+ %OptimizeFunctionOnNextCall(foo);
+ assertThrows(foo);
+}
+
+// detachGlobal, new map
+{
+ const realm = Realm.createAllowCrossRealmAccess();
+ const foo = Realm.eval(realm, "function foo() { return this.foo }; foo");
+
+ assertSame(foo(), foo);
+ Realm.detachGlobal(realm);
+ %PrepareFunctionForOptimization(foo);
+ assertThrows(foo);
+ %OptimizeFunctionOnNextCall(foo);
+ assertThrows(foo);
+}
+
+// navigate, new map
+{
+ const realm = Realm.createAllowCrossRealmAccess();
+ const foo = Realm.eval(realm, "function foo() { return this.foo }; foo");
+
+ assertSame(foo(), foo);
+ Realm.navigate(realm);
+ %PrepareFunctionForOptimization(foo);
+ assertThrows(foo);
+ %OptimizeFunctionOnNextCall(foo);
+ assertThrows(foo);
+}
+
+// detachGlobal, old and new map
+{
+ const realm = Realm.createAllowCrossRealmAccess();
+ const foo = Realm.eval(realm, "function foo() { return this.foo }; foo");
+
+ %PrepareFunctionForOptimization(foo);
+ assertSame(foo(), foo);
+ Realm.detachGlobal(realm);
+ assertThrows(foo);
+ %OptimizeFunctionOnNextCall(foo);
+ assertThrows(foo);
+}
+
+// navigate, old and new map
+{
+ const realm = Realm.createAllowCrossRealmAccess();
+ const foo = Realm.eval(realm, "function foo() { return this.foo }; foo");
+
+ %PrepareFunctionForOptimization(foo);
+ assertSame(foo(), foo);
+ Realm.navigate(realm);
+ assertThrows(foo);
+ %OptimizeFunctionOnNextCall(foo);
+ assertThrows(foo);
+}
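
Across both global-proxy files, the 'old map' variants warm foo up (%PrepareFunctionForOptimization plus a successful call) before the realm's global is detached or navigated, so optimization happens against stale feedback; the 'new map' variants warm it up only afterwards; the 'old and new map' variants do both. Every variant converges on the same contract, condensed here into one hypothetical pairing:

    const realm = Realm.createAllowCrossRealmAccess();
    const foo = Realm.eval(realm, "function foo() { return this.foo }; foo");
    assertSame(foo(), foo);     // the global proxy still resolves this.foo
    Realm.detachGlobal(realm);  // or Realm.navigate(realm)
    assertThrows(foo);          // accesses through the stale proxy now throw,
                                // in unoptimized and optimized code alike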
diff --git a/deps/v8/test/mjsunit/harmony/modules-import-15-top-level-await.mjs b/deps/v8/test/mjsunit/harmony/modules-import-15-top-level-await.mjs
new file mode 100644
index 0000000000..1feb3dae27
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/modules-import-15-top-level-await.mjs
@@ -0,0 +1,58 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-top-level-await --allow-natives-syntax
+// Flags: --harmony-dynamic-import
+
+var ran = false;
+
+async function test1() {
+ try {
+ let x = await import('modules-skip-8.mjs');
+ %AbortJS('failure: should be unreachable');
+ } catch(e) {
+ assertEquals('x is not defined', e.message);
+ ran = true;
+ }
+}
+
+test1();
+%PerformMicrotaskCheckpoint();
+assertTrue(ran);
+
+ran = false;
+
+async function test2() {
+ try {
+ let x = await import('modules-skip-9.mjs');
+ %AbortJS('failure: should be unreachable');
+ } catch(e) {
+ assertInstanceof(e, SyntaxError);
+ assertEquals(
+ "The requested module 'modules-skip-empty.mjs' does not provide an " +
+ "export named 'default'",
+ e.message);
+ ran = true;
+ }
+}
+
+test2();
+%PerformMicrotaskCheckpoint();
+assertTrue(ran);
+
+ran = false;
+
+async function test3() {
+ try {
+ let x = await import('nonexistent-file.mjs');
+ %AbortJS('failure: should be unreachable');
+ } catch(e) {
+ assertTrue(e.startsWith('Error reading'));
+ ran = true;
+ }
+}
+
+test3();
+%PerformMicrotaskCheckpoint();
+assertTrue(ran);
diff --git a/deps/v8/test/mjsunit/harmony/modules-import-15.mjs b/deps/v8/test/mjsunit/harmony/modules-import-15.mjs
index b4febd5787..8b313d6017 100644
--- a/deps/v8/test/mjsunit/harmony/modules-import-15.mjs
+++ b/deps/v8/test/mjsunit/harmony/modules-import-15.mjs
@@ -3,6 +3,9 @@
// found in the LICENSE file.
// Flags: --allow-natives-syntax --harmony-dynamic-import
+//
+// Note: This test fails with top-level await due to test1, which tries to
+// import a module using top-level await and expects it to fail.
var ran = false;
diff --git a/deps/v8/test/mjsunit/harmony/modules-import-rqstd-order-top-level-await-1.mjs b/deps/v8/test/mjsunit/harmony/modules-import-rqstd-order-top-level-await-1.mjs
new file mode 100644
index 0000000000..9c9dfc385b
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/modules-import-rqstd-order-top-level-await-1.mjs
@@ -0,0 +1,12 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-top-level-await
+
+assertEquals(globalThis.test262, ['1', '2', '3', '4']);
+
+import 'modules-skip-1-rqstd-order.mjs';
+import 'modules-skip-2-rqstd-order.mjs';
+import 'modules-skip-3-rqstd-order.mjs';
+import 'modules-skip-4-rqstd-order.mjs';
diff --git a/deps/v8/test/mjsunit/harmony/modules-import-rqstd-order-top-level-await-2.mjs b/deps/v8/test/mjsunit/harmony/modules-import-rqstd-order-top-level-await-2.mjs
new file mode 100644
index 0000000000..374660ec79
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/modules-import-rqstd-order-top-level-await-2.mjs
@@ -0,0 +1,15 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-top-level-await
+
+assertEquals(globalThis.test262, [
+ '1_dir_a', '2_dir_a', '3_dir_a', '4_dir_a',
+ '1', '2', '3', '4',
+ '1_dir_b', '2_dir_b', '3_dir_b', '4_dir_b']);
+
+import 'modules-skip-1-rqstd-order-top-level-await.mjs';
+import 'modules-skip-2-rqstd-order-top-level-await.mjs';
+import 'modules-skip-3-rqstd-order-top-level-await.mjs';
+import 'modules-skip-4-rqstd-order-top-level-await.mjs';
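
The marker convention in these ordering tests: a bare 'N' is pushed by modules-skip-N-rqstd-order.mjs, 'N_dir_a'/'N_dir_b' bracket the await inside the corresponding -top-level-await variant, and 'N_ind' marks an indirect importer of such a module. The expectation above therefore reads: all four awaiting modules run their pre-await halves, the dynamically imported plain modules then execute, and only afterwards do the post-await halves resume. Reduced to one hypothetical pair of modules:

    // a.mjs
    globalThis.test262.push('a_before');
    await import('./b.mjs');  // b.mjs runs: globalThis.test262.push('b');
    globalThis.test262.push('a_after');  // resumes in a later microtask turn

    // evaluating a.mjs yields the order: 'a_before', 'b', 'a_after'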
diff --git a/deps/v8/test/mjsunit/harmony/modules-import-rqstd-order-top-level-await-3.mjs b/deps/v8/test/mjsunit/harmony/modules-import-rqstd-order-top-level-await-3.mjs
new file mode 100644
index 0000000000..f145a75d5b
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/modules-import-rqstd-order-top-level-await-3.mjs
@@ -0,0 +1,13 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-top-level-await
+
+assertEquals(globalThis.test262, [
+ '1', '2_dir_a', '3', '4_dir_a', '2', '4', '2_dir_b', '4_dir_b']);
+
+import 'modules-skip-1-rqstd-order.mjs';
+import 'modules-skip-2-rqstd-order-top-level-await.mjs';
+import 'modules-skip-3-rqstd-order.mjs';
+import 'modules-skip-4-rqstd-order-top-level-await.mjs';
diff --git a/deps/v8/test/mjsunit/harmony/modules-import-rqstd-order-top-level-await-4.mjs b/deps/v8/test/mjsunit/harmony/modules-import-rqstd-order-top-level-await-4.mjs
new file mode 100644
index 0000000000..57e6e54310
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/modules-import-rqstd-order-top-level-await-4.mjs
@@ -0,0 +1,17 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-top-level-await
+
+assertEquals(globalThis.test262, [
+ '1_dir_a', '2_dir_a', '3_dir_a', '4_dir_a',
+ '1', '2', '3', '4',
+ '1_dir_b', '2_dir_b', '3_dir_b', '4_dir_b',
+ '1_ind', '2_ind', '3_ind', '4_ind',
+]);
+
+import 'modules-skip-1-rqstd-order-indirect-top-level-await.mjs';
+import 'modules-skip-2-rqstd-order-indirect-top-level-await.mjs';
+import 'modules-skip-3-rqstd-order-indirect-top-level-await.mjs';
+import 'modules-skip-4-rqstd-order-indirect-top-level-await.mjs';
diff --git a/deps/v8/test/mjsunit/harmony/modules-import-rqstd-order-top-level-await-5.mjs b/deps/v8/test/mjsunit/harmony/modules-import-rqstd-order-top-level-await-5.mjs
new file mode 100644
index 0000000000..e018705c33
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/modules-import-rqstd-order-top-level-await-5.mjs
@@ -0,0 +1,16 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-top-level-await
+
+assertEquals(globalThis.test262, [
+ '1', '2_dir_a', '3_dir_a', '4',
+ '2', '3', '2_dir_b', '3_dir_b',
+ '2_ind',
+]);
+
+import 'modules-skip-1-rqstd-order.mjs';
+import 'modules-skip-2-rqstd-order-indirect-top-level-await.mjs';
+import 'modules-skip-3-rqstd-order-top-level-await.mjs';
+import 'modules-skip-4-rqstd-order.mjs';
diff --git a/deps/v8/test/mjsunit/harmony/modules-import-rqstd-order-top-level-await-6.mjs b/deps/v8/test/mjsunit/harmony/modules-import-rqstd-order-top-level-await-6.mjs
new file mode 100644
index 0000000000..8d3ed1f255
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/modules-import-rqstd-order-top-level-await-6.mjs
@@ -0,0 +1,16 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-top-level-await
+
+assertEquals(globalThis.test262, [
+ '1_dir_a', '2_dir_a', '3', '4_dir_a',
+ '1', '2', '4', '1_dir_b', '2_dir_b',
+ '4_dir_b', '2_ind',
+]);
+
+import 'modules-skip-1-rqstd-order-top-level-await.mjs';
+import 'modules-skip-2-rqstd-order-indirect-top-level-await.mjs';
+import 'modules-skip-3-rqstd-order.mjs';
+import 'modules-skip-4-rqstd-order-top-level-await.mjs';
diff --git a/deps/v8/test/mjsunit/harmony/modules-import-rqstd-order-top-level-await-7.mjs b/deps/v8/test/mjsunit/harmony/modules-import-rqstd-order-top-level-await-7.mjs
new file mode 100644
index 0000000000..64bbeb1eb4
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/modules-import-rqstd-order-top-level-await-7.mjs
@@ -0,0 +1,12 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-top-level-await
+
+assertEquals(globalThis.test262, [
+ '1_udir_a', '1_udir_b', '2',
+]);
+
+import 'modules-skip-1-rqstd-order-unreached-top-level-await.mjs';
+import 'modules-skip-2-rqstd-order.mjs';
diff --git a/deps/v8/test/mjsunit/harmony/modules-import-rqstd-order-top-level-await-8.mjs b/deps/v8/test/mjsunit/harmony/modules-import-rqstd-order-top-level-await-8.mjs
new file mode 100644
index 0000000000..0d9fe3e381
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/modules-import-rqstd-order-top-level-await-8.mjs
@@ -0,0 +1,12 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-top-level-await
+
+assertEquals(globalThis.test262, [
+ '1_udir_a', '1_udir_b', '2', '1_uind'
+]);
+
+import 'modules-skip-1-rqstd-order-indirect-unreached-top-level-await.mjs';
+import 'modules-skip-2-rqstd-order.mjs';
diff --git a/deps/v8/test/mjsunit/harmony/modules-import-top-level-await-1.mjs b/deps/v8/test/mjsunit/harmony/modules-import-top-level-await-1.mjs
new file mode 100644
index 0000000000..c8efa5d94e
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/modules-import-top-level-await-1.mjs
@@ -0,0 +1,14 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --harmony-dynamic-import --harmony-top-level-await
+
+let promise_resolved = false;
+let m = import('modules-skip-1.mjs');
+m.then(
+ () => { promise_resolved = true; },
+ () => { %AbortJS('Promise rejected'); });
+await m;
+
+assertEquals(promise_resolved, true);
diff --git a/deps/v8/test/mjsunit/harmony/modules-import-top-level-await-2.mjs b/deps/v8/test/mjsunit/harmony/modules-import-top-level-await-2.mjs
new file mode 100644
index 0000000000..0f74aa7ca9
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/modules-import-top-level-await-2.mjs
@@ -0,0 +1,10 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-dynamic-import --harmony-top-level-await
+
+let m = import('modules-skip-1.mjs');
+let m_namespace = await m;
+
+assertEquals(42, m_namespace.life());
diff --git a/deps/v8/test/mjsunit/harmony/modules-import-top-level-await-3.mjs b/deps/v8/test/mjsunit/harmony/modules-import-top-level-await-3.mjs
new file mode 100644
index 0000000000..44c8145127
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/modules-import-top-level-await-3.mjs
@@ -0,0 +1,14 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-dynamic-import --harmony-top-level-await
+
+let m1 = import('modules-skip-1.mjs');
+let m1_namespace = await m1;
+
+let m2 = import('modules-skip-3.mjs');
+let m2_namespace = await m2;
+
+assertEquals(42, m1_namespace.life());
+assertEquals('42', m2_namespace.stringlife);
diff --git a/deps/v8/test/mjsunit/harmony/modules-import-top-level-await-4.mjs b/deps/v8/test/mjsunit/harmony/modules-import-top-level-await-4.mjs
new file mode 100644
index 0000000000..29730fa4a5
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/modules-import-top-level-await-4.mjs
@@ -0,0 +1,9 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-dynamic-import --harmony-top-level-await
+
+import * as m from 'modules-skip-1-top-level-await.mjs'
+
+assertEquals(42, m.life());
diff --git a/deps/v8/test/mjsunit/harmony/modules-import-top-level-await-5.mjs b/deps/v8/test/mjsunit/harmony/modules-import-top-level-await-5.mjs
new file mode 100644
index 0000000000..f1e7813346
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/modules-import-top-level-await-5.mjs
@@ -0,0 +1,10 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-dynamic-import --harmony-top-level-await
+
+import * as m from 'modules-skip-2-top-level-await.mjs'
+
+assertEquals(42, m.life());
+assertEquals('42', m.stringlife);
diff --git a/deps/v8/test/mjsunit/harmony/modules-import-top-level-await-6.mjs b/deps/v8/test/mjsunit/harmony/modules-import-top-level-await-6.mjs
new file mode 100644
index 0000000000..f852895e4b
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/modules-import-top-level-await-6.mjs
@@ -0,0 +1,10 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-dynamic-import --harmony-top-level-await
+
+import * as m from 'modules-skip-3-top-level-await.mjs'
+
+assertEquals(42, m.life());
+assertEquals('42', m.stringlife);
diff --git a/deps/v8/test/mjsunit/harmony/modules-import-top-level-await-7.mjs b/deps/v8/test/mjsunit/harmony/modules-import-top-level-await-7.mjs
new file mode 100644
index 0000000000..26f1440774
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/modules-import-top-level-await-7.mjs
@@ -0,0 +1,9 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --harmony-top-level-await --harmony-dynamic-import
+
+import * as m from 'modules-skip-6-top-level-await.mjs';
+
+assertEquals(m.m1.life(), m.m2.life());
diff --git a/deps/v8/test/mjsunit/harmony/modules-import-top-level-await-8.mjs b/deps/v8/test/mjsunit/harmony/modules-import-top-level-await-8.mjs
new file mode 100644
index 0000000000..aa80c73edd
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/modules-import-top-level-await-8.mjs
@@ -0,0 +1,9 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-dynamic-import --harmony-top-level-await
+
+import * as m from 'modules-skip-7-top-level-await.mjs'
+
+assertEquals(42, m.life);
diff --git a/deps/v8/test/mjsunit/harmony/modules-import-top-level-await-cycle.mjs b/deps/v8/test/mjsunit/harmony/modules-import-top-level-await-cycle.mjs
new file mode 100644
index 0000000000..0ec478e59b
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/modules-import-top-level-await-cycle.mjs
@@ -0,0 +1,16 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --harmony-top-level-await --harmony-dynamic-import
+
+import * as m1 from 'modules-skip-1-top-level-await-cycle.mjs'
+import * as m2 from 'modules-skip-2-top-level-await-cycle.mjs'
+import * as m3 from 'modules-skip-3-top-level-await-cycle.mjs'
+
+assertSame(m1.m1.m.m.life, m1.m2.m.m.life);
+assertSame(m1.m1.m.m.life, m2.m.m.life);
+assertSame(m1.m1.m.m.life, m3.m.m.life);
+
+let m4 = await import('modules-skip-1.mjs');
+assertSame(m1.m1.m.m.life, m4.life);
diff --git a/deps/v8/test/mjsunit/harmony/modules-import-top-level-await-exception-1.mjs b/deps/v8/test/mjsunit/harmony/modules-import-top-level-await-exception-1.mjs
new file mode 100644
index 0000000000..1e22f15758
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/modules-import-top-level-await-exception-1.mjs
@@ -0,0 +1,18 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-dynamic-import --harmony-top-level-await
+
+let ran = false;
+let m = import('modules-skip-2.mjs');
+await m.then(
+ () => {
+ assertUnreachable();
+ },
+ (e) => {
+ assertEquals(e.message, '42 is not the answer');
+ ran = true;
+ });
+
+assertEquals(ran, true);
diff --git a/deps/v8/test/mjsunit/harmony/modules-import-top-level-await-exception-2.mjs b/deps/v8/test/mjsunit/harmony/modules-import-top-level-await-exception-2.mjs
new file mode 100644
index 0000000000..476cfbee15
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/modules-import-top-level-await-exception-2.mjs
@@ -0,0 +1,16 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-dynamic-import --harmony-top-level-await
+
+let ran = false;
+try {
+ await import('modules-skip-2.mjs');
+ assertUnreachable();
+} catch (e) {
+ assertEquals(e.message, '42 is not the answer');
+ ran = true;
+}
+
+assertEquals(ran, true);
diff --git a/deps/v8/test/mjsunit/harmony/modules-import-top-level-await-exception-3.mjs b/deps/v8/test/mjsunit/harmony/modules-import-top-level-await-exception-3.mjs
new file mode 100644
index 0000000000..20de7ef06e
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/modules-import-top-level-await-exception-3.mjs
@@ -0,0 +1,16 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-dynamic-import --harmony-top-level-await
+
+let ran = false;
+try {
+ await import('modules-skip-4-top-level-await.mjs');
+ assertUnreachable();
+} catch (e) {
+ assertEquals(e.message, '42 is not the answer');
+ ran = true;
+}
+
+assertEquals(ran, true);
diff --git a/deps/v8/test/mjsunit/harmony/modules-skip-1-rqstd-order-indirect-top-level-await.mjs b/deps/v8/test/mjsunit/harmony/modules-skip-1-rqstd-order-indirect-top-level-await.mjs
new file mode 100644
index 0000000000..cbd357c86b
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/modules-skip-1-rqstd-order-indirect-top-level-await.mjs
@@ -0,0 +1,6 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+import 'modules-skip-1-rqstd-order-top-level-await.mjs'
+Function('return this;')().test262.push('1_ind');
diff --git a/deps/v8/test/mjsunit/harmony/modules-skip-1-rqstd-order-indirect-unreached-top-level-await.mjs b/deps/v8/test/mjsunit/harmony/modules-skip-1-rqstd-order-indirect-unreached-top-level-await.mjs
new file mode 100644
index 0000000000..c6dff00d63
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/modules-skip-1-rqstd-order-indirect-unreached-top-level-await.mjs
@@ -0,0 +1,8 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+import 'modules-skip-1-rqstd-order-unreached-top-level-await.mjs';
+
+Function('return this;')().test262.push('1_uind');
+
diff --git a/deps/v8/test/mjsunit/harmony/modules-skip-1-rqstd-order-top-level-await.mjs b/deps/v8/test/mjsunit/harmony/modules-skip-1-rqstd-order-top-level-await.mjs
new file mode 100644
index 0000000000..fcbe07a848
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/modules-skip-1-rqstd-order-top-level-await.mjs
@@ -0,0 +1,12 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+if (typeof Function('return this;')().test262 === 'undefined') {
+ Function('return this;')().test262 = ['1_dir_a'];
+} else {
+ Function('return this;')().test262.push('1_dir_a');
+}
+let m = import('modules-skip-1-rqstd-order.mjs');
+await m;
+Function('return this;')().test262.push('1_dir_b');
diff --git a/deps/v8/test/mjsunit/harmony/modules-skip-1-rqstd-order-unreached-top-level-await.mjs b/deps/v8/test/mjsunit/harmony/modules-skip-1-rqstd-order-unreached-top-level-await.mjs
new file mode 100644
index 0000000000..f2b2104ad3
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/modules-skip-1-rqstd-order-unreached-top-level-await.mjs
@@ -0,0 +1,14 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+if (typeof Function('return this;')().test262 === 'undefined') {
+ Function('return this;')().test262 = ['1_udir_a'];
+} else {
+ Function('return this;')().test262.push('1_udir_a');
+}
+if (false) {
+ assertUnreachable();
+ await 42;
+}
+Function('return this;')().test262.push('1_udir_b');
diff --git a/deps/v8/test/mjsunit/harmony/modules-skip-1-rqstd-order.mjs b/deps/v8/test/mjsunit/harmony/modules-skip-1-rqstd-order.mjs
new file mode 100644
index 0000000000..5ac1882935
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/modules-skip-1-rqstd-order.mjs
@@ -0,0 +1,9 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+if (typeof Function('return this;')().test262 === 'undefined') {
+ Function('return this;')().test262 = ['1'];
+} else {
+ Function('return this;')().test262.push('1');
+}
diff --git a/deps/v8/test/mjsunit/harmony/modules-skip-1-top-level-await-cycle.mjs b/deps/v8/test/mjsunit/harmony/modules-skip-1-top-level-await-cycle.mjs
new file mode 100644
index 0000000000..601e80a1b1
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/modules-skip-1-top-level-await-cycle.mjs
@@ -0,0 +1,8 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+import * as m1 from 'modules-skip-2-top-level-await-cycle.mjs';
+import * as m2 from 'modules-skip-3-top-level-await-cycle.mjs';
+
+export { m1, m2 };
diff --git a/deps/v8/test/mjsunit/harmony/modules-skip-1-top-level-await.mjs b/deps/v8/test/mjsunit/harmony/modules-skip-1-top-level-await.mjs
new file mode 100644
index 0000000000..25973fe9ee
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/modules-skip-1-top-level-await.mjs
@@ -0,0 +1,11 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let m = import('modules-skip-1.mjs');
+let m_namespace = await m;
+
+export function life() {
+ return m_namespace.life();
+}
+
diff --git a/deps/v8/test/mjsunit/harmony/modules-skip-2-rqstd-order-indirect-top-level-await.mjs b/deps/v8/test/mjsunit/harmony/modules-skip-2-rqstd-order-indirect-top-level-await.mjs
new file mode 100644
index 0000000000..2305422b81
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/modules-skip-2-rqstd-order-indirect-top-level-await.mjs
@@ -0,0 +1,6 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+import 'modules-skip-2-rqstd-order-top-level-await.mjs'
+Function('return this;')().test262.push('2_ind');
diff --git a/deps/v8/test/mjsunit/harmony/modules-skip-2-rqstd-order-top-level-await.mjs b/deps/v8/test/mjsunit/harmony/modules-skip-2-rqstd-order-top-level-await.mjs
new file mode 100644
index 0000000000..c2b20a53f2
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/modules-skip-2-rqstd-order-top-level-await.mjs
@@ -0,0 +1,8 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+Function('return this;')().test262.push('2_dir_a');
+let m = import('modules-skip-2-rqstd-order.mjs');
+await m;
+Function('return this;')().test262.push('2_dir_b');
diff --git a/deps/v8/test/mjsunit/harmony/modules-skip-2-rqstd-order.mjs b/deps/v8/test/mjsunit/harmony/modules-skip-2-rqstd-order.mjs
new file mode 100644
index 0000000000..7dbd64c4cf
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/modules-skip-2-rqstd-order.mjs
@@ -0,0 +1,5 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+Function('return this;')().test262.push('2');
diff --git a/deps/v8/test/mjsunit/harmony/modules-skip-2-top-level-await-cycle.mjs b/deps/v8/test/mjsunit/harmony/modules-skip-2-top-level-await-cycle.mjs
new file mode 100644
index 0000000000..3171bb88ea
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/modules-skip-2-top-level-await-cycle.mjs
@@ -0,0 +1,7 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+import * as m from 'modules-skip-4-top-level-await-cycle.mjs';
+
+export { m };
diff --git a/deps/v8/test/mjsunit/harmony/modules-skip-2-top-level-await.mjs b/deps/v8/test/mjsunit/harmony/modules-skip-2-top-level-await.mjs
new file mode 100644
index 0000000000..4aa2f2cdcd
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/modules-skip-2-top-level-await.mjs
@@ -0,0 +1,15 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+import * as m1 from 'modules-skip-3.mjs'
+
+let m2 = import('modules-skip-1-top-level-await.mjs');
+let m2_namespace = await m2;
+
+export let stringlife = m1.stringlife;
+
+export function life() {
+ return m2_namespace.life();
+}
+
diff --git a/deps/v8/test/mjsunit/harmony/modules-skip-3-rqstd-order-indirect-top-level-await.mjs b/deps/v8/test/mjsunit/harmony/modules-skip-3-rqstd-order-indirect-top-level-await.mjs
new file mode 100644
index 0000000000..2b4dae0063
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/modules-skip-3-rqstd-order-indirect-top-level-await.mjs
@@ -0,0 +1,6 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+import 'modules-skip-3-rqstd-order-top-level-await.mjs'
+Function('return this;')().test262.push('3_ind');
diff --git a/deps/v8/test/mjsunit/harmony/modules-skip-3-rqstd-order-top-level-await.mjs b/deps/v8/test/mjsunit/harmony/modules-skip-3-rqstd-order-top-level-await.mjs
new file mode 100644
index 0000000000..f3b8904731
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/modules-skip-3-rqstd-order-top-level-await.mjs
@@ -0,0 +1,8 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+Function('return this;')().test262.push('3_dir_a');
+let m = import('modules-skip-3-rqstd-order.mjs');
+await m;
+Function('return this;')().test262.push('3_dir_b');
diff --git a/deps/v8/test/mjsunit/harmony/modules-skip-3-rqstd-order.mjs b/deps/v8/test/mjsunit/harmony/modules-skip-3-rqstd-order.mjs
new file mode 100644
index 0000000000..bd70e70aa5
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/modules-skip-3-rqstd-order.mjs
@@ -0,0 +1,5 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+Function('return this;')().test262.push('3');
diff --git a/deps/v8/test/mjsunit/harmony/modules-skip-3-top-level-await-cycle.mjs b/deps/v8/test/mjsunit/harmony/modules-skip-3-top-level-await-cycle.mjs
new file mode 100644
index 0000000000..3171bb88ea
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/modules-skip-3-top-level-await-cycle.mjs
@@ -0,0 +1,7 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+import * as m from 'modules-skip-4-top-level-await-cycle.mjs';
+
+export { m };
diff --git a/deps/v8/test/mjsunit/harmony/modules-skip-3-top-level-await.mjs b/deps/v8/test/mjsunit/harmony/modules-skip-3-top-level-await.mjs
new file mode 100644
index 0000000000..eea2c7a29b
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/modules-skip-3-top-level-await.mjs
@@ -0,0 +1,12 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+import * as m1 from 'modules-skip-1-top-level-await.mjs';
+import * as m2 from 'modules-skip-3.mjs';
+
+export function life() {
+ return m1.life();
+}
+
+export let stringlife = m2.stringlife;
diff --git a/deps/v8/test/mjsunit/harmony/modules-skip-4-rqstd-order-indirect-top-level-await.mjs b/deps/v8/test/mjsunit/harmony/modules-skip-4-rqstd-order-indirect-top-level-await.mjs
new file mode 100644
index 0000000000..7c75a9aadc
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/modules-skip-4-rqstd-order-indirect-top-level-await.mjs
@@ -0,0 +1,6 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+import 'modules-skip-4-rqstd-order-top-level-await.mjs'
+Function('return this;')().test262.push('4_ind');
diff --git a/deps/v8/test/mjsunit/harmony/modules-skip-4-rqstd-order-top-level-await.mjs b/deps/v8/test/mjsunit/harmony/modules-skip-4-rqstd-order-top-level-await.mjs
new file mode 100644
index 0000000000..1659ba681e
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/modules-skip-4-rqstd-order-top-level-await.mjs
@@ -0,0 +1,8 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+Function('return this;')().test262.push('4_dir_a');
+let m = import('modules-skip-4-rqstd-order.mjs');
+await m;
+Function('return this;')().test262.push('4_dir_b');
diff --git a/deps/v8/test/mjsunit/harmony/modules-skip-4-rqstd-order.mjs b/deps/v8/test/mjsunit/harmony/modules-skip-4-rqstd-order.mjs
new file mode 100644
index 0000000000..7fdd12ca7a
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/modules-skip-4-rqstd-order.mjs
@@ -0,0 +1,5 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+Function('return this;')().test262.push('4');
diff --git a/deps/v8/test/mjsunit/harmony/modules-skip-4-top-level-await-cycle.mjs b/deps/v8/test/mjsunit/harmony/modules-skip-4-top-level-await-cycle.mjs
new file mode 100644
index 0000000000..2b58e2399f
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/modules-skip-4-top-level-await-cycle.mjs
@@ -0,0 +1,7 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let m = await import('modules-skip-1.mjs');
+
+export { m };
diff --git a/deps/v8/test/mjsunit/harmony/modules-skip-4-top-level-await.mjs b/deps/v8/test/mjsunit/harmony/modules-skip-4-top-level-await.mjs
new file mode 100644
index 0000000000..00576a23c1
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/modules-skip-4-top-level-await.mjs
@@ -0,0 +1,7 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+import 'modules-skip-5-top-level-await.mjs';
+
+assertUnreachable();
diff --git a/deps/v8/test/mjsunit/harmony/modules-skip-5-top-level-await.mjs b/deps/v8/test/mjsunit/harmony/modules-skip-5-top-level-await.mjs
new file mode 100644
index 0000000000..28cf2a9c18
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/modules-skip-5-top-level-await.mjs
@@ -0,0 +1,5 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+await import('modules-skip-2.mjs')
diff --git a/deps/v8/test/mjsunit/harmony/modules-skip-6-top-level-await.mjs b/deps/v8/test/mjsunit/harmony/modules-skip-6-top-level-await.mjs
new file mode 100644
index 0000000000..65849dba3e
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/modules-skip-6-top-level-await.mjs
@@ -0,0 +1,10 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+import * as m1 from 'modules-skip-3-top-level-await.mjs';
+
+let m2 = await import('modules-skip-1.mjs');
+
+export { m1, m2 };
+
diff --git a/deps/v8/test/mjsunit/harmony/modules-skip-7-top-level-await.mjs b/deps/v8/test/mjsunit/harmony/modules-skip-7-top-level-await.mjs
new file mode 100644
index 0000000000..bc7f22b771
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/modules-skip-7-top-level-await.mjs
@@ -0,0 +1,14 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function sleeping_promise() {
+ return new Promise((resolve) => setTimeout(resolve));
+}
+
+export let life;
+
+await sleeping_promise();
+life = -1;
+await sleeping_promise();
+life = (await import('modules-skip-1.mjs')).life();
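
modules-skip-7-top-level-await.mjs exercises two properties at once: `export let life` is a live binding, and a module that awaits keeps its importers from evaluating until it finishes, so modules-import-top-level-await-8.mjs above can only ever observe the final assignment (42), never the intermediate undefined or -1. Schematically (hypothetical reduction):

    // exporter.mjs
    export let v = 0;
    await Promise.resolve();
    v = 42;                    // no importer has run yet

    // importer.mjs
    import { v } from './exporter.mjs';
    console.log(v);            // 42 -- evaluation was deferred past the exporter's awaits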
diff --git a/deps/v8/test/mjsunit/harmony/private-accessors.js b/deps/v8/test/mjsunit/harmony/private-accessors.js
index 3a828116a1..44ec2a0790 100644
--- a/deps/v8/test/mjsunit/harmony/private-accessors.js
+++ b/deps/v8/test/mjsunit/harmony/private-accessors.js
@@ -83,6 +83,30 @@
assertEquals('d', new C().getA().getD());
}
+{
+ assertThrows(() => {
+ class A {
+ [this.#a] = 1;
+ get #a() {}
+ }
+ }, TypeError);
+
+ assertThrows(() => {
+ class A {
+ [this.#a] = 1;
+ set #a(val) {}
+ }
+ }, TypeError);
+
+ assertThrows(() => {
+ class A {
+ [this.#a] = 1;
+ set #a(val) {}
+ get #a() {}
+ }
+ }, TypeError);
+}
+
// Duplicate private accessors.
// https://tc39.es/proposal-private-methods/#sec-static-semantics-early-errors
{
diff --git a/deps/v8/test/mjsunit/harmony/private-fields.js b/deps/v8/test/mjsunit/harmony/private-fields.js
index 0c1c04bc75..067b208620 100644
--- a/deps/v8/test/mjsunit/harmony/private-fields.js
+++ b/deps/v8/test/mjsunit/harmony/private-fields.js
@@ -476,3 +476,12 @@
let c = new C;
assertThrows(() => c.getA(), SyntaxError);
}
+
+{
+ assertThrows(() => {
+ class A {
+ [this.#a] = 1;
+ #a = 2;
+ }
+ }, TypeError);
+}
diff --git a/deps/v8/test/mjsunit/harmony/private-methods.js b/deps/v8/test/mjsunit/harmony/private-methods.js
index fcd80823c1..b42e4f658c 100644
--- a/deps/v8/test/mjsunit/harmony/private-methods.js
+++ b/deps/v8/test/mjsunit/harmony/private-methods.js
@@ -295,3 +295,12 @@
assertEquals(1, new C().fn());
}
+
+{
+ assertThrows(() => {
+ class A {
+ [this.#a] = 1;
+ #a() { }
+ }
+ }, TypeError);
+}
diff --git a/deps/v8/test/mjsunit/harmony/private-name-scopes.js b/deps/v8/test/mjsunit/harmony/private-name-scopes.js
new file mode 100644
index 0000000000..e6060cf81a
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/private-name-scopes.js
@@ -0,0 +1,137 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+{
+ let heritageFn;
+ class O {
+ #f = "O.#f";
+ static C = class C extends (heritageFn = function () {
+ return class D {
+ exfil(obj) { return obj.#f; }
+ exfilEval(obj) { return eval("obj.#f"); }
+ };
+ }) {
+ #f = "C.#f";
+ };
+ }
+
+ const o = new O;
+ const c = new O.C;
+ const D = heritageFn();
+ const d = new D;
+ assertEquals(d.exfil(o), "O.#f");
+ assertEquals(d.exfilEval(o), "O.#f");
+ assertThrows(() => d.exfil(c), TypeError);
+ assertThrows(() => d.exfilEval(c), TypeError);
+}
+
+// Early errors
+
+assertThrows(() => eval("new class extends " +
+ "(class { m() { let x = this.#f; } }) " +
+ "{ #f }"), SyntaxError);
+
+assertThrows(() => eval("new class extends this.#foo { #foo }"), SyntaxError);
+
+// Runtime errors
+
+{
+ // Test private name context chain recalc.
+ let heritageFn;
+ class O {
+ #f = "O.#f";
+ static C = class C extends (heritageFn = function () {
+ return class D { exfil(obj) { return obj.#f; } }
+ }) {
+ #f = "C.#f";
+ };
+ }
+
+ const o = new O;
+ const c = new O.C;
+ const D = heritageFn();
+ const d = new D;
+ assertEquals(d.exfil(o), "O.#f");
+ assertThrows(() => d.exfil(c), TypeError);
+}
+
+{
+ // Test private name context chain recalc with nested closures with context.
+ let heritageFn;
+ class O {
+ #f = "O.#f";
+ static C = class C extends (heritageFn = function () {
+ let forceContext = 1;
+ return () => {
+ assertEquals(forceContext, 1);
+ return class D { exfil(obj) { return obj.#f; } }
+ };
+ }) {
+ #f = "C.#f";
+ };
+ }
+
+ const o = new O;
+ const c = new O.C;
+ const D = heritageFn()();
+ const d = new D;
+ assertEquals(d.exfil(o), "O.#f");
+ assertThrows(() => d.exfil(c), TypeError);
+}
+
+{
+ // Test private name context chain recalc where skipped class has no context.
+ let heritageFn;
+ class O {
+ #f = "O.#f";
+ static C = class C0 extends (class C1 extends (heritageFn = function (obj) {
+ if (obj) { return obj.#f; }
+ }) {}) {
+ #f = "C0.#f"
+ }
+ }
+
+ const o = new O;
+ const c = new O.C;
+ assertEquals(heritageFn(o), "O.#f");
+ assertThrows(() => heritageFn(c), TypeError);
+}
+
+{
+ // Test private name context chain recalc where skipping function has no
+ // context.
+ let heritageFn;
+ class O {
+ #f = "O.#f";
+ static C = class C extends (heritageFn = function () {
+ return (obj) => { return obj.#f; }
+ }) {
+ #f = "C.#f";
+ }
+ }
+
+ const o = new O;
+ const c = new O.C;
+ assertEquals(heritageFn()(o), "O.#f");
+ assertThrows(() => heritageFn()(c), TypeError);
+}
+
+{
+ // Test private name context chain recalc where neither skipped class nor
+ // skipping function has contexts.
+ let heritageFn;
+ class O {
+ #f = "O.#f";
+ static C = class C0 extends (class C1 extends (heritageFn = function () {
+ return (obj) => { return obj.#f; }
+ }) {}) {
+ #f = "C0.#f";
+ }
+ }
+
+ const o = new O;
+ const c = new O.C;
+ assertEquals(heritageFn()(o), "O.#f");
+ assertThrows(() => heritageFn()(c), TypeError);
+}
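The common thread in these cases: `#f` resolves lexically to the innermost enclosing class that declares it, and every access then performs a run-time brand check on the receiver. A standalone sketch in the same mjsunit idiom, independent of the heritage-position machinery above:

    class A {
      #f = 'A';
      static read(obj) { return obj.#f; }  // #f is lexically bound to A's #f.
    }
    class B { #f = 'B'; }  // A distinct #f with its own brand.

    assertEquals(A.read(new A), 'A');
    assertThrows(() => A.read(new B), TypeError);  // B instances lack A's #f brand.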
diff --git a/deps/v8/test/mjsunit/harmony/regexp-match-indices.js b/deps/v8/test/mjsunit/harmony/regexp-match-indices.js
new file mode 100644
index 0000000000..cc3710ce7e
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/regexp-match-indices.js
@@ -0,0 +1,105 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --harmony-regexp-match-indices
+
+// Sanity test.
+{
+ const re = /a+(?<Z>z)?/;
+ const m = re.exec("xaaaz");
+
+ assertEquals(m.indices, [[1, 5], [4, 5]]);
+  assertEquals(m.indices.groups, {'Z': [4, 5]});
+}
+
+// Capture groups that are not matched return `undefined`.
+{
+ const re = /a+(?<Z>z)?/;
+ const m = re.exec("xaaay");
+
+ assertEquals(m.indices, [[1, 4], undefined]);
+ assertEquals(m.indices.groups, {'Z': undefined});
+}
+
+// Two capture groups.
+{
+ const re = /a+(?<A>zz)?(?<B>ii)?/;
+ const m = re.exec("xaaazzii");
+
+ assertEquals(m.indices, [[1, 8], [4, 6], [6, 8]]);
+ assertEquals(m.indices.groups, {'A': [4, 6], 'B': [6, 8]});
+}
+
+// No capture groups.
+{
+ const re = /a+/;
+ const m = re.exec("xaaazzii");
+
+  assertEquals(m.indices, [[1, 4]]);
+ assertEquals(m.indices.groups, undefined);
+}
+
+// No match.
+{
+ const re = /a+/;
+ const m = re.exec("xzzii");
+
+ assertEquals(null, m);
+}
+
+// Unnamed capture groups.
+{
+ const re = /a+(z)?/;
+  const m = re.exec("xaaaz");
+
+  assertEquals(m.indices, [[1, 5], [4, 5]]);
+  assertEquals(m.indices.groups, undefined);
+}
+
+// Named and unnamed capture groups.
+{
+ const re = /a+(z)?(?<Y>y)?/;
+  const m = re.exec("xaaazyy");
+
+  assertEquals(m.indices, [[1, 6], [4, 5], [5, 6]]);
+  assertEquals(m.indices.groups, {'Y': [5, 6]});
+}
+
+
+// Verify property overwrite.
+{
+ const re = /a+(?<Z>z)?/;
+ const m = re.exec("xaaaz");
+
+ m.indices = null;
+ assertEquals(null, m.indices);
+}
+
+// Mess with array prototype, we should still do the right thing.
+{
+ Object.defineProperty(Array.prototype, "groups", {
+ get: () => {
+ assertUnreachable();
+ return null;
+ },
+ set: (x) => {
+ assertUnreachable();
+ }
+ });
+
+ Object.defineProperty(Array.prototype, "0", {
+ get: () => {
+ assertUnreachable();
+ return null;
+ },
+ set: (x) => {
+ assertUnreachable();
+ }
+ });
+
+ const re = /a+(?<Z>z)?/;
+ const m = re.exec("xaaaz");
+
+  assertEquals(m.indices.groups, {'Z': [4, 5]});
+}
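For reference, this behavior later shipped unflagged as the RegExp `d` (hasIndices) flag, under which the same shapes are observable without --harmony-regexp-match-indices:

    const m = /a+(?<Z>z)?/d.exec("xaaaz");
    m.indices;         // [[1, 5], [4, 5]]
    m.indices.groups;  // { Z: [4, 5] }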
diff --git a/deps/v8/test/mjsunit/harmony/sharedarraybuffer-stress.js b/deps/v8/test/mjsunit/harmony/sharedarraybuffer-stress.js
index 24724eea14..e4cdff5d36 100644
--- a/deps/v8/test/mjsunit/harmony/sharedarraybuffer-stress.js
+++ b/deps/v8/test/mjsunit/harmony/sharedarraybuffer-stress.js
@@ -9,11 +9,9 @@ function Alloc(size) {
}
function RunSomeAllocs(total, retained, size) {
- print(`-------iterations = ${total}, retained = $ { retained } -------`);
+ print(`-------iterations = ${total}, retained = ${retained} -------`);
var array = new Array(retained);
for (var i = 0; i < total; i++) {
- if ((i % 25) == 0)
- print(`iteration $ { i }`);
let pair = Alloc(size);
// For some iterations, retain the memory, view, or both.
switch (i % 3) {
diff --git a/deps/v8/test/mjsunit/harmony/static-private-methods.js b/deps/v8/test/mjsunit/harmony/static-private-methods.js
new file mode 100644
index 0000000000..ed81bb3038
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/static-private-methods.js
@@ -0,0 +1,248 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-private-methods
+
+"use strict";
+
+// Static private methods
+{
+ let store = 1;
+ class C {
+ static #a() { return store; }
+ static a() { return this.#a(); }
+ }
+ assertEquals(C.a(), store);
+ assertThrows(() => C.a.call(new C), TypeError);
+}
+
+// Complementary static private accessors.
+{
+ let store = 1;
+ class C {
+ static get #a() { return store; }
+ static set #a(val) { store = val; }
+ static incA() { this.#a++; }
+ static getA() { return this.#a; }
+ static setA(val) { this.#a = val; }
+ }
+ assertEquals(C.getA(), 1);
+ C.incA();
+ assertEquals(store, 2);
+ C.setA(3);
+ assertEquals(store, 3);
+
+ assertThrows(() => C.incA.call(new C), TypeError);
+ assertThrows(() => C.getA.call(new C), TypeError);
+ assertThrows(() => C.setA.call(new C), TypeError);
+
+ assertThrows(() => { const incA = C.incA; incA(); }, TypeError);
+ assertThrows(() => { const getA = C.getA; getA(); }, TypeError);
+ assertThrows(() => { const setA = C.setA; setA(); }, TypeError);
+}
+
+// Static private methods accessed explicitly in an anonymous nested class.
+{
+ class Outer {
+ #a() { return 'Outer'; }
+ a() { return this.#a(); }
+ test() {
+ return class {
+ static #a() { return 'Inner'; }
+ static a() { return this.#a(); }
+ };
+ }
+ }
+
+ const obj = new Outer;
+ const C = obj.test();
+ assertEquals(C.a(), 'Inner');
+ assertThrows(() => obj.a.call(C), TypeError);
+ assertThrows(() => obj.a.call(new C), TypeError);
+}
+
+// Static private methods accessed explicitly in a named nested class.
+{
+ class Outer {
+ #a() { return 'Outer'; }
+ a() { return this.#a(); }
+ test() {
+ return class Inner {
+ static #a() { return 'Inner'; }
+ static a() { return this.#a(); }
+ };
+ }
+ }
+
+ const obj = new Outer;
+ const C = obj.test();
+ assertEquals(C.a(), 'Inner');
+ assertThrows(() => obj.a.call(C), TypeError);
+ assertThrows(() => obj.a.call(new C), TypeError);
+}
+
+// Static private methods accessed through eval in an anonymous nested class.
+{
+ class Outer {
+ #a() { return 'Outer'; }
+ a() { return this.#a(); }
+ test() {
+ return class {
+ static #a() { return 'Inner'; }
+ static a(str) { return eval(str); }
+ };
+ }
+ }
+
+ const obj = new Outer;
+ const C = obj.test();
+ assertEquals(C.a('this.#a()'), 'Inner');
+ assertThrows(() => C.a('Outer.#a()'), TypeError);
+}
+
+// Static private methods accessed through eval in a named nested class.
+{
+ class Outer {
+ #a() { return 'Outer'; }
+ a() { return this.#a(); }
+ test() {
+ return class Inner {
+ static #a() { return 'Inner'; }
+ static a(str) { return eval(str); }
+ };
+ }
+ }
+
+ const obj = new Outer;
+ const C = obj.test();
+ assertEquals(C.a('this.#a()'), 'Inner');
+ assertEquals(C.a('Inner.#a()'), 'Inner');
+ assertThrows(() => C.a('Outer.#a()'), TypeError);
+  assertThrows(() => C.a('(new Outer).#a()'), TypeError);
+}
+
+// Static private methods in the outer class accessed through eval
+// in a named nested class.
+{
+ class Outer {
+ static #a() { return 'Outer'; }
+ static test() {
+ return class Inner {
+ static run(str) { return eval(str); }
+ };
+ }
+ }
+
+ const C = Outer.test();
+ assertEquals(C.run('Outer.#a()'), 'Outer');
+ assertThrows(() => C.run('this.#a()'), TypeError);
+ assertThrows(() => C.run('Inner.#a()'), TypeError);
+ assertThrows(() => C.run('(new Outer).#a()'), TypeError);
+}
+
+// Static private methods in the outer class accessed explicitly
+// in a named nested class.
+{
+ class Outer {
+ static #a() { return 'Outer'; }
+ static test() {
+ return class Inner {
+ static getA(klass) { return klass.#a(); }
+ };
+ }
+ }
+
+ const C = Outer.test();
+ assertEquals(C.getA(Outer), 'Outer');
+ assertThrows(() => C.getA.call(C), TypeError);
+ assertThrows(() => C.getA.call(new Outer), TypeError);
+}
+
+// Static private methods in the outer class accessed explicitly
+// in an anonymous nested class.
+{
+ class Outer {
+ static #a() { return 'Outer'; }
+ static test() {
+ return class {
+ static getA(klass) { return klass.#a(); }
+ };
+ }
+ }
+
+ const C = Outer.test();
+ assertEquals(C.getA(Outer), 'Outer');
+ assertThrows(() => C.getA.call(C), TypeError);
+ assertThrows(() => C.getA.call(new Outer), TypeError);
+}
+
+// Super property access in static private methods
+{
+ class A {
+ static a = 1;
+ }
+
+ class B extends A {
+ static #a() { return super.a; }
+ static getA() { return this.#a(); }
+ }
+
+ assertEquals(B.getA(), 1);
+}
+
+// Invalid super property access in static private methods
+{
+ class A {
+ static #a() { return 1; }
+ static getA() { return this.#a(); }
+ }
+
+ class B extends A {
+ static getA() { return super.getA(); }
+ }
+
+ assertThrows(() => B.getA(), TypeError);
+}
+
+// Static private methods accessed in eval.
+{
+ class C {
+ static #m(v) { return v; }
+ static test(str) {
+ return eval(str);
+ }
+ }
+
+ assertEquals(C.test('this.#m(1)'), 1);
+}
+
+// Test that the receiver is checked during run time.
+{
+ const C = class {
+ static #a() { }
+ static test(klass) { return klass.#a; }
+ };
+ const test = C.test;
+ assertThrows(test, TypeError);
+}
+
+// Duplicate static private accessors and methods.
+{
+ assertThrows('class C { static get #a() {} static get #a() {} }', SyntaxError);
+ assertThrows('class C { static get #a() {} static #a() {} }', SyntaxError);
+ assertThrows('class C { static get #a() {} get #a() {} }', SyntaxError);
+ assertThrows('class C { static get #a() {} set #a(val) {} }', SyntaxError);
+ assertThrows('class C { static get #a() {} #a() {} }', SyntaxError);
+
+ assertThrows('class C { static set #a(val) {} static set #a(val) {} }', SyntaxError);
+ assertThrows('class C { static set #a(val) {} static #a() {} }', SyntaxError);
+ assertThrows('class C { static set #a(val) {} get #a() {} }', SyntaxError);
+ assertThrows('class C { static set #a(val) {} set #a(val) {} }', SyntaxError);
+ assertThrows('class C { static set #a(val) {} #a() {} }', SyntaxError);
+
+ assertThrows('class C { static #a() {} static #a() {} }', SyntaxError);
+ assertThrows('class C { static #a() {} #a(val) {} }', SyntaxError);
+ assertThrows('class C { static #a() {} set #a(val) {} }', SyntaxError);
+ assertThrows('class C { static #a() {} get #a() {} }', SyntaxError);
+}
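A pattern worth making explicit, since it drives most of the assertThrows cases above: the brand for a static private member is the class constructor itself, never its instances, so any other receiver fails the run-time check. In miniature:

    class C {
      static #a() { return 1; }
      static a() { return this.#a(); }
    }
    assertEquals(C.a(), 1);                          // Receiver C carries the brand.
    assertThrows(() => C.a.call(new C), TypeError);  // Instances do not.
    assertThrows(() => (0, C.a)(), TypeError);       // Nor does a detached call.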
diff --git a/deps/v8/test/mjsunit/md5.js b/deps/v8/test/mjsunit/md5.js
index 38dc802312..b2dbc1e45a 100644
--- a/deps/v8/test/mjsunit/md5.js
+++ b/deps/v8/test/mjsunit/md5.js
@@ -201,11 +201,9 @@ To know our further pleasure in this case,\n\
To old Free-town, our common judgment-place.\n\
Once more, on pain of death, all men depart.\n"
-for (var i = 0; i < 4; ++i) {
+for (var i = 0; i < 2; ++i) {
plainText += plainText;
}
-assertEquals(hex_md5("abc"), "900150983cd24fb0d6963f7d28e17f72");
-for (var i = 0; i < 11; ++i) {
- assertEquals(hex_md5(plainText), "1b8719c72d5d8bfd06e096ef6c6288c5");
-}
+assertEquals("900150983cd24fb0d6963f7d28e17f72", hex_md5("abc"));
+assertEquals("6c843ffbdd773e88ae4ac4a5df79a784", hex_md5(plainText));
diff --git a/deps/v8/test/mjsunit/messages.js b/deps/v8/test/mjsunit/messages.js
index 916a7d554f..7c3521b685 100644
--- a/deps/v8/test/mjsunit/messages.js
+++ b/deps/v8/test/mjsunit/messages.js
@@ -166,13 +166,13 @@ for (constructor of typedArrayConstructors) {
const ta = new constructor([1]);
%ArrayBufferDetach(ta.buffer);
ta.find(() => {});
- }, "Cannot perform %TypedArray%.prototype.find on a neutered ArrayBuffer", TypeError);
+ }, "Cannot perform %TypedArray%.prototype.find on a detached ArrayBuffer", TypeError);
test(() => {
const ta = new constructor([1]);
%ArrayBufferDetach(ta.buffer);
ta.findIndex(() => {});
- }, "Cannot perform %TypedArray%.prototype.findIndex on a neutered ArrayBuffer", TypeError);
+ }, "Cannot perform %TypedArray%.prototype.findIndex on a detached ArrayBuffer", TypeError);
}
// kFirstArgumentNotRegExp
diff --git a/deps/v8/test/mjsunit/mjsunit.status b/deps/v8/test/mjsunit/mjsunit.status
index 134a49f748..f0d473f84a 100644
--- a/deps/v8/test/mjsunit/mjsunit.status
+++ b/deps/v8/test/mjsunit/mjsunit.status
@@ -81,6 +81,9 @@
'wasm/shared-memory-worker-explicit-gc-stress': [PASS, ['mode == debug', SKIP], ['tsan', SKIP]],
'wasm/shared-memory-worker-gc-stress': [PASS, ['mode == debug', SKIP]],
+ # Slow in simulator runs.
+ 'random-bit-correlations': [PASS, ['simulator_run == True', SLOW]],
+
##############################################################################
# Only RegExp stuff tested, no need for extensive optimizing compiler tests.
'regexp-global': [PASS, NO_VARIANTS],
@@ -292,7 +295,6 @@
'compare-known-objects-slow': [SKIP],
'compiler/array-multiple-receiver-maps': [SKIP],
# Tests taking too long
- 'packed-elements': [SKIP],
'regress/regress-1122': [SKIP],
'regress/regress-331444': [SKIP],
'regress/regress-353551': [SKIP],
@@ -377,6 +379,7 @@
'regress/regress-6838-2': [SKIP],
'regress/regress-6838-3': [SKIP],
'regress/regress-9022': [SKIP],
+ 'regress/regress-9832': [SKIP],
'regress/regress-crbug-934138': [SKIP],
'regress/regress-crbug-976934': [SKIP],
@@ -393,13 +396,14 @@
'regress/regress-crbug-759327': [SKIP],
'regress/regress-crbug-898974': [SKIP],
'regexp-tier-up': [SKIP],
+ 'regexp-tier-up-multiple': [SKIP],
+ 'regress/regress-996234': [SKIP],
# These tests check that we can trace the compiler.
'tools/compiler-trace-flags': [SKIP],
'tools/compiler-trace-flags-wasm': [SKIP],
# Too slow on arm64 simulator and debug: https://crbug.com/v8/7783
- 'bit-not': [PASS, ['arch == arm64 and mode == debug and simulator_run', SKIP]],
'md5': [PASS, ['arch == arm64 and mode == debug and simulator_run', SKIP]],
# Slow with pointer compression.
@@ -470,15 +474,15 @@
'array-reduce': [PASS, SLOW],
'array-sort': [PASS, SLOW],
'array-splice': [PASS, SLOW],
- 'bit-not': [PASS, SLOW],
+ 'array-store-and-grow': [PASS, SLOW],
'compiler/alloc-number': [PASS, SLOW],
'compiler/osr-with-args': [PASS, SLOW],
'generated-transition-stub': [PASS, SLOW],
'json2': [PASS, SLOW],
+ 'large-object-literal-slow-elements': [PASS, SLOW],
'math-floor-of-div-nosudiv': [PASS, SLOW],
'math-floor-of-div': [PASS, SLOW],
'messages': [PASS, SLOW],
- 'packed-elements': [PASS, SLOW],
'regress/regress-2790': [PASS, SLOW],
'regress/regress-331444': [PASS, SLOW],
'regress/regress-490': [PASS, SLOW],
@@ -486,6 +490,7 @@
'regress/regress-create-exception': [PASS, SLOW],
'regress/regress-json-stringify-gc': [PASS, SLOW],
'string-indexof-2': [PASS, SLOW],
+ 'unbox-double-arrays': [PASS, SLOW],
'unicodelctest-no-optimization': [PASS, SLOW],
'unicodelctest': [PASS, SLOW],
'unicode-test': [PASS, SLOW],
@@ -494,19 +499,28 @@
# BUG(v8:7247).
'regress/regress-779407': [PASS, SLOW, NO_VARIANTS],
-
- # BUG(v8:9256). Slow with pointer compression.
- 'regress/regress-708247': [PASS, ['pointer_compression', SLOW]],
- 'es6/array-concat': [PASS, ['pointer_compression', SLOW]],
- 'non-extensible-array-reduce': [PASS, ['pointer_compression', SLOW]],
- 'regress/regress-454725': [PASS, ['pointer_compression', SLOW]],
}], # 'arch == arm64'
+##############################################################################
+['arch == arm64 and simulator_run', {
+ # Slow in simulator builds
+ 'compiler/osr-follow': [PASS, SLOW],
+ 'es6/array-concat': [PASS, SLOW],
+ 'non-extensible-array-reduce': [PASS, SLOW],
+ 'regress/regress-454725': [PASS, SLOW],
+ 'regress/regress-708247': [PASS, SLOW],
+ 'compiler/osr-big': [PASS, SLOW],
+ 'frozen-array-reduce': [PASS, SLOW],
+ 'json': [PASS, SLOW],
+ 'sealed-array-reduce': [PASS, SLOW],
+ 'try': [PASS, SLOW],
+}], # 'arch == arm64 and simulator_run'
+
+##############################################################################
['arch == arm64 and mode == debug and simulator_run', {
# Pass but take too long with the simulator in debug mode.
'array-sort': [PASS, SLOW],
- 'packed-elements': [SKIP],
'regexp-global': [SKIP],
'math-floor-of-div': [PASS, SLOW],
'math-floor-of-div-nosudiv': [PASS, SLOW],
@@ -641,7 +655,6 @@
# Slow tests.
'array-sort': [PASS, SLOW],
'compiler/osr-with-args': [PASS, SLOW],
- 'packed-elements': [PASS, SLOW],
'regress/regress-2790': [PASS, SLOW],
'regress/regress-91008': [PASS, SLOW],
'regress/regress-json-stringify-gc': [PASS, SLOW],
@@ -913,6 +926,10 @@
'wasm/atomics-stress': [SKIP],
'wasm/atomics64-stress': [SKIP],
'wasm/futex': [SKIP],
+
+ # Deadlocks on predictable platform (https://crbug.com/v8/9760).
+ 'wasm/async-compile': [SKIP],
+ 'wasm/streaming-compile': [SKIP],
}], # 'predictable == True'
##############################################################################
@@ -925,7 +942,6 @@
'regress/regress-crbug-482998': [PASS, SLOW],
'regress/regress-91008': [PASS, SLOW],
'regress/regress-779407': [PASS, SLOW],
- 'packed-elements': [PASS, SLOW],
'harmony/regexp-property-lu-ui': [PASS, SLOW],
'whitespaces': [PASS, SLOW],
'generated-transition-stub': [PASS, SLOW],
@@ -964,6 +980,7 @@
# The RegExp code cache means running this test multiple times is invalid.
'regexp-tier-up': [SKIP],
+ 'regexp-tier-up-multiple': [SKIP],
# Flaky crash on Odroid devices: https://crbug.com/v8/7678
'regress/regress-336820': [PASS, ['arch == arm and not simulator_run', SKIP]],
@@ -997,10 +1014,8 @@
}], # variant == stress and (arch == arm or arch == arm64) and simulator_run
##############################################################################
-['variant == nooptimization and (arch == arm or arch == arm64) and simulator_run', {
+['variant in (nooptimization, jitless) and arch in (arm, arm64) and simulator_run', {
# Slow tests: https://crbug.com/v8/7783
- 'md5': [SKIP],
- 'packed-elements': [SKIP],
'regress/regress-crbug-319860': [SKIP],
'wasm/asm-wasm-f32': [SKIP],
'wasm/asm-wasm-f64': [SKIP],
@@ -1057,7 +1072,7 @@
##############################################################################
# Liftoff is currently only sufficiently implemented on x64, ia32, arm64 and
# arm.
-# TODO(clemensh): Implement on all other platforms (crbug.com/v8/6600).
+# TODO(clemensb): Implement on all other platforms (crbug.com/v8/6600).
['arch != x64 and arch != ia32 and arch != arm64 and arch != arm', {
'wasm/liftoff': [SKIP],
'wasm/tier-up-testing-flag': [SKIP],
@@ -1068,9 +1083,6 @@
# Slow tests.
'regress/regress-crbug-493779': [SKIP],
'string-replace-gc': [SKIP],
-
- # https://crbug.com/v8/9221
- 'wasm/grow-shared-memory': [SKIP],
}], # variant == slow_path
##############################################################################
@@ -1096,4 +1108,34 @@
'regress/regress-992389': [SKIP],
}], # not embedded_builtins
+##############################################################################
+['variant == turboprop', {
+ # Deopts differently than TurboFan.
+ 'parallel-optimize-disabled': [SKIP],
+ 'compiler/native-context-specialization-hole-check': [SKIP],
+ 'compiler/number-comparison-truncations': [SKIP],
+ 'compiler/redundancy-elimination': [SKIP],
+
+ # Static asserts for optimizations don't hold due to removed optimization
+ # phases.
+ 'compiler/concurrent-inlining-1': [SKIP],
+ 'compiler/concurrent-inlining-2': [SKIP],
+ 'compiler/diamond-followedby-branch': [SKIP],
+ 'compiler/load-elimination-const-field': [SKIP],
+ 'compiler/constant-fold-add-static': [SKIP],
+}], # variant == turboprop
+
+##############################################################################
+['variant == top_level_await', {
+  # This test specifically expects to fail on top-level await.
+ 'harmony/modules-import-15': [SKIP],
+}], # variant == top_level_await
+
+##############################################################################
+['variant == stress_js_bg_compile_wasm_code_gc', {
+  # Runs significantly slower with --stress-wasm-code-gc, which is especially
+  # problematic in combination with tsan or other slow configurations.
+ 'wasm/many-modules': [SKIP],
+}], # variant == stress_js_bg_compile_wasm_code_gc
+
]
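For readers unfamiliar with the .status format touched above: the file is a Python literal, a list of ['condition', { rules }] sections evaluated against build and run variables, where each rule maps a test path to an outcome list that may carry nested guards. A hypothetical section, purely to show the shape:

    ['arch == arm and mode == debug', {
      # Plain outcome list: the test passes but may run slowly.
      'my/slow-test': [PASS, SLOW],
      # Nested guard: additionally skip when running in the simulator.
      'my/flaky-test': [PASS, ['simulator_run', SKIP]],
    }],  # hypothetical entry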
diff --git a/deps/v8/test/mjsunit/mod.js b/deps/v8/test/mjsunit/mod.js
index 8ad98fa7ec..4374dcc11d 100644
--- a/deps/v8/test/mjsunit/mod.js
+++ b/deps/v8/test/mjsunit/mod.js
@@ -31,7 +31,6 @@ function foo() {
for (var j = 1; j < 100; j++) {
if (answer == i) answer = 0;
// Positive case.
- print(j + " % " + i + " = " + answer);
m = j % i;
assertEquals(answer, m, j + " % " + i);
m = j % (-i);
diff --git a/deps/v8/test/mjsunit/packed-elements.js b/deps/v8/test/mjsunit/packed-elements.js
index d0df553451..85630e7954 100644
--- a/deps/v8/test/mjsunit/packed-elements.js
+++ b/deps/v8/test/mjsunit/packed-elements.js
@@ -92,12 +92,15 @@ function test6() {
}
function test_with_optimization(f) {
- // Run tests in a loop to make sure that inlined Array() constructor runs out
- // of new space memory and must fall back on runtime impl.
%PrepareFunctionForOptimization(f);
- for (i = 0; i < 25000; ++i) f();
+ for (i = 0; i < 3; ++i) f();
+ // Cause the inlined Array() constructor to fall back to the runtime impl.
+ %SimulateNewspaceFull();
+ f();
%OptimizeFunctionOnNextCall(f);
- for (i = 0; i < 25000; ++i) f(); // Make sure GC happens
+ f();
+ %SimulateNewspaceFull(); // Make sure GC happens.
+ f();
}
test_with_optimization(test1);
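The rewrite above leans on the standard natives-syntax protocol for forcing a deterministic optimization sequence instead of looping until heuristics fire. The general shape, assuming d8 with --allow-natives-syntax:

    function f() { return 1; }
    %PrepareFunctionForOptimization(f);  // Opt f in for manual optimization.
    f(); f();                            // Warm up to collect type feedback.
    %OptimizeFunctionOnNextCall(f);      // Queue an optimized compile.
    f();                                 // Executes the optimized code.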
diff --git a/deps/v8/test/mjsunit/readonly.js b/deps/v8/test/mjsunit/readonly.js
index ec938d65c0..69a3e6a016 100644
--- a/deps/v8/test/mjsunit/readonly.js
+++ b/deps/v8/test/mjsunit/readonly.js
@@ -26,6 +26,8 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --allow-natives-syntax
+// This test manually triggers optimization, so stress modes are unnecessary.
+// Flags: --nostress-opt --noalways-opt
// Different ways to create an object.
diff --git a/deps/v8/test/mjsunit/regexp-tier-up-multiple.js b/deps/v8/test/mjsunit/regexp-tier-up-multiple.js
new file mode 100644
index 0000000000..7325b341d0
--- /dev/null
+++ b/deps/v8/test/mjsunit/regexp-tier-up-multiple.js
@@ -0,0 +1,101 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Tier-up behavior differs between slow and fast paths in
+// RegExp.prototype.replace with a function as an argument.
+// Flags: --regexp-tier-up --regexp-tier-up-ticks=5
+// Flags: --allow-natives-syntax --no-force-slow-path --no-regexp-interpret-all
+
+const kLatin1 = true;
+const kUnicode = false;
+
+function CheckRegexpNotYetCompiled(regexp) {
+ assertFalse(%RegexpHasBytecode(regexp, kLatin1) &&
+ %RegexpHasNativeCode(regexp, kLatin1));
+ assertFalse(%RegexpHasBytecode(regexp, kUnicode) &&
+ %RegexpHasNativeCode(regexp, kUnicode));
+}
+
+// Testing RegExp.test method which calls into Runtime_RegExpExec.
+let re = new RegExp('^.$');
+CheckRegexpNotYetCompiled(re);
+
+// Testing first five executions of regexp with one-byte string subject.
+for (var i = 0; i < 5; i++) {
+ re.test("a");
+ assertTrue(%RegexpHasBytecode(re, kLatin1));
+ assertTrue(!%RegexpHasBytecode(re, kUnicode) &&
+ !%RegexpHasNativeCode(re, kUnicode));
+}
+// Testing the tier-up to native code.
+re.test("a");
+assertTrue(!%RegexpHasBytecode(re, kLatin1) &&
+           %RegexpHasNativeCode(re, kLatin1));
+assertTrue(!%RegexpHasBytecode(re, kUnicode) &&
+           !%RegexpHasNativeCode(re, kUnicode));
+re.test("a");
+assertTrue(!%RegexpHasBytecode(re, kLatin1) &&
+           %RegexpHasNativeCode(re, kLatin1));
+assertTrue(!%RegexpHasBytecode(re, kUnicode) &&
+           !%RegexpHasNativeCode(re, kUnicode));
+// Testing that the regexp will compile to native code for two-byte string
+// subject as well, because we have a single tick counter for both string
+// representations.
+re.test("π");
+assertTrue(!%RegexpHasBytecode(re, kLatin1) &&
+           %RegexpHasNativeCode(re, kLatin1));
+assertTrue(!%RegexpHasBytecode(re, kUnicode) &&
+           %RegexpHasNativeCode(re, kUnicode));
+
+// Testing String.replace method for non-global regexps.
+var subject = "a1111";
+re = /\w1/;
+CheckRegexpNotYetCompiled(re);
+
+for (var i = 0; i < 5; i++) {
+ subject.replace(re, "x");
+ assertTrue(%RegexpHasBytecode(re, kLatin1));
+ assertTrue(!%RegexpHasBytecode(re, kUnicode) &&
+ !%RegexpHasNativeCode(re, kUnicode));
+}
+
+subject.replace(re, "x");
+assertTrue(!%RegexpHasBytecode(re, kLatin1) &&
+ %RegexpHasNativeCode(re, kLatin1));
+assertTrue(!%RegexpHasBytecode(re, kUnicode) &&
+ !%RegexpHasNativeCode(re, kUnicode));
+
+// Testing String.replace method for global regexps.
+let re_g = /\w11111/g;
+CheckRegexpNotYetCompiled(re_g);
+// This regexp will not match, so it will only execute the bytecode once,
+// each time the replace method is invoked, without tiering-up and
+// recompiling to native code.
+for (var i = 0; i < 5; i++) {
+ subject.replace(re_g, "x");
+ assertTrue(%RegexpHasBytecode(re_g, kLatin1));
+ assertTrue(!%RegexpHasBytecode(re_g, kUnicode) &&
+ !%RegexpHasNativeCode(re_g, kUnicode));
+}
+
+// This regexp will match, so it will execute five times, and tier-up.
+re_g = /\w/g;
+CheckRegexpNotYetCompiled(re_g);
+subject.replace(re_g, "x");
+assertTrue(!%RegexpHasBytecode(re_g, kLatin1) &&
+ %RegexpHasNativeCode(re_g, kLatin1));
+assertTrue(!%RegexpHasBytecode(re_g, kUnicode) &&
+ !%RegexpHasNativeCode(re_g, kUnicode));
+
+// Testing String.replace method for global regexps with a function as a
+// parameter. This will tier-up eagerly and compile to native code right
+// away, even though the regexp is only executed once.
+function f() { return "x"; }
+re_g = /\w2/g;
+CheckRegexpNotYetCompiled(re_g);
+subject.replace(re_g, f);
+assertTrue(!%RegexpHasBytecode(re_g, kLatin1) &&
+ %RegexpHasNativeCode(re_g, kLatin1));
+assertTrue(!%RegexpHasBytecode(re_g, kUnicode) &&
+ !%RegexpHasNativeCode(re_g, kUnicode));
diff --git a/deps/v8/test/mjsunit/regexp-tier-up.js b/deps/v8/test/mjsunit/regexp-tier-up.js
index e55e87f593..6269128f53 100644
--- a/deps/v8/test/mjsunit/regexp-tier-up.js
+++ b/deps/v8/test/mjsunit/regexp-tier-up.js
@@ -2,9 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Tier-up behavior differs between slow and fast paths in functional
-// RegExp.prototype.replace.
-// Flags: --regexp-tier-up --allow-natives-syntax --no-force-slow-path
+// Tier-up behavior differs between slow and fast paths in
+// RegExp.prototype.replace with a function as an argument.
+// Flags: --regexp-tier-up --regexp-tier-up-ticks=1
+// Flags: --allow-natives-syntax --no-force-slow-path --no-regexp-interpret-all
const kLatin1 = true;
const kUnicode = false;
@@ -90,3 +91,15 @@ assertTrue(!%RegexpHasBytecode(re_g, kLatin1) &&
%RegexpHasNativeCode(re_g, kLatin1));
assertTrue(!%RegexpHasBytecode(re_g, kUnicode) &&
!%RegexpHasNativeCode(re_g, kUnicode));
+
+// Testing eager tier-up for very long strings.
+let dna = "ATCG".repeat(251);
+
+re_g = />.*\n|\n/;
+CheckRegexpNotYetCompiled(re_g);
+
+dna = dna.replace(re_g,"");
+assertTrue(!%RegexpHasBytecode(re_g, kLatin1) &&
+ %RegexpHasNativeCode(re_g, kLatin1));
+assertTrue(!%RegexpHasBytecode(re_g, kUnicode) &&
+ !%RegexpHasNativeCode(re_g, kUnicode));
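One note on the new case: the subject is 'ATCG'.repeat(251), i.e. 4 * 251 = 1004 characters, which is presumably chosen to sit just past the engine's subject-length threshold for eager tier-up, so native code is expected after a single replace rather than after the usual tick count.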
diff --git a/deps/v8/test/mjsunit/regress/regress-1002827.js b/deps/v8/test/mjsunit/regress/regress-1002827.js
new file mode 100644
index 0000000000..2acaf73deb
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1002827.js
@@ -0,0 +1,13 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --expose-gc
+
+var PI = new Proxy(this, {
+ get() {
+ PI();
+ }
+});
+
+assertThrows(() => new gc(PI, {}), TypeError);
diff --git a/deps/v8/test/mjsunit/regress/regress-1003730.js b/deps/v8/test/mjsunit/regress/regress-1003730.js
new file mode 100644
index 0000000000..e20a4e4a44
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1003730.js
@@ -0,0 +1,25 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --allow-natives-syntax --concurrent-inlining
+
+function bar(error) {
+ try {
+ throw "didn't throw TypeError";
+ } catch (err) {
+ error instanceof error, "didn't throw " + error.prototype.name;
+ }
+}
+function foo(param) {
+ bar(TypeError);
+}
+try {
+ bar();
+} catch (e) {}
+%PrepareFunctionForOptimization(foo);
+try {
+ foo();
+} catch (e) {}
+%OptimizeFunctionOnNextCall(foo);
+foo();
diff --git a/deps/v8/test/mjsunit/regress/regress-1003919.js b/deps/v8/test/mjsunit/regress/regress-1003919.js
new file mode 100644
index 0000000000..def45eeca4
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1003919.js
@@ -0,0 +1,20 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Define an object with a getter and a proxy as its prototype.
+var obj = {foo: 'bar'};
+Object.defineProperty(obj, 'foo', {
+ get: function () {
+ }
+});
+obj.__proto__ = new Proxy([], {});
+
+// Get key from a function to avoid the property access turning into a
+// named property access.
+function getKey() {
+  return 'values';
+}
+
+// Keyed access to update obj's values property.
+obj[getKey()] = 1;
diff --git a/deps/v8/test/mjsunit/regress/regress-1004912.js b/deps/v8/test/mjsunit/regress/regress-1004912.js
new file mode 100644
index 0000000000..baa31db138
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1004912.js
@@ -0,0 +1,12 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var key = {
+ toString() {
+ return Symbol();
+ }
+};
+
+var obj = {};
+obj[key];
diff --git a/deps/v8/test/mjsunit/regress/regress-1005400.js b/deps/v8/test/mjsunit/regress/regress-1005400.js
new file mode 100644
index 0000000000..77234235fe
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1005400.js
@@ -0,0 +1,23 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function foo(a, key) {
+ a[key];
+}
+
+let obj = {};
+let count = 0;
+
+var key_obj = {
+ toString: function() {
+ count++;
+ // Force string to be internalized during keyed lookup.
+ return 'foo' + count;
+ }
+};
+
+foo(obj, key_obj);
+
+// We should only call toString once.
+assertEquals(count, 1);
diff --git a/deps/v8/test/mjsunit/regress/regress-1006629.js b/deps/v8/test/mjsunit/regress/regress-1006629.js
new file mode 100644
index 0000000000..bd307fa228
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1006629.js
@@ -0,0 +1,10 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+const workerScript = `
+ onmessage = function() {
+ };`;
+const worker = new Worker(workerScript, {type: 'string'});
+const i32a = new Int32Array( new SharedArrayBuffer() );
+worker.postMessage([i32a.buffer]);
diff --git a/deps/v8/test/mjsunit/regress/regress-1006640.js b/deps/v8/test/mjsunit/regress/regress-1006640.js
new file mode 100644
index 0000000000..597b42057d
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1006640.js
@@ -0,0 +1,20 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-gc
+
+function main() {
+ const v2 = [1337,1337,1337,1337,1337];
+ function v9() {
+ const v15 = {get:RegExp};
+ Object.defineProperty(v2,501,v15);
+ const v18 = RegExp();
+ const v19 = 1337 instanceof v18;
+ }
+ const v30 = {defineProperty:Function,get:v9,getPrototypeOf:Object};
+ const v32 = new Proxy(ArrayBuffer,v30);
+ const v34 = gc(v32);
+}
+
+assertThrows(() => main(), TypeError);
diff --git a/deps/v8/test/mjsunit/regress/regress-1006670.js b/deps/v8/test/mjsunit/regress/regress-1006670.js
new file mode 100644
index 0000000000..4d1408b3d1
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1006670.js
@@ -0,0 +1,5 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+assertThrows(() => /(a?;?){4000000}/.exec("a"), RangeError);
diff --git a/deps/v8/test/mjsunit/regress/regress-1011980.js b/deps/v8/test/mjsunit/regress/regress-1011980.js
new file mode 100644
index 0000000000..89e4fed159
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1011980.js
@@ -0,0 +1,22 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --allow-natives-syntax
+
+let hex_b = 0x0b;
+let hex_d = 0x0d;
+let hex_20 = 0x20;
+let hex_52 = 0x52;
+let hex_fe = 0xfe;
+
+function f(a) {
+ let unused = [ a / 8, ...[ ...[ ...[], a / 8, ...[ 7, hex_fe, a, 0, 0, hex_20,
+ 6, hex_52, hex_d, 0, hex_b], 0, hex_b], hex_b]];
+}
+
+%PrepareFunctionForOptimization(f)
+f(64)
+f(64);
+%OptimizeFunctionOnNextCall(f);
+f(64);
diff --git a/deps/v8/test/mjsunit/regress/regress-752764.js b/deps/v8/test/mjsunit/regress/regress-752764.js
index 30ab7b2a6d..106d9edd87 100644
--- a/deps/v8/test/mjsunit/regress/regress-752764.js
+++ b/deps/v8/test/mjsunit/regress/regress-752764.js
@@ -3,6 +3,8 @@
// found in the LICENSE file.
// Flags: --allow-natives-syntax --nostress-incremental-marking
+// Stress-testing this test is very slow and provides no useful coverage.
+// Flags: --nostress-opt --noalways-opt
// This test uses a lot of memory and fails with flaky OOM when run
// with --stress-incremental-marking on TSAN.
diff --git a/deps/v8/test/mjsunit/regress/regress-779407.js b/deps/v8/test/mjsunit/regress/regress-779407.js
index 140f7bdd74..71e57df9a7 100644
--- a/deps/v8/test/mjsunit/regress/regress-779407.js
+++ b/deps/v8/test/mjsunit/regress/regress-779407.js
@@ -10,4 +10,4 @@ for (var i = 0; i < 17; i++) {
} catch (e) {
}
}
-s.replace(/[a]/g);
+s.replace(/a/g);
diff --git a/deps/v8/test/mjsunit/regress/regress-9165.js b/deps/v8/test/mjsunit/regress/regress-9165.js
index 1de6e9db2a..1709b488fd 100644
--- a/deps/v8/test/mjsunit/regress/regress-9165.js
+++ b/deps/v8/test/mjsunit/regress/regress-9165.js
@@ -14,12 +14,12 @@ let kSig_r_i = makeSig([kWasmI32], [kWasmAnyRef]);
builder.addFunction("merge", kSig_r_i)
.addLocals({anyref_count: 1, anyfunc_count: 1})
.addBody([
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
kExprI32Eqz,
kExprIf, kWasmAnyRef,
- kExprGetLocal, 1,
+ kExprLocalGet, 1,
kExprElse,
- kExprGetLocal, 2,
+ kExprLocalGet, 2,
kExprEnd,
]).exportFunc();
let instance = builder.instantiate();
@@ -33,12 +33,12 @@ let kSig_r_i = makeSig([kWasmI32], [kWasmAnyRef]);
builder.addFunction("merge", kSig_r_i)
.addLocals({anyfunc_count: 1})
.addBody([
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
kExprI32Eqz,
kExprIf, kWasmAnyRef,
kExprRefNull,
kExprElse,
- kExprGetLocal, 1,
+ kExprLocalGet, 1,
kExprEnd,
]).exportFunc();
let instance = builder.instantiate();
diff --git a/deps/v8/test/mjsunit/regress/regress-9832.js b/deps/v8/test/mjsunit/regress/regress-9832.js
new file mode 100644
index 0000000000..41a8c38f0d
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-9832.js
@@ -0,0 +1,35 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --experimental-wasm-eh
+
+load("test/mjsunit/wasm/wasm-module-builder.js");
+
+(function TestRegress9832() {
+ let builder = new WasmModuleBuilder();
+ let f = builder.addFunction("f", kSig_i_i)
+ .addBody([
+ kExprLocalGet, 0,
+ kExprLocalGet, 0,
+ kExprI32Add,
+ ]).exportFunc();
+ builder.addFunction("main", kSig_i_i)
+ .addLocals({except_count: 1})
+ .addBody([
+ kExprTry, kWasmStmt,
+ kExprLocalGet, 0,
+ kExprCallFunction, f.index,
+ kExprCallFunction, f.index,
+ kExprLocalSet, 0,
+ kExprCatch,
+ kExprDrop,
+ kExprLocalGet, 0,
+ kExprCallFunction, f.index,
+ kExprLocalSet, 0,
+ kExprEnd,
+ kExprLocalGet, 0,
+ ]).exportFunc();
+ let instance = builder.instantiate();
+ assertEquals(92, instance.exports.main(23));
+})();
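The expected value can be checked by hand: f computes v + v, and the non-throwing try path stores f(f(23)) back into local 0 before returning it, i.e. 2 * (2 * 23) = 92. The same control flow in plain JS, for reference:

    function f(v) { return v + v; }
    function main(x) {
      try { x = f(f(x)); } catch (e) { x = f(x); }
      return x;
    }
    assertEquals(92, main(23));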
diff --git a/deps/v8/test/mjsunit/regress/regress-9894.js b/deps/v8/test/mjsunit/regress/regress-9894.js
new file mode 100644
index 0000000000..0a7bf5d456
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-9894.js
@@ -0,0 +1,48 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function frozen() {
+ const ary = [1.1]
+ Object.defineProperty(ary, 0, {get:run_it} );
+
+ // v8::internal::Runtime_ArrayIncludes_Slow.
+ ary.includes();
+
+ function run_it(el) {
+ ary.length = 0;
+ ary[0] = 1.1;
+ Object.freeze(ary);
+ return 2.2;
+ }
+})();
+
+(function seal() {
+ const ary = [1.1]
+ Object.defineProperty(ary, 0, {get:run_it} );
+
+ // v8::internal::Runtime_ArrayIncludes_Slow.
+ ary.includes();
+
+ function run_it(el) {
+ ary.length = 0;
+ ary[0] = 1.1;
+ Object.seal(ary);
+ return 2.2;
+ }
+})();
+
+(function preventExtensions() {
+ const ary = [1.1]
+ Object.defineProperty(ary, 0, {get:run_it} );
+
+ // v8::internal::Runtime_ArrayIncludes_Slow.
+ ary.includes();
+
+ function run_it(el) {
+ ary.length = 0;
+ ary[0] = 1.1;
+ Object.preventExtensions(ary);
+ return 2.2;
+ }
+})();
diff --git a/deps/v8/test/mjsunit/regress/regress-996161.js b/deps/v8/test/mjsunit/regress/regress-996161.js
new file mode 100644
index 0000000000..dada3f47bd
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-996161.js
@@ -0,0 +1,43 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function checkOwnProperties(v, count) {
+ var properties = Object.getOwnPropertyNames(v);
+ assertEquals(properties.length, count);
+}
+
+
+function testStoreNoFeedback() {
+ arr = new Int32Array(10);
+ function f(a) { a["-1"] = 15; }
+
+ for (var i = 0; i < 3; i++) {
+ arr.__defineGetter__("x", function() { });
+ checkOwnProperties(arr, 11);
+ f(arr);
+ }
+}
+testStoreNoFeedback();
+
+function testStoreGeneric() {
+ arr = new Int32Array(10);
+ var index = "-1";
+ function f1(a) { a[index] = 15; }
+ %EnsureFeedbackVectorForFunction(f1);
+
+ // Make a[index] in f1 megamorphic
+ f1({a: 1});
+ f1({b: 1});
+ f1({c: 1});
+ f1({d: 1});
+
+ for (var i = 0; i < 3; i++) {
+ arr.__defineGetter__("x", function() { });
+ checkOwnProperties(arr, 11);
+ f1(arr);
+ }
+}
+testStoreGeneric();
diff --git a/deps/v8/test/mjsunit/regress/regress-997485.js b/deps/v8/test/mjsunit/regress/regress-997485.js
new file mode 100644
index 0000000000..bcc1664222
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-997485.js
@@ -0,0 +1,127 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+(function doubleToTaggedWithTaggedValueStoresCorrectly() {
+
+ function setX_Double(o) { o.x = 4.2; }
+
+ function foo() {
+ // o.x starts off as Double
+ const o = { x: 0.1 };
+
+ // Write to it a few times with setX_Double, to make sure setX_Double has
+ // Double feedback.
+ setX_Double(o);
+ setX_Double(o);
+
+ // Transition o.x to Tagged.
+ o.x = {};
+
+ // setX_Double will still have Double feedback, so make sure it works with
+ // the new Tagged representation o.x.
+ setX_Double(o);
+
+ assertEquals(o.x, 4.2);
+ }
+
+ %EnsureFeedbackVectorForFunction(setX_Double);
+ foo();
+
+})();
+
+(function doubleToTaggedWithDoubleValueDoesNotMutate() {
+
+ function setX_Double(o) { o.x = 4.2; }
+
+ function foo() {
+ // o.x starts off as Double
+ const o = { x: 0.1 };
+
+ // Write to it a few times with setX_Double, to make sure setX_Double has
+ // Double feedback.
+ setX_Double(o);
+ setX_Double(o);
+
+ // Transition o.x to Tagged.
+ o.x = {};
+
+ // Write the HeapNumber val to o.x.
+ const val = 1.25;
+ o.x = val;
+
+ // setX_Double will still have Double feedback, which expects to be able to
+ // mutate o.x's HeapNumber, so make sure it does not mutate val.
+ setX_Double(o);
+
+ assertEquals(o.x, 4.2);
+ assertNotEquals(val, 4.2);
+ }
+
+ %EnsureFeedbackVectorForFunction(setX_Double);
+ foo();
+
+})();
+
+(function doubleToTaggedWithTaggedValueStoresSmiCorrectly() {
+
+ function setX_Smi(o) { o.x = 42; }
+
+ function foo() {
+ // o.x starts off as Double
+ const o = { x: 0.1 };
+
+ // Write to it a few times with setX_Smi, to make sure setX_Smi has
+ // Double feedback.
+ setX_Smi(o);
+ setX_Smi(o);
+
+ // Transition o.x to Tagged.
+ o.x = {};
+
+ // setX_Smi will still have Double feedback, so make sure it works with
+ // the new Tagged representation o.x.
+ setX_Smi(o);
+
+ assertEquals(o.x, 42);
+ }
+
+ %EnsureFeedbackVectorForFunction(setX_Smi);
+ foo();
+
+})();
+
+(function doubleToTaggedWithSmiValueDoesNotMutate() {
+
+ function setX_Smi(o) { o.x = 42; }
+
+ function foo() {
+ // o.x starts off as Double
+ const o = { x: 0.1 };
+
+ // Write to it a few times with setX_Smi, to make sure setX_Smi has
+ // Double feedback.
+ setX_Smi(o);
+ setX_Smi(o);
+
+ // Transition o.x to Tagged.
+ o.x = {};
+
+ // Write the HeapNumber val to o.x.
+ const val = 1.25;
+ o.x = val;
+
+ // setX_Smi will still have Double feedback, which expects to be able to
+ // mutate o.x's HeapNumber, so make sure it does not mutate val.
+ setX_Smi(o);
+
+ assertEquals(o.x, 42);
+ assertNotEquals(val, 42);
+ }
+
+ %EnsureFeedbackVectorForFunction(setX_Smi);
+ foo();
+
+})();
diff --git a/deps/v8/test/mjsunit/regress/regress-997989.js b/deps/v8/test/mjsunit/regress/regress-997989.js
new file mode 100644
index 0000000000..f049a31724
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-997989.js
@@ -0,0 +1,27 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --allow-natives-syntax
+
+// A function with a for-in loop, that will be optimized.
+function foo(o) {
+ for (var i in o) {
+ return o[i];
+ }
+}
+
+var o = { x: 0.5 };
+
+// Warm up foo with Double values in the enum cache.
+%PrepareFunctionForOptimization(foo);
+assertEquals(foo(o), 0.5);
+assertEquals(foo(o), 0.5);
+%OptimizeFunctionOnNextCall(foo);
+assertEquals(foo(o), 0.5);
+
+// Transition the double field to a tagged field
+o.x = "abc";
+
+// Make sure that the optimized code correctly loads the tagged field.
+assertEquals(foo(o), "abc");
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1002628.js b/deps/v8/test/mjsunit/regress/regress-crbug-1002628.js
new file mode 100644
index 0000000000..8be7e8687d
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1002628.js
@@ -0,0 +1,22 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --always-opt
+
+"use strict";
+var __v_0 = {};
+try {
+ __v_0 = this;
+ Object.freeze(__v_0);
+}
+catch (e) {
+}
+
+function f() {
+ x = { [Symbol.toPrimitive]: () => FAIL };
+}
+try {
+ f()
+} catch (e) { }
+assertThrows(() => f(), ReferenceError);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1003403.js b/deps/v8/test/mjsunit/regress/regress-crbug-1003403.js
new file mode 100644
index 0000000000..877b9c8c73
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1003403.js
@@ -0,0 +1,10 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --enable-lazy-source-positions --stress-lazy-source-positions
+// Flags: --no-lazy
+({ x: b = 0 }) => {
+ try { b; } catch (e) {}
+ function a() { b }
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1003732.js b/deps/v8/test/mjsunit/regress/regress-crbug-1003732.js
new file mode 100644
index 0000000000..5e2bbe7732
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1003732.js
@@ -0,0 +1,25 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function f_1() {
+ var v = new Array();
+ v[0] = 10;
+ return v;
+}
+
+function test() {
+ var setter_called = false;
+ // Turn array to NumberDictionary
+ Array.prototype[123456789] = 42;
+ assertEquals(f_1().length, 1);
+
+ // Reset to empty_slow_dictionary
+ Array.prototype.length = 0;
+
+ // This should reset the prototype validity cell.
+ Array.prototype.__defineSetter__("0", function() {setter_called = true});
+ f_1();
+ assertEquals(setter_called, true);
+}
+test();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1004037.js b/deps/v8/test/mjsunit/regress/regress-crbug-1004037.js
new file mode 100644
index 0000000000..cf7ba70458
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1004037.js
@@ -0,0 +1,23 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --always-opt
+
+__v_1 = {};
+__v_1.__defineGetter__('x', function () { });
+__proto__ = __v_1;
+function __f_4() {
+ __v_1 = {};
+}
+function __f_3() {
+ 'use strict';
+ x = 42;
+}
+__f_4()
+try {
+ __f_3();
+} catch (e) { }
+
+__proto__ = __v_1;
+assertThrows(() => __f_3(), ReferenceError);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1004061.js b/deps/v8/test/mjsunit/regress/regress-crbug-1004061.js
new file mode 100644
index 0000000000..8b36d4d609
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1004061.js
@@ -0,0 +1,55 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+(function testPackedDoublesIncludes() {
+ arr = [1.5, 2.5];
+ arr.length = 0;
+ function f() {
+ return arr.includes(1);
+ };
+ %PrepareFunctionForOptimization(f);
+ assertEquals(f(), false);
+ %OptimizeFunctionOnNextCall(f);
+ assertEquals(f(), false);
+})();
+
+(function testHoleyDoublesIncludes() {
+ arr = [1.1];
+ arr[3]= 1.5;
+ arr.length = 0;
+ function f() {
+ return arr.includes(1);
+ };
+ %PrepareFunctionForOptimization(f);
+ assertEquals(f(), false);
+ %OptimizeFunctionOnNextCall(f);
+ assertEquals(f(), false);
+})();
+
+(function testPackedDoublesIndexOf() {
+ arr = [1.5, 2.5];
+ arr.length = 0;
+ function f() {
+ return arr.indexOf(1);
+ };
+ %PrepareFunctionForOptimization(f);
+ assertEquals(f(), -1);
+ %OptimizeFunctionOnNextCall(f);
+ assertEquals(f(), -1);
+})();
+
+(function testHoleyDoublesIndexOf() {
+ arr = [1.1];
+ arr[3]= 1.5;
+ arr.length = 0;
+ function f() {
+ return arr.indexOf(1);
+ };
+ %PrepareFunctionForOptimization(f);
+ assertEquals(f(), -1);
+ %OptimizeFunctionOnNextCall(f);
+ assertEquals(f(), -1);
+})();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1006592.js b/deps/v8/test/mjsunit/regress/regress-crbug-1006592.js
new file mode 100644
index 0000000000..c051d0861a
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1006592.js
@@ -0,0 +1,21 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function Module(stdlib) {
+ "use asm";
+ var fround = stdlib.Math.fround;
+ function f(a, b) {
+ a = +a;
+ b = +b;
+ return fround(a, b);
+ }
+ return { f: f };
+}
+
+var m = Module(this);
+assertEquals(23, m.f(23));
+assertEquals(42, m.f(42, 65));
+assertFalse(%IsAsmWasmCode(Module));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1008632.js b/deps/v8/test/mjsunit/regress/regress-crbug-1008632.js
new file mode 100644
index 0000000000..8b46baefa1
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1008632.js
@@ -0,0 +1,24 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --no-lazy-feedback-allocation
+
+var __v_9690 = function () {};
+try {
+ (function () {
+ __f_1653();
+ })()
+} catch (__v_9763) {
+}
+function __f_1653(__v_9774, __v_9775) {
+ try {
+ } catch (e) {}
+ __v_9774[__v_9775 + 4] = 2;
+}
+(function () {
+ %PrepareFunctionForOptimization(__f_1653);
+ __f_1653(__v_9690, true);
+ %OptimizeFunctionOnNextCall(__f_1653);
+ assertThrows(() => __f_1653(), TypeError);
+})();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1012301-1.js b/deps/v8/test/mjsunit/regress/regress-crbug-1012301-1.js
new file mode 100644
index 0000000000..9c2f87c4fe
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1012301-1.js
@@ -0,0 +1,27 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function get() {
+ // Update the descriptor array now shared between the Foo map and the
+ // (Foo + c) map.
+ o1.c = 10;
+ // Change the type of the field on the new descriptor array in-place to
+ // Tagged. If Object.assign has a cached descriptor array, then it will point
+ // to the old Foo map's descriptors, which still have .b as Double.
+ o2.b = "string";
+ return 1;
+}
+
+function Foo() {
+ Object.defineProperty(this, "a", {get, enumerable: true});
+ // Initialise Foo.b to have Double representation.
+ this.b = 1.5;
+}
+
+var o1 = new Foo();
+var o2 = new Foo();
+var target = {};
+Object.assign(target, o2);
+// Make sure that target has the right representation after assignment.
+assertEquals(target.b, "string");
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1012301.js b/deps/v8/test/mjsunit/regress/regress-crbug-1012301.js
new file mode 100644
index 0000000000..dc2ef92a6f
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1012301.js
@@ -0,0 +1,23 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --allow-natives-syntax
+
+function f(o) {
+ // The spread after the CloneObject IC shouldn't crash when trying to write a
+ // double value to a field created by CloneObject.
+ return {...o, ...{a:1.4}};
+}
+
+%EnsureFeedbackVectorForFunction(f);
+
+var o = {};
+// Train the CloneObject IC with a Double field.
+o.a = 1.5;
+f(o);
+f(o);
+f(o);
+// Change the source map to have a Tagged field.
+o.a = undefined;
+f(o);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1015567.js b/deps/v8/test/mjsunit/regress/regress-crbug-1015567.js
new file mode 100644
index 0000000000..520d6c539b
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1015567.js
@@ -0,0 +1,5 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+assertThrows('a ( { b() {} } [ [ 1 , c.d = 1 ] = 1.1 ] )', SyntaxError);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1015945.js b/deps/v8/test/mjsunit/regress/regress-crbug-1015945.js
new file mode 100644
index 0000000000..a43736e7b5
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1015945.js
@@ -0,0 +1,20 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --expose-async-hooks
+
+async function* foo() {
+ await 1;
+ throw new Error();
+}
+
+(async () => {
+ for await (const x of foo()) { }
+})();
+
+async_hooks.createHook({
+ promiseResolve() {
+ throw new Error();
+ }
+}).enable()
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-729597.js b/deps/v8/test/mjsunit/regress/regress-crbug-729597.js
index b4c54e8e88..7656bc048a 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-729597.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-729597.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --verify-heap
+// Flags: --verify-heap --expose-gc
function __f_3(f) {
arguments.__defineGetter__('length', f);
@@ -13,6 +13,7 @@ function __f_4() { return "boom"; }
__v_4 = [];
__v_13 = "";
-for (var i = 0; i < 12800; ++i) {
+for (var i = 0; i < 128; ++i) {
__v_13 += __v_4.__proto__ = __f_3(__f_4);
}
+gc();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-808192.js b/deps/v8/test/mjsunit/regress/regress-crbug-808192.js
index f57d5fc3a6..af92ce7f35 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-808192.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-808192.js
@@ -3,6 +3,8 @@
// found in the LICENSE file.
// Flags: --expose-gc
+// Stress-testing this test is very slow and doesn't provide useful coverage.
+// Flags: --nostress-opt --noalways-opt
const f = eval(`(function f(i) {
if (i == 0) {
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-941743.js b/deps/v8/test/mjsunit/regress/regress-crbug-941743.js
index eaac4c4c4d..81416b8725 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-941743.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-941743.js
@@ -3,6 +3,8 @@
// found in the LICENSE file.
// Flags: --allow-natives-syntax --noenable-slow-asserts
+// This test triggers optimization manually; no stress mode is necessary.
+// Flags: --nostress-opt --noalways-opt
// This call ensures that TurboFan won't inline array constructors.
Array(2 ** 30);
diff --git a/deps/v8/test/mjsunit/regress/regress-v8-9758.js b/deps/v8/test/mjsunit/regress/regress-v8-9758.js
new file mode 100644
index 0000000000..7f9eab3339
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-v8-9758.js
@@ -0,0 +1,9 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --throws
+
+// Can't put this in a try-catch, as that changes the parsing and the crash
+// no longer reproduces.
+((a = ((b = a) => {})()) => 1)();
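The throw itself is a temporal-dead-zone ReferenceError: the inner arrow's default `b = a` reads `a` while `a`'s own default initializer is still being evaluated. A hedged restatement with the error made explicit (this wrapped form no longer reproduces the original crash, per the comment above; assertThrows is the mjsunit helper):

assertThrows(() => ((a = ((b = a) => {})()) => 1)(), ReferenceError);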
diff --git a/deps/v8/test/mjsunit/regress/regress-v8-9825.mjs b/deps/v8/test/mjsunit/regress/regress-v8-9825.mjs
new file mode 100644
index 0000000000..f8d0708848
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-v8-9825.mjs
@@ -0,0 +1,11 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+async function foo() {
+ for (;;await[]) {
+ break;
+ }
+}
+
+foo();
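The construct under test is `await` in the increment slot of a plain for statement. An illustrative sketch that also observes the semantics (assuming the mjsunit assertEquals helper is available to modules):

async function counter() {
  let i = 0;
  // The increment runs after each body execution, awaiting once per pass.
  for (; i < 3; await Promise.resolve(++i)) {}
  return i;
}
counter().then(v => assertEquals(3, v));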
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-02256.js b/deps/v8/test/mjsunit/regress/wasm/regress-02256.js
index 199626b3c3..63da0cc10b 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-02256.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-02256.js
@@ -273,19 +273,19 @@ try {
function __f_16() {
var __v_1 = new WasmModuleBuilder();
__v_1.addFunction("grow_memory", kSig_i_i)
- .addBody([kExprGetLocal, 0, kExprMemoryGrow])
+ .addBody([kExprLocalGet, 0, kExprMemoryGrow])
.exportFunc();
__v_1.addFunction("load", kSig_i_i)
- .addBody([kExprGetLocal, 0, kExprI32LoadMem, 0, 0])
+ .addBody([kExprLocalGet, 0, kExprI32LoadMem, 0, 0])
.exportFunc();
__v_1.addFunction("store", kSig_i_ii)
- .addBody([kExprGetLocal, 0, kExprGetLocal, 1, kExprI32StoreMem, 0, 0, kExprGetLocal, 1])
+ .addBody([kExprLocalGet, 0, kExprLocalGet, 1, kExprI32StoreMem, 0, 0, kExprLocalGet, 1])
.exportFunc();
__v_1.addFunction("load16", kSig_i_i)
- .addBody([kExprGetLocal, 0, kExprI32LoadMem16U, 0, 0])
+ .addBody([kExprLocalGet, 0, kExprI32LoadMem16U, 0, 0])
.exportFunc();
__v_1.addFunction("store16", kSig_i_ii)
- .addBody([kExprGetLocal, 0, kExprGetLocal, 1, kExprI32StoreMem16, 0, 0, kExprGetLocal, 1])
+ .addBody([kExprLocalGet, 0, kExprLocalGet, 1, kExprI32StoreMem16, 0, 0, kExprLocalGet, 1])
.exportFunc();
__v_1.__p_1551105852 = __v_1[getRandomProperty(__v_1, 1551105852)];
__v_1.__defineGetter__(getRandomProperty(__v_1, 348910887), function() {
@@ -294,10 +294,10 @@ function __f_16() {
return __v_1.__p_1551105852;
});
__v_1.addFunction("load8", kSig_i_i)
- .addBody([kExprGetLocal, 0, kExprI32LoadMem8U, 0, 0])
+ .addBody([kExprLocalGet, 0, kExprI32LoadMem8U, 0, 0])
.exportFunc();
__v_1.addFunction("store8", kSig_i_ii)
- .addBody([kExprGetLocal, 0, kExprGetLocal, 1, kExprI32StoreMem8, 0, 0, kExprGetLocal, 1])
+ .addBody([kExprLocalGet, 0, kExprLocalGet, 1, kExprI32StoreMem8, 0, 0, kExprLocalGet, 1])
.exportFunc();
return __v_1;
}
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1014798.js b/deps/v8/test/mjsunit/regress/wasm/regress-1014798.js
new file mode 100644
index 0000000000..98f0314b99
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1014798.js
@@ -0,0 +1,20 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+const builder = new WasmModuleBuilder();
+builder.addFunction('main', kSig_i_iii)
+ .addLocals({f32_count: 4})
+ .addLocals({i64_count: 1})
+ .addLocals({f32_count: 2})
+ .addBodyWithEnd([
+ kExprI64Const, 0,
+ kExprLocalGet, 3,
+ kExprI64SConvertF32,
+ kExprI64Ne,
+ kExprEnd, // @17
+ ]).exportFunc();
+const instance = builder.instantiate();
+assertEquals(0, instance.exports.main(1, 2, 3));
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1016515.js b/deps/v8/test/mjsunit/regress/wasm/regress-1016515.js
new file mode 100644
index 0000000000..f56579912d
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1016515.js
@@ -0,0 +1,20 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --wasm-lazy-compilation
+
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+var builder = new WasmModuleBuilder();
+var func = builder.addFunction('func', kSig_i_v).addBody([kExprI32Const, 1]);
+var body = [];
+for (let i = 0; i < 200; ++i) {
+ body.push(kExprCallFunction, func.index);
+}
+for (let i = 1; i < 200; ++i) {
+ body.push(kExprI32Add);
+}
+builder.addFunction('test', kSig_i_v).addBody(body).exportFunc();
+var instance = builder.instantiate();
+instance.exports.test();
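Each of the 200 calls pushes the constant 1, and the 199 i32.add opcodes fold them into a single value, so the export should yield 200. A sketch of the stronger check against the instance built above:

assertEquals(200, instance.exports.test());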
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-648079.js b/deps/v8/test/mjsunit/regress/wasm/regress-648079.js
index fbb5414480..cf4bf1c698 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-648079.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-648079.js
@@ -8,7 +8,6 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
// Non-standard opcodes.
let kSig_s_v = makeSig([], [kWasmS128]);
-let kExprS128LoadMem = 0xc0;
(function() {
"use asm";
@@ -109,7 +108,7 @@ builder.addFunction("regression_648079", kSig_s_v)
kExprF32Min,
kExprI64GtU,
kExprBlock, 01, // @107 i32
- kExprTeeLocal,
+ kExprLocalTee,
kExprBlock, 01, // @111 i32
kExprBlock, 01, // @113 i32
kExprBlock, 01, // @115 i32
@@ -169,7 +168,7 @@ builder.addFunction("regression_648079", kSig_s_v)
kExprF64Sub,
kExprI32Const,
kExprUnreachable,
- kExprGetLocal,
+ kExprLocalGet,
kExprI64LoadMem32U,
kExprUnreachable,
kExprI64RemU,
@@ -273,7 +272,7 @@ builder.addFunction("regression_648079", kSig_s_v)
kExprF64Sub,
kExprI32Const,
kExprUnreachable,
- kExprGetLocal,
+ kExprLocalGet,
kExprI64LoadMem32U,
kExprUnreachable,
kExprUnreachable,
@@ -300,7 +299,7 @@ builder.addFunction("regression_648079", kSig_s_v)
kExprF64Sub,
kExprI32Const,
kExprUnreachable,
- kExprGetLocal,
+ kExprLocalGet,
kExprI64LoadMem32U,
kExprF64Min,
kExprF64Min,
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-689450.js b/deps/v8/test/mjsunit/regress/wasm/regress-689450.js
index bcd25387b4..a629766bce 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-689450.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-689450.js
@@ -9,7 +9,7 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
builder.addMemory(16, 32, false);
builder.addFunction('test', kSig_i_i)
.addBodyWithEnd([
- kExprGetLocal, 0x00,
+ kExprLocalGet, 0x00,
kExprI32Const, 0x29,
kExprI32Shl,
kExprI32Const, 0x18,
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-702460.js b/deps/v8/test/mjsunit/regress/wasm/regress-702460.js
index 21a84bcf28..3f1e11e393 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-702460.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-702460.js
@@ -4,10 +4,6 @@
load("test/mjsunit/wasm/wasm-module-builder.js");
-// Non-standard opcodes.
-let kSig_s_v = makeSig([], [kWasmS128]);
-let kExprS128LoadMem = 0xc0;
-
(function() {
"use asm";
var builder = new WasmModuleBuilder();
@@ -20,7 +16,7 @@ let kExprS128LoadMem = 0xc0;
kExprMemoryGrow, 0x00,
kExprMemoryGrow, 0x00,
kExprMemoryGrow, 0x00,
- kExprSetLocal, 0x00,
+ kExprLocalSet, 0x00,
kExprMemoryGrow, 0x00,
kExprMemoryGrow, 0x00,
kExprMemoryGrow, 0x00,
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-7049.js b/deps/v8/test/mjsunit/regress/wasm/regress-7049.js
index 6d2cd351fb..46dce4a871 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-7049.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-7049.js
@@ -20,7 +20,7 @@ let func1_sig = makeSig(new Array(8).fill(kWasmI32), [kWasmI32]);
let imp = builder1.addImport('q', 'gc', kSig_v_v);
let func1 = builder1.addFunction('func1', func1_sig)
.addBody([
- kExprGetLocal, 0, // -
+ kExprLocalGet, 0, // -
kExprCallFunction, imp
])
.exportFunc();
@@ -31,14 +31,14 @@ let builder2 = new WasmModuleBuilder();
let func1_imp = builder2.addImport('q', 'func1', func1_sig);
let func2 = builder2.addFunction('func2', kSig_i_i)
.addBody([
- kExprGetLocal, 0, // 1
- kExprGetLocal, 0, // 2
- kExprGetLocal, 0, // 3
- kExprGetLocal, 0, // 4
- kExprGetLocal, 0, // 5
- kExprGetLocal, 0, // 6
- kExprGetLocal, 0, // 7
- kExprGetLocal, 0, // 8
+ kExprLocalGet, 0, // 1
+ kExprLocalGet, 0, // 2
+ kExprLocalGet, 0, // 3
+ kExprLocalGet, 0, // 4
+ kExprLocalGet, 0, // 5
+ kExprLocalGet, 0, // 6
+ kExprLocalGet, 0, // 7
+ kExprLocalGet, 0, // 8
kExprCallFunction, func1_imp
])
.exportFunc();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-7353.js b/deps/v8/test/mjsunit/regress/wasm/regress-7353.js
index 81f45fe6a5..671da730fb 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-7353.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-7353.js
@@ -9,17 +9,17 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
const builder = new WasmModuleBuilder();
builder.addMemory(16, 32);
builder.addFunction('grow', kSig_i_i).addBody([
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
kExprMemoryGrow, 0,
]).exportFunc();
builder.addFunction('main', kSig_i_i).addBody([
...wasmI32Const(0x41),
- kExprSetLocal, 0,
+ kExprLocalSet, 0,
// Enter loop, such that values are spilled to the stack.
kExprLoop, kWasmStmt,
kExprEnd,
// Reload value. This must be loaded as 32 bit value.
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
kExprI32LoadMem, 0, 0,
]).exportFunc();
const instance = builder.instantiate();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-7366.js b/deps/v8/test/mjsunit/regress/wasm/regress-7366.js
index b5cae8daa4..b5e4e2e2b6 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-7366.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-7366.js
@@ -7,22 +7,22 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
const builder = new WasmModuleBuilder();
builder.addFunction(undefined, kSig_i_iii).addBody([
// Return the sum of all arguments.
- kExprGetLocal, 0, kExprGetLocal, 1, kExprGetLocal, 2, kExprI32Add, kExprI32Add
+ kExprLocalGet, 0, kExprLocalGet, 1, kExprLocalGet, 2, kExprI32Add, kExprI32Add
]);
const sig = builder.addType(kSig_i_iii);
builder.addFunction(undefined, kSig_i_iii)
.addBody([
...wasmI32Const(1), // i32.const 0x1
- kExprSetLocal, 0, // set_local 0
+ kExprLocalSet, 0, // set_local 0
 ...wasmI32Const(4), // i32.const 0x4
- kExprSetLocal, 1, // set_local 1
+ kExprLocalSet, 1, // set_local 1
 ...wasmI32Const(16), // i32.const 0x10
- kExprSetLocal, 2, // set_local 2
+ kExprLocalSet, 2, // set_local 2
kExprLoop, kWasmStmt, // loop
kExprEnd, // end
- kExprGetLocal, 0, // get_local 0
- kExprGetLocal, 1, // get_local 1
- kExprGetLocal, 2, // get_local 2
+ kExprLocalGet, 0, // get_local 0
+ kExprLocalGet, 1, // get_local 1
+ kExprLocalGet, 2, // get_local 2
kExprI32Const, 0, // i32.const 0 (func index)
kExprCallIndirect, sig, 0, // call indirect
])
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-739768.js b/deps/v8/test/mjsunit/regress/wasm/regress-739768.js
index 5fca49bc0f..0bd73223c9 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-739768.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-739768.js
@@ -11,7 +11,7 @@ builder0.setName('module_0');
let sig_index = builder0.addType(kSig_i_v);
builder0.addFunction('main', kSig_i_i)
.addBody([
- kExprGetLocal, 0, // --
+ kExprLocalGet, 0, // --
kExprCallIndirect, sig_index, kTableZero
]) // --
.exportAs('main');
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-7422.js b/deps/v8/test/mjsunit/regress/wasm/regress-7422.js
index 71e1eb89bd..6bf737857a 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-7422.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-7422.js
@@ -6,18 +6,18 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
var builder = new WasmModuleBuilder();
sig = makeSig([kWasmI32, kWasmI32, kWasmI32, kWasmI32, kWasmI32], [kWasmI32]);
-builder.addFunction(undefined, sig).addBody([kExprGetLocal, 4]);
+builder.addFunction(undefined, sig).addBody([kExprLocalGet, 4]);
builder.addMemory(16, 32);
builder.addFunction('main', sig)
.addBody([
- kExprI32Const, 0, kExprSetLocal, 0,
+ kExprI32Const, 0, kExprLocalSet, 0,
// Compute five arguments to the function call.
kExprI32Const, 0, kExprI32Const, 0, kExprI32Const, 0, kExprI32Const, 0,
- kExprGetLocal, 4, kExprI32Const, 1, kExprI32Add,
+ kExprLocalGet, 4, kExprI32Const, 1, kExprI32Add,
// Now some intermediate computation to force the arguments to be spilled
// to the stack:
- kExprGetLocal, 0, kExprI32Const, 1, kExprI32Add, kExprGetLocal, 1,
- kExprGetLocal, 1, kExprI32Add, kExprI32Add, kExprDrop,
+ kExprLocalGet, 0, kExprI32Const, 1, kExprI32Add, kExprLocalGet, 1,
+ kExprLocalGet, 1, kExprI32Add, kExprI32Add, kExprDrop,
// Now call the function.
kExprCallFunction, 0
])
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-7508.js b/deps/v8/test/mjsunit/regress/wasm/regress-7508.js
index 10ce500a44..1c02060957 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-7508.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-7508.js
@@ -15,6 +15,6 @@ builder.addFunction(undefined, kSig_v_v).addLocals({i64_count: 1}).addBody([
kExprI32Const, 0, // i32.const
kExprEnd, // end
kExprBrIf, 0, // br_if depth=0
- kExprSetLocal, 0, // set_local 0
+ kExprLocalSet, 0, // set_local 0
]);
builder.instantiate();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-752423.js b/deps/v8/test/mjsunit/regress/wasm/regress-752423.js
index 938ecbf252..304dbd955d 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-752423.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-752423.js
@@ -13,7 +13,7 @@ builder.addImportedTable("x", "table", 1, 10000000);
builder.addFunction("main", kSig_i_i)
.addBody([
kExprI32Const, 0,
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
kExprCallIndirect, 0, kTableZero])
.exportAs("main");
let module = new WebAssembly.Module(builder.toBuffer());
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-7565.js b/deps/v8/test/mjsunit/regress/wasm/regress-7565.js
index c9d4e2ca88..3b97fe8615 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-7565.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-7565.js
@@ -9,7 +9,7 @@ sig0 = makeSig([], [kWasmI32]);
builder.addFunction(undefined, sig0).addLocals({i64_count: 1}).addBody([
kExprLoop, kWasmI32, // loop i32
kExprF32Const, 0x00, 0x00, 0x00, 0x00, // f32.const 0 --> f32:0
- kExprGetLocal, 0x00, // get_local 0 --> i64:0
+ kExprLocalGet, 0x00, // get_local 0 --> i64:0
kExprF32SConvertI64, // f32.sconvert/i64 --> f32:0
kExprF32Ge, // f32.ge --> i32:1
kExprEnd, // end
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-7579.js b/deps/v8/test/mjsunit/regress/wasm/regress-7579.js
index 876a76cad9..da774b00f1 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-7579.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-7579.js
@@ -51,7 +51,7 @@ const builder2 = new WasmModuleBuilder();
sig0 = makeSig([], [kWasmI32]);
builder2.addFunction(undefined, sig0).addLocals({i64_count: 1}).addBody([
kExprLoop, kWasmI32, // loop i32
- kExprGetLocal, 0, // get_local 3
+ kExprLocalGet, 0, // get_local 0
kExprF32SConvertI64, // f32.sconvert/i64
kExprI32ReinterpretF32, // i32.reinterpret/f32
kExprEnd // end
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-763697.js b/deps/v8/test/mjsunit/regress/wasm/regress-763697.js
index c831a55fba..5f36d42c8d 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-763697.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-763697.js
@@ -8,7 +8,7 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
let builder = new WasmModuleBuilder();
builder.addFunction("main", kSig_i_i)
- .addBody([kExprGetLocal, 0])
+ .addBody([kExprLocalGet, 0])
.addLocals({s128_count: 1});
assertFalse(WebAssembly.validate(builder.toBuffer()));
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-771243.js b/deps/v8/test/mjsunit/regress/wasm/regress-771243.js
index 81b9e8f2a9..c06adebd76 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-771243.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-771243.js
@@ -25,7 +25,7 @@ function __f_1() {
__v_21 = __f_1(__v_18 = false, __v_25 = kSig_i_i);
__v_21.addFunction('plus_one', kSig_i_i)
.addBody([
- kExprGetLocal, 0, // -
+ kExprLocalGet, 0, // -
kExprCallFunction, __v_29 ])
.exportFunc();
__v_32 =
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-772332.js b/deps/v8/test/mjsunit/regress/wasm/regress-772332.js
index e8547c8175..54676b198e 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-772332.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-772332.js
@@ -19,7 +19,7 @@ function __f_15356(__v_50316, __v_50317) {
}
(function __f_15357() {
let __v_50320 = __f_15356(__v_50350 = false, __v_50351 = kSig_i_i);
- __v_50320.addFunction('plus_one', kSig_i_i).addBody([kExprGetLocal, 0, kExprCallFunction, __v_50315, kExprI32Const, kExprI32Add, kExprReturn]).exportFunc();
+ __v_50320.addFunction('plus_one', kSig_i_i).addBody([kExprLocalGet, 0, kExprCallFunction, __v_50315, kExprI32Const, kExprI32Add, kExprReturn]).exportFunc();
let __v_50321 = __f_15356();
let __v_50324 = __v_50321.instantiate();
let __v_50325 = __v_50320.instantiate({
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-7785.js b/deps/v8/test/mjsunit/regress/wasm/regress-7785.js
index 72638b1685..9f06ae5f10 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-7785.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-7785.js
@@ -24,7 +24,7 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
(function testAnyRefIsNull() {
const builder = new WasmModuleBuilder();
builder.addFunction('main', kSig_i_r)
- .addBody([kExprGetLocal, 0, kExprRefIsNull])
+ .addBody([kExprLocalGet, 0, kExprRefIsNull])
.exportFunc();
var wire_bytes = builder.toBuffer();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-791810.js b/deps/v8/test/mjsunit/regress/wasm/regress-791810.js
index 73b47bdd78..3daeff9e15 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-791810.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-791810.js
@@ -7,7 +7,7 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
const builder = new WasmModuleBuilder();
builder.addFunction('test', kSig_i_i)
.addBody([
- kExprGetLocal, 0x00, // get_local 0
+ kExprLocalGet, 0x00, // get_local 0
kExprBlock, kWasmStmt, // block
kExprBr, 0x00, // br depth=0
kExprEnd, // end
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-793551.js b/deps/v8/test/mjsunit/regress/wasm/regress-793551.js
index 657b2c0013..ac2b34019e 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-793551.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-793551.js
@@ -8,8 +8,8 @@ const builder = new WasmModuleBuilder();
builder.addFunction('test', kSig_i_i)
.addBody([
// body:
- kExprGetLocal, 0, // get_local 0
- kExprGetLocal, 0, // get_local 0
+ kExprLocalGet, 0, // get_local 0
+ kExprLocalGet, 0, // get_local 0
kExprLoop, kWasmStmt, // loop
kExprBr, 0, // br depth=0
kExprEnd, // end
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-801785.js b/deps/v8/test/mjsunit/regress/wasm/regress-801785.js
index 105fd4bc38..7c68a0d593 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-801785.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-801785.js
@@ -10,11 +10,11 @@ const builder = new WasmModuleBuilder();
builder.addMemory(8, 16);
builder.addFunction(undefined, kSig_i_i).addBody([
// wasm to wasm call.
- kExprGetLocal, 0, kExprCallFunction, 0x1
+ kExprLocalGet, 0, kExprCallFunction, 0x1
]);
builder.addFunction(undefined, kSig_i_i).addBody([
// load from <get_local 0> to create trap code.
- kExprGetLocal, 0, kExprI32LoadMem, 0,
+ kExprLocalGet, 0, kExprI32LoadMem, 0,
// unreachable to create a runtime call.
kExprUnreachable
]);
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-802244.js b/deps/v8/test/mjsunit/regress/wasm/regress-802244.js
index aeaf850365..e212ec05d1 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-802244.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-802244.js
@@ -8,10 +8,10 @@ const builder = new WasmModuleBuilder();
builder.addFunction(undefined, kSig_v_iii).addBody([
kExprI32Const, 0x41, // i32.const 0x41
kExprLoop, 0x7c, // loop f64
- kExprGetLocal, 0x00, // get_local 0
- kExprGetLocal, 0x01, // get_local 1
+ kExprLocalGet, 0x00, // get_local 0
+ kExprLocalGet, 0x01, // get_local 1
kExprBrIf, 0x01, // br_if depth=1
- kExprGetLocal, 0x00, // get_local 0
+ kExprLocalGet, 0x00, // get_local 0
kExprI32Rol, // i32.rol
kExprBrIf, 0x00, // br_if depth=0
kExprUnreachable, // unreachable
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-8059.js b/deps/v8/test/mjsunit/regress/wasm/regress-8059.js
index 78ee6bd1d2..4ee9cd3c43 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-8059.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-8059.js
@@ -9,7 +9,7 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
(function TestPostModule() {
let builder = new WasmModuleBuilder();
builder.addFunction("add", kSig_i_ii)
- .addBody([kExprGetLocal, 0, kExprGetLocal, 1, kExprI32Add])
+ .addBody([kExprLocalGet, 0, kExprLocalGet, 1, kExprI32Add])
.exportFunc();
let module = builder.toModule();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-808848.js b/deps/v8/test/mjsunit/regress/wasm/regress-808848.js
index 57920de09d..269489059f 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-808848.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-808848.js
@@ -27,11 +27,11 @@ function varuint32(val) {
let body = [];
for (let i = 0; i < kNumLocals; ++i) {
- body.push(kExprCallFunction, 0, kExprSetLocal, ...varuint32(i));
+ body.push(kExprCallFunction, 0, kExprLocalSet, ...varuint32(i));
}
for (let i = 0; i < kNumLocals; ++i) {
- body.push(kExprGetLocal, ...varuint32(i), kExprCallFunction, 1);
+ body.push(kExprLocalGet, ...varuint32(i), kExprCallFunction, 1);
}
let builder = new WasmModuleBuilder();
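The varuint32 helper named in the hunk header emits unsigned LEB128. As a reference point, a minimal encoder of that shape (a sketch of the standard encoding, not necessarily the test's exact helper):

function leb128u32(val) {
  const bytes = [];
  do {
    let byte = val & 0x7f;        // low seven bits
    val >>>= 7;
    if (val !== 0) byte |= 0x80;  // continuation flag on non-final bytes
    bytes.push(byte);
  } while (val !== 0);
  return bytes;
}
// e.g. leb128u32(624485) yields [0xe5, 0x8e, 0x26].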
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-808980.js b/deps/v8/test/mjsunit/regress/wasm/regress-808980.js
index d78c07f36c..6487a35cd3 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-808980.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-808980.js
@@ -10,7 +10,7 @@ let kTableSize = 3;
var builder = new WasmModuleBuilder();
var sig_index1 = builder.addType(kSig_i_v);
builder.addFunction('main', kSig_i_ii).addBody([
- kExprGetLocal,
+ kExprLocalGet,
0,
kExprCallIndirect,
sig_index1,
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-817380.js b/deps/v8/test/mjsunit/regress/wasm/regress-817380.js
index c7748d8904..23ab2a5c91 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-817380.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-817380.js
@@ -8,7 +8,7 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
const builder1 = new WasmModuleBuilder();
builder1.addFunction('mul', kSig_i_ii)
- .addBody([kExprGetLocal, 0, kExprGetLocal, 1, kExprI32Mul])
+ .addBody([kExprLocalGet, 0, kExprLocalGet, 1, kExprI32Mul])
.exportFunc();
const mul = builder1.instantiate().exports.mul;
const table = new WebAssembly.Table({
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-834619.js b/deps/v8/test/mjsunit/regress/wasm/regress-834619.js
index 1062d5547a..af7043904e 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-834619.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-834619.js
@@ -29,7 +29,7 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
builder.addTable(kWasmAnyFunc, 4);
builder.addFunction("main", kSig_i_i)
.addBody([
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
kExprCallIndirect, 0, kTableZero
])
.exportFunc();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-834624.js b/deps/v8/test/mjsunit/regress/wasm/regress-834624.js
index 45af23cde2..3e3548ed32 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-834624.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-834624.js
@@ -14,7 +14,7 @@ let instance;
let module = new WasmModuleBuilder();
module.addImport('mod', 'func', kSig_v_i);
module.addFunction('main', kSig_v_i)
- .addBody([kExprGetLocal, 0, kExprCallFunction, 0])
+ .addBody([kExprLocalGet, 0, kExprCallFunction, 0])
.exportFunc();
instance = module.instantiate({
mod: {
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-8505.js b/deps/v8/test/mjsunit/regress/wasm/regress-8505.js
index b1fdedfc93..c1becbe454 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-8505.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-8505.js
@@ -171,8 +171,8 @@ function wasmBinop(name, sig) {
builder.addImport('Math', name, sig_index);
builder.addFunction('main', sig_index)
.addBody([
- kExprGetLocal, 0, // --
- kExprGetLocal, 1, // --
+ kExprLocalGet, 0, // --
+ kExprLocalGet, 1, // --
kExprCallFunction, 0
]) // --
.exportAs('main');
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-854011.js b/deps/v8/test/mjsunit/regress/wasm/regress-854011.js
index b0356a873f..00cfe655cb 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-854011.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-854011.js
@@ -9,14 +9,14 @@ builder.addFunction('main', kSig_d_d)
.addBody([
// Call with param 0 (converted to i64), to fill the stack with non-zero
// values.
- kExprGetLocal, 0, kExprI64SConvertF64, // arg 0
- kExprGetLocal, 0, kExprI64SConvertF64, // arg 1
- kExprGetLocal, 0, kExprI64SConvertF64, // arg 2
- kExprGetLocal, 0, kExprI64SConvertF64, // arg 3
- kExprGetLocal, 0, kExprI64SConvertF64, // arg 4
- kExprGetLocal, 0, kExprI64SConvertF64, // arg 5
- kExprGetLocal, 0, kExprI64SConvertF64, // arg 6
- kExprGetLocal, 0, kExprI64SConvertF64, // arg 7
+ kExprLocalGet, 0, kExprI64SConvertF64, // arg 0
+ kExprLocalGet, 0, kExprI64SConvertF64, // arg 1
+ kExprLocalGet, 0, kExprI64SConvertF64, // arg 2
+ kExprLocalGet, 0, kExprI64SConvertF64, // arg 3
+ kExprLocalGet, 0, kExprI64SConvertF64, // arg 4
+ kExprLocalGet, 0, kExprI64SConvertF64, // arg 5
+ kExprLocalGet, 0, kExprI64SConvertF64, // arg 6
+ kExprLocalGet, 0, kExprI64SConvertF64, // arg 7
kExprCallFunction, 1, // call #1
// Now call with 0 constants.
// The bug was that they were written out as i32 values, thus the upper 32
@@ -36,7 +36,7 @@ builder.addFunction('main', kSig_d_d)
.exportFunc();
builder.addFunction(undefined, makeSig(new Array(8).fill(kWasmI64), [kWasmF64]))
.addBody([
- kExprGetLocal, 7, // get_local 7 (last parameter)
+ kExprLocalGet, 7, // get_local 7 (last parameter)
kExprF64SConvertI64, // f64.convert_s/i64
]);
const instance = builder.instantiate();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-854050.js b/deps/v8/test/mjsunit/regress/wasm/regress-854050.js
index d6c4829acd..7130595870 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-854050.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-854050.js
@@ -8,18 +8,18 @@ const builder = new WasmModuleBuilder();
builder.addFunction(undefined, makeSig([kWasmI32, kWasmF32], []))
.addLocals({i32_count: 7})
.addBody([
- kExprGetLocal, 0, // get_local
+ kExprLocalGet, 0, // get_local
kExprI32Const, 0, // i32.const 0
kExprIf, kWasmStmt, // if
kExprUnreachable, // unreachable
kExprEnd, // end if
- kExprGetLocal, 4, // get_local
- kExprTeeLocal, 8, // tee_local
+ kExprLocalGet, 4, // get_local
+ kExprLocalTee, 8, // tee_local
kExprBrIf, 0, // br_if depth=0
- kExprTeeLocal, 7, // tee_local
- kExprTeeLocal, 0, // tee_local
- kExprTeeLocal, 2, // tee_local
- kExprTeeLocal, 8, // tee_local
+ kExprLocalTee, 7, // tee_local
+ kExprLocalTee, 0, // tee_local
+ kExprLocalTee, 2, // tee_local
+ kExprLocalTee, 8, // tee_local
kExprDrop, // drop
kExprLoop, kWasmStmt, // loop
kExprEnd, // end loop
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-864509.js b/deps/v8/test/mjsunit/regress/wasm/regress-864509.js
index 19e3bfcfb8..45e9e0b898 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-864509.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-864509.js
@@ -10,7 +10,7 @@ const builder = new WasmModuleBuilder();
builder.addMemory(1, 1);
// First function is Liftoff. The first parameter is used as memory offset.
builder.addFunction(undefined, kSig_v_i).addBody([
- kExprGetLocal, 0, // get_local 0
+ kExprLocalGet, 0, // get_local 0
kExprI32Const, 0, // i32.const 0
kExprI32StoreMem, 0, 0, // i32.store offset=0
]);
@@ -19,7 +19,7 @@ builder.addFunction(undefined, kSig_v_i).addBody([
// is loaded as 64-bit value on x64.
builder.addFunction(undefined, makeSig(new Array(6).fill(kWasmI32), []))
.addBody([
- kExprGetLocal, 5, // get_local 5
+ kExprLocalGet, 5, // get_local 5
kExprCallFunction, 0 // call 0
]);
// The third function is Liftoff again. A value is spilled on the stack as i32,
@@ -27,8 +27,8 @@ builder.addFunction(undefined, makeSig(new Array(6).fill(kWasmI32), []))
 // copied on the stack, even though just 32 bits were written before. Hence, the
// stack slot is not zero-extended.
const gen_i32_code = [
- kExprTeeLocal, 0, // tee_local 0
- kExprGetLocal, 0, // get_local 0
+ kExprLocalTee, 0, // tee_local 0
+ kExprLocalGet, 0, // get_local 0
kExprI32Const, 1, // i32.const 1
kExprI32Add // i32.add --> 2nd param
];
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-894307.js b/deps/v8/test/mjsunit/regress/wasm/regress-894307.js
index f40388fcb4..a9a3595fbc 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-894307.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-894307.js
@@ -8,8 +8,8 @@ const builder = new WasmModuleBuilder();
const sig = makeSig([kWasmI32, kWasmI64, kWasmI64], [kWasmI64]);
builder.addFunction(undefined, sig)
.addBody([
- kExprGetLocal, 2,
- kExprGetLocal, 1,
+ kExprLocalGet, 2,
+ kExprLocalGet, 1,
kExprI64Shl,
]);
builder.instantiate();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-910824.js b/deps/v8/test/mjsunit/regress/wasm/regress-910824.js
index b795425b1f..6101f8ca81 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-910824.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-910824.js
@@ -11,26 +11,26 @@ builder.addType(makeSig([kWasmI32, kWasmF32, kWasmF32, kWasmF64], [kWasmI32]));
builder.addFunction(undefined, 0 /* sig */)
.addLocals({i32_count: 504})
.addBody([
-kExprGetGlobal, 0x00,
-kExprSetLocal, 0x04,
-kExprGetLocal, 0x04,
+kExprGlobalGet, 0x00,
+kExprLocalSet, 0x04,
+kExprLocalGet, 0x04,
kExprI32Const, 0x01,
kExprI32Sub,
-kExprGetGlobal, 0x00,
+kExprGlobalGet, 0x00,
kExprI32Const, 0x00,
kExprI32Eqz,
-kExprGetGlobal, 0x00,
+kExprGlobalGet, 0x00,
kExprI32Const, 0x01,
kExprI32Const, 0x01,
kExprI32Sub,
-kExprGetGlobal, 0x00,
+kExprGlobalGet, 0x00,
kExprI32Const, 0x00,
kExprI32Eqz,
-kExprGetGlobal, 0x00,
+kExprGlobalGet, 0x00,
kExprI32Const, 0x00,
kExprI32Const, 0x01,
kExprI32Sub,
-kExprGetGlobal, 0x01,
+kExprGlobalGet, 0x01,
kExprUnreachable,
]);
builder.instantiate();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-917412.js b/deps/v8/test/mjsunit/regress/wasm/regress-917412.js
index b74572ac8a..4b9528ccf6 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-917412.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-917412.js
@@ -14,11 +14,11 @@ kExprIf, kWasmI32,
kExprElse,
kExprI32Const, 1,
kExprEnd,
-kExprTeeLocal, 0,
-kExprGetLocal, 0,
+kExprLocalTee, 0,
+kExprLocalGet, 0,
kExprLoop, kWasmStmt,
kExprI64Const, 0x80, 0x80, 0x80, 0x70,
- kExprSetLocal, 0x01,
+ kExprLocalSet, 0x01,
kExprI32Const, 0x00,
kExprIf, kWasmI32,
kExprI32Const, 0x00,
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-918284.js b/deps/v8/test/mjsunit/regress/wasm/regress-918284.js
index dadbf3f7ea..16de9caabd 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-918284.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-918284.js
@@ -14,7 +14,7 @@ builder.addFunction(undefined, kSig_i_i)
kExprElse, // @15
kExprI32Const, 1,
kExprEnd, // @18
- kExprTeeLocal, 0,
+ kExprLocalTee, 0,
kExprI32Popcnt
]);
builder.instantiate();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-918917.js b/deps/v8/test/mjsunit/regress/wasm/regress-918917.js
index f007957c6f..3660244cda 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-918917.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-918917.js
@@ -8,9 +8,9 @@ const builder = new WasmModuleBuilder();
builder.addFunction(undefined, kSig_v_v)
.addLocals({i32_count: 1}).addLocals({f32_count: 1}).addLocals({f64_count: 1})
.addBody([
-kExprGetLocal, 1,
-kExprGetLocal, 2,
-kExprGetLocal, 0,
+kExprLocalGet, 1,
+kExprLocalGet, 2,
+kExprLocalGet, 0,
kExprIf, kWasmI32,
kExprI32Const, 1,
kExprElse,
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-919308.js b/deps/v8/test/mjsunit/regress/wasm/regress-919308.js
index 8c454413e8..e2f0426702 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-919308.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-919308.js
@@ -8,23 +8,23 @@ const builder = new WasmModuleBuilder();
builder.addFunction(undefined, kSig_i_i)
.addLocals({i32_count: 5})
.addBody([
- kExprGetLocal, 0, // --> 1
+ kExprLocalGet, 0, // --> 1
kExprIf, kWasmI32,
- kExprGetLocal, 0, // --> 1
+ kExprLocalGet, 0, // --> 1
kExprElse,
kExprUnreachable,
kExprEnd,
kExprIf, kWasmI32,
- kExprGetLocal, 0, // --> 1
+ kExprLocalGet, 0, // --> 1
kExprElse,
kExprUnreachable,
kExprEnd,
kExprIf, kWasmI32,
kExprI32Const, 0,
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
kExprI32Sub, // --> -1
- kExprGetLocal, 0,
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
+ kExprLocalGet, 0,
kExprI32Sub, // --> 0
kExprI32Sub, // --> -1
kExprElse,
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-919533.js b/deps/v8/test/mjsunit/regress/wasm/regress-919533.js
index 706d3cc7f4..1cc4b675c2 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-919533.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-919533.js
@@ -8,8 +8,8 @@ const builder = new WasmModuleBuilder();
builder.addFunction(undefined, kSig_v_v).addBody([]);
builder.addFunction(undefined, kSig_i_i)
.addBody([
- kExprGetLocal, 0,
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
+ kExprLocalGet, 0,
// Stack now contains two copies of the first param register.
// Start a loop to create a merge point (values still in registers).
kExprLoop, kWasmStmt,
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-922432.js b/deps/v8/test/mjsunit/regress/wasm/regress-922432.js
index f6175b3a63..d5aee0d332 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-922432.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-922432.js
@@ -12,7 +12,7 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
.addLocals({except_count: 1})
.addBody([
kExprLoop, kWasmStmt,
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
kExprBrOnExn // Bytecode truncated here.
]).exportFunc();
fun.body.pop(); // Pop implicitly added kExprEnd from body.
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-922670.js b/deps/v8/test/mjsunit/regress/wasm/regress-922670.js
index 2988eddf30..96a17bebba 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-922670.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-922670.js
@@ -10,10 +10,10 @@ builder.addFunction(undefined, sig)
.addLocals({i64_count: 1})
.addBody([
kExprLoop, kWasmI32,
- kExprGetLocal, 1,
+ kExprLocalGet, 1,
kExprI64Const, 1,
kExprLoop, kWasmI32,
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
kExprI32Const, 1,
kExprI32Const, 1,
kExprIf, kWasmI32,
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-922933.js b/deps/v8/test/mjsunit/regress/wasm/regress-922933.js
index 4d44509598..6d0286d95a 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-922933.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-922933.js
@@ -14,30 +14,30 @@ builder.addFunction(undefined, sig)
kExprEnd,
kExprBlock, kWasmStmt,
kExprI32Const, 0x00,
- kExprSetLocal, 0x09,
+ kExprLocalSet, 0x09,
kExprI32Const, 0x00,
kExprIf, kWasmStmt,
kExprBlock, kWasmStmt,
kExprI32Const, 0x00,
- kExprSetLocal, 0x0a,
+ kExprLocalSet, 0x0a,
kExprBr, 0x00,
kExprEnd,
kExprBlock, kWasmStmt,
kExprBlock, kWasmStmt,
- kExprGetLocal, 0x00,
- kExprSetLocal, 0x12,
+ kExprLocalGet, 0x00,
+ kExprLocalSet, 0x12,
kExprBr, 0x00,
kExprEnd,
- kExprGetLocal, 0x16,
- kExprSetLocal, 0x0f,
- kExprGetLocal, 0x0f,
- kExprSetLocal, 0x17,
- kExprGetLocal, 0x0f,
- kExprSetLocal, 0x18,
- kExprGetLocal, 0x17,
- kExprGetLocal, 0x18,
+ kExprLocalGet, 0x16,
+ kExprLocalSet, 0x0f,
+ kExprLocalGet, 0x0f,
+ kExprLocalSet, 0x17,
+ kExprLocalGet, 0x0f,
+ kExprLocalSet, 0x18,
+ kExprLocalGet, 0x17,
+ kExprLocalGet, 0x18,
kExprI64ShrS,
- kExprSetLocal, 0x19,
+ kExprLocalSet, 0x19,
kExprUnreachable,
kExprEnd,
kExprUnreachable,
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-924843.js b/deps/v8/test/mjsunit/regress/wasm/regress-924843.js
index 0549a769fb..c77845af76 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-924843.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-924843.js
@@ -8,7 +8,7 @@ const builder = new WasmModuleBuilder();
const sig = builder.addType(makeSig([kWasmI32, kWasmI32, kWasmI32], [kWasmI32]));
builder.addFunction(undefined, sig)
.addBody([
- kExprGetLocal, 2,
+ kExprLocalGet, 2,
kExprIf, kWasmStmt,
kExprBlock, kWasmStmt
]);
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-935138.js b/deps/v8/test/mjsunit/regress/wasm/regress-935138.js
index 20835428e3..dd585bb255 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-935138.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-935138.js
@@ -11,8 +11,8 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
binary.emit_header();
binary.emit_bytes([kTypeSectionCode, 4, 1, kWasmFunctionTypeForm, 0, 0]);
binary.emit_bytes([kFunctionSectionCode, 2, 1, 0]);
- binary.emit_bytes([kCodeSectionCode, 6, 1, 4, 0, kExprGetLocal, 0, kExprEnd]);
- binary.emit_bytes([kCodeSectionCode, 6, 1, 4, 0, kExprGetLocal, 0, kExprEnd]);
+ binary.emit_bytes([kCodeSectionCode, 6, 1, 4, 0, kExprLocalGet, 0, kExprEnd]);
+ binary.emit_bytes([kCodeSectionCode, 6, 1, 4, 0, kExprLocalGet, 0, kExprEnd]);
let buffer = binary.trunc_buffer();
assertPromiseResult(WebAssembly.compile(buffer), assertUnreachable,
e => assertInstanceof(e, WebAssembly.CompileError));
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-952342.js b/deps/v8/test/mjsunit/regress/wasm/regress-952342.js
index eb81f5a9c6..5e20860d4d 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-952342.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-952342.js
@@ -9,9 +9,9 @@ const memory = new WebAssembly.Memory({initial: 1});
let builder = new WasmModuleBuilder();
builder.addImportedMemory("imports", "mem", 1);
builder.addFunction("copy", kSig_v_iii)
- .addBody([kExprGetLocal, 0, // dst
- kExprGetLocal, 1, // src
- kExprGetLocal, 2, // size
+ .addBody([kExprLocalGet, 0, // dst
+ kExprLocalGet, 1, // src
+ kExprLocalGet, 2, // size
kNumericPrefix, kExprMemoryCopy, 0, 0]).exportAs("copy");
let instance = builder.instantiate({imports: {mem: memory}});
memory.grow(1);
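The dst/src/size annotations above are the standard memory.copy stack operands behind the 0xfc numeric prefix. A usage sketch against the instance (assumption: views are re-taken after grow() detaches the old buffer):

const bytes = new Uint8Array(memory.buffer);
bytes[0] = 7;
instance.exports.copy(16, 0, 1);  // dst=16, src=0, size=1
assertEquals(7, new Uint8Array(memory.buffer)[16]);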
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-957405.js b/deps/v8/test/mjsunit/regress/wasm/regress-957405.js
index a83104297e..51adce7698 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-957405.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-957405.js
@@ -9,9 +9,9 @@ const memory = new WebAssembly.Memory({initial: 1});
let builder = new WasmModuleBuilder();
builder.addImportedMemory("imports", "mem");
builder.addFunction("fill", kSig_v_iii)
- .addBody([kExprGetLocal, 0, // dst
- kExprGetLocal, 1, // value
- kExprGetLocal, 2, // size
+ .addBody([kExprLocalGet, 0, // dst
+ kExprLocalGet, 1, // value
+ kExprLocalGet, 2, // size
kNumericPrefix, kExprMemoryFill, 0]).exportAs("fill");
let instance = builder.instantiate({imports: {mem: memory}});
memory.grow(1);
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-968078.js b/deps/v8/test/mjsunit/regress/wasm/regress-968078.js
index 2935ea05e3..07081087fa 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-968078.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-968078.js
@@ -28,16 +28,16 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
builder.addFunction("foo", kSig_v_iii)
.addBody([].concat([
kExprBlock, kWasmStmt,
- kExprGetLocal, 0x2,
+ kExprLocalGet, 0x2,
kExprI32Const, 0x01,
kExprI32And,
// Generate a test branch (which has 32k limited reach).
kExprIf, kWasmStmt,
- kExprGetLocal, 0x0,
+ kExprLocalGet, 0x0,
kExprI32Const, 0x01,
kExprI32And,
kExprBrIf, 0x1,
- kExprGetLocal, 0x0,
+ kExprLocalGet, 0x0,
// Emit a br_table that is long enough to make the test branch go out of range.
], br_table(0x1, 9000, 0x00), [
kExprEnd,
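A back-of-envelope for the 9000 above, assuming roughly four bytes per emitted jump-table entry (an assumption about codegen, not a number from the source):

// 9000 entries * ~4 bytes/entry = ~36000 bytes, past the 32768-byte reach.
assertTrue(9000 * 4 > 0x8000);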
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-9759.js b/deps/v8/test/mjsunit/regress/wasm/regress-9759.js
new file mode 100644
index 0000000000..9d1f86a48d
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-9759.js
@@ -0,0 +1,26 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --no-wasm-tier-up --no-liftoff
+
+load("test/mjsunit/wasm/wasm-module-builder.js");
+
+// This constant was chosen as it is the smallest number of cases that still
+// triggers the input count overflow. The new limit put into place is smaller.
+const NUM_CASES = 0xfffd;
+
+(function TestBrTableTooLarge() {
+ let builder = new WasmModuleBuilder();
+ let cases = new Array(NUM_CASES).fill(0);
+ builder.addFunction('main', kSig_v_i)
+ .addBody([].concat([
+ kExprBlock, kWasmStmt,
+ kExprLocalGet, 0,
+ kExprBrTable], wasmSignedLeb(NUM_CASES),
+ cases, [0,
+ kExprEnd
+ ])).exportFunc();
+ assertThrows(() => new WebAssembly.Module(builder.toBuffer()),
+ WebAssembly.CompileError, /invalid table count/);
+})();
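For contrast, the same shape with a tiny case count should validate; a hypothetical follow-on sketch reusing the helpers above (three cases, all targeting depth 0, plus the default):

(function TestSmallBrTableOk() {
  let builder = new WasmModuleBuilder();
  builder.addFunction('ok', kSig_v_i)
    .addBody([].concat([
      kExprBlock, kWasmStmt,
      kExprLocalGet, 0,
      kExprBrTable], wasmSignedLeb(3),
      [0, 0, 0], [0,  // case targets, then the default target
      kExprEnd
    ])).exportFunc();
  new WebAssembly.Module(builder.toBuffer());  // expected to compile cleanly
})();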
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-crbug-1006631.js b/deps/v8/test/mjsunit/regress/wasm/regress-crbug-1006631.js
new file mode 100644
index 0000000000..ab555e4551
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-crbug-1006631.js
@@ -0,0 +1,7 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --experimental-wasm-type-reflection --trace-turbo-graph
+
+new WebAssembly.Function({ parameters: [], results: [] }, x => x);
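Under the type-reflection proposal this constructor wraps a JS callable as a typed wasm function; a usage sketch under the same flag (illustrative, separate from the trace-flag crash this test covers):

const inc = new WebAssembly.Function(
    {parameters: ['i32'], results: ['i32']}, x => x + 1);
assertEquals(2, inc(1));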
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-crbug-1007608.js b/deps/v8/test/mjsunit/regress/wasm/regress-crbug-1007608.js
index 37d5b2e4a2..279d2dbd06 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-crbug-1007608.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-crbug-1007608.js
@@ -13,7 +13,7 @@ let types = new Array(argc).fill(kWasmI32);
let sig = makeSig(types, []);
let body = [];
for (let i = 0; i < argc; ++i) {
- body.push(kExprGetLocal, i);
+ body.push(kExprLocalGet, i);
}
body.push(kExprCallFunction, 0);
builder.addImport('', 'f', sig);
diff --git a/deps/v8/test/mjsunit/string-replace-gc.js b/deps/v8/test/mjsunit/string-replace-gc.js
index 2f1efd8813..56b6a09da1 100644
--- a/deps/v8/test/mjsunit/string-replace-gc.js
+++ b/deps/v8/test/mjsunit/string-replace-gc.js
@@ -25,31 +25,22 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Regression test for the r1512 fix.
+// Regression test for the r1513 fix.
+
+// Flags: --allow-natives-syntax
var foo = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa";
+assertEquals(39, foo.length);
+
+for (var i = 0; i < 12; i++) {
+ foo = foo + foo;
+}
-foo = foo + foo;
-foo = foo + foo;
-foo = foo + foo;
-foo = foo + foo;
-foo = foo + foo;
-foo = foo + foo;
-foo = foo + foo;
-foo = foo + foo;
-foo = foo + foo;
-foo = foo + foo;
-foo = foo + foo;
-foo = foo + foo;
-foo = foo + foo;
-foo = foo + foo;
-foo = foo + foo;
-
-foo.replace(/[b]/, "c"); // Flatten foo.
+foo = %FlattenString(foo);
var moving_string = "b" + "c";
-var bar = foo.replace(/[a]/g, moving_string);
+var bar = foo.replace(/a/g, moving_string);
-print(bar.length);
+// 39 chars, doubled 12 times, each 'a' replaced by two: 39 * 2^12 * 2 = 319488.
+assertEquals(319488, bar.length);
diff --git a/deps/v8/test/mjsunit/tools/compiler-trace-flags-wasm.js b/deps/v8/test/mjsunit/tools/compiler-trace-flags-wasm.js
index 2d7cd00ac3..7be5abb675 100644
--- a/deps/v8/test/mjsunit/tools/compiler-trace-flags-wasm.js
+++ b/deps/v8/test/mjsunit/tools/compiler-trace-flags-wasm.js
@@ -20,8 +20,8 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
let builder = new WasmModuleBuilder();
builder.addFunction("add", kSig_i_ii)
- .addBody([kExprGetLocal, 0,
- kExprGetLocal, 1,
+ .addBody([kExprLocalGet, 0,
+ kExprLocalGet, 1,
kExprI32Add])
.exportFunc();
diff --git a/deps/v8/test/mjsunit/unicodelctest-no-optimization.js b/deps/v8/test/mjsunit/unicodelctest-no-optimization.js
index 0b31c560e0..e56d690ed2 100644
--- a/deps/v8/test/mjsunit/unicodelctest-no-optimization.js
+++ b/deps/v8/test/mjsunit/unicodelctest-no-optimization.js
@@ -93,7 +93,6 @@ function fuzz() {
fuzz_index = 0;
seed = 49734321;
for (var i = 0; i < 1000; i++) {
- print(i);
var len = rand() & 0x1f;
var ranges = new Array(len);
var last = rand();
diff --git a/deps/v8/test/mjsunit/wasm/OWNERS b/deps/v8/test/mjsunit/wasm/OWNERS
index b6d75023d7..c400f97de0 100644
--- a/deps/v8/test/mjsunit/wasm/OWNERS
+++ b/deps/v8/test/mjsunit/wasm/OWNERS
@@ -1,3 +1,3 @@
ahaas@chromium.org
-clemensh@chromium.org
+clemensb@chromium.org
titzer@chromium.org
diff --git a/deps/v8/test/mjsunit/wasm/adapter-frame.js b/deps/v8/test/mjsunit/wasm/adapter-frame.js
index 55634163c6..a25e2aaf3b 100644
--- a/deps/v8/test/mjsunit/wasm/adapter-frame.js
+++ b/deps/v8/test/mjsunit/wasm/adapter-frame.js
@@ -28,7 +28,7 @@ function makeSelect(type, args, which) {
var params = [];
for (var i = 0; i < args; i++) params.push(type);
builder.addFunction("select", makeSig(params, [type]))
- .addBody([kExprGetLocal, which])
+ .addBody([kExprLocalGet, which])
.exportFunc();
return builder.instantiate().exports.select;
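A usage sketch for the helper above (illustrative): a three-argument i32 select returning its middle argument, which is exactly the adapter-frame mismatch case when called with fewer JS arguments.

const sel = makeSelect(kWasmI32, 3, 1);
assertEquals(2, sel(1, 2, 3));
assertEquals(0, sel(1));  // missing arg arrives as undefined, converts to 0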
diff --git a/deps/v8/test/mjsunit/wasm/anyfunc.js b/deps/v8/test/mjsunit/wasm/anyfunc.js
index f0d587b25a..4a53a04468 100644
--- a/deps/v8/test/mjsunit/wasm/anyfunc.js
+++ b/deps/v8/test/mjsunit/wasm/anyfunc.js
@@ -10,7 +10,7 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
print(arguments.callee.name);
const builder = new WasmModuleBuilder();
builder.addFunction('main', kSig_a_a)
- .addBody([kExprGetLocal, 0])
+ .addBody([kExprLocalGet, 0])
.exportFunc();
const instance = builder.instantiate();
@@ -27,7 +27,7 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
const sig_index = builder.addType(kSig_v_a);
const imp_index = builder.addImport('q', 'func', sig_index);
builder.addFunction('main', sig_index)
- .addBody([kExprGetLocal, 0, kExprCallFunction, imp_index])
+ .addBody([kExprLocalGet, 0, kExprCallFunction, imp_index])
.exportFunc();
const main = builder.instantiate({q: {func: checkFunction}}).exports.main;
@@ -50,28 +50,28 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
builder.addFunction('main', ref_sig)
.addLocals({anyfunc_count: 10})
.addBody([
- kExprGetLocal, 0,
- kExprSetLocal, 1, // Set local
- kExprGetLocal, 0,
- kExprSetLocal, 2, // Set local
- kExprGetLocal, 0,
- kExprSetLocal, 3, // Set local
- kExprGetLocal, 0,
- kExprSetLocal, 4, // Set local
- kExprGetLocal, 0,
- kExprSetLocal, 5, // Set local
- kExprGetLocal, 0,
- kExprSetLocal, 6, // Set local
- kExprGetLocal, 0,
- kExprSetLocal, 7, // Set local
- kExprGetLocal, 0,
- kExprSetLocal, 8, // Set local
- kExprGetLocal, 0,
- kExprSetLocal, 9, // Set local
- kExprGetLocal, 0,
- kExprSetLocal, 10, // Set local
+ kExprLocalGet, 0,
+ kExprLocalSet, 1, // Set local
+ kExprLocalGet, 0,
+ kExprLocalSet, 2, // Set local
+ kExprLocalGet, 0,
+ kExprLocalSet, 3, // Set local
+ kExprLocalGet, 0,
+ kExprLocalSet, 4, // Set local
+ kExprLocalGet, 0,
+ kExprLocalSet, 5, // Set local
+ kExprLocalGet, 0,
+ kExprLocalSet, 6, // Set local
+ kExprLocalGet, 0,
+ kExprLocalSet, 7, // Set local
+ kExprLocalGet, 0,
+ kExprLocalSet, 8, // Set local
+ kExprLocalGet, 0,
+ kExprLocalSet, 9, // Set local
+ kExprLocalGet, 0,
+ kExprLocalSet, 10, // Set local
kExprCallFunction, gc_index, // call gc
- kExprGetLocal, 9,
+ kExprLocalGet, 9,
kExprCallFunction, imp_index // call import
])
.exportFunc();
@@ -97,7 +97,7 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
builder.addFunction('main', ref_sig)
.addBody([
kExprCallFunction, gc_index, // call gc
- kExprGetLocal, 0, kExprCallFunction, imp_index // call import
+ kExprLocalGet, 0, kExprCallFunction, imp_index // call import
])
.exportFunc();
@@ -118,7 +118,7 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
makeSig([kWasmI32, kWasmAnyFunc, kWasmI32], [kWasmAnyFunc]);
const sig_index = builder.addType(kSig_a_iai);
builder.addFunction('main', sig_index)
- .addBody([kExprGetLocal, 1])
+ .addBody([kExprLocalGet, 1])
.exportFunc();
const main = builder.instantiate().exports.main;
@@ -140,7 +140,7 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
const sig_index = builder.addType(kSig_a_v);
builder.addFunction('main', sig_index)
.addLocals({anyfunc_count: 1})
- .addBody([kExprGetLocal, 0])
+ .addBody([kExprLocalGet, 0])
.exportFunc();
const main = builder.instantiate().exports.main;
@@ -152,7 +152,7 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
const builder = new WasmModuleBuilder();
const sig_index = builder.addType(kSig_a_a);
builder.addFunction('main', sig_index)
- .addBody([kExprRefNull, kExprSetLocal, 0, kExprGetLocal, 0])
+ .addBody([kExprRefNull, kExprLocalSet, 0, kExprLocalGet, 0])
.exportFunc();
const main = builder.instantiate().exports.main;
@@ -187,7 +187,7 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
const sig_index = builder.addType(kSig_r_v);
builder.addFunction('main', sig_index)
.addLocals({anyfunc_count: 1})
- .addBody([kExprGetLocal, 0])
+ .addBody([kExprLocalGet, 0])
.exportFunc();
const main = builder.instantiate().exports.main;
@@ -200,7 +200,7 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
const sig_index = builder.addType(kSig_r_v);
builder.addFunction('main', sig_index)
.addLocals({anyfunc_count: 1})
- .addBody([kExprGetLocal, 0, kExprReturn])
+ .addBody([kExprLocalGet, 0, kExprReturn])
.exportFunc();
const main = builder.instantiate().exports.main;
diff --git a/deps/v8/test/mjsunit/wasm/anyref-globals.js b/deps/v8/test/mjsunit/wasm/anyref-globals.js
index 39d3bcb147..d243e37486 100644
--- a/deps/v8/test/mjsunit/wasm/anyref-globals.js
+++ b/deps/v8/test/mjsunit/wasm/anyref-globals.js
@@ -12,10 +12,10 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
const g_nullref = builder.addGlobal(kWasmAnyRef, true).index;
const g_nullfunc = builder.addGlobal(kWasmAnyFunc, true).index;
builder.addFunction("get_anyref_global", kSig_r_v)
- .addBody([kExprGetGlobal, g_nullref])
+ .addBody([kExprGlobalGet, g_nullref])
.exportAs("get_anyref_global");
builder.addFunction("get_anyfunc_global", kSig_a_v)
- .addBody([kExprGetGlobal, g_nullfunc])
+ .addBody([kExprGlobalGet, g_nullfunc])
.exportAs("get_anyfunc_global");
const instance = builder.instantiate();
@@ -32,16 +32,16 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
const g_nullfunc = builder.addGlobal(kWasmAnyFunc, true);
builder.addFunction("get_anyref_global", kSig_r_r)
.addBody([
- kExprGetLocal, 0,
- kExprSetGlobal, g_setref.index,
- kExprGetGlobal, g_nullref.index
+ kExprLocalGet, 0,
+ kExprGlobalSet, g_setref.index,
+ kExprGlobalGet, g_nullref.index
])
.exportAs("get_anyref_global");
builder.addFunction("get_anyfunc_global", kSig_a_a)
.addBody([
- kExprGetLocal, 0,
- kExprSetGlobal, g_setfunc.index,
- kExprGetGlobal, g_nullfunc.index
+ kExprLocalGet, 0,
+ kExprGlobalSet, g_setfunc.index,
+ kExprGlobalGet, g_nullfunc.index
])
.exportAs("get_anyfunc_global");
@@ -59,9 +59,9 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
const g = builder.addGlobal(kWasmAnyRef, true);
builder.addFunction("main", kSig_r_r)
.addBody([
- kExprGetLocal, 0,
- kExprSetGlobal, g.index,
- kExprGetGlobal, g.index
+ kExprLocalGet, 0,
+ kExprGlobalSet, g.index,
+ kExprGlobalGet, g.index
])
.exportAs("main");
@@ -79,9 +79,9 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
const g = builder.addGlobal(kWasmAnyFunc, true);
builder.addFunction("main", kSig_a_a)
.addBody([
- kExprGetLocal, 0,
- kExprSetGlobal, g.index,
- kExprGetGlobal, g.index
+ kExprLocalGet, 0,
+ kExprGlobalSet, g.index,
+ kExprGlobalGet, g.index
])
.exportAs("main");
@@ -100,10 +100,10 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
const g = builder.addGlobal(kWasmAnyRef, true);
builder.addFunction("main", kSig_r_r)
.addBody([
- kExprGetLocal, 0,
- kExprSetGlobal, g.index,
+ kExprLocalGet, 0,
+ kExprGlobalSet, g.index,
kExprCallFunction, gc_index, // call gc
- kExprGetGlobal, g.index
+ kExprGlobalGet, g.index
])
.exportAs("main");
@@ -121,14 +121,14 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
const g = builder.addGlobal(kWasmAnyRef, true);
builder.addFunction("get_global", kSig_r_v)
.addBody([
- kExprGetGlobal, g.index
+ kExprGlobalGet, g.index
])
.exportAs("get_global");
builder.addFunction("set_global", kSig_v_r)
.addBody([
- kExprGetLocal, 0,
- kExprSetGlobal, g.index
+ kExprLocalGet, 0,
+ kExprGlobalSet, g.index
])
.exportAs("set_global");
@@ -150,7 +150,7 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
let builder = new WasmModuleBuilder();
const g = builder.addImportedGlobal('m', 'val', kWasmAnyRef);
builder.addFunction('main', kSig_r_v)
- .addBody([kExprGetGlobal, g])
+ .addBody([kExprGlobalGet, g])
.exportAs('main');
const instance = builder.instantiate({ m: { val: obj } });
@@ -178,7 +178,7 @@ function dummy_func() {
let builder = new WasmModuleBuilder();
const g = builder.addImportedGlobal('m', 'val', kWasmAnyFunc);
builder.addFunction('main', kSig_a_v)
- .addBody([kExprGetGlobal, g])
+ .addBody([kExprGlobalGet, g])
.exportAs('main');
const module = builder.toModule();
@@ -285,14 +285,14 @@ function dummy_func() {
builder.addFunction("main",
makeSig([kWasmAnyRef, kWasmAnyFunc, kWasmAnyRef, kWasmAnyFunc], []))
.addBody([
- kExprGetLocal, 0,
- kExprSetGlobal, g1.index,
- kExprGetLocal, 1,
- kExprSetGlobal, g2.index,
- kExprGetLocal, 2,
- kExprSetGlobal, g3.index,
- kExprGetLocal, 3,
- kExprSetGlobal, g4.index
+ kExprLocalGet, 0,
+ kExprGlobalSet, g1.index,
+ kExprLocalGet, 1,
+ kExprGlobalSet, g2.index,
+ kExprLocalGet, 2,
+ kExprGlobalSet, g3.index,
+ kExprLocalGet, 3,
+ kExprGlobalSet, g4.index
])
.exportAs("main");
@@ -314,7 +314,7 @@ function dummy_func() {
let builder = new WasmModuleBuilder();
const g = builder.addImportedGlobal('m', 'val', kWasmAnyRef, true);
builder.addFunction('main', kSig_r_v)
- .addBody([kExprGetGlobal, g])
+ .addBody([kExprGlobalGet, g])
.exportAs('main');
const global = new WebAssembly.Global({ value: 'anyref', mutable: 'true' }, obj);
@@ -335,7 +335,7 @@ function dummy_func() {
let builder = new WasmModuleBuilder();
const g = builder.addImportedGlobal('m', 'val', kWasmAnyFunc, true);
builder.addFunction('main', kSig_a_v)
- .addBody([kExprGetGlobal, g])
+ .addBody([kExprGlobalGet, g])
.exportAs('main');
const global = new WebAssembly.Global({ value: 'anyfunc', mutable: 'true' }, obj);
@@ -358,19 +358,19 @@ function dummy_func() {
builder1.addFunction("set_globals", kSig_v_rr)
.addBody([
- kExprGetLocal, 0,
- kExprSetGlobal, g2.index,
- kExprGetLocal, 1,
- kExprSetGlobal, g3.index,
+ kExprLocalGet, 0,
+ kExprGlobalSet, g2.index,
+ kExprLocalGet, 1,
+ kExprGlobalSet, g3.index,
])
.exportAs("set_globals");
builder1.addFunction('get_global2', kSig_r_v)
- .addBody([kExprGetGlobal, g2.index])
+ .addBody([kExprGlobalGet, g2.index])
.exportAs('get_global2');
builder1.addFunction('get_global3', kSig_r_v)
- .addBody([kExprGetGlobal, g3.index])
+ .addBody([kExprGlobalGet, g3.index])
.exportAs('get_global3');
const instance1 = builder1.instantiate();
@@ -392,19 +392,19 @@ function dummy_func() {
builder2.addFunction("set_globals", kSig_v_rr)
.addBody([
- kExprGetLocal, 0,
- kExprSetGlobal, i2,
- kExprGetLocal, 1,
- kExprSetGlobal, i3,
+ kExprLocalGet, 0,
+ kExprGlobalSet, i2,
+ kExprLocalGet, 1,
+ kExprGlobalSet, i3,
])
.exportAs("set_globals");
builder2.addFunction('get_global2', kSig_r_v)
- .addBody([kExprGetGlobal, i2])
+ .addBody([kExprGlobalGet, i2])
.exportAs('get_global2');
builder2.addFunction('get_global3', kSig_r_v)
- .addBody([kExprGetGlobal, i3])
+ .addBody([kExprGlobalGet, i3])
.exportAs('get_global3');
const instance2 = builder2.instantiate(instance1);
@@ -454,19 +454,19 @@ function dummy_func() {
builder1.addFunction("set_globals", kSig_v_aa)
.addBody([
- kExprGetLocal, 0,
- kExprSetGlobal, g2.index,
- kExprGetLocal, 1,
- kExprSetGlobal, g3.index,
+ kExprLocalGet, 0,
+ kExprGlobalSet, g2.index,
+ kExprLocalGet, 1,
+ kExprGlobalSet, g3.index,
])
.exportAs("set_globals");
builder1.addFunction('get_global2', kSig_a_v)
- .addBody([kExprGetGlobal, g2.index])
+ .addBody([kExprGlobalGet, g2.index])
.exportAs('get_global2');
builder1.addFunction('get_global3', kSig_a_v)
- .addBody([kExprGetGlobal, g3.index])
+ .addBody([kExprGlobalGet, g3.index])
.exportAs('get_global3');
const instance1 = builder1.instantiate();
@@ -489,19 +489,19 @@ function dummy_func() {
builder2.addFunction("set_globals", kSig_v_aa)
.addBody([
- kExprGetLocal, 0,
- kExprSetGlobal, i2,
- kExprGetLocal, 1,
- kExprSetGlobal, i3,
+ kExprLocalGet, 0,
+ kExprGlobalSet, i2,
+ kExprLocalGet, 1,
+ kExprGlobalSet, i3,
])
.exportAs("set_globals");
builder2.addFunction('get_global2', kSig_a_v)
- .addBody([kExprGetGlobal, i2])
+ .addBody([kExprGlobalGet, i2])
.exportAs('get_global2');
builder2.addFunction('get_global3', kSig_a_v)
- .addBody([kExprGetGlobal, i3])
+ .addBody([kExprGlobalGet, i3])
.exportAs('get_global3');
const instance2 = builder2.instantiate(instance1);
@@ -563,10 +563,10 @@ function dummy_func() {
const g_ref = builder.addGlobal(kWasmAnyRef, true);
const g_func = builder.addGlobal(kWasmAnyFunc, true);
const f_ref = builder.addFunction('get_anyref_global', kSig_r_v)
- .addBody([kExprGetGlobal, g_ref.index])
+ .addBody([kExprGlobalGet, g_ref.index])
.exportAs('get_anyref_global');
const f_func = builder.addFunction('get_anyfunc_global', kSig_a_v)
- .addBody([kExprGetGlobal, g_func.index])
+ .addBody([kExprGlobalGet, g_func.index])
.exportAs('get_anyfunc_global');
g_ref.function_index = f_ref.index;
@@ -591,10 +591,10 @@ function dummy_func() {
g_wasm.function_index = import_wasm;
g_js.function_index = import_js;
builder.addFunction('get_global_wasm', kSig_a_v)
- .addBody([kExprGetGlobal, g_wasm.index])
+ .addBody([kExprGlobalGet, g_wasm.index])
.exportFunc();
builder.addFunction('get_global_js', kSig_a_v)
- .addBody([kExprGetGlobal, g_js.index])
+ .addBody([kExprGlobalGet, g_js.index])
.exportFunc();
const expected_wasm = dummy_func();
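
The renames in these test hunks are mechanical. For reference, the mapping used here (constant names from wasm-module-builder.js; the opcode bytes they encode are unchanged, the constants merely track the spec mnemonics):

// Old constant name -> new constant name  (spec mnemonic)
const kOpcodeRenames = {
  kExprGetLocal:  'kExprLocalGet',   // local.get
  kExprSetLocal:  'kExprLocalSet',   // local.set
  kExprGetGlobal: 'kExprGlobalGet',  // global.get
  kExprSetGlobal: 'kExprGlobalSet',  // global.set
};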
diff --git a/deps/v8/test/mjsunit/wasm/anyref.js b/deps/v8/test/mjsunit/wasm/anyref.js
index 141d25d1e3..cdb4742776 100644
--- a/deps/v8/test/mjsunit/wasm/anyref.js
+++ b/deps/v8/test/mjsunit/wasm/anyref.js
@@ -10,7 +10,7 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
print(arguments.callee.name);
const builder = new WasmModuleBuilder();
builder.addFunction('main', kSig_r_r)
- .addBody([kExprGetLocal, 0])
+ .addBody([kExprLocalGet, 0])
.exportFunc();
@@ -31,7 +31,7 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
const sig_index = builder.addType(kSig_v_r);
const imp_index = builder.addImport("q", "func", sig_index);
builder.addFunction('main', sig_index)
- .addBody([kExprGetLocal, 0,
+ .addBody([kExprLocalGet, 0,
kExprCallFunction, imp_index])
.exportFunc();
@@ -55,18 +55,18 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
builder.addFunction('main', ref_sig)
.addLocals({anyref_count: 10})
.addBody([
- kExprGetLocal, 0, kExprSetLocal, 1, // Set local
- kExprGetLocal, 0, kExprSetLocal, 2, // Set local
- kExprGetLocal, 0, kExprSetLocal, 3, // Set local
- kExprGetLocal, 0, kExprSetLocal, 4, // Set local
- kExprGetLocal, 0, kExprSetLocal, 5, // Set local
- kExprGetLocal, 0, kExprSetLocal, 6, // Set local
- kExprGetLocal, 0, kExprSetLocal, 7, // Set local
- kExprGetLocal, 0, kExprSetLocal, 8, // Set local
- kExprGetLocal, 0, kExprSetLocal, 9, // Set local
- kExprGetLocal, 0, kExprSetLocal, 10, // Set local
+ kExprLocalGet, 0, kExprLocalSet, 1, // Set local
+ kExprLocalGet, 0, kExprLocalSet, 2, // Set local
+ kExprLocalGet, 0, kExprLocalSet, 3, // Set local
+ kExprLocalGet, 0, kExprLocalSet, 4, // Set local
+ kExprLocalGet, 0, kExprLocalSet, 5, // Set local
+ kExprLocalGet, 0, kExprLocalSet, 6, // Set local
+ kExprLocalGet, 0, kExprLocalSet, 7, // Set local
+ kExprLocalGet, 0, kExprLocalSet, 8, // Set local
+ kExprLocalGet, 0, kExprLocalSet, 9, // Set local
+ kExprLocalGet, 0, kExprLocalSet, 10, // Set local
kExprCallFunction, gc_index, // call gc
- kExprGetLocal, 9, kExprCallFunction, imp_index // call import
+ kExprLocalGet, 9, kExprCallFunction, imp_index // call import
])
.exportFunc();
@@ -90,7 +90,7 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
builder.addFunction('main', ref_sig)
.addBody([
kExprCallFunction, gc_index, // call gc
- kExprGetLocal, 0, kExprCallFunction, imp_index // call import
+ kExprLocalGet, 0, kExprCallFunction, imp_index // call import
])
.exportFunc();
@@ -119,7 +119,7 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
builder.addFunction('main', main_sig)
.addBody([
kExprCallFunction, gc_index, // call gc
- kExprGetLocal, index, kExprCallFunction, imp_index // call import
+ kExprLocalGet, index, kExprCallFunction, imp_index // call import
])
.exportFunc();
@@ -145,7 +145,7 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
const kSig_r_iri = makeSig([kWasmI32, kWasmAnyRef, kWasmI32], [kWasmAnyRef]);
const sig_index = builder.addType(kSig_r_iri);
builder.addFunction('main', sig_index)
- .addBody([kExprGetLocal, 1])
+ .addBody([kExprLocalGet, 1])
.exportFunc();
const instance = builder.instantiate();
@@ -177,7 +177,7 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
print(arguments.callee.name);
const builder = new WasmModuleBuilder();
builder.addFunction('main', kSig_i_r)
- .addBody([kExprGetLocal, 0, kExprRefIsNull])
+ .addBody([kExprLocalGet, 0, kExprRefIsNull])
.exportFunc();
const instance = builder.instantiate();
@@ -208,7 +208,7 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
print(arguments.callee.name);
const builder = new WasmModuleBuilder();
builder.addFunction('main', kSig_r_v)
- .addBody([kExprGetLocal, 0])
+ .addBody([kExprLocalGet, 0])
.addLocals({anyref_count: 1})
.exportFunc();
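
A minimal consolidated sketch of the identity test above in its post-rename form (assumes the mjsunit harness and wasm-module-builder.js are loaded, with anyref support enabled, e.g. via --experimental-wasm-anyref):

load('test/mjsunit/wasm/wasm-module-builder.js');
const builder = new WasmModuleBuilder();
builder.addFunction('main', kSig_r_r)    // anyref -> anyref
    .addBody([kExprLocalGet, 0])         // return the parameter
    .exportFunc();
const instance = builder.instantiate();
const obj = {foo: 'bar'};
assertSame(obj, instance.exports.main(obj));  // round-trips the same object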
diff --git a/deps/v8/test/mjsunit/wasm/asm-wasm-i32.js b/deps/v8/test/mjsunit/wasm/asm-wasm-i32.js
index 9d8b14afec..0f93e77f6c 100644
--- a/deps/v8/test/mjsunit/wasm/asm-wasm-i32.js
+++ b/deps/v8/test/mjsunit/wasm/asm-wasm-i32.js
@@ -192,28 +192,15 @@ function i32_invert(a) {
var inputs = [
0, 1, 2, 3, 4,
- 10, 20, 30, 31, 32, 33, 100, 2000,
- 30000, 400000, 5000000,
- 100000000, 2000000000,
2147483646,
- 2147483647,
- 2147483648,
- 2147483649,
- 0x273a798e, 0x187937a3, 0xece3af83, 0x5495a16b, 0x0b668ecc, 0x11223344,
+ 2147483647, // max positive int32
+ 2147483648, // overflow max positive int32
0x0000009e, 0x00000043, 0x0000af73, 0x0000116b, 0x00658ecc, 0x002b3b4c,
- 0x88776655, 0x70000000, 0x07200000, 0x7fffffff, 0x56123761, 0x7fffff00,
- 0x761c4761, 0x80000000, 0x88888888, 0xa0000000, 0xdddddddd, 0xe0000000,
- 0xeeeeeeee, 0xfffffffd, 0xf0000000, 0x007fffff, 0x003fffff, 0x001fffff,
- 0x000fffff, 0x0007ffff, 0x0003ffff, 0x0001ffff, 0x0000ffff, 0x00007fff,
- 0x00003fff, 0x00001fff, 0x00000fff, 0x000007ff, 0x000003ff, 0x000001ff,
+ 0xeeeeeeee, 0xfffffffd, 0xf0000000, 0x007fffff, 0x0003ffff, 0x00001fff,
-1, -2, -3, -4,
- -10, -20, -30, -31, -32, -33, -100, -2000,
- -30000, -400000, -5000000,
- -100000000, -2000000000,
- -2147483646,
-2147483647,
- -2147483648,
- -2147483649,
+ -2147483648, // min negative int32
+ -2147483649, // overflow min negative int32
];
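
The retained values keep the int32 boundary coverage; the "overflow" comments refer to the ToInt32 wrap (results are taken mod 2^32 and reinterpreted as signed), as plain JS shows:

console.log(2147483647 | 0);   // 2147483647  (max int32, representable)
console.log(2147483648 | 0);   // -2147483648 (wraps past the maximum)
console.log(-2147483649 | 0);  // 2147483647  (wraps past the minimum)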
var funcs = [
diff --git a/deps/v8/test/mjsunit/wasm/asm-wasm-u32.js b/deps/v8/test/mjsunit/wasm/asm-wasm-u32.js
index 0809bca6ab..fda6091084 100644
--- a/deps/v8/test/mjsunit/wasm/asm-wasm-u32.js
+++ b/deps/v8/test/mjsunit/wasm/asm-wasm-u32.js
@@ -170,28 +170,15 @@ function u32_invert(a) {
var inputs = [
0, 1, 2, 3, 4,
- 10, 20, 30, 31, 32, 33, 100, 2000,
- 30000, 400000, 5000000,
- 100000000, 2000000000,
2147483646,
- 2147483647,
- 2147483648,
- 2147483649,
- 0x273a798e, 0x187937a3, 0xece3af83, 0x5495a16b, 0x0b668ecc, 0x11223344,
+ 2147483647, // max positive int32
+ 2147483648, // overflow max positive int32
0x0000009e, 0x00000043, 0x0000af73, 0x0000116b, 0x00658ecc, 0x002b3b4c,
- 0x88776655, 0x70000000, 0x07200000, 0x7fffffff, 0x56123761, 0x7fffff00,
- 0x761c4761, 0x80000000, 0x88888888, 0xa0000000, 0xdddddddd, 0xe0000000,
- 0xeeeeeeee, 0xfffffffd, 0xf0000000, 0x007fffff, 0x003fffff, 0x001fffff,
- 0x000fffff, 0x0007ffff, 0x0003ffff, 0x0001ffff, 0x0000ffff, 0x00007fff,
- 0x00003fff, 0x00001fff, 0x00000fff, 0x000007ff, 0x000003ff, 0x000001ff,
+ 0xeeeeeeee, 0xfffffffd, 0xf0000000, 0x007fffff, 0x0003ffff, 0x00001fff,
-1, -2, -3, -4,
- -10, -20, -30, -31, -32, -33, -100, -2000,
- -30000, -400000, -5000000,
- -100000000, -2000000000,
- -2147483646,
-2147483647,
- -2147483648,
- -2147483649,
+ -2147483648, // min negative int32
+ -2147483649, // overflow min negative int32
];
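
The u32 list is trimmed identically; `>>> 0` models the unsigned mod-2^32 view these boundary cases exercise:

console.log(4294967295 >>> 0);  // 4294967295 (max uint32)
console.log(-1 >>> 0);          // 4294967295 (same bit pattern)
console.log(4294967296 >>> 0);  // 0 (wraps past the maximum)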
var funcs = [
diff --git a/deps/v8/test/mjsunit/wasm/atomics-stress.js b/deps/v8/test/mjsunit/wasm/atomics-stress.js
index 8622919043..9eb18050cb 100644
--- a/deps/v8/test/mjsunit/wasm/atomics-stress.js
+++ b/deps/v8/test/mjsunit/wasm/atomics-stress.js
@@ -165,15 +165,15 @@ class Operation {
// Load address of low 32 bits.
kExprI32Const, 0,
// Load expected value.
- kExprGetLocal, 0, kExprI32StoreMem, 2, 0,
+ kExprLocalGet, 0, kExprI32StoreMem, 2, 0,
// Load address of high 32 bits.
kExprI32Const, 4,
// Load expected value.
- kExprGetLocal, 1, kExprI32StoreMem, 2, 0,
+ kExprLocalGet, 1, kExprI32StoreMem, 2, 0,
// Load address of where our window starts.
kExprI32Const, 0,
// Load input if there is one.
- ...(this.hasInput ? [kExprGetLocal, 2] : []),
+ ...(this.hasInput ? [kExprLocalGet, 2] : []),
// Perform operation.
kAtomicPrefix, ...this.wasmOpcode,
// Drop the output if there was one.
@@ -261,19 +261,19 @@ function generateFunctionBodyForSequence(sequence) {
if (!kDebug) {
body.push(
// Decrement the wait count.
- kExprGetLocal, 2, kExprI32Const, 1, kAtomicPrefix, kExprI32AtomicSub, 2,
+ kExprLocalGet, 2, kExprI32Const, 1, kAtomicPrefix, kExprI32AtomicSub, 2,
0,
// Spin until zero.
- kExprLoop, kWasmStmt, kExprGetLocal, 2, kAtomicPrefix,
+ kExprLoop, kWasmStmt, kExprLocalGet, 2, kAtomicPrefix,
kExprI32AtomicLoad, 2, 0, kExprI32Const, 0, kExprI32GtU, kExprBrIf, 0,
kExprEnd);
}
for (let operation of sequence) {
body.push(
// Pre-load address of results sequence pointer for later.
- kExprGetLocal, 1,
+ kExprLocalGet, 1,
// Load address where atomic pointers are stored.
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
// Load the second argument if the operation has one.
...(operation.hasInput ?
[kExprI32Const, ...toSLeb128(operation.input)] :
@@ -285,10 +285,10 @@ function generateFunctionBodyForSequence(sequence) {
// Store read intermediate to sequence.
kExprI32StoreMem, 2, 0,
// Increment result sequence pointer.
- kExprGetLocal, 1, kExprI32Const, 4, kExprI32Add, kExprSetLocal, 1);
+ kExprLocalGet, 1, kExprI32Const, 4, kExprI32Add, kExprLocalSet, 1);
}
// Return end of sequence index.
- body.push(kExprGetLocal, 1, kExprReturn);
+ body.push(kExprLocalGet, 1, kExprReturn);
return body;
}
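
The prologue pushed above is in effect a start barrier. Its plain-JS Atomics equivalent, for readers less fluent in raw opcodes (hypothetical names: `u32` is an Int32Array over the shared buffer, `waitCountIdx` the cell holding the count):

Atomics.sub(u32, waitCountIdx, 1);              // decrement the wait count
while (Atomics.load(u32, waitCountIdx) > 0) {}  // spin until zero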
diff --git a/deps/v8/test/mjsunit/wasm/atomics.js b/deps/v8/test/mjsunit/wasm/atomics.js
index 08714bbc01..264662f2ac 100644
--- a/deps/v8/test/mjsunit/wasm/atomics.js
+++ b/deps/v8/test/mjsunit/wasm/atomics.js
@@ -25,8 +25,8 @@ function GetAtomicBinOpFunction(wasmExpression, alignment, offset) {
builder.addImportedMemory("m", "imported_mem", 0, maxSize, "shared");
builder.addFunction("main", kSig_i_ii)
.addBody([
- kExprGetLocal, 0,
- kExprGetLocal, 1,
+ kExprLocalGet, 0,
+ kExprLocalGet, 1,
kAtomicPrefix,
wasmExpression, alignment, offset])
.exportAs("main");
@@ -43,9 +43,9 @@ function GetAtomicCmpExchangeFunction(wasmExpression, alignment, offset) {
builder.addImportedMemory("m", "imported_mem", 0, maxSize, "shared");
builder.addFunction("main", kSig_i_iii)
.addBody([
- kExprGetLocal, 0,
- kExprGetLocal, 1,
- kExprGetLocal, 2,
+ kExprLocalGet, 0,
+ kExprLocalGet, 1,
+ kExprLocalGet, 2,
kAtomicPrefix,
wasmExpression, alignment, offset])
.exportAs("main");
@@ -62,7 +62,7 @@ function GetAtomicLoadFunction(wasmExpression, alignment, offset) {
builder.addImportedMemory("m", "imported_mem", 0, maxSize, "shared");
builder.addFunction("main", kSig_i_i)
.addBody([
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
kAtomicPrefix,
wasmExpression, alignment, offset])
.exportAs("main");
@@ -79,8 +79,8 @@ function GetAtomicStoreFunction(wasmExpression, alignment, offset) {
builder.addImportedMemory("m", "imported_mem", 0, maxSize, "shared");
builder.addFunction("main", kSig_v_ii)
.addBody([
- kExprGetLocal, 0,
- kExprGetLocal, 1,
+ kExprLocalGet, 0,
+ kExprLocalGet, 1,
kAtomicPrefix,
wasmExpression, alignment, offset])
.exportAs("main");
@@ -440,11 +440,11 @@ function CmpExchgLoop(opcode, alignment) {
.addLocals({i64_count: 2})
.addBody([
kExprLoop, kWasmStmt,
- kExprGetLocal, 0,
- kExprGetLocal, 1,
- kExprGetLocal, 2,
+ kExprLocalGet, 0,
+ kExprLocalGet, 1,
+ kExprLocalGet, 2,
kAtomicPrefix, opcode, alignment, 0,
- kExprGetLocal, 1,
+ kExprLocalGet, 1,
kExprI64Ne,
kExprBrIf, 0,
kExprEnd
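
Usage sketch for the GetAtomic*Function helpers above (a sketch only; it assumes the shared memory the file wires up via the "m"/"imported_mem" import and the kExprI32AtomicAdd constant from wasm-module-builder.js):

const add = GetAtomicBinOpFunction(kExprI32AtomicAdd, 2, 0);
// add(addr, value) atomically adds `value` to the i32 cell at byte
// address `addr` (alignment 2 = log2 of 4 bytes) and returns the old value.
const old = add(0, 5);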
diff --git a/deps/v8/test/mjsunit/wasm/atomics64-stress.js b/deps/v8/test/mjsunit/wasm/atomics64-stress.js
index 386a3b5549..99e9016f1a 100644
--- a/deps/v8/test/mjsunit/wasm/atomics64-stress.js
+++ b/deps/v8/test/mjsunit/wasm/atomics64-stress.js
@@ -191,18 +191,18 @@ class Operation {
// Load address of low 32 bits.
kExprI32Const, 0,
// Load expected value.
- kExprGetLocal, 0, kExprI32StoreMem, 2, 0,
+ kExprLocalGet, 0, kExprI32StoreMem, 2, 0,
// Load address of high 32 bits.
kExprI32Const, 4,
// Load expected value.
- kExprGetLocal, 1, kExprI32StoreMem, 2, 0,
+ kExprLocalGet, 1, kExprI32StoreMem, 2, 0,
// Load address of where our window starts.
kExprI32Const, 0,
// Load input if there is one.
...(this.hasInput ?
[
- kExprGetLocal, 3, kExprI64UConvertI32, kExprI64Const, 32,
- kExprI64Shl, kExprGetLocal, 2, kExprI64UConvertI32,
+ kExprLocalGet, 3, kExprI64UConvertI32, kExprI64Const, 32,
+ kExprI64Shl, kExprLocalGet, 2, kExprI64UConvertI32,
kExprI64Ior
] :
[]),
@@ -299,19 +299,19 @@ function generateFunctionBodyForSequence(sequence) {
if (!kDebug) {
body.push(
// Decrement the wait count.
- kExprGetLocal, 2, kExprI32Const, 1, kAtomicPrefix, kExprI32AtomicSub, 2,
+ kExprLocalGet, 2, kExprI32Const, 1, kAtomicPrefix, kExprI32AtomicSub, 2,
0,
// Spin until zero.
- kExprLoop, kWasmStmt, kExprGetLocal, 2, kAtomicPrefix,
+ kExprLoop, kWasmStmt, kExprLocalGet, 2, kAtomicPrefix,
kExprI32AtomicLoad, 2, 0, kExprI32Const, 0, kExprI32GtU, kExprBrIf, 0,
kExprEnd);
}
for (let operation of sequence) {
body.push(
// Pre-load address of results sequence pointer for later.
- kExprGetLocal, 1,
+ kExprLocalGet, 1,
// Load address where atomic pointers are stored.
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
// Load the second argument if the operation has one.
...(operation.hasInput ?
[
@@ -326,10 +326,10 @@ function generateFunctionBodyForSequence(sequence) {
// Store read intermediate to sequence.
kExprI64StoreMem, 3, 0,
// Increment result sequence pointer.
- kExprGetLocal, 1, kExprI32Const, 8, kExprI32Add, kExprSetLocal, 1);
+ kExprLocalGet, 1, kExprI32Const, 8, kExprI32Add, kExprLocalSet, 1);
}
// Return end of sequence index.
- body.push(kExprGetLocal, 1, kExprReturn);
+ body.push(kExprLocalGet, 1, kExprReturn);
return body;
}
diff --git a/deps/v8/test/mjsunit/wasm/bigint.js b/deps/v8/test/mjsunit/wasm/bigint.js
index ff9046e9dc..0c9ebb6559 100644
--- a/deps/v8/test/mjsunit/wasm/bigint.js
+++ b/deps/v8/test/mjsunit/wasm/bigint.js
@@ -26,30 +26,30 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
let builder = new WasmModuleBuilder();
let a_global_index = builder
- .addImportedGlobal("mod", "a", kWasmI64)
+ .addImportedGlobal("mod", "a", kWasmI64);
let b_global_index = builder
.addImportedGlobal("mod", "b", kWasmI64);
- let c_global_index = builder
- .addImportedGlobal("mod", "c", kWasmI64);
-
builder
.addExportOfKind('a', kExternalGlobal, a_global_index)
.addExportOfKind('b', kExternalGlobal, b_global_index)
- .addExportOfKind('c', kExternalGlobal, c_global_index);
let module = builder.instantiate({
mod: {
a: 1n,
b: 2n ** 63n,
- c: "123",
}
});
assertEquals(module.exports.a.value, 1n);
assertEquals(module.exports.b.value, - (2n ** 63n));
- assertEquals(module.exports.c.value, 123n);
+})();
+
+(function TestJSBigIntGlobalImportInvalidType() {
+ let builder = new WasmModuleBuilder();
+ builder.addImportedGlobal("mod", "a", kWasmI64);
+ assertThrows(() => builder.instantiate({mod: { a: {} } }), WebAssembly.LinkError);
})();
(function TestJSBigIntToWasmI64MutableGlobal() {
@@ -86,7 +86,7 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
builder
.addFunction("f", kSig_l_l) // i64 -> i64
.addBody([
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
])
.exportFunc();
@@ -108,7 +108,7 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
builder
.addFunction("f", kSig_l_ll) // i64 -> i64
.addBody([
- kExprGetLocal, 1,
+ kExprLocalGet, 1,
])
.exportFunc();
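
The BigInt cases above pivot on signed-64-bit reinterpretation: an i64 global stores its value mod 2^64 as a signed 64-bit integer, which BigInt.asIntN models, and which is why importing 2n ** 63n reads back negative:

console.log(BigInt.asIntN(64, 2n ** 63n) === -(2n ** 63n));  // true
console.log(BigInt.asIntN(64, 1n) === 1n);                   // true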
diff --git a/deps/v8/test/mjsunit/wasm/bounds-check-64bit.js b/deps/v8/test/mjsunit/wasm/bounds-check-64bit.js
index 43ff8570c6..90ede1ac92 100644
--- a/deps/v8/test/mjsunit/wasm/bounds-check-64bit.js
+++ b/deps/v8/test/mjsunit/wasm/bounds-check-64bit.js
@@ -8,9 +8,9 @@ const builder = new WasmModuleBuilder();
builder.addMemory(1, undefined, false);
builder.addFunction('load', kSig_i_ii)
.addBody([
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
kExprI64SConvertI32,
- kExprGetLocal, 1,
+ kExprLocalGet, 1,
kExprI64SConvertI32,
kExprI64Shl,
kExprI32ConvertI64,
diff --git a/deps/v8/test/mjsunit/wasm/bounds-check-turbofan.js b/deps/v8/test/mjsunit/wasm/bounds-check-turbofan.js
index d972e7830d..b740a20f1a 100644
--- a/deps/v8/test/mjsunit/wasm/bounds-check-turbofan.js
+++ b/deps/v8/test/mjsunit/wasm/bounds-check-turbofan.js
@@ -10,7 +10,7 @@ const builder = new WasmModuleBuilder();
builder.addMemory(1, undefined, false);
builder.addFunction('load', kSig_i_i)
.addBody([
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
kExprI32LoadMem, 0, 100])
.exportFunc();
diff --git a/deps/v8/test/mjsunit/wasm/bulk-memory.js b/deps/v8/test/mjsunit/wasm/bulk-memory.js
index d783c6bf59..53ca1454b6 100644
--- a/deps/v8/test/mjsunit/wasm/bulk-memory.js
+++ b/deps/v8/test/mjsunit/wasm/bulk-memory.js
@@ -33,9 +33,9 @@ function getMemoryInit(mem, segment_data) {
builder.addPassiveDataSegment(segment_data);
builder.addFunction('init', kSig_v_iii)
.addBody([
- kExprGetLocal, 0, // Dest.
- kExprGetLocal, 1, // Source.
- kExprGetLocal, 2, // Size in bytes.
+ kExprLocalGet, 0, // Dest.
+ kExprLocalGet, 1, // Source.
+ kExprLocalGet, 2, // Size in bytes.
kNumericPrefix, kExprMemoryInit,
0, // Data segment index.
0, // Memory index.
@@ -102,9 +102,9 @@ function getMemoryCopy(mem) {
const builder = new WasmModuleBuilder();
builder.addImportedMemory("", "mem", 0);
builder.addFunction("copy", kSig_v_iii).addBody([
- kExprGetLocal, 0, // Dest.
- kExprGetLocal, 1, // Source.
- kExprGetLocal, 2, // Size in bytes.
+ kExprLocalGet, 0, // Dest.
+ kExprLocalGet, 1, // Source.
+ kExprLocalGet, 2, // Size in bytes.
kNumericPrefix, kExprMemoryCopy, 0, 0,
]).exportAs("copy");
return builder.instantiate({'': {mem}}).exports.copy;
@@ -128,9 +128,9 @@ function getMemoryFill(mem) {
const builder = new WasmModuleBuilder();
builder.addImportedMemory("", "mem", 0);
builder.addFunction("fill", kSig_v_iii).addBody([
- kExprGetLocal, 0, // Dest.
- kExprGetLocal, 1, // Byte value.
- kExprGetLocal, 2, // Size.
+ kExprLocalGet, 0, // Dest.
+ kExprLocalGet, 1, // Byte value.
+ kExprLocalGet, 2, // Size.
kNumericPrefix, kExprMemoryFill, 0,
]).exportAs("fill");
return builder.instantiate({'': {mem}}).exports.fill;
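
Usage sketch for the helper above (assuming the bulk-memory proposal is enabled in the host):

const mem = new WebAssembly.Memory({initial: 1});
const fill = getMemoryFill(mem);
fill(16, 0xAB, 4);  // dest, byte value, size: bytes 16..19 become 0xAB
console.log(new Uint8Array(mem.buffer)[16]);  // 171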
diff --git a/deps/v8/test/mjsunit/wasm/calls.js b/deps/v8/test/mjsunit/wasm/calls.js
index 97188964d1..f8a4616def 100644
--- a/deps/v8/test/mjsunit/wasm/calls.js
+++ b/deps/v8/test/mjsunit/wasm/calls.js
@@ -49,8 +49,8 @@ function assertFunction(module, func) {
builder.addMemory(1, 1, true);
builder.addFunction("sub", kSig_i_ii)
.addBody([
- kExprGetLocal, 0, // --
- kExprGetLocal, 1, // --
+ kExprLocalGet, 0, // --
+ kExprLocalGet, 1, // --
kExprI32Sub, // --
])
.exportFunc()
@@ -91,8 +91,8 @@ function assertFunction(module, func) {
builder.addMemory(kPages, kPages, true);
builder.addFunction("flt", kSig_i_dd)
.addBody([
- kExprGetLocal, 0, // --
- kExprGetLocal, 1, // --
+ kExprLocalGet, 0, // --
+ kExprLocalGet, 1, // --
kExprF64Lt // --
]) // --
.exportFunc();
diff --git a/deps/v8/test/mjsunit/wasm/code-space-exhaustion.js b/deps/v8/test/mjsunit/wasm/code-space-exhaustion.js
index 6f4698c0d0..45eaef92e8 100644
--- a/deps/v8/test/mjsunit/wasm/code-space-exhaustion.js
+++ b/deps/v8/test/mjsunit/wasm/code-space-exhaustion.js
@@ -9,7 +9,7 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
// We only have 1 MB code space. This is enough for the code below, but for all
// 1000 modules, it requires several GCs to get rid of the old code.
const builder = new WasmModuleBuilder();
-builder.addFunction('main', kSig_i_i).addBody([kExprGetLocal, 0]);
+builder.addFunction('main', kSig_i_i).addBody([kExprLocalGet, 0]);
const buffer = builder.toBuffer();
for (let i = 0; i < 1000; ++i) {
diff --git a/deps/v8/test/mjsunit/wasm/compare-exchange-stress.js b/deps/v8/test/mjsunit/wasm/compare-exchange-stress.js
index 5102216933..050a15e380 100644
--- a/deps/v8/test/mjsunit/wasm/compare-exchange-stress.js
+++ b/deps/v8/test/mjsunit/wasm/compare-exchange-stress.js
@@ -37,42 +37,42 @@ function makeWorkerCodeForOpcode(compareExchangeOpcode, size, functionName,
const kLocalNextValue = 7; // the value to write in the update
let body = [
// Convert the sequence length to its equivalent in bytes.
- kExprGetLocal, kArgSeqenceLength,
+ kExprLocalGet, kArgSeqenceLength,
kExprI32Const, size / 8,
kExprI32Mul,
- kExprSetLocal, kArgSeqenceLength,
+ kExprLocalSet, kArgSeqenceLength,
// Outer block so we have something to jump to for the return.
...[kExprBlock, kWasmStmt,
// Set counter to 0.
kExprI32Const, 0,
- kExprSetLocal, kLocalCurrentOffset,
+ kExprLocalSet, kLocalCurrentOffset,
// Outer loop until maxcount.
...[kExprLoop, kWasmStmt,
// Find the next value to wait for.
...[kExprLoop, kWasmStmt,
// Check end of sequence.
- kExprGetLocal, kLocalCurrentOffset,
- kExprGetLocal, kArgSeqenceLength,
+ kExprLocalGet, kLocalCurrentOffset,
+ kExprLocalGet, kArgSeqenceLength,
kExprI32Eq,
kExprBrIf, 2, // return
...[kExprBlock, kWasmStmt,
// Load next value.
- kExprGetLocal, kArgSequencePtr,
- kExprGetLocal, kLocalCurrentOffset,
+ kExprLocalGet, kArgSequencePtr,
+ kExprLocalGet, kLocalCurrentOffset,
kExprI32Add,
loadMemOpcode, 0, 0,
// Mask off bits.
- kExprGetLocal, kArgBitMask,
+ kExprLocalGet, kArgBitMask,
kExprI32And,
// Compare with worker id.
- kExprGetLocal, kArgWorkerId,
+ kExprLocalGet, kArgWorkerId,
kExprI32Eq,
kExprBrIf, 0,
// Not found, increment position.
- kExprGetLocal, kLocalCurrentOffset,
+ kExprLocalGet, kLocalCurrentOffset,
kExprI32Const, size / 8,
kExprI32Add,
- kExprSetLocal, kLocalCurrentOffset,
+ kExprLocalSet, kLocalCurrentOffset,
kExprBr, 1,
kExprEnd
],
@@ -80,41 +80,41 @@ function makeWorkerCodeForOpcode(compareExchangeOpcode, size, functionName,
kExprEnd
],
// Load expected value to local.
- kExprGetLocal, kArgSequencePtr,
- kExprGetLocal, kLocalCurrentOffset,
+ kExprLocalGet, kArgSequencePtr,
+ kExprLocalGet, kLocalCurrentOffset,
kExprI32Add,
loadMemOpcode, 0, 0,
- kExprSetLocal, kLocalExpectedValue,
+ kExprLocalSet, kLocalExpectedValue,
// Load value after expected one.
- kExprGetLocal, kArgSequencePtr,
- kExprGetLocal, kLocalCurrentOffset,
+ kExprLocalGet, kArgSequencePtr,
+ kExprLocalGet, kLocalCurrentOffset,
kExprI32Add,
kExprI32Const, size / 8,
kExprI32Add,
loadMemOpcode, 0, 0,
- kExprSetLocal, kLocalNextValue,
+ kExprLocalSet, kLocalNextValue,
// Hammer on memory until value found.
...[kExprLoop, kWasmStmt,
// Load address.
- kExprGetLocal, kArgMemoryCell,
+ kExprLocalGet, kArgMemoryCell,
// Load expected value.
- kExprGetLocal, kLocalExpectedValue,
+ kExprLocalGet, kLocalExpectedValue,
// Load updated value.
- kExprGetLocal, kLocalNextValue,
+ kExprLocalGet, kLocalNextValue,
// Try update.
kAtomicPrefix, compareExchangeOpcode, 0, 0,
// Load expected value.
- kExprGetLocal, kLocalExpectedValue,
+ kExprLocalGet, kLocalExpectedValue,
// Spin if not what expected.
kExprI32Ne,
kExprBrIf, 0,
kExprEnd
],
// Next iteration of loop.
- kExprGetLocal, kLocalCurrentOffset,
+ kExprLocalGet, kLocalCurrentOffset,
kExprI32Const, size / 8,
kExprI32Add,
- kExprSetLocal, kLocalCurrentOffset,
+ kExprLocalSet, kLocalCurrentOffset,
kExprBr, 0,
kExprEnd
], // outer loop
diff --git a/deps/v8/test/mjsunit/wasm/compare-exchange64-stress.js b/deps/v8/test/mjsunit/wasm/compare-exchange64-stress.js
index bd1c5c95a5..b2ffcf1475 100644
--- a/deps/v8/test/mjsunit/wasm/compare-exchange64-stress.js
+++ b/deps/v8/test/mjsunit/wasm/compare-exchange64-stress.js
@@ -40,44 +40,44 @@ function makeWorkerCodeForOpcode(compareExchangeOpcode, size, functionName,
const kLocalNextValue = 7; // the value to write in the update
let body = [
// Convert the sequence length to its equivalent in bytes.
- kExprGetLocal, kArgSeqenceLength,
+ kExprLocalGet, kArgSeqenceLength,
kExprI32Const, size / 8,
kExprI32Mul,
- kExprSetLocal, kArgSeqenceLength,
+ kExprLocalSet, kArgSeqenceLength,
// Outer block so we have something to jump to for the return.
...[kExprBlock, kWasmStmt,
// Set counter to 0.
kExprI32Const, 0,
- kExprSetLocal, kLocalCurrentOffset,
+ kExprLocalSet, kLocalCurrentOffset,
// Outer loop until maxcount.
...[kExprLoop, kWasmStmt,
// Find the next value to wait for.
...[kExprLoop, kWasmStmt,
// Check end of sequence.
- kExprGetLocal, kLocalCurrentOffset,
- kExprGetLocal, kArgSeqenceLength,
+ kExprLocalGet, kLocalCurrentOffset,
+ kExprLocalGet, kArgSeqenceLength,
kExprI32Eq,
kExprBrIf, 2, // return
...[kExprBlock, kWasmStmt,
// Load next value.
- kExprGetLocal, kArgSequencePtr,
- kExprGetLocal, kLocalCurrentOffset,
+ kExprLocalGet, kArgSequencePtr,
+ kExprLocalGet, kLocalCurrentOffset,
kExprI32Add,
loadMemOpcode, 0, 0,
// Mask off bits.
- kExprGetLocal, kArgBitMask,
+ kExprLocalGet, kArgBitMask,
kExprI64UConvertI32,
kExprI64And,
// Compare with worker id.
- kExprGetLocal, kArgWorkerId,
+ kExprLocalGet, kArgWorkerId,
kExprI64UConvertI32,
kExprI64Eq,
kExprBrIf, 0,
// Not found, increment position.
- kExprGetLocal, kLocalCurrentOffset,
+ kExprLocalGet, kLocalCurrentOffset,
kExprI32Const, size / 8,
kExprI32Add,
- kExprSetLocal, kLocalCurrentOffset,
+ kExprLocalSet, kLocalCurrentOffset,
kExprBr, 1,
kExprEnd
],
@@ -85,41 +85,41 @@ function makeWorkerCodeForOpcode(compareExchangeOpcode, size, functionName,
kExprEnd
],
// Load expected value to local.
- kExprGetLocal, kArgSequencePtr,
- kExprGetLocal, kLocalCurrentOffset,
+ kExprLocalGet, kArgSequencePtr,
+ kExprLocalGet, kLocalCurrentOffset,
kExprI32Add,
loadMemOpcode, 0, 0,
- kExprSetLocal, kLocalExpectedValue,
+ kExprLocalSet, kLocalExpectedValue,
// Load value after expected one.
- kExprGetLocal, kArgSequencePtr,
- kExprGetLocal, kLocalCurrentOffset,
+ kExprLocalGet, kArgSequencePtr,
+ kExprLocalGet, kLocalCurrentOffset,
kExprI32Add,
kExprI32Const, size / 8,
kExprI32Add,
loadMemOpcode, 0, 0,
- kExprSetLocal, kLocalNextValue,
+ kExprLocalSet, kLocalNextValue,
// Hammer on memory until value found.
...[kExprLoop, kWasmStmt,
// Load address.
- kExprGetLocal, kArgMemoryCell,
+ kExprLocalGet, kArgMemoryCell,
// Load expected value.
- kExprGetLocal, kLocalExpectedValue,
+ kExprLocalGet, kLocalExpectedValue,
// Load updated value.
- kExprGetLocal, kLocalNextValue,
+ kExprLocalGet, kLocalNextValue,
// Try update.
kAtomicPrefix, compareExchangeOpcode, 0, 0,
// Load expected value.
- kExprGetLocal, kLocalExpectedValue,
+ kExprLocalGet, kLocalExpectedValue,
// Spin if not what expected.
kExprI64Ne,
kExprBrIf, 0,
kExprEnd
],
// Next iteration of loop.
- kExprGetLocal, kLocalCurrentOffset,
+ kExprLocalGet, kLocalCurrentOffset,
kExprI32Const, size / 8,
kExprI32Add,
- kExprSetLocal, kLocalCurrentOffset,
+ kExprLocalSet, kLocalCurrentOffset,
kExprBr, 0,
kExprEnd
], // outer loop
diff --git a/deps/v8/test/mjsunit/wasm/compilation-hints-async-compilation.js b/deps/v8/test/mjsunit/wasm/compilation-hints-async-compilation.js
index 5ca20cbb95..4723b92acf 100644
--- a/deps/v8/test/mjsunit/wasm/compilation-hints-async-compilation.js
+++ b/deps/v8/test/mjsunit/wasm/compilation-hints-async-compilation.js
@@ -10,7 +10,7 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
print(arguments.callee.name);
let builder = new WasmModuleBuilder();
builder.addFunction('id', kSig_i_i)
- .addBody([kExprGetLocal, 0])
+ .addBody([kExprLocalGet, 0])
.setCompilationHint(kCompilationHintStrategyLazy,
kCompilationHintTierOptimized,
kCompilationHintTierBaseline)
@@ -26,7 +26,7 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
print(arguments.callee.name);
let builder = new WasmModuleBuilder();
builder.addFunction('id', kSig_i_l)
- .addBody([kExprGetLocal, 0])
+ .addBody([kExprLocalGet, 0])
.setCompilationHint(kCompilationHintStrategyLazy,
kCompilationHintTierDefault,
kCompilationHintTierDefault)
@@ -49,7 +49,7 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
print(arguments.callee.name);
let builder = new WasmModuleBuilder();
builder.addFunction('id', kSig_i_i)
- .addBody([kExprGetLocal, 0])
+ .addBody([kExprLocalGet, 0])
.setCompilationHint(kCompilationHintStrategyLazy,
kCompilationHintTierDefault,
kCompilationHintTierDefault)
@@ -63,7 +63,7 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
print(arguments.callee.name);
let builder = new WasmModuleBuilder();
builder.addFunction('id', kSig_i_i)
- .addBody([kExprGetLocal, 0])
+ .addBody([kExprLocalGet, 0])
.setCompilationHint(kCompilationHintStrategyLazyBaselineEagerTopTier,
kCompilationHintTierDefault,
kCompilationHintTierDefault)
diff --git a/deps/v8/test/mjsunit/wasm/compilation-hints-decoder.js b/deps/v8/test/mjsunit/wasm/compilation-hints-decoder.js
index e39e15feeb..5bcac2af9e 100644
--- a/deps/v8/test/mjsunit/wasm/compilation-hints-decoder.js
+++ b/deps/v8/test/mjsunit/wasm/compilation-hints-decoder.js
@@ -11,8 +11,8 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
let builder = new WasmModuleBuilder();
builder.addImport('mod', 'pow', kSig_i_ii);
builder.addFunction('upow', kSig_i_i)
- .addBody([kExprGetLocal, 0,
- kExprGetLocal, 0,
+ .addBody([kExprLocalGet, 0,
+ kExprLocalGet, 0,
kExprCallFunction, 0])
.setCompilationHint(kCompilationHintStrategyLazy,
kCompilationHintTierOptimized,
@@ -29,20 +29,20 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
let builder = new WasmModuleBuilder();
builder.addImport('mod', 'pow', kSig_i_ii);
builder.addFunction('upow', kSig_i_i)
- .addBody([kExprGetLocal, 0,
- kExprGetLocal, 0,
+ .addBody([kExprLocalGet, 0,
+ kExprLocalGet, 0,
kExprCallFunction, 0])
.setCompilationHint(kCompilationHintStrategyDefault,
kCompilationHintTierInterpreter,
kCompilationHintTierInterpreter)
.exportFunc();
builder.addFunction('upow2', kSig_i_i)
- .addBody([kExprGetLocal, 0,
- kExprGetLocal, 0,
+ .addBody([kExprLocalGet, 0,
+ kExprLocalGet, 0,
kExprCallFunction, 0])
builder.addFunction('upow3', kSig_i_i)
- .addBody([kExprGetLocal, 0,
- kExprGetLocal, 0,
+ .addBody([kExprLocalGet, 0,
+ kExprLocalGet, 0,
kExprCallFunction, 0])
let instance = builder.instantiate({mod: {pow: Math.pow}});
assertEquals(27, instance.exports.upow(3))
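
Every variant in these compilation-hint files attaches the same triple. Consolidated post-rename form of the `upow` hunk above (a sketch; constants come from wasm-module-builder.js and the feature was gated behind --experimental-wasm-compilation-hints):

builder.addFunction('upow', kSig_i_i)
    .addBody([kExprLocalGet, 0,
              kExprLocalGet, 0,
              kExprCallFunction, 0])
    .setCompilationHint(kCompilationHintStrategyLazy,   // compile lazily
                        kCompilationHintTierOptimized,  // tier hint
                        kCompilationHintTierBaseline)   // tier hint
    .exportFunc();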
@@ -53,16 +53,16 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
let builder = new WasmModuleBuilder();
builder.addImport('mod', 'pow', kSig_i_ii);
builder.addFunction('upow2', kSig_i_i)
- .addBody([kExprGetLocal, 0,
- kExprGetLocal, 0,
+ .addBody([kExprLocalGet, 0,
+ kExprLocalGet, 0,
kExprCallFunction, 0])
builder.addFunction('upow3', kSig_i_i)
- .addBody([kExprGetLocal, 0,
- kExprGetLocal, 0,
+ .addBody([kExprLocalGet, 0,
+ kExprLocalGet, 0,
kExprCallFunction, 0])
builder.addFunction('upow', kSig_i_i)
- .addBody([kExprGetLocal, 0,
- kExprGetLocal, 0,
+ .addBody([kExprLocalGet, 0,
+ kExprLocalGet, 0,
kExprCallFunction, 0])
.setCompilationHint(kCompilationHintStrategyEager,
kCompilationHintTierBaseline,
@@ -76,8 +76,8 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
print(arguments.callee.name);
let builder = new WasmModuleBuilder();
builder.addFunction('sq', kSig_i_i)
- .addBody([kExprGetLocal, 0,
- kExprGetLocal, 0,
+ .addBody([kExprLocalGet, 0,
+ kExprLocalGet, 0,
kExprI32Mul])
.setCompilationHint(kCompilationHintStrategyEager,
kCompilationHintTierDefault,
@@ -91,8 +91,8 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
print(arguments.callee.name);
let builder = new WasmModuleBuilder();
builder.addFunction('sq', kSig_i_i)
- .addBody([kExprGetLocal, 0,
- kExprGetLocal, 0,
+ .addBody([kExprLocalGet, 0,
+ kExprLocalGet, 0,
kExprI32Mul])
.setCompilationHint(kCompilationHintStrategyEager,
kCompilationHintTierDefault,
@@ -104,8 +104,8 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
print(arguments.callee.name);
let builder = new WasmModuleBuilder();
builder.addFunction('sq', kSig_i_i)
- .addBody([kExprGetLocal, 0,
- kExprGetLocal, 0,
+ .addBody([kExprLocalGet, 0,
+ kExprLocalGet, 0,
kExprI32Mul])
.setCompilationHint(kCompilationHintStrategyEager,
kCompilationHintTierOptimized,
@@ -119,8 +119,8 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
print(arguments.callee.name);
let builder = new WasmModuleBuilder();
builder.addFunction('sq', kSig_i_i)
- .addBody([kExprGetLocal, 0,
- kExprGetLocal, 0,
+ .addBody([kExprLocalGet, 0,
+ kExprLocalGet, 0,
kExprI32Mul])
.setCompilationHint(kCompilationHintStrategyLazyBaselineEagerTopTier,
kCompilationHintTierOptimized,
diff --git a/deps/v8/test/mjsunit/wasm/compilation-hints-ignored.js b/deps/v8/test/mjsunit/wasm/compilation-hints-ignored.js
index 553426db08..4bfc22fb89 100644
--- a/deps/v8/test/mjsunit/wasm/compilation-hints-ignored.js
+++ b/deps/v8/test/mjsunit/wasm/compilation-hints-ignored.js
@@ -9,8 +9,8 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
let builder = new WasmModuleBuilder();
builder.addImport('mod', 'pow', kSig_i_ii);
builder.addFunction('upow', kSig_i_i)
- .addBody([kExprGetLocal, 0,
- kExprGetLocal, 0,
+ .addBody([kExprLocalGet, 0,
+ kExprLocalGet, 0,
kExprCallFunction, 0])
.setCompilationHint(kCompilationHintStrategyDefault,
kCompilationHintTierInterpreter,
diff --git a/deps/v8/test/mjsunit/wasm/compilation-hints-interpreter.js b/deps/v8/test/mjsunit/wasm/compilation-hints-interpreter.js
index f0a46b9ec7..f9f85a7d91 100644
--- a/deps/v8/test/mjsunit/wasm/compilation-hints-interpreter.js
+++ b/deps/v8/test/mjsunit/wasm/compilation-hints-interpreter.js
@@ -19,7 +19,7 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
builder1.addImport("otherModule", "getX", kSig_i_v);
builder1.addFunction("plusX", kSig_i_i)
.addBody([kExprCallFunction, 0,
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
kExprI32Add])
.setCompilationHint(kCompilationHintStrategyLazy,
kCompilationHintTierInterpreter,
@@ -44,7 +44,7 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
builder1.addImport("otherModule", "getX", kSig_i_v);
builder1.addFunction("plusX", kSig_i_i)
.addBody([kExprCallFunction, 0,
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
kExprI32Add])
.setCompilationHint(kCompilationHintStrategyLazy,
kCompilationHintTierInterpreter,
@@ -64,8 +64,8 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
let builder = new WasmModuleBuilder();
let sig_i_ii = builder.addType(kSig_i_ii);
let add = builder.addFunction('add', sig_i_ii)
- .addBody([kExprGetLocal, 0,
- kExprGetLocal, 1,
+ .addBody([kExprLocalGet, 0,
+ kExprLocalGet, 1,
kExprI32Add])
.setCompilationHint(kCompilationHintStrategyLazy,
kCompilationHintTierInterpreter,
@@ -73,9 +73,9 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
builder.appendToTable([add.index]);
builder.addFunction('main', kSig_i_iii)
.addBody([// Call indirect #0 with args <#1, #2>.
- kExprGetLocal, 1,
- kExprGetLocal, 2,
- kExprGetLocal, 0,
+ kExprLocalGet, 1,
+ kExprLocalGet, 2,
+ kExprLocalGet, 0,
kExprCallIndirect, sig_i_ii, kTableZero])
.setCompilationHint(kCompilationHintStrategyLazy,
kCompilationHintTierInterpreter,
@@ -89,8 +89,8 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
let builder = new WasmModuleBuilder();
let sig_i_ii = builder.addType(kSig_i_ii);
let add = builder.addFunction('add', sig_i_ii)
- .addBody([kExprGetLocal, 0,
- kExprGetLocal, 1,
+ .addBody([kExprLocalGet, 0,
+ kExprLocalGet, 1,
kExprI64Add])
.setCompilationHint(kCompilationHintStrategyLazy,
kCompilationHintTierInterpreter,
@@ -98,9 +98,9 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
builder.appendToTable([add.index]);
builder.addFunction('main', kSig_i_iii)
.addBody([// Call indirect #0 with args <#1, #2>.
- kExprGetLocal, 1,
- kExprGetLocal, 2,
- kExprGetLocal, 0,
+ kExprLocalGet, 1,
+ kExprLocalGet, 2,
+ kExprLocalGet, 0,
kExprCallIndirect, sig_i_ii, kTableZero])
.setCompilationHint(kCompilationHintStrategyLazy,
kCompilationHintTierInterpreter,
diff --git a/deps/v8/test/mjsunit/wasm/compilation-hints-lazy-validation.js b/deps/v8/test/mjsunit/wasm/compilation-hints-lazy-validation.js
index e6958cb554..de2bbd1c13 100644
--- a/deps/v8/test/mjsunit/wasm/compilation-hints-lazy-validation.js
+++ b/deps/v8/test/mjsunit/wasm/compilation-hints-lazy-validation.js
@@ -10,7 +10,7 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
print(arguments.callee.name);
let builder = new WasmModuleBuilder();
builder.addFunction('id', kSig_i_i)
- .addBody([kExprGetLocal, 0,
+ .addBody([kExprLocalGet, 0,
kExprI64Const, 1,
kExprI32Mul])
.setCompilationHint(kCompilationHintStrategyLazy,
diff --git a/deps/v8/test/mjsunit/wasm/compilation-hints-streaming-compilation.js b/deps/v8/test/mjsunit/wasm/compilation-hints-streaming-compilation.js
index f48169fa0a..2708da149b 100644
--- a/deps/v8/test/mjsunit/wasm/compilation-hints-streaming-compilation.js
+++ b/deps/v8/test/mjsunit/wasm/compilation-hints-streaming-compilation.js
@@ -11,12 +11,12 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
let builder = new WasmModuleBuilder();
builder.addImport('mod', 'pow', kSig_i_ii);
builder.addFunction('upow', kSig_i_i)
- .addBody([kExprGetLocal, 0,
- kExprGetLocal, 0,
+ .addBody([kExprLocalGet, 0,
+ kExprLocalGet, 0,
kExprCallFunction, 0])
builder.addFunction('upow2', kSig_i_i)
- .addBody([kExprGetLocal, 0,
- kExprGetLocal, 0,
+ .addBody([kExprLocalGet, 0,
+ kExprLocalGet, 0,
kExprCallFunction, 0])
.setCompilationHint(kCompilationHintStrategyLazy,
kCompilationHintTierDefault,
@@ -33,12 +33,12 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
let builder = new WasmModuleBuilder();
builder.addImport('mod', 'pow', kSig_i_ii);
builder.addFunction('upow', kSig_i_i)
- .addBody([kExprGetLocal, 0,
- kExprGetLocal, 0,
+ .addBody([kExprLocalGet, 0,
+ kExprLocalGet, 0,
kExprCallFunction, 0])
builder.addFunction('upow2', kSig_i_i)
- .addBody([kExprGetLocal, 0,
- kExprGetLocal, 0,
+ .addBody([kExprLocalGet, 0,
+ kExprLocalGet, 0,
kExprCallFunction, 0])
.setCompilationHint(kCompilationHintStrategyLazy,
kCompilationHintTierOptimized,
@@ -59,12 +59,12 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
let builder = new WasmModuleBuilder();
builder.addImport('mod', 'pow', kSig_f_ff);
builder.addFunction('upow', kSig_i_i)
- .addBody([kExprGetLocal, 0,
- kExprGetLocal, 0,
+ .addBody([kExprLocalGet, 0,
+ kExprLocalGet, 0,
kExprCallFunction, 0])
builder.addFunction('upow2', kSig_i_i)
- .addBody([kExprGetLocal, 0,
- kExprGetLocal, 0,
+ .addBody([kExprLocalGet, 0,
+ kExprLocalGet, 0,
kExprCallFunction, 0])
.setCompilationHint(kCompilationHintStrategyLazy,
kCompilationHintTierDefault,
@@ -94,8 +94,8 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
let builder = new WasmModuleBuilder();
builder.addImport('mod', 'pow', kSig_i_ii);
builder.addFunction('upow', kSig_i_i)
- .addBody([kExprGetLocal, 0,
- kExprGetLocal, 0,
+ .addBody([kExprLocalGet, 0,
+ kExprLocalGet, 0,
kExprCallFunction, 0])
.setCompilationHint(kCompilationHintStrategyLazy,
kCompilationHintTierDefault,
@@ -112,8 +112,8 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
let builder = new WasmModuleBuilder();
builder.addImport('mod', 'pow', kSig_i_ii);
builder.addFunction('upow', kSig_i_i)
- .addBody([kExprGetLocal, 0,
- kExprGetLocal, 0,
+ .addBody([kExprLocalGet, 0,
+ kExprLocalGet, 0,
kExprCallFunction, 0])
.setCompilationHint(kCompilationHintStrategyLazyBaselineEagerTopTier,
kCompilationHintTierDefault,
diff --git a/deps/v8/test/mjsunit/wasm/compilation-hints-streaming-lazy-validation.js b/deps/v8/test/mjsunit/wasm/compilation-hints-streaming-lazy-validation.js
index 6db4c0e328..f125aeaa7e 100644
--- a/deps/v8/test/mjsunit/wasm/compilation-hints-streaming-lazy-validation.js
+++ b/deps/v8/test/mjsunit/wasm/compilation-hints-streaming-lazy-validation.js
@@ -10,7 +10,7 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
print(arguments.callee.name);
let builder = new WasmModuleBuilder();
builder.addFunction('id', kSig_i_i)
- .addBody([kExprGetLocal, 0,
+ .addBody([kExprLocalGet, 0,
kExprI64Const, 1,
kExprI32Mul])
.setCompilationHint(kCompilationHintStrategyLazy,
diff --git a/deps/v8/test/mjsunit/wasm/compilation-hints-sync-compilation.js b/deps/v8/test/mjsunit/wasm/compilation-hints-sync-compilation.js
index 6c4364b6d3..35f77de157 100644
--- a/deps/v8/test/mjsunit/wasm/compilation-hints-sync-compilation.js
+++ b/deps/v8/test/mjsunit/wasm/compilation-hints-sync-compilation.js
@@ -10,7 +10,7 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
print(arguments.callee.name);
let builder = new WasmModuleBuilder();
builder.addFunction('id', kSig_i_i)
- .addBody([kExprGetLocal, 0])
+ .addBody([kExprLocalGet, 0])
.setCompilationHint(kCompilationHintStrategyLazy,
kCompilationHintTierOptimized,
kCompilationHintTierBaseline)
@@ -25,7 +25,7 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
print(arguments.callee.name);
let builder = new WasmModuleBuilder();
builder.addFunction('id', kSig_i_l)
- .addBody([kExprGetLocal, 0])
+ .addBody([kExprLocalGet, 0])
.setCompilationHint(kCompilationHintStrategyLazy,
kCompilationHintTierDefault,
kCompilationHintTierDefault)
@@ -46,7 +46,7 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
print(arguments.callee.name);
let builder = new WasmModuleBuilder();
builder.addFunction('id', kSig_i_i)
- .addBody([kExprGetLocal, 0])
+ .addBody([kExprLocalGet, 0])
.setCompilationHint(kCompilationHintStrategyLazy,
kCompilationHintTierDefault,
kCompilationHintTierDefault)
@@ -58,7 +58,7 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
print(arguments.callee.name);
let builder = new WasmModuleBuilder();
builder.addFunction('id', kSig_i_i)
- .addBody([kExprGetLocal, 0])
+ .addBody([kExprLocalGet, 0])
.setCompilationHint(kCompilationHintStrategyLazyBaselineEagerTopTier,
kCompilationHintTierDefault,
kCompilationHintTierDefault)
diff --git a/deps/v8/test/mjsunit/wasm/compiled-module-serialization.js b/deps/v8/test/mjsunit/wasm/compiled-module-serialization.js
index c95e4d05b7..859a3095ae 100644
--- a/deps/v8/test/mjsunit/wasm/compiled-module-serialization.js
+++ b/deps/v8/test/mjsunit/wasm/compiled-module-serialization.js
@@ -17,11 +17,11 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
builder.addFunction("main", kSig_i_i)
.addBody([
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
kExprI32LoadMem, 0, 0,
kExprI32Const, 1,
kExprCallIndirect, signature, kTableZero,
- kExprGetLocal,0,
+ kExprLocalGet, 0,
kExprI32LoadMem, 0, 0,
kExprCallFunction, 0,
kExprI32Add
@@ -31,7 +31,7 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
// return mem[i] + some_value();
builder.addFunction("_wrap_writer", signature)
.addBody([
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
kExprCallFunction, 1]);
builder.appendToTable([2, 3]);
@@ -175,13 +175,13 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
builder.addGlobal(kWasmI32, true);
builder.addFunction("read", kSig_i_v)
.addBody([
- kExprGetGlobal, 0])
+ kExprGlobalGet, 0])
.exportFunc();
builder.addFunction("write", kSig_v_i)
.addBody([
- kExprGetLocal, 0,
- kExprSetGlobal, 0])
+ kExprLocalGet, 0,
+ kExprGlobalSet, 0])
.exportFunc();
var wire_bytes = builder.toBuffer();
@@ -213,7 +213,7 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
builder.addFunction("main", kSig_i_ii)
.addBody([
- kExprGetLocal, 0, // --
+ kExprLocalGet, 0, // --
kExprCallIndirect, sig_index1, kTableZero]) // --
.exportAs("main");
@@ -234,7 +234,7 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
builder.addFunction("main", kSig_i_ii)
.addBody([
- kExprGetLocal, 0, // --
+ kExprLocalGet, 0, // --
kExprCallIndirect, sig_index2, kTableZero]) // --
.exportAs("main");
@@ -293,7 +293,7 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
const builder = new WasmModuleBuilder();
builder.addMemory(1, 1);
builder.addFunction('main', kSig_i_i)
- .addBody([kExprGetLocal, 0, kExprI32LoadMem, 0, 0])
+ .addBody([kExprLocalGet, 0, kExprI32LoadMem, 0, 0])
.exportFunc();
const wire_bytes = builder.toBuffer();
const module = new WebAssembly.Module(wire_bytes);
@@ -364,7 +364,7 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
kExprBlock, kWasmStmt,
kExprBlock, kWasmStmt,
kExprBlock, kWasmStmt,
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
kExprBrTable, 6, 0, 1, 2, 3, 4, 5, 6,
kExprEnd,
kExprI32Const, 3,
diff --git a/deps/v8/test/mjsunit/wasm/data-segments.js b/deps/v8/test/mjsunit/wasm/data-segments.js
index 91b6525537..38fd5ee06b 100644
--- a/deps/v8/test/mjsunit/wasm/data-segments.js
+++ b/deps/v8/test/mjsunit/wasm/data-segments.js
@@ -13,7 +13,7 @@ function SimpleDataSegmentTest(offset) {
var builder = new WasmModuleBuilder();
builder.addMemory(1, 1, false);
builder.addFunction("load", kSig_i_i)
- .addBody([kExprGetLocal, 0, kExprI32LoadMem, 0, 0])
+ .addBody([kExprLocalGet, 0, kExprI32LoadMem, 0, 0])
.exportAs("load");
builder.addDataSegment(offset, [9, 9, 9, 9]);
@@ -41,7 +41,7 @@ function GlobalImportedInitTest(pad) {
while (pad-- > 0) builder.addGlobal(kWasmI32); // pad
builder.addFunction("load", kSig_i_i)
- .addBody([kExprGetLocal, 0, kExprI32LoadMem, 0, 0])
+ .addBody([kExprLocalGet, 0, kExprI32LoadMem, 0, 0])
.exportAs("load");
builder.addDataSegment(g.index, [5, 5, 5, 5], true);
diff --git a/deps/v8/test/mjsunit/wasm/divrem-trap.js b/deps/v8/test/mjsunit/wasm/divrem-trap.js
index d9a23693f0..5eadaa44e9 100644
--- a/deps/v8/test/mjsunit/wasm/divrem-trap.js
+++ b/deps/v8/test/mjsunit/wasm/divrem-trap.js
@@ -16,8 +16,8 @@ function makeBinop(opcode) {
builder.addFunction("main", kSig_i_ii)
.addBody([
- kExprGetLocal, 0, // --
- kExprGetLocal, 1, // --
+ kExprLocalGet, 0, // --
+ kExprLocalGet, 1, // --
opcode, // --
])
.exportFunc();
diff --git a/deps/v8/test/mjsunit/wasm/empirical_max_memory.js b/deps/v8/test/mjsunit/wasm/empirical_max_memory.js
index e2ff7ca517..59e15f8464 100644
--- a/deps/v8/test/mjsunit/wasm/empirical_max_memory.js
+++ b/deps/v8/test/mjsunit/wasm/empirical_max_memory.js
@@ -19,14 +19,14 @@ let kMaxMemory = 2 * k1GiB - kPageSize; // TODO(titzer): raise this to 4GiB
builder.addImportedMemory("i", "mem");
builder.addFunction("load", makeSig([kWasmI32], [type]))
.addBody([ // --
- kExprGetLocal, 0, // --
+ kExprLocalGet, 0, // --
load_opcode, 0, 0, // --
]) // --
.exportFunc();
builder.addFunction("store", makeSig([kWasmI32, type], []))
.addBody([ // --
- kExprGetLocal, 0, // --
- kExprGetLocal, 1, // --
+ kExprLocalGet, 0, // --
+ kExprLocalGet, 1, // --
store_opcode, 0, 0, // --
]) // --
.exportFunc();
diff --git a/deps/v8/test/mjsunit/wasm/ensure-wasm-binaries-up-to-date.js b/deps/v8/test/mjsunit/wasm/ensure-wasm-binaries-up-to-date.js
index 9f7a7f71d2..e9e380a4ca 100644
--- a/deps/v8/test/mjsunit/wasm/ensure-wasm-binaries-up-to-date.js
+++ b/deps/v8/test/mjsunit/wasm/ensure-wasm-binaries-up-to-date.js
@@ -14,7 +14,7 @@
var module = new WasmModuleBuilder();
module.addFunction(undefined, kSig_i_i)
- .addBody([kExprGetLocal, 0, kExprI32Const, 1, kExprI32Add])
+ .addBody([kExprLocalGet, 0, kExprI32Const, 1, kExprI32Add])
.exportAs("increment");
var buffer = module.toBuffer(true);
diff --git a/deps/v8/test/mjsunit/wasm/errors.js b/deps/v8/test/mjsunit/wasm/errors.js
index d98452e0e8..4304e54588 100644
--- a/deps/v8/test/mjsunit/wasm/errors.js
+++ b/deps/v8/test/mjsunit/wasm/errors.js
@@ -64,7 +64,7 @@ function assertConversionError(bytes, imports, msg) {
.end().toBuffer(),
f_error('expected 1 elements on the stack for return, found 0 @+24'));
assertCompileError(builder().addFunction('f', kSig_v_v).addBody([
- kExprGetLocal, 0
+ kExprLocalGet, 0
]).end().toBuffer(), f_error('invalid local index: 0 @+24'));
assertCompileError(
builder().addStart(0).toBuffer(),
@@ -182,7 +182,7 @@ function import_error(index, module, func, msg) {
var sig = builder.addType(kSig_i_dd);
builder.addImport("mod", "func", sig);
builder.addFunction("main", sig)
- .addBody([kExprGetLocal, 0, kExprGetLocal, 1, kExprCallFunction, 0])
+ .addBody([kExprLocalGet, 0, kExprLocalGet, 1, kExprCallFunction, 0])
.exportAs("main");
var main = builder.instantiate({
mod: {
diff --git a/deps/v8/test/mjsunit/wasm/exceptions-anyref.js b/deps/v8/test/mjsunit/wasm/exceptions-anyref.js
index a41d69c0af..ccda100f65 100644
--- a/deps/v8/test/mjsunit/wasm/exceptions-anyref.js
+++ b/deps/v8/test/mjsunit/wasm/exceptions-anyref.js
@@ -30,7 +30,7 @@ load("test/mjsunit/wasm/exceptions-utils.js");
builder.addFunction("throw_catch_null", kSig_i_i)
.addBody([
kExprTry, kWasmAnyRef,
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
kExprI32Eqz,
kExprIf, kWasmAnyRef,
kExprRefNull,
@@ -63,7 +63,7 @@ load("test/mjsunit/wasm/exceptions-utils.js");
let except = builder.addException(kSig_v_r);
builder.addFunction("throw_param", kSig_v_r)
.addBody([
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
kExprThrow, except,
]).exportFunc();
let instance = builder.instantiate();
@@ -83,7 +83,7 @@ load("test/mjsunit/wasm/exceptions-utils.js");
builder.addFunction("throw_catch_param", kSig_r_r)
.addBody([
kExprTry, kWasmAnyRef,
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
kExprThrow, except,
kExprCatch,
kExprBrOnExn, 0, except,
@@ -108,7 +108,7 @@ load("test/mjsunit/wasm/exceptions-utils.js");
.addLocals({anyfunc_count: 1})
.addBody([
kExprTry, kWasmAnyFunc,
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
kExprThrow, except,
kExprCatch,
kExprBrOnExn, 0, except,
@@ -128,7 +128,7 @@ load("test/mjsunit/wasm/exceptions-utils.js");
builder.addFunction("throw_catch_param", kSig_e_e)
.addBody([
kExprTry, kWasmExnRef,
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
kExprThrow, except,
kExprCatch,
kExprBrOnExn, 0, except,
diff --git a/deps/v8/test/mjsunit/wasm/exceptions-global.js b/deps/v8/test/mjsunit/wasm/exceptions-global.js
index 4a74dfb010..80af193c3e 100644
--- a/deps/v8/test/mjsunit/wasm/exceptions-global.js
+++ b/deps/v8/test/mjsunit/wasm/exceptions-global.js
@@ -16,7 +16,7 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
let g = builder.addGlobal(kWasmExnRef);
builder.addFunction("push_and_drop_exnref", kSig_v_v)
.addBody([
- kExprGetGlobal, g.index,
+ kExprGlobalGet, g.index,
kExprDrop,
]).exportFunc();
let instance = builder.instantiate();
@@ -30,7 +30,7 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
let builder = new WasmModuleBuilder();
let g = builder.addGlobal(kWasmExnRef);
builder.addFunction('push_and_return_exnref', kSig_e_v)
- .addBody([kExprGetGlobal, g.index])
+ .addBody([kExprGlobalGet, g.index])
.exportFunc();
let instance = builder.instantiate();
@@ -46,10 +46,10 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
builder.addFunction('catch_and_set_exnref', kSig_v_i)
.addBody([
kExprTry, kWasmStmt,
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
kExprThrow, except,
kExprCatch,
- kExprSetGlobal, g.index,
+ kExprGlobalSet, g.index,
kExprEnd,
]).exportFunc();
let instance = builder.instantiate();
@@ -68,10 +68,10 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
builder.addFunction('set_param_exnref', kSig_v_e)
.addBody([
kExprTry, kWasmStmt,
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
kExprRethrow,
kExprCatch,
- kExprSetGlobal, g.index,
+ kExprGlobalSet, g.index,
kExprEnd,
]).exportFunc();
let exception = "my fancy exception";
@@ -88,7 +88,7 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
let g_index = builder.addImportedGlobal("m", "exn", kWasmExnRef);
builder.addFunction('rethrow_exnref', kSig_v_v)
.addBody([
- kExprGetGlobal, g_index,
+ kExprGlobalGet, g_index,
kExprRethrow,
]).exportFunc();
let exception = "my fancy exception";
@@ -104,7 +104,7 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
let g = builder.addGlobal(kWasmExnRef, true).exportAs("exn");
builder.addFunction('rethrow_exnref', kSig_v_v)
.addBody([
- kExprGetGlobal, g.index,
+ kExprGlobalGet, g.index,
kExprRethrow,
]).exportFunc();
let instance = builder.instantiate();
@@ -122,7 +122,7 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
let g_index = builder.addImportedGlobal("m", "exn", kWasmExnRef, true);
builder.addFunction('rethrow_exnref', kSig_v_v)
.addBody([
- kExprGetGlobal, g_index,
+ kExprGlobalGet, g_index,
kExprRethrow,
]).exportFunc();
let exception1 = "my fancy exception";
@@ -143,7 +143,7 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
let g2 = builder.addGlobal(kWasmExnRef);
g2.init_index = g1_index; // Initialize {g2} to equal {g1}.
builder.addFunction('push_and_return_exnref', kSig_e_v)
- .addBody([kExprGetGlobal, g2.index])
+ .addBody([kExprGlobalGet, g2.index])
.exportFunc();
let exception = { x: "my fancy exception" };
let instance = builder.instantiate({ "m": { "exn": exception }});
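
Consolidated post-rename form of the catch-and-stash pattern above (a sketch of the pre-standard exception-handling proposal these tests target: exnref values, gated behind --experimental-wasm-eh):

const builder = new WasmModuleBuilder();
const except = builder.addException(kSig_v_i);
const g = builder.addGlobal(kWasmExnRef);
builder.addFunction('catch_and_set_exnref', kSig_v_i)
    .addBody([
      kExprTry, kWasmStmt,
        kExprLocalGet, 0,
        kExprThrow, except,       // throw, payload = the i32 parameter
      kExprCatch,
        kExprGlobalSet, g.index,  // stash the caught exnref in the global
      kExprEnd,
    ]).exportFunc();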
diff --git a/deps/v8/test/mjsunit/wasm/exceptions-rethrow.js b/deps/v8/test/mjsunit/wasm/exceptions-rethrow.js
index 3b3fa365d5..be609cbf2d 100644
--- a/deps/v8/test/mjsunit/wasm/exceptions-rethrow.js
+++ b/deps/v8/test/mjsunit/wasm/exceptions-rethrow.js
@@ -26,11 +26,11 @@ load("test/mjsunit/wasm/exceptions-utils.js");
kExprTry, kWasmI32,
kExprThrow, except,
kExprCatch,
- kExprSetLocal, 1,
- kExprGetLocal, 0,
+ kExprLocalSet, 1,
+ kExprLocalGet, 0,
kExprI32Eqz,
kExprIf, kWasmStmt,
- kExprGetLocal, 1,
+ kExprLocalGet, 1,
kExprRethrow,
kExprEnd,
kExprI32Const, 23,
@@ -56,23 +56,23 @@ load("test/mjsunit/wasm/exceptions-utils.js");
kExprTry, kWasmI32,
kExprThrow, except2,
kExprCatch,
- kExprSetLocal, 2,
+ kExprLocalSet, 2,
kExprTry, kWasmI32,
kExprThrow, except1,
kExprCatch,
- kExprSetLocal, 1,
- kExprGetLocal, 0,
+ kExprLocalSet, 1,
+ kExprLocalGet, 0,
kExprI32Const, 0,
kExprI32Eq,
kExprIf, kWasmStmt,
- kExprGetLocal, 1,
+ kExprLocalGet, 1,
kExprRethrow,
kExprEnd,
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
kExprI32Const, 1,
kExprI32Eq,
kExprIf, kWasmStmt,
- kExprGetLocal, 2,
+ kExprLocalGet, 2,
kExprRethrow,
kExprEnd,
kExprI32Const, 23,
@@ -98,12 +98,12 @@ load("test/mjsunit/wasm/exceptions-utils.js");
kExprTry, kWasmI32,
kExprThrow, except,
kExprCatch,
- kExprSetLocal, 1,
+ kExprLocalSet, 1,
kExprTry, kWasmI32,
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
kExprI32Eqz,
kExprIf, kWasmStmt,
- kExprGetLocal, 1,
+ kExprLocalGet, 1,
kExprRethrow,
kExprEnd,
kExprI32Const, 42,
diff --git a/deps/v8/test/mjsunit/wasm/exceptions-simd.js b/deps/v8/test/mjsunit/wasm/exceptions-simd.js
index ed16a7f2cc..00fc725f5d 100644
--- a/deps/v8/test/mjsunit/wasm/exceptions-simd.js
+++ b/deps/v8/test/mjsunit/wasm/exceptions-simd.js
@@ -14,7 +14,7 @@ load("test/mjsunit/wasm/exceptions-utils.js");
builder.addFunction("throw_simd", kSig_v_v)
.addLocals({s128_count: 1})
.addBody([
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
kExprThrow, 0,
])
.exportFunc();
@@ -32,7 +32,7 @@ load("test/mjsunit/wasm/exceptions-utils.js");
.addLocals({s128_count: 1})
.addBody([
kExprTry, kWasmS128,
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
kExprThrow, 0,
kExprCatch,
kExprBrOnExn, 0, except,
diff --git a/deps/v8/test/mjsunit/wasm/exceptions.js b/deps/v8/test/mjsunit/wasm/exceptions.js
index 7d53037269..ecaf0d06c9 100644
--- a/deps/v8/test/mjsunit/wasm/exceptions.js
+++ b/deps/v8/test/mjsunit/wasm/exceptions.js
@@ -14,7 +14,7 @@ load("test/mjsunit/wasm/exceptions-utils.js");
builder.addFunction("push_and_drop_exnref", kSig_v_v)
.addLocals({except_count: 1})
.addBody([
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
kExprDrop,
]).exportFunc();
let instance = builder.instantiate();
@@ -29,7 +29,7 @@ load("test/mjsunit/wasm/exceptions-utils.js");
let except = builder.addException(kSig_v_v);
builder.addFunction("throw_if_param_not_zero", kSig_i_i)
.addBody([
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
kExprI32Const, 0,
kExprI32Ne,
kExprIf, kWasmStmt,
@@ -68,7 +68,7 @@ load("test/mjsunit/wasm/exceptions-utils.js");
builder.addFunction("simple_throw_catch_to_0_1", kSig_i_i)
.addBody([
kExprTry, kWasmI32,
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
kExprI32Eqz,
kExprIf, kWasmStmt,
kExprThrow, except,
@@ -99,12 +99,12 @@ load("test/mjsunit/wasm/exceptions-utils.js");
kExprBlock, kWasmStmt,
kExprTry, kWasmStmt,
kExprTry, kWasmStmt,
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
kExprI32Eqz,
kExprIf, kWasmStmt,
kExprThrow, except1,
kExprElse,
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
kExprI32Const, 1,
kExprI32Eq,
kExprIf, kWasmStmt,
@@ -149,12 +149,12 @@ load("test/mjsunit/wasm/exceptions-utils.js");
kExprBlock, kWasmStmt,
kExprBlock, kWasmStmt,
kExprTry, kWasmStmt,
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
kExprI32Eqz,
kExprIf, kWasmStmt,
kExprThrow, except1,
kExprElse,
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
kExprI32Const, 1,
kExprI32Eq,
kExprIf, kWasmStmt,
@@ -194,15 +194,15 @@ load("test/mjsunit/wasm/exceptions-utils.js");
.addBody([
kExprBlock, kWasmI32,
kExprTry, kWasmStmt,
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
kExprI32Eqz,
kExprIf, kWasmStmt,
kExprThrow, except,
kExprEnd,
kExprCatch,
- kExprSetLocal, 1,
+ kExprLocalSet, 1,
kExprI32Const, 23,
- kExprGetLocal, 1,
+ kExprLocalGet, 1,
kExprBrOnExn, 1, except,
kExprRethrow,
kExprEnd,
@@ -239,7 +239,7 @@ load("test/mjsunit/wasm/exceptions-utils.js");
builder.addFunction("throw_catch_param", kSig_i_i)
.addBody([
kExprTry, kWasmI32,
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
kExprThrow, except,
kExprI32Const, 2,
kExprCatch,
@@ -261,7 +261,7 @@ load("test/mjsunit/wasm/exceptions-utils.js");
let except = builder.addException(kSig_v_i);
builder.addFunction("throw_param", kSig_v_i)
.addBody([
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
kExprThrow, except,
]).exportFunc();
let instance = builder.instantiate();
@@ -278,7 +278,7 @@ load("test/mjsunit/wasm/exceptions-utils.js");
builder.addFunction("throw_catch_param", kSig_f_f)
.addBody([
kExprTry, kWasmF32,
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
kExprThrow, except,
kExprF32Const, 0, 0, 0, 0,
kExprCatch,
@@ -299,7 +299,7 @@ load("test/mjsunit/wasm/exceptions-utils.js");
let except = builder.addException(kSig_v_f);
builder.addFunction("throw_param", kSig_v_f)
.addBody([
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
kExprThrow, except,
]).exportFunc();
let instance = builder.instantiate();
@@ -316,18 +316,18 @@ load("test/mjsunit/wasm/exceptions-utils.js");
builder.addFunction("throw_catch_param", kSig_i_i)
.addLocals({i64_count: 1})
.addBody([
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
kExprI64UConvertI32,
- kExprSetLocal, 1,
+ kExprLocalSet, 1,
kExprTry, kWasmI64,
- kExprGetLocal, 1,
+ kExprLocalGet, 1,
kExprThrow, except,
kExprI64Const, 23,
kExprCatch,
kExprBrOnExn, 0, except,
kExprRethrow,
kExprEnd,
- kExprGetLocal, 1,
+ kExprLocalGet, 1,
kExprI64Eq,
]).exportFunc();
let instance = builder.instantiate();
@@ -344,11 +344,11 @@ load("test/mjsunit/wasm/exceptions-utils.js");
let except = builder.addException(kSig_v_l);
builder.addFunction("throw_param", kSig_v_ii)
.addBody([
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
kExprI64UConvertI32,
kExprI64Const, 32,
kExprI64Shl,
- kExprGetLocal, 1,
+ kExprLocalGet, 1,
kExprI64UConvertI32,
kExprI64Ior,
kExprThrow, except,
@@ -367,7 +367,7 @@ load("test/mjsunit/wasm/exceptions-utils.js");
builder.addFunction("throw_catch_param", kSig_d_d)
.addBody([
kExprTry, kWasmF64,
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
kExprThrow, except,
kExprF64Const, 0, 0, 0, 0, 0, 0, 0, 0,
kExprCatch,
@@ -388,7 +388,7 @@ load("test/mjsunit/wasm/exceptions-utils.js");
let except = builder.addException(kSig_v_d);
builder.addFunction("throw_param", kSig_v_f)
.addBody([
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
kExprF64ConvertF32,
kExprThrow, except,
]).exportFunc();
@@ -406,10 +406,10 @@ load("test/mjsunit/wasm/exceptions-utils.js");
builder.addFunction("throw_expr_with_params", kSig_v_ddi)
.addBody([
// p2 * (p0 + min(p0, p1))|0 - 20
- kExprGetLocal, 2,
- kExprGetLocal, 0,
- kExprGetLocal, 0,
- kExprGetLocal, 1,
+ kExprLocalGet, 2,
+ kExprLocalGet, 0,
+ kExprLocalGet, 0,
+ kExprLocalGet, 1,
kExprF64Min,
kExprF64Add,
kExprI32SConvertF64,
@@ -468,7 +468,7 @@ load("test/mjsunit/wasm/exceptions-utils.js");
let kWasmThrowFunction =
builder.addFunction("throw", kSig_v_i)
.addBody([
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
kExprThrow, except,
])
.index;
@@ -478,11 +478,11 @@ load("test/mjsunit/wasm/exceptions-utils.js");
builder.addFunction("same_scope", kSig_i_i)
.addBody([
kExprTry, kWasmI32,
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
kExprI32Const, 0,
kExprI32Ne,
kExprIf, kWasmStmt,
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
kExprThrow, except,
kExprUnreachable,
kExprEnd,
@@ -497,7 +497,7 @@ load("test/mjsunit/wasm/exceptions-utils.js");
builder.addFunction("same_scope_ignore", kSig_i_i)
.addBody([
kExprTry, kWasmI32,
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
kExprThrow, except,
kExprUnreachable,
kExprCatch,
@@ -545,7 +545,7 @@ load("test/mjsunit/wasm/exceptions-utils.js");
kExprTry, kWasmI32,
kExprTry, kWasmI32,
kExprTry, kWasmI32,
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
kExprI32Const, 1,
kExprI32Eq,
kExprIf, kWasmStmt,
@@ -555,9 +555,9 @@ load("test/mjsunit/wasm/exceptions-utils.js");
kExprEnd,
kExprI32Const, 2,
kExprCatch,
- kExprSetLocal, 2,
+ kExprLocalSet, 2,
kExprBlock, kWasmI32,
- kExprGetLocal, 2,
+ kExprLocalGet, 2,
kExprBrOnExn, 0, except,
kExprRethrow,
kExprEnd,
@@ -566,12 +566,12 @@ load("test/mjsunit/wasm/exceptions-utils.js");
kExprThrow, except,
kExprUnreachable,
kExprEnd,
- kExprTeeLocal, 1,
- kExprGetLocal, 0,
+ kExprLocalTee, 1,
+ kExprLocalGet, 0,
kExprI32Const, 2,
kExprI32Eq,
kExprIf, kWasmStmt,
- kExprGetLocal, 1,
+ kExprLocalGet, 1,
kExprI32Const, 8,
kExprI32Ior,
kExprThrow, except,
@@ -580,9 +580,9 @@ load("test/mjsunit/wasm/exceptions-utils.js");
kExprI32Const, 16,
kExprI32Ior,
kExprCatch,
- kExprSetLocal, 2,
+ kExprLocalSet, 2,
kExprBlock, kWasmI32,
- kExprGetLocal, 2,
+ kExprLocalGet, 2,
kExprBrOnExn, 0, except,
kExprRethrow,
kExprEnd,
@@ -591,12 +591,12 @@ load("test/mjsunit/wasm/exceptions-utils.js");
kExprThrow, except,
kExprUnreachable,
kExprEnd,
- kExprTeeLocal, 1,
- kExprGetLocal, 0,
+ kExprLocalTee, 1,
+ kExprLocalGet, 0,
kExprI32Const, 3,
kExprI32Eq,
kExprIf, kWasmStmt,
- kExprGetLocal, 1,
+ kExprLocalGet, 1,
kExprI32Const, /*64=*/ 192, 0,
kExprI32Ior,
kExprThrow, except,
@@ -605,9 +605,9 @@ load("test/mjsunit/wasm/exceptions-utils.js");
kExprI32Const, /*128=*/ 128, 1,
kExprI32Ior,
kExprCatch,
- kExprSetLocal, 2,
+ kExprLocalSet, 2,
kExprBlock, kWasmI32,
- kExprGetLocal, 2,
+ kExprLocalGet, 2,
kExprBrOnExn, 0, except,
kExprRethrow,
kExprEnd,
@@ -621,7 +621,7 @@ load("test/mjsunit/wasm/exceptions-utils.js");
builder.addFunction("from_direct_callee", kSig_i_i)
.addBody([
kExprTry, kWasmI32,
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
kExprCallFunction, kWasmThrowFunction,
kExprUnreachable,
kExprCatch,
@@ -637,8 +637,8 @@ load("test/mjsunit/wasm/exceptions-utils.js");
builder.addFunction("from_indirect_callee", kSig_i_ii)
.addBody([
kExprTry, kWasmI32,
- kExprGetLocal, 0,
- kExprGetLocal, 1,
+ kExprLocalGet, 0,
+ kExprLocalGet, 1,
kExprCallIndirect, sig_v_i, kTableZero,
kExprUnreachable,
kExprCatch,
@@ -653,7 +653,7 @@ load("test/mjsunit/wasm/exceptions-utils.js");
builder.addFunction("i_from_js", kSig_i_i)
.addBody([
kExprTry, kWasmI32,
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
kExprCallFunction, kJSThrowI,
kExprUnreachable,
kExprCatch,
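
A pattern worth calling out in these exception tests: kExprCatch pushes an exnref, and kExprBrOnExn then branches with the exception's unpacked payload when the tag matches, falling through (typically to kExprRethrow) when it does not. Under the exception-handling proposal as implemented at this V8 version, the throw/catch round trip for an i32 payload looks like this sketch:

    kExprTry, kWasmI32,          // try (result i32)
      kExprLocalGet, 0,
      kExprThrow, except,        //   throw, with the i32 param as payload
    kExprCatch,                  // catch: an exnref is now on the stack
      kExprBrOnExn, 0, except,   //   tag matches: unpack i32, br to try's end
      kExprRethrow,              //   tag differs: rethrow the exnref
    kExprEnd,
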
diff --git a/deps/v8/test/mjsunit/wasm/export-mutable-global.js b/deps/v8/test/mjsunit/wasm/export-mutable-global.js
index 1ce918c6cc..90238f3cf9 100644
--- a/deps/v8/test/mjsunit/wasm/export-mutable-global.js
+++ b/deps/v8/test/mjsunit/wasm/export-mutable-global.js
@@ -59,10 +59,10 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
let global_builder = builder.addGlobal(type, true).exportAs(name);
if (value) global_builder.init = value;
builder.addFunction("get " + name, makeSig([], [type]))
- .addBody([kExprGetGlobal, index])
+ .addBody([kExprGlobalGet, index])
.exportFunc();
builder.addFunction("set " + name, makeSig([type], []))
- .addBody([kExprGetLocal, 0, kExprSetGlobal, index])
+ .addBody([kExprLocalGet, 0, kExprGlobalSet, index])
.exportFunc();
}
var instance = builder.instantiate();
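
These tests pair each exported mutable global with get/set wrapper functions, but the global is also reflected to JavaScript as a WebAssembly.Global whose .value accessor reads and writes the same cell. A usage sketch, assuming an instance from a builder like the one above with a mutable i32 global exported as "foo" (the space-containing export names follow the test's own naming):

    const g = instance.exports.foo;              // a WebAssembly.Global object
    g.value = 41;
    console.log(instance.exports["get foo"]());  // 41
    instance.exports["set foo"](42);
    console.log(g.value);                        // 42
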
diff --git a/deps/v8/test/mjsunit/wasm/ffi-error.js b/deps/v8/test/mjsunit/wasm/ffi-error.js
index 5f777ef1cf..217d7f3fd2 100644
--- a/deps/v8/test/mjsunit/wasm/ffi-error.js
+++ b/deps/v8/test/mjsunit/wasm/ffi-error.js
@@ -13,8 +13,8 @@ function CreateDefaultBuilder() {
builder.addImport('mod', 'fun', sig_index);
builder.addFunction('main', sig_index)
.addBody([
- kExprGetLocal, 0, // --
- kExprGetLocal, 1, // --
+ kExprLocalGet, 0, // --
+ kExprLocalGet, 1, // --
kExprCallFunction, 0, // --
]) // --
.exportFunc();
@@ -76,7 +76,7 @@ function checkFailingInstantiation(
let sig_index = kSig_i_dd;
builder.addFunction('exp', kSig_i_i)
.addBody([
- kExprGetLocal,
+ kExprLocalGet,
0,
]) // --
.exportFunc();
@@ -126,8 +126,8 @@ function checkFailingInstantiation(
builder.addMemory(1, 1, true);
builder.addFunction('function_with_invalid_signature', kSig_l_ll)
.addBody([ // --
- kExprGetLocal, 0, // --
- kExprGetLocal, 1, // --
+ kExprLocalGet, 0, // --
+ kExprLocalGet, 1, // --
kExprI64Sub]) // --
.exportFunc()
@@ -144,7 +144,7 @@ function checkFailingInstantiation(
builder.addMemory(1, 1, true);
builder.addFunction('function_with_invalid_signature', kSig_i_l)
- .addBody([kExprGetLocal, 0, kExprI32ConvertI64])
+ .addBody([kExprLocalGet, 0, kExprI32ConvertI64])
.exportFunc();
checkSuccessfulInstantiation(
@@ -163,7 +163,7 @@ function checkFailingInstantiation(
let index = builder.addImport('', 'func', sig_i64_index);
builder.addFunction('main', sig_index)
.addBody([
- kExprGetLocal, 0, kExprI64SConvertI32, kExprCallFunction, index // --
+ kExprLocalGet, 0, kExprI64SConvertI32, kExprCallFunction, index // --
]) // --
.exportFunc();
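
The function_with_invalid_signature cases pin down the JS/wasm boundary rule of this era: a function whose signature mentions i64 can still be instantiated and exported, but calling it from JavaScript throws a TypeError (the BigInt-based i64 integration was still experimental at this point). A sketch of the expectation, assuming an instance built as in the test above:

    assertThrows(
        () => instance.exports.function_with_invalid_signature(0, 0),
        TypeError);
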
diff --git a/deps/v8/test/mjsunit/wasm/ffi.js b/deps/v8/test/mjsunit/wasm/ffi.js
index 72cc57f598..884bd29450 100644
--- a/deps/v8/test/mjsunit/wasm/ffi.js
+++ b/deps/v8/test/mjsunit/wasm/ffi.js
@@ -13,8 +13,8 @@ function testCallFFI(func, check) {
builder.addImport("", "func", sig_index);
builder.addFunction("main", sig_index)
.addBody([
- kExprGetLocal, 0, // --
- kExprGetLocal, 1, // --
+ kExprLocalGet, 0, // --
+ kExprLocalGet, 1, // --
kExprCallFunction, 0 // --
]) // --
.exportFunc();
@@ -76,8 +76,8 @@ testCallFFI(bind_sub, check_FOREIGN_SUB);
builder.addImport("", "func", sig_index);
builder.addFunction("main", sig_index)
.addBody([
- kExprGetLocal, 0, // --
- kExprGetLocal, 1, // --
+ kExprLocalGet, 0, // --
+ kExprLocalGet, 1, // --
kExprCallFunction, 0 // --
]) // --
.exportFunc();
@@ -259,8 +259,8 @@ function testCallBinopVoid(type, func, check) {
builder.addImport("", "func", makeSig_v_xx(type));
builder.addFunction("main", makeSig_r_xx(kWasmI32, type))
.addBody([
- kExprGetLocal, 0, // --
- kExprGetLocal, 1, // --
+ kExprLocalGet, 0, // --
+ kExprLocalGet, 1, // --
kExprCallFunction, 0, // --
kExprI32Const, 33 // --
]) // --
@@ -316,7 +316,7 @@ testCallBinopVoid(kWasmF64);
.addBody([
kExprI32Const, 37, // --
kExprCallFunction, 0, // --
- kExprGetLocal, 0, // --
+ kExprLocalGet, 0, // --
kExprCallFunction, 1 // --
]) // --
.exportFunc()
diff --git a/deps/v8/test/mjsunit/wasm/float-constant-folding.js b/deps/v8/test/mjsunit/wasm/float-constant-folding.js
index 6205da7cfc..332042e7fe 100644
--- a/deps/v8/test/mjsunit/wasm/float-constant-folding.js
+++ b/deps/v8/test/mjsunit/wasm/float-constant-folding.js
@@ -10,7 +10,7 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
print("F32: sNaN - 0 = qNaN");
var builder = new WasmModuleBuilder();
builder.addFunction("F32Sub0", kSig_i_i).addBody(
- [ kExprGetLocal, 0, kExprF32ReinterpretI32, kExprF32Const, 0x00, 0x00,
+ [ kExprLocalGet, 0, kExprF32ReinterpretI32, kExprF32Const, 0x00, 0x00,
0x00, 0x00, // 0.0
kExprF32Sub, kExprI32ReinterpretF32, ]).exportFunc();
var module = builder.instantiate();
@@ -23,7 +23,7 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
var builder = new WasmModuleBuilder();
builder.addFunction("F32Sub0", kSig_i_i).addBody(
[ kExprF32Const, 0x00, 0x00, 0x00, 0x80, // 0.0
- kExprGetLocal, 0, kExprF32ReinterpretI32, kExprF32Sub,
+ kExprLocalGet, 0, kExprF32ReinterpretI32, kExprF32Sub,
kExprI32ReinterpretF32, ]).exportFunc();
var module = builder.instantiate();
// F32Sub0(signalling_NaN)
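
The reinterpret dance in this file exists because the check cannot be written in plain JavaScript: a JS engine may quiet a signalling NaN the moment the f32 is widened to a double, so the subtraction has to happen inside wasm. The bit-level expectation, sketched with typed arrays (illustrative only):

    const buf = new ArrayBuffer(4);
    const asU32 = new Uint32Array(buf);
    asU32[0] = 0x7fa00000;   // a signalling-NaN bit pattern
    // new Float32Array(buf)[0] reads back as NaN, but its payload is no
    // longer observable once widened to a JS double. Inside wasm,
    // f32.sub(sNaN, 0.0) must produce a quiet NaN: the quiet bit
    // 0x00400000 gets set (0x7fa00000 -> 0x7fe00000), and the test checks
    // exactly that rather than letting the optimizer fold the subtraction.
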
diff --git a/deps/v8/test/mjsunit/wasm/futex.js b/deps/v8/test/mjsunit/wasm/futex.js
index 00353d48b0..d5bbf9ff1a 100644
--- a/deps/v8/test/mjsunit/wasm/futex.js
+++ b/deps/v8/test/mjsunit/wasm/futex.js
@@ -14,8 +14,8 @@ function WasmAtomicNotify(memory, offset, index, num) {
builder.addImportedMemory("m", "memory", 0, 20, "shared");
builder.addFunction("main", kSig_i_ii)
.addBody([
- kExprGetLocal, 0,
- kExprGetLocal, 1,
+ kExprLocalGet, 0,
+ kExprLocalGet, 1,
kAtomicPrefix,
kExprAtomicNotify, /* alignment */ 0, offset])
.exportAs("main");
@@ -32,9 +32,9 @@ function WasmI32AtomicWait(memory, offset, index, val, timeout) {
builder.addFunction("main",
makeSig([kWasmI32, kWasmI32, kWasmF64], [kWasmI32]))
.addBody([
- kExprGetLocal, 0,
- kExprGetLocal, 1,
- kExprGetLocal, 2,
+ kExprLocalGet, 0,
+ kExprLocalGet, 1,
+ kExprLocalGet, 2,
kExprI64SConvertF64,
kAtomicPrefix,
kExprI32AtomicWait, /* alignment */ 0, offset])
@@ -56,17 +56,17 @@ function WasmI64AtomicWait(memory, offset, index, val_low,
makeSig([kWasmI32, kWasmI32, kWasmI32, kWasmF64], [kWasmI32]))
.addLocals({i64_count: 1}) // local that is passed as value param to wait
.addBody([
- kExprGetLocal, 1,
+ kExprLocalGet, 1,
kExprI64UConvertI32,
kExprI64Const, 32,
kExprI64Shl,
- kExprGetLocal, 2,
+ kExprLocalGet, 2,
kExprI64UConvertI32,
kExprI64Ior,
- kExprSetLocal, 4, // Store the created I64 value in local
- kExprGetLocal, 0,
- kExprGetLocal, 4,
- kExprGetLocal, 3,
+ kExprLocalSet, 4, // Store the created I64 value in local
+ kExprLocalGet, 0,
+ kExprLocalGet, 4,
+ kExprLocalGet, 3,
kExprI64SConvertF64,
kAtomicPrefix,
kExprI64AtomicWait, /* alignment */ 0, offset])
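
Because the JS harness cannot pass a 64-bit integer directly, WasmI64AtomicWait receives the expected value as two u32 halves, and the wasm body reassembles them: local 1 supplies the high word (converted to u64 and shifted left 32), local 2 the low word, or'd together and stashed in local 4. The same combine in JavaScript, assuming BigInt:

    function combineI64(hi, lo) {
      return (BigInt(hi >>> 0) << 32n) | BigInt(lo >>> 0);
    }
    // combineI64(1, 2) === 0x100000002n
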
diff --git a/deps/v8/test/mjsunit/wasm/gc-buffer.js b/deps/v8/test/mjsunit/wasm/gc-buffer.js
index d8aa9a86d8..c7fdbbc47c 100644
--- a/deps/v8/test/mjsunit/wasm/gc-buffer.js
+++ b/deps/v8/test/mjsunit/wasm/gc-buffer.js
@@ -14,7 +14,7 @@ function run(f) {
builder.addImport("mod", "the_name_of_my_import", kSig_i_i);
builder.addFunction("main", kSig_i_i)
.addBody([
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
kExprCallFunction, 0])
.exportAs("main");
print("module");
diff --git a/deps/v8/test/mjsunit/wasm/gc-frame.js b/deps/v8/test/mjsunit/wasm/gc-frame.js
index de8bdab51e..7d3b19741a 100644
--- a/deps/v8/test/mjsunit/wasm/gc-frame.js
+++ b/deps/v8/test/mjsunit/wasm/gc-frame.js
@@ -16,28 +16,28 @@ function makeFFI(func, t) {
// the different parts of the stack.
builder.addFunction("main", sig_index)
.addBody([
- kExprGetLocal, 0, // --
- kExprGetLocal, 1, // --
- kExprGetLocal, 2, // --
- kExprGetLocal, 3, // --
- kExprGetLocal, 4, // --
- kExprGetLocal, 5, // --
- kExprGetLocal, 6, // --
- kExprGetLocal, 7, // --
- kExprGetLocal, 8, // --
- kExprGetLocal, 9, // --
+ kExprLocalGet, 0, // --
+ kExprLocalGet, 1, // --
+ kExprLocalGet, 2, // --
+ kExprLocalGet, 3, // --
+ kExprLocalGet, 4, // --
+ kExprLocalGet, 5, // --
+ kExprLocalGet, 6, // --
+ kExprLocalGet, 7, // --
+ kExprLocalGet, 8, // --
+ kExprLocalGet, 9, // --
kExprCallFunction, 0, // --
kExprDrop, // --
- kExprGetLocal, 0, // --
- kExprGetLocal, 1, // --
- kExprGetLocal, 2, // --
- kExprGetLocal, 3, // --
- kExprGetLocal, 4, // --
- kExprGetLocal, 5, // --
- kExprGetLocal, 6, // --
- kExprGetLocal, 7, // --
- kExprGetLocal, 8, // --
- kExprGetLocal, 9, // --
+ kExprLocalGet, 0, // --
+ kExprLocalGet, 1, // --
+ kExprLocalGet, 2, // --
+ kExprLocalGet, 3, // --
+ kExprLocalGet, 4, // --
+ kExprLocalGet, 5, // --
+ kExprLocalGet, 6, // --
+ kExprLocalGet, 7, // --
+ kExprLocalGet, 8, // --
+ kExprLocalGet, 9, // --
kExprCallFunction, 0, // --
]) // --
.exportFunc();
@@ -79,7 +79,7 @@ function print10(a, b, c, d, e, f, g, h, i) {
var sig_index = builder.addType(kSig_i_i);
builder.addFunction("main", sig_index)
.addBody([
- kExprGetLocal, 0, // --
+ kExprLocalGet, 0, // --
]) // --
.exportFunc();
diff --git a/deps/v8/test/mjsunit/wasm/gc-memory.js b/deps/v8/test/mjsunit/wasm/gc-memory.js
new file mode 100644
index 0000000000..31e96f8be3
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/gc-memory.js
@@ -0,0 +1,41 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let kPageSize = 65536;
+
+function allocMems(count, initial, maximum) {
+ print(`alloc ${count}`);
+ let result = [];
+ for (let i = 0; i < count; i++) {
+ print(` memory #${i} (initial=${initial}, maximum=${maximum})...`);
+ result.push(new WebAssembly.Memory({initial: initial, maximum: maximum}));
+ }
+ return result;
+}
+
+function check(mems, initial) {
+ for (m of mems) {
+ assertEquals(initial * kPageSize, m.buffer.byteLength);
+ }
+}
+
+function test(count, initial, maximum) {
+ let mems = allocMems(count, initial, maximum);
+ check(mems, initial);
+}
+
+test(1, 1, 1);
+test(1, 1, 2);
+test(1, 1, 3);
+test(1, 1, 4);
+
+test(2, 1, 1);
+test(2, 1, 2);
+test(2, 1, 3);
+test(2, 1, 4);
+
+test(1, 1, undefined);
+test(2, 1, undefined);
+test(3, 1, undefined);
+test(4, 1, undefined);
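
This new test allocates batches of memories, with and without a declared maximum, and only checks buffer.byteLength; on 64-bit builds each memory can reserve a large guard region up front, so repeated allocation leans on GC to release address space between batches (hence the file name). For contrast, one JS-visible property the test does not touch: growing a non-shared memory detaches its old buffer. A sketch:

    const mem = new WebAssembly.Memory({initial: 1, maximum: 4});
    const before = mem.buffer;
    console.log(mem.grow(1));                // 1: previous size in pages
    console.log(before.byteLength);          // 0: old ArrayBuffer is detached
    console.log(mem.buffer.byteLength / 65536);  // 2 pages
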
diff --git a/deps/v8/test/mjsunit/wasm/gc-stress.js b/deps/v8/test/mjsunit/wasm/gc-stress.js
index 8daff420da..55a780d3ec 100644
--- a/deps/v8/test/mjsunit/wasm/gc-stress.js
+++ b/deps/v8/test/mjsunit/wasm/gc-stress.js
@@ -11,7 +11,7 @@ function run(f) {
builder.addImport("m", "f", kSig_i_i);
builder.addFunction("main", kSig_i_i)
.addBody([
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
kExprCallFunction, 0])
.exportAs("main");
diff --git a/deps/v8/test/mjsunit/wasm/globals.js b/deps/v8/test/mjsunit/wasm/globals.js
index b29993a8fc..a72bc118c7 100644
--- a/deps/v8/test/mjsunit/wasm/globals.js
+++ b/deps/v8/test/mjsunit/wasm/globals.js
@@ -15,12 +15,12 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
let sig_index = builder.addType(kSig_i_v);
builder.addFunction("get", sig_index)
.addBody([
- kExprGetGlobal, g.index])
+ kExprGlobalGet, g.index])
.exportAs("get");
builder.addFunction("set", kSig_v_i)
.addBody([
- kExprGetLocal, 0,
- kExprSetGlobal, g.index])
+ kExprLocalGet, 0,
+ kExprGlobalSet, g.index])
.exportAs("set");
let module = new WebAssembly.Module(builder.toBuffer());
@@ -54,7 +54,7 @@ function TestImported(type, val, expected) {
var sig = makeSig([], [type]);
var g = builder.addImportedGlobal("uuu", "foo", type);
builder.addFunction("main", sig)
- .addBody([kExprGetGlobal, g])
+ .addBody([kExprGlobalGet, g])
.exportAs("main");
builder.addGlobal(kWasmI32); // pad
@@ -76,7 +76,7 @@ TestImported(kWasmF64, 77777.88888, 77777.88888);
let sig_index = builder.addType(kSig_i_v);
builder.addFunction("main", sig_index)
.addBody([
- kExprGetGlobal, g])
+ kExprGlobalGet, g])
.exportAs("main");
let module = new WebAssembly.Module(builder.toBuffer());
@@ -152,7 +152,7 @@ function TestGlobalIndexSpace(type, val) {
var sig = makeSig([], [type]);
builder.addFunction("main", sig)
- .addBody([kExprGetGlobal, def.index])
+ .addBody([kExprGlobalGet, def.index])
.exportAs("main");
var instance = builder.instantiate({nnn: {foo: val}});
@@ -173,22 +173,22 @@ TestGlobalIndexSpace(kWasmF64, 12345.678);
let sig_index = builder.addType(kSig_i_i);
builder.addFunction("get", sig_index)
.addBody([
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
kExprIf, kWasmI32,
- kExprGetGlobal, g.index,
+ kExprGlobalGet, g.index,
kExprElse,
- kExprGetGlobal, h.index,
+ kExprGlobalGet, h.index,
kExprEnd])
.exportAs("get");
builder.addFunction("set", kSig_v_ii)
.addBody([
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
kExprIf, kWasmStmt,
- kExprGetLocal, 1,
- kExprSetGlobal, g.index,
+ kExprLocalGet, 1,
+ kExprGlobalSet, g.index,
kExprElse,
- kExprGetLocal, 1,
- kExprSetGlobal, h.index,
+ kExprLocalGet, 1,
+ kExprGlobalSet, h.index,
kExprEnd])
.exportAs("set");
diff --git a/deps/v8/test/mjsunit/wasm/graceful_shutdown.js b/deps/v8/test/mjsunit/wasm/graceful_shutdown.js
index aa50e6cf77..0f55b795c1 100644
--- a/deps/v8/test/mjsunit/wasm/graceful_shutdown.js
+++ b/deps/v8/test/mjsunit/wasm/graceful_shutdown.js
@@ -14,7 +14,7 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
for (i = 0; i < 100; i++) {
builder.addFunction("sub" + i, kSig_i_i)
.addBody([ // --
- kExprGetLocal, 0, // --
+ kExprLocalGet, 0, // --
kExprI32Const, i % 61, // --
kExprI32Sub]) // --
.exportFunc()
diff --git a/deps/v8/test/mjsunit/wasm/graceful_shutdown_during_tierup.js b/deps/v8/test/mjsunit/wasm/graceful_shutdown_during_tierup.js
index 17c6803784..f615602a8e 100644
--- a/deps/v8/test/mjsunit/wasm/graceful_shutdown_during_tierup.js
+++ b/deps/v8/test/mjsunit/wasm/graceful_shutdown_during_tierup.js
@@ -14,7 +14,7 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
for (i = 0; i < 100; i++) {
builder.addFunction("sub" + i, kSig_i_i)
.addBody([ // --
- kExprGetLocal, 0, // --
+ kExprLocalGet, 0, // --
kExprI32Const, i % 61, // --
kExprI32Sub]) // --
.exportFunc()
diff --git a/deps/v8/test/mjsunit/wasm/grow-memory-detaching.js b/deps/v8/test/mjsunit/wasm/grow-memory-detaching.js
index 9ab2334a63..b228449703 100644
--- a/deps/v8/test/mjsunit/wasm/grow-memory-detaching.js
+++ b/deps/v8/test/mjsunit/wasm/grow-memory-detaching.js
@@ -10,7 +10,7 @@ let module = (() => {
let builder = new WasmModuleBuilder();
builder.addMemory(1, undefined, false);
builder.addFunction("grow_memory", kSig_i_i)
- .addBody([kExprGetLocal, 0, kExprMemoryGrow, kMemoryZero])
+ .addBody([kExprLocalGet, 0, kExprMemoryGrow, kMemoryZero])
.exportFunc();
builder.exportMemoryAs("memory");
return builder.toModule();
diff --git a/deps/v8/test/mjsunit/wasm/grow-memory-in-branch.js b/deps/v8/test/mjsunit/wasm/grow-memory-in-branch.js
index 93bb56d83d..8babc66b75 100644
--- a/deps/v8/test/mjsunit/wasm/grow-memory-in-branch.js
+++ b/deps/v8/test/mjsunit/wasm/grow-memory-in-branch.js
@@ -13,12 +13,12 @@ function generateBuilder() {
let builder = new WasmModuleBuilder();
builder.addMemory(initialMemoryPages, maximumMemoryPages, true);
builder.addFunction('load', kSig_i_i)
- .addBody([kExprGetLocal, 0, kExprI32LoadMem, 0, 0])
+ .addBody([kExprLocalGet, 0, kExprI32LoadMem, 0, 0])
.exportFunc();
builder.addFunction('store', kSig_i_ii)
.addBody([
- kExprGetLocal, 0, kExprGetLocal, 1,
- kExprI32StoreMem, 0, 0, kExprGetLocal, 1
+ kExprLocalGet, 0, kExprLocalGet, 1,
+ kExprI32StoreMem, 0, 0, kExprLocalGet, 1
])
.exportFunc();
return builder;
@@ -32,7 +32,7 @@ function generateBuilder() {
let builder = generateBuilder();
builder.addFunction('main', kSig_i_i)
.addBody([
- kExprGetLocal, 0, // get condition parameter
+ kExprLocalGet, 0, // get condition parameter
kExprIf, kWasmStmt, // if it's 1 then enter if
kExprI32Const, deltaPages, // put deltaPages on stack
kExprMemoryGrow, kMemoryZero, // grow memory
@@ -59,7 +59,7 @@ function generateBuilder() {
let builder = generateBuilder();
builder.addFunction('main', kSig_i_i)
.addBody([
- kExprGetLocal, 0, // get condition parameter
+ kExprLocalGet, 0, // get condition parameter
kExprIf, kWasmStmt, // if it's 1 then enter if
kExprI32Const, deltaPages, // put deltaPages on stack
kExprMemoryGrow, kMemoryZero, // grow memory
@@ -94,7 +94,7 @@ function generateBuilder() {
let builder = generateBuilder();
builder.addFunction('main', kSig_i_i)
.addBody([
- kExprGetLocal, 0, // get condition parameter
+ kExprLocalGet, 0, // get condition parameter
kExprIf, kWasmStmt, // if it's 1 then enter if
kExprI32Const, index, // put index on stack
kExprI32Const, newValue, // put the value on stack
@@ -127,7 +127,7 @@ function generateBuilder() {
let builder = generateBuilder();
builder.addFunction('main', kSig_i_i)
.addBody([
- kExprGetLocal, 0, // get condition parameter
+ kExprLocalGet, 0, // get condition parameter
kExprIf, kWasmStmt, // if it's 1 then enter if
kExprI32Const, deltaPagesIf, // put deltaPagesIf on stack
kExprMemoryGrow, kMemoryZero, // grow memory
@@ -159,16 +159,16 @@ function generateBuilder() {
let builder = generateBuilder();
builder.addFunction('main', kSig_i_ii)
.addBody([
- kExprGetLocal, 0, // get condition parameter
+ kExprLocalGet, 0, // get condition parameter
kExprIf, kWasmStmt, // if it's 1 then enter if
kExprI32Const, deltaPages, // put deltaPages on stack
kExprMemoryGrow, kMemoryZero, // grow memory
kExprDrop, // drop the result of grow
- kExprGetLocal, 1, // get index parameter
+ kExprLocalGet, 1, // get index parameter
kExprI32Const, value, // put the value on stack
kExprI32StoreMem, 0, 0, // store
kExprEnd,
- kExprGetLocal, 1, // get index parameter
+ kExprLocalGet, 1, // get index parameter
kExprI32LoadMem, 0, 0 // load from grown memory
])
.exportFunc();
@@ -191,20 +191,20 @@ function generateBuilder() {
let builder = generateBuilder();
builder.addFunction('main', kSig_i_ii)
.addBody([
- kExprGetLocal, 0, // get condition parameter
+ kExprLocalGet, 0, // get condition parameter
kExprIf, kWasmStmt, // if it's 1 then enter if
kExprI32Const, deltaPages, // put deltaPages on stack
kExprMemoryGrow, kMemoryZero, // grow memory
kExprDrop, // drop the result of grow
- kExprGetLocal, 1, // get index parameter
+ kExprLocalGet, 1, // get index parameter
kExprI32Const, value, // put the value on stack
kExprI32StoreMem, 0, 0, // store
kExprElse,
- kExprGetLocal, 1, // get index parameter
+ kExprLocalGet, 1, // get index parameter
kExprI32Const, value, // put the value on stack
kExprI32StoreMem, 0, 0, // store
kExprEnd,
- kExprGetLocal, 1, // get index parameter
+ kExprLocalGet, 1, // get index parameter
kExprI32LoadMem, 0, 0 // load from grown memory
])
.exportFunc();
@@ -226,20 +226,20 @@ function generateBuilder() {
let builder = generateBuilder();
builder.addFunction('main', kSig_i_ii)
.addBody([
- kExprGetLocal, 0, // get condition parameter
+ kExprLocalGet, 0, // get condition parameter
kExprIf, kWasmStmt, // if it's 1 then enter if
- kExprGetLocal, 1, // get index parameter
+ kExprLocalGet, 1, // get index parameter
kExprI32Const, value, // put the value on stack
kExprI32StoreMem, 0, 0, // store
kExprElse,
kExprI32Const, deltaPages, // put deltaPages on stack
kExprMemoryGrow, kMemoryZero, // grow memory
kExprDrop, // drop the result of grow
- kExprGetLocal, 1, // get index parameter
+ kExprLocalGet, 1, // get index parameter
kExprI32Const, value, // put the value on stack
kExprI32StoreMem, 0, 0, // store
kExprEnd,
- kExprGetLocal, 1, // get index parameter
+ kExprLocalGet, 1, // get index parameter
kExprI32LoadMem, 0, 0 // load from grown memory
])
.exportFunc();
@@ -263,23 +263,23 @@ function generateBuilder() {
let builder = generateBuilder();
builder.addFunction('main', kSig_i_ii)
.addBody([
- kExprGetLocal, 0, // get condition parameter
+ kExprLocalGet, 0, // get condition parameter
kExprIf, kWasmStmt, // if it's 1 then enter if
kExprI32Const, deltaPagesIf, // put deltaPagesIf on stack
kExprMemoryGrow, kMemoryZero, // grow memory
kExprDrop, // drop the result of grow
- kExprGetLocal, 1, // get index parameter
+ kExprLocalGet, 1, // get index parameter
kExprI32Const, valueIf, // put valueIf on stack
kExprI32StoreMem, 0, 0, // store
kExprElse,
kExprI32Const, deltaPagesElse, // put deltaPagesElse on stack
kExprMemoryGrow, kMemoryZero, // grow memory
kExprDrop, // drop the result of grow
- kExprGetLocal, 1, // get index parameter
+ kExprLocalGet, 1, // get index parameter
kExprI32Const, valueElse, // put valueElse on stack
kExprI32StoreMem, 0, 0, // store
kExprEnd,
- kExprGetLocal, 1, // get index parameter
+ kExprLocalGet, 1, // get index parameter
kExprI32LoadMem, 0, 0 // load from grown memory
])
.exportFunc();
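
All of these bodies drop the result of kExprMemoryGrow because only the side effect is under test; the instruction itself returns the previous size in pages, or -1 when the request cannot be satisfied (e.g. it exceeds the declared maximum). The JS API surfaces the failure differently, which is easy to trip over:

    const m = new WebAssembly.Memory({initial: 1, maximum: 2});
    console.log(m.grow(1));   // 1: previous size in pages
    // m.grow(5) would throw a RangeError here, whereas the in-module
    // memory.grow instruction returns -1 for the same request.
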
diff --git a/deps/v8/test/mjsunit/wasm/grow-memory-in-call.js b/deps/v8/test/mjsunit/wasm/grow-memory-in-call.js
index 1790f9760f..7940ab5f19 100644
--- a/deps/v8/test/mjsunit/wasm/grow-memory-in-call.js
+++ b/deps/v8/test/mjsunit/wasm/grow-memory-in-call.js
@@ -20,12 +20,12 @@ print('=== grow_memory in direct calls ===');
builder.addMemory(initialMemoryPages, maximumMemoryPages, true);
let kGrowFunction =
builder.addFunction('grow', kSig_i_i)
- .addBody([kExprGetLocal, 0, kExprMemoryGrow, kMemoryZero])
+ .addBody([kExprLocalGet, 0, kExprMemoryGrow, kMemoryZero])
.exportFunc()
.index;
builder.addFunction('main', kSig_i_i)
.addBody([
- kExprGetLocal, 0, // get number of new pages
+ kExprLocalGet, 0, // get number of new pages
kExprCallFunction, kGrowFunction, // call the grow function
kExprDrop, // drop the result of grow
kExprMemorySize, kMemoryZero // get the memory size
@@ -47,19 +47,19 @@ print('=== grow_memory in direct calls ===');
builder.addMemory(initialMemoryPages, maximumMemoryPages, true);
let kGrowFunction =
builder.addFunction('grow', kSig_i_i)
- .addBody([kExprGetLocal, 0, kExprMemoryGrow, kMemoryZero])
+ .addBody([kExprLocalGet, 0, kExprMemoryGrow, kMemoryZero])
.exportFunc()
.index;
builder.addFunction('load', kSig_i_i)
- .addBody([kExprGetLocal, 0, kExprI32LoadMem, 0, 0])
+ .addBody([kExprLocalGet, 0, kExprI32LoadMem, 0, 0])
.exportFunc();
builder.addFunction('main', kSig_v_iii)
.addBody([
- kExprGetLocal, 0, // get number of new pages
+ kExprLocalGet, 0, // get number of new pages
kExprCallFunction, kGrowFunction, // call the grow function
kExprDrop, // drop the result of grow
- kExprGetLocal, 1, // get index
- kExprGetLocal, 2, // get value
+ kExprLocalGet, 1, // get index
+ kExprLocalGet, 2, // get value
kExprI32StoreMem, 0, 0 // store
])
.exportFunc();
@@ -118,24 +118,24 @@ print('=== grow_memory in direct calls ===');
builder.addMemory(initialMemoryPages, maximumMemoryPages, true);
let kGrowFunction =
builder.addFunction('grow', kSig_i_i)
- .addBody([kExprGetLocal, 0, kExprMemoryGrow, kMemoryZero])
+ .addBody([kExprLocalGet, 0, kExprMemoryGrow, kMemoryZero])
.exportFunc()
.index;
builder.addFunction('main', kSig_i_ii)
.addBody([
// clang-format off
kExprLoop, kWasmStmt, // while
- kExprGetLocal, 0, // -
+ kExprLocalGet, 0, // -
kExprIf, kWasmStmt, // if <param0> != 0
// Grow memory.
- kExprGetLocal, 1, // get number of new pages
+ kExprLocalGet, 1, // get number of new pages
kExprCallFunction, kGrowFunction, // call the grow function
kExprDrop, // drop the result of grow
// Decrease loop variable.
- kExprGetLocal, 0, // -
+ kExprLocalGet, 0, // -
kExprI32Const, 1, // -
kExprI32Sub, // -
- kExprSetLocal, 0, // decrease <param0>
+ kExprLocalSet, 0, // decrease <param0>
kExprBr, 1, // continue
kExprEnd, // end if
kExprEnd, // end loop
@@ -161,13 +161,13 @@ print('=== grow_memory in direct calls ===');
builder.addMemory(initialMemoryPages, maximumMemoryPages, true);
builder.addFunction('store', kSig_i_ii)
.addBody([
- kExprGetLocal, 0, kExprGetLocal, 1, kExprI32StoreMem, 0, 0,
- kExprGetLocal, 1
+ kExprLocalGet, 0, kExprLocalGet, 1, kExprI32StoreMem, 0, 0,
+ kExprLocalGet, 1
])
.exportFunc();
let kGrowFunction =
builder.addFunction('grow', kSig_i_i)
- .addBody([kExprGetLocal, 0, kExprMemoryGrow, kMemoryZero])
+ .addBody([kExprLocalGet, 0, kExprMemoryGrow, kMemoryZero])
.exportFunc()
.index;
// parameters: iterations, deltaPages, index
@@ -175,29 +175,29 @@ print('=== grow_memory in direct calls ===');
.addBody([
// clang-format off
kExprLoop, kWasmStmt, // while
- kExprGetLocal, 0, // -
+ kExprLocalGet, 0, // -
kExprIf, kWasmStmt, // if <param0> != 0
// Grow memory.
- kExprGetLocal, 1, // get number of new pages
+ kExprLocalGet, 1, // get number of new pages
kExprCallFunction, kGrowFunction, // call the grow function
kExprDrop, // drop the result of grow
// Increase counter in memory.
- kExprGetLocal, 2, // put index (for store)
- kExprGetLocal, 2, // put index (for load)
+ kExprLocalGet, 2, // put index (for store)
+ kExprLocalGet, 2, // put index (for load)
kExprI32LoadMem, 0, 0, // load from grown memory
kExprI32Const, 1, // -
kExprI32Add, // increase counter
kExprI32StoreMem, 0, 0, // store counter in memory
// Decrease loop variable.
- kExprGetLocal, 0, // -
+ kExprLocalGet, 0, // -
kExprI32Const, 1, // -
kExprI32Sub, // -
- kExprSetLocal, 0, // decrease <param0>
+ kExprLocalSet, 0, // decrease <param0>
kExprBr, 1, // continue
kExprEnd, // end if
kExprEnd, // end loop
// Return the value
- kExprGetLocal, 2, // -
+ kExprLocalGet, 2, // -
kExprI32LoadMem, 0, 0 // load from grown memory
// clang-format on
])
@@ -225,13 +225,13 @@ print('\n=== grow_memory in indirect calls ===');
builder.addMemory(initialMemoryPages, maximumMemoryPages, true);
let kGrowFunction =
builder.addFunction('grow', kSig_i_i)
- .addBody([kExprGetLocal, 0, kExprMemoryGrow, kMemoryZero])
+ .addBody([kExprLocalGet, 0, kExprMemoryGrow, kMemoryZero])
.exportFunc()
.index;
builder.addFunction('main', kSig_i_ii)
.addBody([
- kExprGetLocal, 1, // get number of new pages
- kExprGetLocal, 0, // get index of the function
+ kExprLocalGet, 1, // get number of new pages
+ kExprLocalGet, 0, // get index of the function
kExprCallIndirect, 0, kTableZero, // call the function
kExprDrop, // drop the result of grow
kExprMemorySize, kMemoryZero // get the memory size
@@ -255,21 +255,21 @@ print('\n=== grow_memory in indirect calls ===');
builder.addMemory(initialMemoryPages, maximumMemoryPages, true);
let kGrowFunction =
builder.addFunction('grow', kSig_i_i)
- .addBody([kExprGetLocal, 0, kExprMemoryGrow, kMemoryZero])
+ .addBody([kExprLocalGet, 0, kExprMemoryGrow, kMemoryZero])
.exportFunc()
.index;
builder.addFunction('load', kSig_i_i)
- .addBody([kExprGetLocal, 0, kExprI32LoadMem, 0, 0])
+ .addBody([kExprLocalGet, 0, kExprI32LoadMem, 0, 0])
.exportFunc();
let sig = makeSig([kWasmI32, kWasmI32, kWasmI32, kWasmI32], []);
builder.addFunction('main', sig)
.addBody([
- kExprGetLocal, 1, // get number of new pages
- kExprGetLocal, 0, // get index of the function
+ kExprLocalGet, 1, // get number of new pages
+ kExprLocalGet, 0, // get index of the function
kExprCallIndirect, 0, kTableZero, // call the function
kExprDrop, // drop the result of grow
- kExprGetLocal, 2, // get index
- kExprGetLocal, 3, // get value
+ kExprLocalGet, 2, // get index
+ kExprLocalGet, 3, // get value
kExprI32StoreMem, 0, 0 // store
])
.exportFunc();
@@ -311,7 +311,7 @@ print('\n=== grow_memory in indirect calls ===');
kExprI32Const, index, // put index on stack
kExprI32Const, oldValue, // put old value on stack
kExprI32StoreMem, 0, 0, // store
- kExprGetLocal, 0, // get index of the function
+ kExprLocalGet, 0, // get index of the function
kExprCallIndirect, 0, kTableZero, // call the function
kExprI32Const, index, // put index on stack
kExprI32LoadMem, 0, 0 // load from grown memory
@@ -332,25 +332,25 @@ print('\n=== grow_memory in indirect calls ===');
builder.addMemory(initialMemoryPages, maximumMemoryPages, true);
let kGrowFunction =
builder.addFunction('grow', kSig_i_i)
- .addBody([kExprGetLocal, 0, kExprMemoryGrow, kMemoryZero])
+ .addBody([kExprLocalGet, 0, kExprMemoryGrow, kMemoryZero])
.exportFunc()
.index;
builder.addFunction('main', kSig_i_iii)
.addBody([
// clang-format off
kExprLoop, kWasmStmt, // while
- kExprGetLocal, 1, // -
+ kExprLocalGet, 1, // -
kExprIf, kWasmStmt, // if <param1> != 0
// Grow memory.
- kExprGetLocal, 2, // get number of new pages
- kExprGetLocal, 0, // get index of the function
+ kExprLocalGet, 2, // get number of new pages
+ kExprLocalGet, 0, // get index of the function
kExprCallIndirect, 0, kTableZero, // call the function
kExprDrop, // drop the result of grow
// Decrease loop variable.
- kExprGetLocal, 1, // -
+ kExprLocalGet, 1, // -
kExprI32Const, 1, // -
kExprI32Sub, // -
- kExprSetLocal, 1, // decrease <param1>
+ kExprLocalSet, 1, // decrease <param1>
kExprBr, 1, // continue
kExprEnd, // end if
kExprEnd, // end loop
@@ -378,13 +378,13 @@ print('\n=== grow_memory in indirect calls ===');
builder.addMemory(initialMemoryPages, maximumMemoryPages, true);
let kGrowFunction =
builder.addFunction('grow', kSig_i_i)
- .addBody([kExprGetLocal, 0, kExprMemoryGrow, kMemoryZero])
+ .addBody([kExprLocalGet, 0, kExprMemoryGrow, kMemoryZero])
.exportFunc()
.index;
builder.addFunction('store', kSig_i_ii)
.addBody([
- kExprGetLocal, 0, kExprGetLocal, 1, kExprI32StoreMem, 0, 0,
- kExprGetLocal, 1
+ kExprLocalGet, 0, kExprLocalGet, 1, kExprI32StoreMem, 0, 0,
+ kExprLocalGet, 1
])
.exportFunc();
builder
@@ -394,30 +394,30 @@ print('\n=== grow_memory in indirect calls ===');
.addBody([
// clang-format off
kExprLoop, kWasmStmt, // while
- kExprGetLocal, 1, // -
+ kExprLocalGet, 1, // -
kExprIf, kWasmStmt, // if <param1> != 0
// Grow memory.
- kExprGetLocal, 2, // get number of new pages
- kExprGetLocal, 0, // get index of the function
+ kExprLocalGet, 2, // get number of new pages
+ kExprLocalGet, 0, // get index of the function
kExprCallIndirect, 0, kTableZero, // call the function
kExprDrop, // drop the result of grow
// Increase counter in memory.
- kExprGetLocal, 3, // put index (for store)
- kExprGetLocal, 3, // put index (for load)
+ kExprLocalGet, 3, // put index (for store)
+ kExprLocalGet, 3, // put index (for load)
kExprI32LoadMem, 0, 0, // load from grown memory
kExprI32Const, 1, // -
kExprI32Add, // increase counter
kExprI32StoreMem, 0, 0, // store counter in memory
// Decrease loop variable.
- kExprGetLocal, 1, // -
+ kExprLocalGet, 1, // -
kExprI32Const, 1, // -
kExprI32Sub, // -
- kExprSetLocal, 1, // decrease <param1>
+ kExprLocalSet, 1, // decrease <param1>
kExprBr, 1, // continue
kExprEnd, // end if
kExprEnd, // end loop
// Return the value
- kExprGetLocal, 3, // -
+ kExprLocalGet, 3, // -
kExprI32LoadMem, 0, 0 // load from grown memory
// clang-format on
])
diff --git a/deps/v8/test/mjsunit/wasm/grow-memory-in-loop.js b/deps/v8/test/mjsunit/wasm/grow-memory-in-loop.js
index ed04e23c63..143b555b17 100644
--- a/deps/v8/test/mjsunit/wasm/grow-memory-in-loop.js
+++ b/deps/v8/test/mjsunit/wasm/grow-memory-in-loop.js
@@ -14,8 +14,8 @@ function generateBuilder() {
builder.addMemory(initialPages, maximumPages, true);
builder.addFunction('store', kSig_i_ii)
.addBody([
- kExprGetLocal, 0, kExprGetLocal, 1, kExprI32StoreMem, 0, 0,
- kExprGetLocal, 1
+ kExprLocalGet, 0, kExprLocalGet, 1, kExprI32StoreMem, 0, 0,
+ kExprLocalGet, 1
])
.exportFunc();
return builder;
@@ -31,17 +31,17 @@ function generateBuilder() {
.addBody([
// clang-format off
kExprLoop, kWasmStmt, // while
- kExprGetLocal, 0, // -
+ kExprLocalGet, 0, // -
kExprIf, kWasmStmt, // if <param0> != 0
// Grow memory.
kExprI32Const, deltaPages, // -
kExprMemoryGrow, kMemoryZero, // grow memory
kExprDrop, // drop the result of grow
// Decrease loop variable.
- kExprGetLocal, 0, // -
+ kExprLocalGet, 0, // -
kExprI32Const, 1, // -
kExprI32Sub, // -
- kExprSetLocal, 0, // decrease <param0>
+ kExprLocalSet, 0, // decrease <param0>
kExprBr, 1, // continue
kExprEnd, // end if
kExprEnd, // end loop
@@ -83,17 +83,17 @@ function generateBuilder() {
kExprMemoryGrow, kMemoryZero, // grow memory
kExprDrop, // drop the result of grow
kExprLoop, kWasmStmt, // while
- kExprGetLocal, 0, // -
+ kExprLocalGet, 0, // -
kExprIf, kWasmStmt, // if <param0> != 0
// Grow memory.
kExprI32Const, deltaPagesIn, // -
kExprMemoryGrow, kMemoryZero, // grow memory
kExprDrop, // drop the result of grow
// Decrease loop variable.
- kExprGetLocal, 0, // -
+ kExprLocalGet, 0, // -
kExprI32Const, 1, // -
kExprI32Sub, // -
- kExprSetLocal, 0, // decrease <param0>
+ kExprLocalSet, 0, // decrease <param0>
kExprBr, 1, // continue
kExprEnd, // end if
kExprEnd, // end loop
@@ -132,29 +132,29 @@ function generateBuilder() {
.addBody([
// clang-format off
kExprLoop, kWasmStmt, // while
- kExprGetLocal, 0, // -
+ kExprLocalGet, 0, // -
kExprIf, kWasmStmt, // if <param0> != 0
// Grow memory.
kExprI32Const, deltaPages, // -
kExprMemoryGrow, kMemoryZero, // grow memory
kExprDrop, // drop the result of grow
// Increase counter in memory.
- kExprGetLocal, 1, // put index (for store)
- kExprGetLocal, 1, // put index (for load)
+ kExprLocalGet, 1, // put index (for store)
+ kExprLocalGet, 1, // put index (for load)
kExprI32LoadMem, 0, 0, // load from grown memory
kExprI32Const, 1, // -
kExprI32Add, // increase counter
kExprI32StoreMem, 0, 0, // store counter in memory
// Decrease loop variable.
- kExprGetLocal, 0, // -
+ kExprLocalGet, 0, // -
kExprI32Const, 1, // -
kExprI32Sub, // -
- kExprSetLocal, 0, // decrease <param0>
+ kExprLocalSet, 0, // decrease <param0>
kExprBr, 1, // continue
kExprEnd, // end if
kExprEnd, // end loop
// Increase counter in memory.
- kExprGetLocal, 1, // -
+ kExprLocalGet, 1, // -
kExprI32LoadMem, 0, 0 // load from grown memory
// clang-format on
])
@@ -195,37 +195,37 @@ function generateBuilder() {
kExprMemoryGrow, kMemoryZero, // grow memory
kExprDrop, // drop the result of grow
// Increase counter in memory.
- kExprGetLocal, 1, // put index (for store)
- kExprGetLocal, 1, // put index (for load)
+ kExprLocalGet, 1, // put index (for store)
+ kExprLocalGet, 1, // put index (for load)
kExprI32LoadMem, 0, 0, // load from grown memory
kExprI32Const, 1, // -
kExprI32Add, // increase value on stack
kExprI32StoreMem, 0, 0, // store new value
// Start loop.
kExprLoop, kWasmStmt, // while
- kExprGetLocal, 0, // -
+ kExprLocalGet, 0, // -
kExprIf, kWasmStmt, // if <param0> != 0
// Grow memory.
kExprI32Const, deltaPagesIn, // -
kExprMemoryGrow, kMemoryZero, // grow memory
kExprDrop, // drop the result of grow
// Increase counter in memory.
- kExprGetLocal, 1, // put index (for store)
- kExprGetLocal, 1, // put index (for load)
+ kExprLocalGet, 1, // put index (for store)
+ kExprLocalGet, 1, // put index (for load)
kExprI32LoadMem, 0, 0, // load from grown memory
kExprI32Const, 1, // -
kExprI32Add, // increase value on stack
kExprI32StoreMem, 0, 0, // store new value
// Decrease loop variable.
- kExprGetLocal, 0, // -
+ kExprLocalGet, 0, // -
kExprI32Const, 1, // -
kExprI32Sub, // -
- kExprSetLocal, 0, // decrease <param0>
+ kExprLocalSet, 0, // decrease <param0>
kExprBr, 1, // continue
kExprEnd, // end if
kExprEnd, // end loop
// Return counter from memory.
- kExprGetLocal, 1, // put index on stack
+ kExprLocalGet, 1, // put index on stack
kExprI32LoadMem, 0, 0 // load from grown memory
// clang-format on
])
diff --git a/deps/v8/test/mjsunit/wasm/grow-memory.js b/deps/v8/test/mjsunit/wasm/grow-memory.js
index 0e5618a2b1..6d0e7e5c5f 100644
--- a/deps/v8/test/mjsunit/wasm/grow-memory.js
+++ b/deps/v8/test/mjsunit/wasm/grow-memory.js
@@ -10,28 +10,28 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
function genMemoryGrowBuilder() {
var builder = new WasmModuleBuilder();
builder.addFunction("grow_memory", kSig_i_i)
- .addBody([kExprGetLocal, 0, kExprMemoryGrow, kMemoryZero])
+ .addBody([kExprLocalGet, 0, kExprMemoryGrow, kMemoryZero])
.exportFunc();
builder.addFunction("load", kSig_i_i)
- .addBody([kExprGetLocal, 0, kExprI32LoadMem, 0, 0])
+ .addBody([kExprLocalGet, 0, kExprI32LoadMem, 0, 0])
.exportFunc();
builder.addFunction("store", kSig_i_ii)
- .addBody([kExprGetLocal, 0, kExprGetLocal, 1, kExprI32StoreMem, 0, 0,
- kExprGetLocal, 1])
+ .addBody([kExprLocalGet, 0, kExprLocalGet, 1, kExprI32StoreMem, 0, 0,
+ kExprLocalGet, 1])
.exportFunc();
builder.addFunction("load16", kSig_i_i)
- .addBody([kExprGetLocal, 0, kExprI32LoadMem16U, 0, 0])
+ .addBody([kExprLocalGet, 0, kExprI32LoadMem16U, 0, 0])
.exportFunc();
builder.addFunction("store16", kSig_i_ii)
- .addBody([kExprGetLocal, 0, kExprGetLocal, 1, kExprI32StoreMem16, 0, 0,
- kExprGetLocal, 1])
+ .addBody([kExprLocalGet, 0, kExprLocalGet, 1, kExprI32StoreMem16, 0, 0,
+ kExprLocalGet, 1])
.exportFunc();
builder.addFunction("load8", kSig_i_i)
- .addBody([kExprGetLocal, 0, kExprI32LoadMem8U, 0, 0])
+ .addBody([kExprLocalGet, 0, kExprI32LoadMem8U, 0, 0])
.exportFunc();
builder.addFunction("store8", kSig_i_ii)
- .addBody([kExprGetLocal, 0, kExprGetLocal, 1, kExprI32StoreMem8, 0, 0,
- kExprGetLocal, 1])
+ .addBody([kExprLocalGet, 0, kExprLocalGet, 1, kExprI32StoreMem8, 0, 0,
+ kExprLocalGet, 1])
.exportFunc();
return builder;
}
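
Every load and store in this file carries two immediates, e.g. kExprI32LoadMem, 0, 0: the memarg, i.e. the alignment hint (as log2 of the access width) followed by the static byte offset, both LEB128-encoded. The variants exercised here (I32LoadMem8U, I32StoreMem16, and so on) differ only in access width. A sketch of a non-trivial memarg under the same helpers:

    builder.addFunction("load_at_16", kSig_i_i)
        .addBody([
          kExprLocalGet, 0,        // base address
          kExprI32LoadMem, 2, 16,  // alignment 2 (2^2 = 4 bytes), offset 16
        ])
        .exportFunc();
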
diff --git a/deps/v8/test/mjsunit/wasm/grow-shared-memory.js b/deps/v8/test/mjsunit/wasm/grow-shared-memory.js
index bbd180b39c..3c9d72b499 100644
--- a/deps/v8/test/mjsunit/wasm/grow-shared-memory.js
+++ b/deps/v8/test/mjsunit/wasm/grow-shared-memory.js
@@ -2,9 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// TODO(v8:8832): Enable --stress-opt on these tests
-// Flags: --wasm-grow-shared-memory
-// Flags: --experimental-wasm-threads --no-stress-opt
+// Flags: --wasm-grow-shared-memory --experimental-wasm-threads
load("test/mjsunit/wasm/wasm-module-builder.js");
@@ -137,7 +135,7 @@ let workerHelpers = assertTrue.toString() + assertIsWasmSharedMemory.toString();
var builder = new WasmModuleBuilder();
builder.addImportedMemory("m", "memory", 5, 100, "shared");
builder.addFunction("grow", kSig_i_i)
- .addBody([kExprGetLocal, 0, kExprMemoryGrow, kMemoryZero])
+ .addBody([kExprLocalGet, 0, kExprMemoryGrow, kMemoryZero])
.exportFunc();
var module = new WebAssembly.Module(builder.toBuffer());
let obj = {memory: memory, module: module};
@@ -169,7 +167,7 @@ let workerHelpers = assertTrue.toString() + assertIsWasmSharedMemory.toString();
var builder = new WasmModuleBuilder();
builder.addImportedMemory("m", "memory", 5, 100, "shared");
builder.addFunction("grow", kSig_i_i)
- .addBody([kExprGetLocal, 0, kExprMemoryGrow, kMemoryZero])
+ .addBody([kExprLocalGet, 0, kExprMemoryGrow, kMemoryZero])
.exportFunc();
var module = new WebAssembly.Module(builder.toBuffer());
let obj = {memory: memory, module: module};
@@ -200,10 +198,10 @@ let workerHelpers = assertTrue.toString() + assertIsWasmSharedMemory.toString();
var builder = new WasmModuleBuilder();
builder.addImportedMemory("m", "memory", 5, 100, "shared");
builder.addFunction("grow_twice", kSig_i_i)
- .addBody([kExprGetLocal, 0,
+ .addBody([kExprLocalGet, 0,
kExprMemoryGrow, kMemoryZero,
kExprDrop,
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
kExprMemoryGrow, kMemoryZero])
.exportFunc();
var module = new WebAssembly.Module(builder.toBuffer());
@@ -239,10 +237,10 @@ let workerHelpers = assertTrue.toString() + assertIsWasmSharedMemory.toString();
var builder = new WasmModuleBuilder();
builder.addImportedMemory("m", "memory", 5, 100, "shared");
builder.addFunction("grow_and_size", kSig_i_i)
- .addBody([kExprGetLocal, 0,
+ .addBody([kExprLocalGet, 0,
kExprMemoryGrow, kMemoryZero,
kExprDrop,
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
kExprMemoryGrow, kMemoryZero,
kExprDrop,
kExprMemorySize, kMemoryZero])
@@ -298,13 +296,13 @@ let workerHelpers = assertTrue.toString() + assertIsWasmSharedMemory.toString();
var builder = new WasmModuleBuilder();
builder.addImportedMemory("m", "memory", 5, 100, "shared");
builder.addFunction("grow", kSig_i_i)
- .addBody([kExprGetLocal, 0, kExprMemoryGrow, kMemoryZero])
+ .addBody([kExprLocalGet, 0, kExprMemoryGrow, kMemoryZero])
.exportFunc();
builder.addFunction("atomic_load", kSig_i_i)
- .addBody([kExprGetLocal, 0, kAtomicPrefix, kExprI32AtomicLoad, 2, 0])
+ .addBody([kExprLocalGet, 0, kAtomicPrefix, kExprI32AtomicLoad, 2, 0])
.exportFunc();
builder.addFunction("atomic_store", kSig_v_ii)
- .addBody([kExprGetLocal, 0, kExprGetLocal, 1,
+ .addBody([kExprLocalGet, 0, kExprLocalGet, 1,
kAtomicPrefix, kExprI32AtomicStore, 2, 0])
.exportFunc();
var module = new WebAssembly.Module(builder.toBuffer());
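
Shared memories behave differently from the detach case noted earlier: growing never invalidates an existing buffer. The old SharedArrayBuffer stays usable at its old length, and memory.buffer starts returning a new, longer SharedArrayBuffer. A sketch of the JS-visible semantics (behind --experimental-wasm-threads at this version):

    const shared = new WebAssembly.Memory(
        {initial: 1, maximum: 4, shared: true});
    const old = shared.buffer;
    shared.grow(1);
    console.log(old.byteLength / 65536);            // still 1
    console.log(shared.buffer.byteLength / 65536);  // 2
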
diff --git a/deps/v8/test/mjsunit/wasm/huge-memory.js b/deps/v8/test/mjsunit/wasm/huge-memory.js
index bf037b0c92..6c145d70cb 100644
--- a/deps/v8/test/mjsunit/wasm/huge-memory.js
+++ b/deps/v8/test/mjsunit/wasm/huge-memory.js
@@ -16,8 +16,8 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
builder.addMemory(num_pages, num_pages, true);
builder.addFunction("geti", kSig_i_ii)
.addBody([
- kExprGetLocal, 0,
- kExprGetLocal, 1,
+ kExprLocalGet, 0,
+ kExprLocalGet, 1,
kExprI32Mul,
kExprI32LoadMem, 0, 0,
])
diff --git a/deps/v8/test/mjsunit/wasm/import-function.js b/deps/v8/test/mjsunit/wasm/import-function.js
index ec187aff4a..6bbad8a222 100644
--- a/deps/v8/test/mjsunit/wasm/import-function.js
+++ b/deps/v8/test/mjsunit/wasm/import-function.js
@@ -13,8 +13,8 @@ function testCallImport(func, check) {
builder.addImport("q", "func", sig_index);
builder.addFunction("main", sig_index)
.addBody([
- kExprGetLocal, 0, // --
- kExprGetLocal, 1, // --
+ kExprLocalGet, 0, // --
+ kExprLocalGet, 1, // --
kExprCallFunction, 0]) // --
.exportAs("main");
@@ -186,8 +186,8 @@ function testCallBinopVoid(type, func, check) {
builder.addImport("q", "func", makeSig_v_xx(type));
builder.addFunction("main", makeSig_r_xx(kWasmI32, type))
.addBody([
- kExprGetLocal, 0, // --
- kExprGetLocal, 1, // --
+ kExprLocalGet, 0, // --
+ kExprLocalGet, 1, // --
kExprCallFunction, 0, // --
kExprI32Const, 39, // --
])
@@ -244,7 +244,7 @@ function testCallPrint() {
.addBody([
kExprI32Const, 27, // --
kExprCallFunction, 0, // --
- kExprGetLocal, 0, // --
+ kExprLocalGet, 0, // --
kExprCallFunction, 1 // --
])
.exportFunc();
diff --git a/deps/v8/test/mjsunit/wasm/import-memory.js b/deps/v8/test/mjsunit/wasm/import-memory.js
index fc688dc7ce..08100efabd 100644
--- a/deps/v8/test/mjsunit/wasm/import-memory.js
+++ b/deps/v8/test/mjsunit/wasm/import-memory.js
@@ -51,7 +51,7 @@ var kV8MaxPages = 32767;
builder.exportMemoryAs("exported_mem");
builder.addFunction("foo", kSig_i_i)
.addBody([
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
kExprI32LoadMem, 0, 0])
.exportAs("foo");
i1 = builder.instantiate();
@@ -63,7 +63,7 @@ var kV8MaxPages = 32767;
builder.addImportedMemory("fil", "imported_mem");
builder.addFunction("bar", kSig_i_i)
.addBody([
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
kExprI32LoadMem, 0, 0])
.exportAs("bar");
i2 = builder.instantiate({fil: {imported_mem: i1.exports.exported_mem}});
@@ -89,11 +89,11 @@ var kV8MaxPages = 32767;
let builder = new WasmModuleBuilder();
builder.addImportedMemory("gaz", "mine");
builder.addFunction("load", kSig_i_i)
- .addBody([kExprGetLocal, 0, kExprI32LoadMem, 0, 0])
+ .addBody([kExprLocalGet, 0, kExprI32LoadMem, 0, 0])
.exportFunc();
builder.addFunction("store", kSig_i_ii)
- .addBody([kExprGetLocal, 0, kExprGetLocal, 1, kExprI32StoreMem, 0, 0,
- kExprGetLocal, 1])
+ .addBody([kExprLocalGet, 0, kExprLocalGet, 1, kExprI32StoreMem, 0, 0,
+ kExprLocalGet, 1])
.exportFunc();
var offset;
let instance = builder.instantiate({gaz: {mine: memory}});
@@ -119,11 +119,11 @@ var kV8MaxPages = 32767;
let builder = new WasmModuleBuilder();
builder.addImportedMemory("mine", "dog", 0, 20);
builder.addFunction("load", kSig_i_i)
- .addBody([kExprGetLocal, 0, kExprI32LoadMem, 0, 0])
+ .addBody([kExprLocalGet, 0, kExprI32LoadMem, 0, 0])
.exportFunc();
builder.addFunction("store", kSig_i_ii)
- .addBody([kExprGetLocal, 0, kExprGetLocal, 1, kExprI32StoreMem, 0, 0,
- kExprGetLocal, 1])
+ .addBody([kExprLocalGet, 0, kExprLocalGet, 1, kExprI32StoreMem, 0, 0,
+ kExprLocalGet, 1])
.exportFunc();
var offset;
let instance = builder.instantiate({mine: {dog: memory}});
@@ -157,11 +157,11 @@ var kV8MaxPages = 32767;
let builder = new WasmModuleBuilder();
builder.addImportedMemory("mine", "fro");
builder.addFunction("load", kSig_i_i)
- .addBody([kExprGetLocal, 0, kExprI32LoadMem, 0, 0])
+ .addBody([kExprLocalGet, 0, kExprI32LoadMem, 0, 0])
.exportFunc();
builder.addFunction("store", kSig_i_ii)
- .addBody([kExprGetLocal, 0, kExprGetLocal, 1, kExprI32StoreMem, 0, 0,
- kExprGetLocal, 1])
+ .addBody([kExprLocalGet, 0, kExprLocalGet, 1, kExprI32StoreMem, 0, 0,
+ kExprLocalGet, 1])
.exportFunc();
var offset;
let instance = builder.instantiate({mine: {fro: memory}});
@@ -187,7 +187,7 @@ var kV8MaxPages = 32767;
assertEquals(2*kPageSize, memory.buffer.byteLength);
let builder = new WasmModuleBuilder();
builder.addFunction("grow", kSig_i_i)
- .addBody([kExprGetLocal, 0, kExprMemoryGrow, kMemoryZero])
+ .addBody([kExprLocalGet, 0, kExprMemoryGrow, kMemoryZero])
.exportFunc();
builder.addImportedMemory("cat", "mine");
let instance = builder.instantiate({cat: {mine: memory}});
@@ -217,7 +217,7 @@ var kV8MaxPages = 32767;
.addBody([kExprMemorySize, kMemoryZero])
.exportFunc();
builder.addFunction("grow", kSig_i_i)
- .addBody([kExprGetLocal, 0, kExprMemoryGrow, kMemoryZero])
+ .addBody([kExprLocalGet, 0, kExprMemoryGrow, kMemoryZero])
.exportFunc();
instance = builder.instantiate({fur: {
imported_mem: exp_instance.exports.exported_mem}});
@@ -238,7 +238,7 @@ var kV8MaxPages = 32767;
.addBody([kExprMemorySize, kMemoryZero])
.exportAs("mem_size");
builder.addFunction("grow", kSig_i_i)
- .addBody([kExprGetLocal, 0, kExprMemoryGrow, kMemoryZero])
+ .addBody([kExprLocalGet, 0, kExprMemoryGrow, kMemoryZero])
.exportFunc();
var module = new WebAssembly.Module(builder.toBuffer());
var instances = [];
@@ -279,7 +279,7 @@ var kV8MaxPages = 32767;
.addBody([kExprMemorySize, kMemoryZero])
.exportFunc();
builder.addFunction("grow", kSig_i_i)
- .addBody([kExprGetLocal, 0, kExprMemoryGrow, kMemoryZero])
+ .addBody([kExprLocalGet, 0, kExprMemoryGrow, kMemoryZero])
.exportFunc();
var instances = [];
for (var i = 0; i < 5; i++) {
@@ -344,7 +344,7 @@ var kV8MaxPages = 32767;
.addBody([kExprMemorySize, kMemoryZero])
.exportFunc();
builder.addFunction("grow", kSig_i_i)
- .addBody([kExprGetLocal, 0, kExprMemoryGrow, kMemoryZero])
+ .addBody([kExprLocalGet, 0, kExprMemoryGrow, kMemoryZero])
.exportFunc();
var instances = [];
for (var i = 0; i < 10; i++) {
@@ -379,7 +379,7 @@ var kV8MaxPages = 32767;
builder.addMemory(1, kSpecMaxPages, true);
builder.exportMemoryAs("exported_mem");
builder.addFunction("grow", kSig_i_i)
- .addBody([kExprGetLocal, 0, kExprMemoryGrow, kMemoryZero])
+ .addBody([kExprLocalGet, 0, kExprMemoryGrow, kMemoryZero])
.exportFunc();
instance_1 = builder.instantiate();
}
@@ -387,7 +387,7 @@ var kV8MaxPages = 32767;
let builder = new WasmModuleBuilder();
builder.addImportedMemory("doo", "imported_mem");
builder.addFunction("grow", kSig_i_i)
- .addBody([kExprGetLocal, 0, kExprMemoryGrow, kMemoryZero])
+ .addBody([kExprLocalGet, 0, kExprMemoryGrow, kMemoryZero])
.exportFunc();
instance_2 = builder.instantiate({
doo: {imported_mem: instance_1.exports.exported_mem}});
@@ -407,7 +407,7 @@ var kV8MaxPages = 32767;
.addBody([kExprMemorySize, kMemoryZero])
.exportFunc();
builder.addFunction("grow", kSig_i_i)
- .addBody([kExprGetLocal, 0, kExprMemoryGrow, kMemoryZero])
+ .addBody([kExprLocalGet, 0, kExprMemoryGrow, kMemoryZero])
.exportFunc();
instance = builder.instantiate();
assertEquals(kPageSize, instance.exports.exported_mem.buffer.byteLength);
diff --git a/deps/v8/test/mjsunit/wasm/import-mutable-global.js b/deps/v8/test/mjsunit/wasm/import-mutable-global.js
index 715549a41f..70ce50be00 100644
--- a/deps/v8/test/mjsunit/wasm/import-mutable-global.js
+++ b/deps/v8/test/mjsunit/wasm/import-mutable-global.js
@@ -11,7 +11,7 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
let builder = new WasmModuleBuilder();
builder.addImportedGlobal("mod", "g", kWasmI32);
builder.addFunction("main", kSig_i_v)
- .addBody([kExprGetGlobal, 0])
+ .addBody([kExprGlobalGet, 0])
.exportAs("main");
let main = builder.instantiate({mod: {g: global}}).exports.main;
@@ -54,10 +54,10 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
function addGlobalGetterAndSetter(builder, index, name, type) {
builder.addFunction('get' + name, makeSig([], [type]))
- .addBody([kExprGetGlobal, index])
+ .addBody([kExprGlobalGet, index])
.exportFunc();
builder.addFunction('set' + name, makeSig([type], []))
- .addBody([kExprGetLocal, 0, kExprSetGlobal, index])
+ .addBody([kExprLocalGet, 0, kExprGlobalSet, index])
.exportFunc();
}
@@ -137,20 +137,20 @@ function addGlobalGetterAndSetter(builder, index, name, type) {
const index = 0;
builder.addFunction('geti64_hi', makeSig([], [kWasmI32]))
.addBody([
- kExprGetGlobal, index,
+ kExprGlobalGet, index,
kExprI64Const, 32, kExprI64ShrU,
kExprI32ConvertI64])
.exportFunc();
builder.addFunction('geti64_lo', makeSig([], [kWasmI32]))
- .addBody([kExprGetGlobal, index, kExprI32ConvertI64])
+ .addBody([kExprGlobalGet, index, kExprI32ConvertI64])
.exportFunc();
builder.addFunction("seti64", makeSig([kWasmI32, kWasmI32], []))
.addBody([
- kExprGetLocal, 1, kExprI64UConvertI32,
- kExprGetLocal, 0, kExprI64UConvertI32,
+ kExprLocalGet, 1, kExprI64UConvertI32,
+ kExprLocalGet, 0, kExprI64UConvertI32,
kExprI64Const, 32, kExprI64Shl,
kExprI64Ior,
- kExprSetGlobal, index])
+ kExprGlobalSet, index])
.exportFunc();
};
diff --git a/deps/v8/test/mjsunit/wasm/import-table.js b/deps/v8/test/mjsunit/wasm/import-table.js
index 098d03d4d6..6693559c8f 100644
--- a/deps/v8/test/mjsunit/wasm/import-table.js
+++ b/deps/v8/test/mjsunit/wasm/import-table.js
@@ -34,7 +34,7 @@ let kTableSize = 50;
let f15 = addConstFunc(builder, 15);
let call = builder.addFunction("call", kSig_i_i)
.addBody([
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
kExprCallIndirect, signums.i_v, kTableZero
])
.exportAs("call");
@@ -57,7 +57,7 @@ let kTableSize = 50;
let f21 = addConstFunc(builder, 21);
let call = builder.addFunction("call", kSig_i_i)
.addBody([
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
kExprCallIndirect, signums.i_v, kTableZero
])
.exportAs("call");
@@ -92,7 +92,7 @@ function addConstFuncUsingGlobal(builder, val) {
let g = builder.addGlobal(kWasmI32, false);
g.init = val;
return builder.addFunction("global" + val, kSig_i_v)
- .addBody([kExprGetGlobal, g.index]).index;
+ .addBody([kExprGlobalGet, g.index]).index;
}
(function TestAliasedImportedTableInstanceGlobals() {
@@ -106,7 +106,7 @@ function addConstFuncUsingGlobal(builder, val) {
let f14 = addConstFuncUsingGlobal(builder, 14);
let call = builder.addFunction("call", kSig_i_i)
.addBody([
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
kExprCallIndirect, signums.i_v, kTableZero
])
.exportAs("call");
@@ -129,7 +129,7 @@ function addConstFuncUsingGlobal(builder, val) {
let f22 = addConstFuncUsingGlobal(builder, 22);
let call = builder.addFunction("call", kSig_i_i)
.addBody([
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
kExprCallIndirect, signums.i_v, kTableZero
])
.exportAs("call");
@@ -186,7 +186,7 @@ function addConstFuncUsingMemory(builder, val) {
let f13 = addConstFuncUsingMemory(builder, 13);
let call = builder.addFunction("call", kSig_i_i)
.addBody([
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
kExprCallIndirect, signums.i_v, kTableZero
])
.exportAs("call");
@@ -211,7 +211,7 @@ function addConstFuncUsingMemory(builder, val) {
let f23 = addConstFuncUsingMemory(builder, 23);
let call = builder.addFunction("call", kSig_i_i)
.addBody([
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
kExprCallIndirect, signums.i_v, kTableZero
])
.exportAs("call");
diff --git a/deps/v8/test/mjsunit/wasm/indirect-call-non-zero-table.js b/deps/v8/test/mjsunit/wasm/indirect-call-non-zero-table.js
index 414ca19c99..69fb4dcf43 100644
--- a/deps/v8/test/mjsunit/wasm/indirect-call-non-zero-table.js
+++ b/deps/v8/test/mjsunit/wasm/indirect-call-non-zero-table.js
@@ -41,28 +41,28 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
.index;
builder.addFunction('call1', kSig_i_i)
- .addBody([kExprGetLocal, 0, // function index
+ .addBody([kExprLocalGet, 0, // function index
kExprCallIndirect, sig_index, table1])
.exportAs('call1');
builder.addFunction('return_call1', kSig_i_i)
- .addBody([kExprGetLocal, 0, // function index
+ .addBody([kExprLocalGet, 0, // function index
kExprReturnCallIndirect, sig_index, table1])
.exportAs('return_call1');
builder.addFunction('call2', kSig_i_i)
- .addBody([kExprGetLocal, 0, // function index
+ .addBody([kExprLocalGet, 0, // function index
kExprCallIndirect, sig_index, table2])
.exportAs('call2');
builder.addFunction('return_call2', kSig_i_i)
- .addBody([kExprGetLocal, 0, // function index
+ .addBody([kExprLocalGet, 0, // function index
kExprReturnCallIndirect, sig_index, table2])
.exportAs('return_call2');
builder.addFunction('call_invalid_sig', kSig_i_i)
- .addBody([kExprGetLocal, 0, kExprGetLocal, 0, // function index + param
+ .addBody([kExprLocalGet, 0, kExprLocalGet, 0, // function index + param
kExprCallIndirect, other_sig, table2])
.exportAs('call_invalid_sig');
builder.addFunction('return_call_invalid_sig', kSig_i_i)
- .addBody([kExprGetLocal, 0, kExprGetLocal, 0, // function index + param
+ .addBody([kExprLocalGet, 0, kExprLocalGet, 0, // function index + param
kExprReturnCallIndirect, other_sig, table2])
.exportAs('return_call_invalid_sig');
@@ -119,10 +119,10 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
const sig_index = builder.addType(kSig_i_v);
const f1 = builder.addFunction("foo", sig_index)
- .addBody([kExprGetGlobal, g, kExprI32Const, 12, kExprI32Add]);
+ .addBody([kExprGlobalGet, g, kExprI32Const, 12, kExprI32Add]);
builder.addFunction('call', kSig_i_i)
- .addBody([kExprGetLocal, 0, // function index
+ .addBody([kExprLocalGet, 0, // function index
kExprCallIndirect, sig_index, t1])
.exportAs('call');
@@ -167,14 +167,14 @@ function js_div(a, b) { return (a / b) | 0; }
let sig_index = builder.addType(kSig_i_ii);
builder.addFunction("placeholder", sig_index)
- .addBody([kExprGetLocal, 0]);
+ .addBody([kExprLocalGet, 0]);
builder.addElementSegment(table_index, g, true, [div]);
builder.addFunction("main", kSig_i_ii)
.addBody([
kExprI32Const, 55, // --
- kExprGetLocal, 0, // --
- kExprGetLocal, 1, // --
+ kExprLocalGet, 0, // --
+ kExprLocalGet, 1, // --
kExprCallIndirect, 0, table_index]) // --
.exportAs("main");
diff --git a/deps/v8/test/mjsunit/wasm/indirect-calls.js b/deps/v8/test/mjsunit/wasm/indirect-calls.js
index e9f560a019..603d7561ec 100644
--- a/deps/v8/test/mjsunit/wasm/indirect-calls.js
+++ b/deps/v8/test/mjsunit/wasm/indirect-calls.js
@@ -15,20 +15,20 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
builder.addImport("q", "add", sig_index);
var f = builder.addFunction("add", sig_index)
.addBody([
- kExprGetLocal, 0, kExprGetLocal, 1, kExprCallFunction, 0
+ kExprLocalGet, 0, kExprLocalGet, 1, kExprCallFunction, 0
]);
print("internal add index = " + f.index);
builder.addFunction("sub", sig_index)
.addBody([
- kExprGetLocal, 0, // --
- kExprGetLocal, 1, // --
+ kExprLocalGet, 0, // --
+ kExprLocalGet, 1, // --
kExprI32Sub, // --
]);
builder.addFunction("main", kSig_i_iii)
.addBody([
- kExprGetLocal, 1,
- kExprGetLocal, 2,
- kExprGetLocal, 0,
+ kExprLocalGet, 1,
+ kExprLocalGet, 2,
+ kExprLocalGet, 0,
kExprCallIndirect, sig_index, kTableZero
])
.exportFunc()
@@ -68,20 +68,20 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
var mul = builder.addImport("q", "mul", sig_i_ii);
var add = builder.addFunction("add", sig_i_ii)
.addBody([
- kExprGetLocal, 0, // --
- kExprGetLocal, 1, // --
+ kExprLocalGet, 0, // --
+ kExprLocalGet, 1, // --
kExprI32Add // --
]);
var popcnt = builder.addFunction("popcnt", sig_i_i)
.addBody([
- kExprGetLocal, 0, // --
+ kExprLocalGet, 0, // --
kExprI32Popcnt // --
]);
var main = builder.addFunction("main", kSig_i_iii)
.addBody([
- kExprGetLocal, 1,
- kExprGetLocal, 2,
- kExprGetLocal, 0,
+ kExprLocalGet, 1,
+ kExprLocalGet, 2,
+ kExprLocalGet, 0,
kExprCallIndirect, sig_i_ii, kTableZero
])
.exportFunc();
@@ -106,20 +106,20 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
function AddFunctions(builder) {
var mul = builder.addFunction("mul", kSig_i_ii)
.addBody([
- kExprGetLocal, 0, // --
- kExprGetLocal, 1, // --
+ kExprLocalGet, 0, // --
+ kExprLocalGet, 1, // --
kExprI32Mul // --
]);
var add = builder.addFunction("add", kSig_i_ii)
.addBody([
- kExprGetLocal, 0, // --
- kExprGetLocal, 1, // --
+ kExprLocalGet, 0, // --
+ kExprLocalGet, 1, // --
kExprI32Add // --
]);
var sub = builder.addFunction("sub", kSig_i_ii)
.addBody([
- kExprGetLocal, 0, // --
- kExprGetLocal, 1, // --
+ kExprLocalGet, 0, // --
+ kExprLocalGet, 1, // --
kExprI32Sub // --
]);
return {mul: mul, add: add, sub: sub};
@@ -135,8 +135,8 @@ function AddFunctions(builder) {
builder.addFunction("main", kSig_i_ii)
.addBody([
kExprI32Const, 33, // --
- kExprGetLocal, 0, // --
- kExprGetLocal, 1, // --
+ kExprLocalGet, 0, // --
+ kExprLocalGet, 1, // --
kExprCallIndirect, 0, kTableZero]) // --
.exportAs("main");
@@ -163,8 +163,8 @@ function AddFunctions(builder) {
builder.addFunction("main", kSig_i_ii)
.addBody([
kExprI32Const, 33, // --
- kExprGetLocal, 0, // --
- kExprGetLocal, 1, // --
+ kExprLocalGet, 0, // --
+ kExprLocalGet, 1, // --
kExprCallIndirect, 0, kTableZero]) // --
.exportAs("main");
@@ -200,8 +200,8 @@ function AddFunctions(builder) {
builder.addFunction("main", kSig_i_ii)
.addBody([
kExprI32Const, 33, // --
- kExprGetLocal, 0, // --
- kExprGetLocal, 1, // --
+ kExprLocalGet, 0, // --
+ kExprLocalGet, 1, // --
kExprCallIndirect, 0, kTableZero]) // --
.exportAs("main");
diff --git a/deps/v8/test/mjsunit/wasm/indirect-sig-mismatch.js b/deps/v8/test/mjsunit/wasm/indirect-sig-mismatch.js
index 9e8ddac1c5..ea148c5087 100644
--- a/deps/v8/test/mjsunit/wasm/indirect-sig-mismatch.js
+++ b/deps/v8/test/mjsunit/wasm/indirect-sig-mismatch.js
@@ -65,14 +65,14 @@ function caller_module() {
builder.addFunction("call1", sig_i_i)
.addBody([
- kExprGetLocal, 0, // --
+ kExprLocalGet, 0, // --
kExprCallIndirect, sig_i_v, kTableZero]) // --
.exportAs("call1");
builder.addFunction("call2", sig_i_i)
.addBody([
kExprI32Const, 11, // --
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
kExprCallIndirect, sig_i_i, kTableZero]) // --
.exportAs("call2");
@@ -80,7 +80,7 @@ function caller_module() {
.addBody([
kExprI32Const, 21,
kExprI32Const, 22,
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
kExprCallIndirect, sig_i_ii, kTableZero]) // --
.exportAs("call3");
diff --git a/deps/v8/test/mjsunit/wasm/indirect-tables.js b/deps/v8/test/mjsunit/wasm/indirect-tables.js
index 58df978859..e48157001b 100644
--- a/deps/v8/test/mjsunit/wasm/indirect-tables.js
+++ b/deps/v8/test/mjsunit/wasm/indirect-tables.js
@@ -10,20 +10,20 @@ function AddFunctions(builder) {
let sig_index = builder.addType(kSig_i_ii);
let mul = builder.addFunction("mul", sig_index)
.addBody([
- kExprGetLocal, 0, // --
- kExprGetLocal, 1, // --
+ kExprLocalGet, 0, // --
+ kExprLocalGet, 1, // --
kExprI32Mul // --
]);
let add = builder.addFunction("add", sig_index)
.addBody([
- kExprGetLocal, 0, // --
- kExprGetLocal, 1, // --
+ kExprLocalGet, 0, // --
+ kExprLocalGet, 1, // --
kExprI32Add // --
]);
let sub = builder.addFunction("sub", sig_index)
.addBody([
- kExprGetLocal, 0, // --
- kExprGetLocal, 1, // --
+ kExprLocalGet, 0, // --
+ kExprLocalGet, 1, // --
kExprI32Sub // --
]);
return {mul: mul, add: add, sub: sub};
@@ -41,8 +41,8 @@ function js_div(a, b) { return (a / b) | 0; }
builder.addFunction("main", kSig_i_ii)
.addBody([
kExprI32Const, 33, // --
- kExprGetLocal, 0, // --
- kExprGetLocal, 1, // --
+ kExprLocalGet, 0, // --
+ kExprLocalGet, 1, // --
kExprCallIndirect, 0, kTableZero]) // --
.exportAs("main");
@@ -121,8 +121,8 @@ function js_div(a, b) { return (a / b) | 0; }
builder.addFunction("main", kSig_i_ii)
.addBody([
kExprI32Const, 33, // --
- kExprGetLocal, 0, // --
- kExprGetLocal, 1, // --
+ kExprLocalGet, 0, // --
+ kExprLocalGet, 1, // --
kExprCallIndirect, 0, kTableZero]) // --
.exportAs("main");
@@ -184,8 +184,8 @@ function js_div(a, b) { return (a / b) | 0; }
builder.addFunction("main", kSig_i_ii)
.addBody([
kExprI32Const, 55, // --
- kExprGetLocal, 0, // --
- kExprGetLocal, 1, // --
+ kExprLocalGet, 0, // --
+ kExprLocalGet, 1, // --
kExprCallIndirect, 0, kTableZero]) // --
.exportAs("main");
@@ -249,11 +249,11 @@ function js_div(a, b) { return (a / b) | 0; }
let sig_index = builder.addType(kSig_i_v);
let f = builder.addFunction("f", sig_index)
.addBody([
- kExprGetGlobal, g
+ kExprGlobalGet, g
]);
builder.addFunction("main", kSig_i_ii)
.addBody([
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
kExprCallIndirect, sig_index, kTableZero]) // --
.exportAs("main");
builder.addElementSegment(0, g, true, [f.index]);
@@ -292,7 +292,7 @@ function js_div(a, b) { return (a / b) | 0; }
builder.addFunction("main", kSig_i_ii)
.addBody([
- kExprGetLocal, 0, // --
+ kExprLocalGet, 0, // --
kExprCallIndirect, sig_index1, kTableZero]) // --
.exportAs("main");
@@ -311,7 +311,7 @@ function js_div(a, b) { return (a / b) | 0; }
builder.addFunction("main", kSig_i_ii)
.addBody([
- kExprGetLocal, 0, // --
+ kExprLocalGet, 0, // --
kExprCallIndirect, sig_index2, kTableZero]) // --
.exportAs("main");
@@ -404,11 +404,11 @@ function js_div(a, b) { return (a / b) | 0; }
let sig_index = builder.addType(kSig_i_v);
builder.addFunction("g", sig_index)
.addBody([
- kExprGetGlobal, g
+ kExprGlobalGet, g
]);
builder.addFunction("main", kSig_i_ii)
.addBody([
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
kExprCallIndirect, sig_index, kTableZero]) // --
.exportAs("main");
builder.addElementSegment(0, g, true, [g]);
@@ -572,7 +572,7 @@ function js_div(a, b) { return (a / b) | 0; }
let sig_index = builder0.addType(kSig_i_v);
builder0.addFunction('main', kSig_i_i)
.addBody([
- kExprGetLocal, 0, // -
+ kExprLocalGet, 0, // -
kExprCallIndirect, sig_index, kTableZero
])
.exportAs('main');
@@ -584,7 +584,7 @@ function js_div(a, b) { return (a / b) | 0; }
// instance1 imports the table and adds a function to it.
let builder1 = new WasmModuleBuilder();
builder1.setName('module_1');
- builder1.addFunction('f', kSig_i_i).addBody([kExprGetLocal, 0]);
+ builder1.addFunction('f', kSig_i_i).addBody([kExprLocalGet, 0]);
builder1.addImportedTable('z', 'table');
builder1.addElementSegment(0, 0, false, [0]);
let module1 = new WebAssembly.Module(builder1.toBuffer());
@@ -611,7 +611,7 @@ function js_div(a, b) { return (a / b) | 0; }
let builder = new WasmModuleBuilder();
let sig = builder.addType(kSig_i_v);
builder.addFunction('main', kSig_i_i)
- .addBody([kExprGetLocal, 0, kExprCallIndirect, sig, kTableZero])
+ .addBody([kExprLocalGet, 0, kExprCallIndirect, sig, kTableZero])
.exportAs('main');
builder.addImportedMemory('', 'memory', 1);
@@ -653,7 +653,7 @@ function js_div(a, b) { return (a / b) | 0; }
let builder = new WasmModuleBuilder();
let sig = builder.addType(kSig_i_v);
builder.addFunction('main', kSig_i_i)
- .addBody([kExprGetLocal, 0, kExprCallIndirect, sig, kTableZero])
+ .addBody([kExprLocalGet, 0, kExprCallIndirect, sig, kTableZero])
.exportAs('main');
builder.addImportedTable('', 'table');
@@ -711,8 +711,8 @@ function js_div(a, b) { return (a / b) | 0; }
let builder = new WasmModuleBuilder();
builder.addFunction("mul", kSig_i_ii)
.addBody(
- [kExprGetLocal, 0,
- kExprGetLocal, 1,
+ [kExprLocalGet, 0,
+ kExprLocalGet, 1,
kExprI32Mul])
.exportFunc();
return builder.instantiate().exports.mul;
@@ -725,8 +725,8 @@ function js_div(a, b) { return (a / b) | 0; }
builder.addFunction("main", kSig_i_ii)
.addBody([
kExprI32Const, 33, // --
- kExprGetLocal, 0, // --
- kExprGetLocal, 1, // --
+ kExprLocalGet, 0, // --
+ kExprLocalGet, 1, // --
kExprCallIndirect, 0, kTableZero]) // --
.exportAs("main");
@@ -756,8 +756,8 @@ function js_div(a, b) { return (a / b) | 0; }
let builder = new WasmModuleBuilder();
builder.addFunction("mul", kSig_i_ii)
.addBody(
- [kExprGetLocal, 0,
- kExprGetLocal, 1,
+ [kExprLocalGet, 0,
+ kExprLocalGet, 1,
kExprI32Mul])
.exportFunc();
return builder.instantiate().exports.mul;
@@ -775,8 +775,8 @@ function js_div(a, b) { return (a / b) | 0; }
builder.addFunction("main", kSig_i_ii)
.addBody([
kExprI32Const, 44, // --
- kExprGetLocal, 0, // --
- kExprGetLocal, 1, // --
+ kExprLocalGet, 0, // --
+ kExprLocalGet, 1, // --
kExprCallIndirect, 0, kTableZero]) // --
.exportAs("main");
@@ -817,7 +817,7 @@ function js_div(a, b) { return (a / b) | 0; }
builder.addImport("q", "f1", kSig_i_v);
builder.addFunction("main", kSig_i_i)
.addBody([
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
kExprCallIndirect, 0, kTableZero
])
.exportFunc();
@@ -879,7 +879,7 @@ function js_div(a, b) { return (a / b) | 0; }
]);
builder.addFunction("main", kSig_i_i)
.addBody([
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
kExprCallIndirect, 0, kTableZero
])
.exportFunc();
diff --git a/deps/v8/test/mjsunit/wasm/instance-memory-gc-stress.js b/deps/v8/test/mjsunit/wasm/instance-memory-gc-stress.js
index 29b65bc9b8..401be71504 100644
--- a/deps/v8/test/mjsunit/wasm/instance-memory-gc-stress.js
+++ b/deps/v8/test/mjsunit/wasm/instance-memory-gc-stress.js
@@ -17,7 +17,7 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
.addBody([kExprMemorySize, kMemoryZero])
.exportFunc();
builder.addFunction("grow", kSig_i_i)
- .addBody([kExprGetLocal, 0, kExprMemoryGrow, kMemoryZero])
+ .addBody([kExprLocalGet, 0, kExprMemoryGrow, kMemoryZero])
.exportFunc();
var instances = [];
for (var i = 0; i < 5; i++) {
diff --git a/deps/v8/test/mjsunit/wasm/instantiate-module-basic.js b/deps/v8/test/mjsunit/wasm/instantiate-module-basic.js
index 1c5f10a832..7d4b848465 100644
--- a/deps/v8/test/mjsunit/wasm/instantiate-module-basic.js
+++ b/deps/v8/test/mjsunit/wasm/instantiate-module-basic.js
@@ -130,8 +130,8 @@ assertFalse(WebAssembly.validate(bytes(88, 88, 88, 88, 88, 88, 88, 88)));
builder.addFunction('main', kSig_i_i)
.addBody([
- kExprGetLocal, 0, kExprI32LoadMem, 0, 0, kExprI32Const, 1,
- kExprCallIndirect, signature, kTableZero, kExprGetLocal, 0,
+ kExprLocalGet, 0, kExprI32LoadMem, 0, 0, kExprI32Const, 1,
+ kExprCallIndirect, signature, kTableZero, kExprLocalGet, 0,
kExprI32LoadMem, 0, 0, kExprCallFunction, 0, kExprI32Add
])
.exportFunc();
@@ -139,7 +139,7 @@ assertFalse(WebAssembly.validate(bytes(88, 88, 88, 88, 88, 88, 88, 88)));
// writer(mem[i]);
// return mem[i] + some_value();
builder.addFunction('_wrap_writer', signature).addBody([
- kExprGetLocal, 0, kExprCallFunction, 1
+ kExprLocalGet, 0, kExprCallFunction, 1
]);
builder.appendToTable([2, 3]);
@@ -176,11 +176,11 @@ assertFalse(WebAssembly.validate(bytes(88, 88, 88, 88, 88, 88, 88, 88)));
var builder = new WasmModuleBuilder();
builder.addGlobal(kWasmI32, true);
builder.addFunction('read', kSig_i_v)
- .addBody([kExprGetGlobal, 0])
+ .addBody([kExprGlobalGet, 0])
.exportFunc();
builder.addFunction('write', kSig_v_i)
- .addBody([kExprGetLocal, 0, kExprSetGlobal, 0])
+ .addBody([kExprLocalGet, 0, kExprGlobalSet, 0])
.exportFunc();
var module = new WebAssembly.Module(builder.toBuffer());
diff --git a/deps/v8/test/mjsunit/wasm/interpreter-mixed.js b/deps/v8/test/mjsunit/wasm/interpreter-mixed.js
index 573e1e1d9e..27df605d46 100644
--- a/deps/v8/test/mjsunit/wasm/interpreter-mixed.js
+++ b/deps/v8/test/mjsunit/wasm/interpreter-mixed.js
@@ -29,9 +29,9 @@ function checkStack(stack, expected_lines) {
// grow_memory can be called from interpreted or compiled code, and changes
// should be reflected in either execution.
var builder = new WasmModuleBuilder();
- var grow_body = [kExprGetLocal, 0, kExprMemoryGrow, kMemoryZero];
- var load_body = [kExprGetLocal, 0, kExprI32LoadMem, 0, 0];
- var store_body = [kExprGetLocal, 0, kExprGetLocal, 1, kExprI32StoreMem, 0, 0];
+ var grow_body = [kExprLocalGet, 0, kExprMemoryGrow, kMemoryZero];
+ var load_body = [kExprLocalGet, 0, kExprI32LoadMem, 0, 0];
+ var store_body = [kExprLocalGet, 0, kExprLocalGet, 1, kExprI32StoreMem, 0, 0];
builder.addFunction('grow_memory', kSig_i_i).addBody(grow_body).exportFunc();
builder.addFunction('load', kSig_i_i).addBody(load_body).exportFunc();
builder.addFunction('store', kSig_v_ii).addBody(store_body).exportFunc();
@@ -96,7 +96,7 @@ function createTwoInstancesCallingEachOther(inner_throws = false) {
let id_imp = builder1.addImport('q', 'id', kSig_i_i);
let plus_one = builder1.addFunction('plus_one', kSig_i_i)
.addBody([
- kExprGetLocal, 0, // -
+ kExprLocalGet, 0, // -
kExprI32Const, 1, // -
kExprI32Add, // -
kExprCallFunction, id_imp
@@ -114,7 +114,7 @@ function createTwoInstancesCallingEachOther(inner_throws = false) {
let plus_two = builder2.addFunction('plus_two', kSig_i_i)
.addBody([
// Call import, add one more.
- kExprGetLocal, 0, // -
+ kExprLocalGet, 0, // -
kExprCallFunction, plus_one_imp, // -
kExprI32Const, 1, // -
kExprI32Add
diff --git a/deps/v8/test/mjsunit/wasm/interpreter.js b/deps/v8/test/mjsunit/wasm/interpreter.js
index c1c03a4dd0..43ecc4a33a 100644
--- a/deps/v8/test/mjsunit/wasm/interpreter.js
+++ b/deps/v8/test/mjsunit/wasm/interpreter.js
@@ -59,10 +59,10 @@ function checkStack(stack, expected_lines) {
builder.addFunction('main', makeSig([kWasmI32, kWasmF64], [kWasmF32]))
.addBody([
// call #0 with arg 0 and arg 0 + 1
- kExprGetLocal, 0, kExprGetLocal, 0, kExprI32Const, 1, kExprI32Add,
+ kExprLocalGet, 0, kExprLocalGet, 0, kExprI32Const, 1, kExprI32Add,
kExprCallFunction, 0,
// call #1 with arg 1
- kExprGetLocal, 1, kExprCallFunction, 1,
+ kExprLocalGet, 1, kExprCallFunction, 1,
// convert returned value to f32
kExprF32UConvertI32,
// add the two values
@@ -151,28 +151,28 @@ function checkStack(stack, expected_lines) {
builder.addGlobal(kWasmF32, true); // 2
builder.addGlobal(kWasmF64, true); // 3
builder.addFunction('get_i32', kSig_i_v)
- .addBody([kExprGetGlobal, 0])
+ .addBody([kExprGlobalGet, 0])
.exportFunc();
builder.addFunction('get_i64', kSig_d_v)
- .addBody([kExprGetGlobal, 1, kExprF64SConvertI64])
+ .addBody([kExprGlobalGet, 1, kExprF64SConvertI64])
.exportFunc();
builder.addFunction('get_f32', kSig_d_v)
- .addBody([kExprGetGlobal, 2, kExprF64ConvertF32])
+ .addBody([kExprGlobalGet, 2, kExprF64ConvertF32])
.exportFunc();
builder.addFunction('get_f64', kSig_d_v)
- .addBody([kExprGetGlobal, 3])
+ .addBody([kExprGlobalGet, 3])
.exportFunc();
builder.addFunction('set_i32', kSig_v_i)
- .addBody([kExprGetLocal, 0, kExprSetGlobal, 0])
+ .addBody([kExprLocalGet, 0, kExprGlobalSet, 0])
.exportFunc();
builder.addFunction('set_i64', kSig_v_d)
- .addBody([kExprGetLocal, 0, kExprI64SConvertF64, kExprSetGlobal, 1])
+ .addBody([kExprLocalGet, 0, kExprI64SConvertF64, kExprGlobalSet, 1])
.exportFunc();
builder.addFunction('set_f32', kSig_v_d)
- .addBody([kExprGetLocal, 0, kExprF32ConvertF64, kExprSetGlobal, 2])
+ .addBody([kExprLocalGet, 0, kExprF32ConvertF64, kExprGlobalSet, 2])
.exportFunc();
builder.addFunction('set_f64', kSig_v_d)
- .addBody([kExprGetLocal, 0, kExprSetGlobal, 3])
+ .addBody([kExprLocalGet, 0, kExprGlobalSet, 3])
.exportFunc();
var instance = builder.instantiate();
// Initially, all should be zero.
@@ -205,7 +205,7 @@ function checkStack(stack, expected_lines) {
var builder = new WasmModuleBuilder();
builder.addImport('mod', 'func', kSig_v_i);
builder.addFunction('main', kSig_v_i)
- .addBody([kExprGetLocal, 0, kExprCallFunction, 0])
+ .addBody([kExprLocalGet, 0, kExprCallFunction, 0])
.exportFunc();
instance = builder.instantiate({mod: {func: func}});
// Test that this does not mess up internal state by executing it three times.
@@ -239,14 +239,14 @@ function checkStack(stack, expected_lines) {
var sig_i_i = builder.addType(kSig_i_i);
var mul = builder.addImport('q', 'mul', sig_i_ii);
var add = builder.addFunction('add', sig_i_ii).addBody([
- kExprGetLocal, 0, kExprGetLocal, 1, kExprI32Add
+ kExprLocalGet, 0, kExprLocalGet, 1, kExprI32Add
]);
var mismatch =
- builder.addFunction('sig_mismatch', sig_i_i).addBody([kExprGetLocal, 0]);
+ builder.addFunction('sig_mismatch', sig_i_i).addBody([kExprLocalGet, 0]);
var main = builder.addFunction('main', kSig_i_iii)
.addBody([
// Call indirect #0 with args <#1, #2>.
- kExprGetLocal, 1, kExprGetLocal, 2, kExprGetLocal, 0,
+ kExprLocalGet, 1, kExprLocalGet, 2, kExprLocalGet, 0,
kExprCallIndirect, sig_i_ii, kTableZero
])
.exportFunc();
@@ -281,7 +281,7 @@ function checkStack(stack, expected_lines) {
builder.addFunction('main', kSig_v_i)
.addBody([
// Call indirect #0 with arg #0, drop result.
- kExprGetLocal, 0, kExprCallIndirect, sig_l_v, kTableZero, kExprDrop
+ kExprLocalGet, 0, kExprCallIndirect, sig_l_v, kTableZero, kExprDrop
])
.exportFunc();
builder.appendToTable([imp, direct.index, indirect.index]);
@@ -409,7 +409,7 @@ function checkStack(stack, expected_lines) {
var builder = new WasmModuleBuilder();
var imp = builder.addImport('mod', 'the_name_of_my_import', kSig_i_i);
builder.addFunction('main', kSig_i_i)
- .addBody([kExprGetLocal, 0, kExprCallFunction, imp])
+ .addBody([kExprLocalGet, 0, kExprCallFunction, imp])
.exportAs('main');
print('module');
return new WebAssembly.Module(builder.toBuffer());
@@ -525,7 +525,7 @@ function checkStack(stack, expected_lines) {
const sig_index = builder0.addType(kSig_i_v);
builder0.addFunction('main', kSig_i_i)
.addBody([
- kExprGetLocal, 0, // --
+ kExprLocalGet, 0, // --
kExprCallIndirect, sig_index, kTableZero
]) // --
.exportAs('main');
@@ -549,7 +549,7 @@ function checkStack(stack, expected_lines) {
print(arguments.callee.name);
const builder = new WasmModuleBuilder();
builder.addFunction('main', kSig_i_i)
- .addBody([kExprGetLocal, 0, kExprI32Const, 7, kExprI32Add])
+ .addBody([kExprLocalGet, 0, kExprI32Const, 7, kExprI32Add])
.exportFunc();
const wire_bytes = builder.toBuffer();
diff --git a/deps/v8/test/mjsunit/wasm/lazy-compilation.js b/deps/v8/test/mjsunit/wasm/lazy-compilation.js
index c7cd40d05d..c45fb6deb0 100644
--- a/deps/v8/test/mjsunit/wasm/lazy-compilation.js
+++ b/deps/v8/test/mjsunit/wasm/lazy-compilation.js
@@ -24,7 +24,7 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
builder1.addFunction('store', kSig_v_i)
.addBody([
kExprI32Const, 0, // i32.const 0
- kExprGetLocal, 0, // get_local 0
+ kExprLocalGet, 0, // local.get 0
kExprI32StoreMem, 0, 0, // i32.store offset=0 align=0
])
.exportFunc();
@@ -35,7 +35,7 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
builder2.addMemory(1, 1, true);
builder2.addImport('mod', 'store', kSig_v_i);
builder2.addFunction('call_store', kSig_v_i)
- .addBody([kExprGetLocal, 0, kExprCallFunction, 0])
+ .addBody([kExprLocalGet, 0, kExprCallFunction, 0])
.exportFunc();
const instance2 = builder2.instantiate({mod: {store: instance1.exports.store}});
const mem2 = new Int32Array(instance2.exports.memory.buffer);
@@ -75,7 +75,7 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
builder1.addFunction('store', kSig_v_i)
.addBody([
kExprI32Const, 0, // i32.const 0
- kExprGetLocal, 0, // get_local 0
+ kExprLocalGet, 0, // local.get 0
kExprI32StoreMem, 0, 0, // i32.store offset=0 align=0
])
.exportFunc();
diff --git a/deps/v8/test/mjsunit/wasm/liftoff-trap-handler.js b/deps/v8/test/mjsunit/wasm/liftoff-trap-handler.js
index 3ce74816ea..eadfaacca4 100644
--- a/deps/v8/test/mjsunit/wasm/liftoff-trap-handler.js
+++ b/deps/v8/test/mjsunit/wasm/liftoff-trap-handler.js
@@ -13,13 +13,13 @@ function testCompileLoadStore() {
const builder = new WasmModuleBuilder();
// These functions generate statically out of bounds accesses.
builder.addFunction("load", kSig_i_i)
- .addBody([kExprGetLocal, 0, kExprI32LoadMem, 0, 0x80, 0x80, 0x80, 1])
+ .addBody([kExprLocalGet, 0, kExprI32LoadMem, 0, 0x80, 0x80, 0x80, 1])
.exportFunc();
builder.addFunction("store", kSig_i_ii)
- .addBody([kExprGetLocal, 0,
- kExprGetLocal, 1,
+ .addBody([kExprLocalGet, 0,
+ kExprLocalGet, 1,
kExprI32StoreMem, 0, 0x80, 0x80, 0x80, 1,
- kExprGetLocal, 1])
+ kExprLocalGet, 1])
.exportFunc();
builder.addMemory(1, 1, false);
const instance = builder.instantiate();
diff --git a/deps/v8/test/mjsunit/wasm/liftoff.js b/deps/v8/test/mjsunit/wasm/liftoff.js
index 51b30878d3..04eeffbea2 100644
--- a/deps/v8/test/mjsunit/wasm/liftoff.js
+++ b/deps/v8/test/mjsunit/wasm/liftoff.js
@@ -10,7 +10,7 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
print(arguments.callee.name);
const builder = new WasmModuleBuilder();
builder.addFunction('i32_add', kSig_i_ii)
- .addBody([kExprGetLocal, 0, kExprGetLocal, 1, kExprI32Add])
+ .addBody([kExprLocalGet, 0, kExprLocalGet, 1, kExprI32Add])
.exportFunc();
const module = new WebAssembly.Module(builder.toBuffer());
@@ -26,7 +26,7 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
print(arguments.callee.name);
const builder = new WasmModuleBuilder();
builder.addFunction('i32_add', kSig_i_ii)
- .addBody([kExprGetLocal, 0, kExprGetLocal, 1, kExprI32Add])
+ .addBody([kExprLocalGet, 0, kExprLocalGet, 1, kExprI32Add])
.exportFunc();
const instance = builder.instantiate();
@@ -38,7 +38,7 @@ async function testLiftoffAsync() {
print(arguments.callee.name);
const builder = new WasmModuleBuilder();
builder.addFunction('i32_add', kSig_i_ii)
- .addBody([kExprGetLocal, 0, kExprGetLocal, 1, kExprI32Add])
+ .addBody([kExprLocalGet, 0, kExprLocalGet, 1, kExprI32Add])
.exportFunc();
print('Compiling...');
diff --git a/deps/v8/test/mjsunit/wasm/loop-rotation.js b/deps/v8/test/mjsunit/wasm/loop-rotation.js
index 92ad1f31c7..7805f5ccf5 100644
--- a/deps/v8/test/mjsunit/wasm/loop-rotation.js
+++ b/deps/v8/test/mjsunit/wasm/loop-rotation.js
@@ -12,10 +12,10 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
builder.addFunction("main", kSig_v_i)
.addBody([
kExprLoop, kWasmStmt,
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
kExprI32Const, 1,
kExprI32Sub,
- kExprTeeLocal, 0,
+ kExprLocalTee, 0,
kExprBrIf, 0,
kExprEnd,
])
@@ -33,10 +33,10 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
builder.addFunction("main", kSig_v_i)
.addBody([
kExprLoop, kWasmStmt,
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
kExprI32Const, 1,
kExprI32Sub,
- kExprTeeLocal, 0,
+ kExprLocalTee, 0,
kExprBrIf, 1,
kExprBr, 0,
kExprEnd,
@@ -56,10 +56,10 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
builder.addFunction("main", kSig_v_i)
.addBody([
kExprLoop, kWasmStmt,
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
kExprI32Const, 1,
kExprI32Sub,
- kExprTeeLocal, 0,
+ kExprLocalTee, 0,
kExprBrIf, 1,
kExprI32Const, 0,
kExprI32Const, 0,
diff --git a/deps/v8/test/mjsunit/wasm/many-modules.js b/deps/v8/test/mjsunit/wasm/many-modules.js
new file mode 100644
index 0000000000..66db04237a
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/many-modules.js
@@ -0,0 +1,45 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// No reason to stress-opt this; save some time.
+// Flags: --wasm-far-jump-table --no-stress-opt
+
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+// We generate the module bytes once to make this test more efficient,
+// especially on simulator builds. The bytes contain a sentinel which is later
+// patched to different constants. This makes the modules distinct and forces
+// the engine to create different code for them.
+
+// This is the sentinel placed in the bytes. It's a 5 byte LEB-encoded integer.
+const sentinel = wasmSignedLeb(0x12345678);
+assertEquals(5, sentinel.length);
+
+const builder = new WasmModuleBuilder();
+builder.addFunction('f', kSig_i_i).addBody([kExprI32Const, ...sentinel]);
+const module_bytes = builder.toBuffer();
+
+// Checks whether {module_bytes[i .. i+sentinel.length]} matches {sentinel}.
+const has_sentinel = (i, k = 0) => module_bytes[i + k] == sentinel[k] &&
+ (k == sentinel.length - 1 || has_sentinel(i, k + 1));
+// Now find the sentinel.
+const find_sentinel = i =>
+ module_bytes.slice(i).findIndex((e, i) => has_sentinel(i));
+const sentinel_position = find_sentinel(0);
+assertTrue(has_sentinel(sentinel_position), 'found sentinel');
+assertEquals(-1, find_sentinel(sentinel_position + 1), 'exactly one sentinel');
+
+// Generating {num_modules} modules should not run out of memory, since the code
+// space needed per module is quite low.
+const num_modules = 10000;
+// Keep all generated modules alive.
+const modules = [];
+// Reset sentinel section to nops so that shorter LEBs will just be followed by
+// nops. This region will be patched in the loop with values of increasing size.
+module_bytes.set(Array(sentinel.length).fill(kExprNop), sentinel_position);
+for (let i = 0; i < num_modules; ++i) {
+ if (i % 50 == 0) print(i);
+ module_bytes.set(wasmSignedLeb(i), sentinel_position);
+ modules.push(new WebAssembly.Module(module_bytes));
+}
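For reference, the sentinel trick above leans on signed LEB128: 0x12345678 encodes to exactly five LEB bytes, so any smaller constant patched into the same slot leaves trailing bytes that the nop reset keeps harmless. A minimal standalone sketch of the encoding, mirroring what wasmSignedLeb in wasm-module-builder.js produces (the helper name signedLeb here is illustrative, not part of the test harness):

    function signedLeb(value) {
      // Emit 7 bits per byte, LSB first; keep the continuation bit (0x80)
      // set until the remaining value is pure sign extension.
      const bytes = [];
      while (true) {
        const byte = value & 0x7f;
        value >>= 7;
        const done = (value === 0 && (byte & 0x40) === 0) ||
                     (value === -1 && (byte & 0x40) !== 0);
        bytes.push(done ? byte : byte | 0x80);
        if (done) return bytes;
      }
    }

    console.log(signedLeb(0x12345678).length);  // 5 -- the sentinel width
    console.log(signedLeb(3));                  // [3] -- 1 byte; 4 nops remain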
diff --git a/deps/v8/test/mjsunit/wasm/many-parameters.js b/deps/v8/test/mjsunit/wasm/many-parameters.js
index 46b231943d..7813ad453c 100644
--- a/deps/v8/test/mjsunit/wasm/many-parameters.js
+++ b/deps/v8/test/mjsunit/wasm/many-parameters.js
@@ -37,7 +37,7 @@ types.forEach((type, type_idx) => {
let body = [];
for (let i = 0; i < num_params; ++i)
- body.push(kExprGetLocal, (i + shift) % num_params);
+ body.push(kExprLocalGet, (i + shift) % num_params);
for (let i = 0; i < num_const_params; ++i)
body.push(...type_const[type_idx](num_params + i));
body.push(kExprCallFunction, 0);
diff --git a/deps/v8/test/mjsunit/wasm/memory-external-call.js b/deps/v8/test/mjsunit/wasm/memory-external-call.js
index 853cdf616a..1bb4bb1ecc 100644
--- a/deps/v8/test/mjsunit/wasm/memory-external-call.js
+++ b/deps/v8/test/mjsunit/wasm/memory-external-call.js
@@ -25,12 +25,12 @@ function generateBuilder(add_memory, import_sig) {
// Add the memory if we expect a module builder with memory and load/store.
builder.addMemory(initialMemoryPages, maximumMemoryPages, true);
builder.addFunction('load', kSig_i_i)
- .addBody([kExprGetLocal, 0, kExprI32LoadMem, 0, 0])
+ .addBody([kExprLocalGet, 0, kExprI32LoadMem, 0, 0])
.exportFunc();
builder.addFunction('store', kSig_i_ii)
.addBody([
- kExprGetLocal, 0, kExprGetLocal, 1, kExprI32StoreMem, 0, 0,
- kExprGetLocal, 1
+ kExprLocalGet, 0, kExprLocalGet, 1, kExprI32StoreMem, 0, 0,
+ kExprLocalGet, 1
])
.exportFunc();
}
@@ -83,14 +83,14 @@ function assertMemoryIndependence(load_a, store_a, load_b, store_b) {
builder.addMemory(kPages, kPages, true);
builder.addFunction("store", kSig_v_ii)
.addBody([
- kExprGetLocal, 0, // --
- kExprGetLocal, 1, // --
+ kExprLocalGet, 0, // --
+ kExprLocalGet, 1, // --
kExprI32StoreMem, 0, 0, // --
]) // --
.exportFunc();
builder.addFunction("load", kSig_i_i)
.addBody([
- kExprGetLocal, 0, // --
+ kExprLocalGet, 0, // --
kExprI32LoadMem, 0, 0, // --
]) // --
.exportFunc();
@@ -103,14 +103,14 @@ function assertMemoryIndependence(load_a, store_a, load_b, store_b) {
builder.addMemory(kPages, kPages, true);
builder.addFunction("store", kSig_v_ii)
.addBody([
- kExprGetLocal, 0, // --
- kExprGetLocal, 1, // --
+ kExprLocalGet, 0, // --
+ kExprLocalGet, 1, // --
kExprCallFunction, 0, // --
]) // --
.exportFunc();
builder.addFunction("load", kSig_i_i)
.addBody([
- kExprGetLocal, 0, // --
+ kExprLocalGet, 0, // --
kExprI32LoadMem, 0, 0, // --
]) // --
.exportFunc();
@@ -152,7 +152,7 @@ function assertMemoryIndependence(load_a, store_a, load_b, store_b) {
// Function to invoke the imported function and add 1 to the result.
first_module.addFunction('plus_one', kSig_i_i)
.addBody([
- kExprGetLocal, 0, // -
+ kExprLocalGet, 0, // -
kExprCallFunction, other_fn_idx, // call the imported function
kExprI32Const, 1, // -
kExprI32Add, // add 1 to the result
@@ -185,7 +185,7 @@ function assertMemoryIndependence(load_a, store_a, load_b, store_b) {
// Function to invoke the imported function and add 1 to the result.
first_module.addFunction('plus_one', kSig_i_i)
.addBody([
- kExprGetLocal, 0, // -
+ kExprLocalGet, 0, // -
kExprCallFunction, other_fn_idx, // call the imported function
kExprI32Const, 1, // -
kExprI32Add, // add 1 to the result
@@ -221,14 +221,14 @@ function assertMemoryIndependence(load_a, store_a, load_b, store_b) {
// Function to invoke the imported function and add 1 to the result.
first_module.addFunction('sandwich', kSig_i_iii)
.addBody([
- kExprGetLocal, 0, // param0 (index)
- kExprGetLocal, 1, // param1 (first_value)
+ kExprLocalGet, 0, // param0 (index)
+ kExprLocalGet, 1, // param1 (first_value)
kExprI32StoreMem, 0, 0, // store value in first_instance
- kExprGetLocal, 0, // param0 (index)
- kExprGetLocal, 2, // param2 (second_value)
+ kExprLocalGet, 0, // param0 (index)
+ kExprLocalGet, 2, // param2 (second_value)
kExprCallFunction, other_fn_idx, // call the imported function
kExprDrop, // drop the return value
- kExprGetLocal, 0, // param0 (index)
+ kExprLocalGet, 0, // param0 (index)
kExprI32LoadMem, 0, 0, // load from first_instance
kExprReturn // -
])
@@ -263,14 +263,14 @@ function assertMemoryIndependence(load_a, store_a, load_b, store_b) {
builder.addMemory(kPages, kPages, true);
builder.addFunction("store", kSig_v_ii)
.addBody([
- kExprGetLocal, 0, // --
- kExprGetLocal, 1, // --
+ kExprLocalGet, 0, // --
+ kExprLocalGet, 1, // --
kExprI32StoreMem, 0, 0, // --
]) // --
.exportFunc();
builder.addFunction("load", kSig_i_i)
.addBody([
- kExprGetLocal, 0, // --
+ kExprLocalGet, 0, // --
kExprI32LoadMem, 0, 0, // --
]) // --
.exportFunc();
@@ -308,9 +308,9 @@ function assertMemoryIndependence(load_a, store_a, load_b, store_b) {
var sig_index = builder.addType(kSig_v_ii);
builder.addFunction("store", kSig_v_iii)
.addBody([
- kExprGetLocal, 1,
- kExprGetLocal, 2,
- kExprGetLocal, 0,
+ kExprLocalGet, 1,
+ kExprLocalGet, 2,
+ kExprLocalGet, 0,
kExprCallIndirect, sig_index, kTableZero,
]).exportFunc();
diff --git a/deps/v8/test/mjsunit/wasm/memory-instance-validation.js b/deps/v8/test/mjsunit/wasm/memory-instance-validation.js
index ef65840532..a19b94b132 100644
--- a/deps/v8/test/mjsunit/wasm/memory-instance-validation.js
+++ b/deps/v8/test/mjsunit/wasm/memory-instance-validation.js
@@ -17,7 +17,7 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
.addBody([kExprMemorySize, kMemoryZero])
.exportFunc();
builder.addFunction("grow", kSig_i_i)
- .addBody([kExprGetLocal, 0, kExprMemoryGrow, kMemoryZero])
+ .addBody([kExprLocalGet, 0, kExprMemoryGrow, kMemoryZero])
.exportFunc();
var instances = [];
for (var i = 0; i < 5; i++) {
diff --git a/deps/v8/test/mjsunit/wasm/memory_1gb_oob.js b/deps/v8/test/mjsunit/wasm/memory_1gb_oob.js
index f2b22d97ab..a365b419f9 100644
--- a/deps/v8/test/mjsunit/wasm/memory_1gb_oob.js
+++ b/deps/v8/test/mjsunit/wasm/memory_1gb_oob.js
@@ -40,14 +40,14 @@ const indexes = (() => {
0|((offset >>> 28) & m)];
builder.addFunction("load", makeSig([kWasmI32], [type]))
.addBody([ // --
- kExprGetLocal, 0, // --
+ kExprLocalGet, 0, // --
load_opcode, 0, ...offset_bytes, // --
]) // --
.exportFunc();
builder.addFunction("store", makeSig([kWasmI32, type], []))
.addBody([ // --
- kExprGetLocal, 0, // --
- kExprGetLocal, 1, // --
+ kExprLocalGet, 0, // --
+ kExprLocalGet, 1, // --
store_opcode, 0, ...offset_bytes, // --
]) // --
.exportFunc();
diff --git a/deps/v8/test/mjsunit/wasm/memory_2gb_oob.js b/deps/v8/test/mjsunit/wasm/memory_2gb_oob.js
index 6baf0f3c7e..d5be98aa18 100644
--- a/deps/v8/test/mjsunit/wasm/memory_2gb_oob.js
+++ b/deps/v8/test/mjsunit/wasm/memory_2gb_oob.js
@@ -40,14 +40,14 @@ const indexes = (() => {
0|((offset >>> 28) & m)];
builder.addFunction("load", makeSig([kWasmI32], [type]))
.addBody([ // --
- kExprGetLocal, 0, // --
+ kExprLocalGet, 0, // --
load_opcode, 0, ...offset_bytes, // --
]) // --
.exportFunc();
builder.addFunction("store", makeSig([kWasmI32, type], []))
.addBody([ // --
- kExprGetLocal, 0, // --
- kExprGetLocal, 1, // --
+ kExprLocalGet, 0, // --
+ kExprLocalGet, 1, // --
store_opcode, 0, ...offset_bytes, // --
]) // --
.exportFunc();
diff --git a/deps/v8/test/mjsunit/wasm/memory_4gb_oob.js b/deps/v8/test/mjsunit/wasm/memory_4gb_oob.js
index 39b9f95d9c..e9533b8d6c 100644
--- a/deps/v8/test/mjsunit/wasm/memory_4gb_oob.js
+++ b/deps/v8/test/mjsunit/wasm/memory_4gb_oob.js
@@ -38,14 +38,14 @@ const indexes = (() => {
0|((offset >>> 28) & m)];
builder.addFunction("load", makeSig([kWasmI32], [type]))
.addBody([ // --
- kExprGetLocal, 0, // --
+ kExprLocalGet, 0, // --
load_opcode, 0, ...offset_bytes, // --
]) // --
.exportFunc();
builder.addFunction("store", makeSig([kWasmI32, type], []))
.addBody([ // --
- kExprGetLocal, 0, // --
- kExprGetLocal, 1, // --
+ kExprLocalGet, 0, // --
+ kExprLocalGet, 1, // --
store_opcode, 0, ...offset_bytes, // --
]) // --
.exportFunc();
diff --git a/deps/v8/test/mjsunit/wasm/module-memory.js b/deps/v8/test/mjsunit/wasm/module-memory.js
index 3dd580d269..0f870e7815 100644
--- a/deps/v8/test/mjsunit/wasm/module-memory.js
+++ b/deps/v8/test/mjsunit/wasm/module-memory.js
@@ -18,18 +18,18 @@ function genModule(memory) {
// main body: while(i) { if(mem[i]) return -1; i -= 4; } return 0;
// TODO(titzer): this manual bytecode has a copy of test-run-wasm.cc
/**/ kExprLoop, kWasmStmt, // --
- /* */ kExprGetLocal, 0, // --
+ /* */ kExprLocalGet, 0, // --
/* */ kExprIf, kWasmStmt, // --
- /* */ kExprGetLocal, 0, // --
+ /* */ kExprLocalGet, 0, // --
/* */ kExprI32LoadMem, 0, 0, // --
/* */ kExprIf, kWasmStmt, // --
/* */ kExprI32Const, 127, // --
/* */ kExprReturn, // --
/* */ kExprEnd, // --
- /* */ kExprGetLocal, 0, // --
+ /* */ kExprLocalGet, 0, // --
/* */ kExprI32Const, 4, // --
/* */ kExprI32Sub, // --
- /* */ kExprSetLocal, 0, // --
+ /* */ kExprLocalSet, 0, // --
/* */ kExprBr, 1, // --
/* */ kExprEnd, // --
/* */ kExprEnd, // --
@@ -52,9 +52,7 @@ function testPokeMemory() {
var array = new Int8Array(buffer);
assertEquals(kMemSize, array.length);
- for (var i = 0; i < kMemSize; i++) {
- assertEquals(0, array[i]);
- }
+ assertTrue(array.every(e => e === 0));
for (var i = 0; i < 10; i++) {
assertEquals(0, main(kMemSize - 4));
@@ -99,9 +97,7 @@ function testPokeOuterMemory() {
var array = new Int8Array(buffer.buffer);
assertEquals(kMemSize, array.length);
- for (var i = 0; i < kMemSize; i++) {
- assertEquals(0, array[i]);
- }
+ assertTrue(array.every(e => e === 0));
for (var i = 0; i < 10; i++) {
assertEquals(0, main(kMemSize - 4));
@@ -139,33 +135,30 @@ function testOOBThrows() {
builder.addMemory(1, 1, true);
builder.addFunction("geti", kSig_i_ii)
.addBody([
- kExprGetLocal, 0,
- kExprGetLocal, 1,
+ kExprLocalGet, 0,
+ kExprLocalGet, 1,
kExprI32LoadMem, 0, 0,
kExprI32StoreMem, 0, 0,
- kExprGetLocal, 1,
+ kExprLocalGet, 1,
kExprI32LoadMem, 0, 0,
])
.exportFunc();
var module = builder.instantiate();
- var offset;
- function read() { return module.exports.geti(0, offset); }
- function write() { return module.exports.geti(offset, 0); }
+ let read = offset => module.exports.geti(0, offset);
+ let write = offset => module.exports.geti(offset, 0);
- for (offset = 0; offset < 65533; offset++) {
- assertEquals(0, read());
- assertEquals(0, write());
- }
+ assertEquals(0, read(65532));
+ assertEquals(0, write(65532));
// Note that this test might be run concurrently in multiple Isolates, which
// makes an exact comparison of the expected trap count unreliable. But it is
// still possible to check the lower bound for the expected trap count.
- for (offset = 65534; offset < 66536; offset++) {
+ for (let offset = 65534; offset < 66536; offset++) {
const trap_count = %GetWasmRecoveredTrapCount();
- assertTraps(kTrapMemOutOfBounds, read);
- assertTraps(kTrapMemOutOfBounds, write);
+ assertTraps(kTrapMemOutOfBounds, () => read(offset));
+ assertTraps(kTrapMemOutOfBounds, () => write(offset));
if (%IsWasmTrapHandlerEnabled()) {
assertTrue(trap_count + 2 <= %GetWasmRecoveredTrapCount());
}
diff --git a/deps/v8/test/mjsunit/wasm/multi-value.js b/deps/v8/test/mjsunit/wasm/multi-value.js
index 31f9e8149b..e6a7ae99a5 100644
--- a/deps/v8/test/mjsunit/wasm/multi-value.js
+++ b/deps/v8/test/mjsunit/wasm/multi-value.js
@@ -15,8 +15,8 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
builder.addFunction("main", kSig_i_ii)
.addBody([
kExprBlock, sig_ii_v,
- kExprGetLocal, 0,
- kExprGetLocal, 1,
+ kExprLocalGet, 0,
+ kExprLocalGet, 1,
kExprEnd,
kExprI32Add])
.exportAs("main");
@@ -33,8 +33,8 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
builder.addFunction("main", kSig_i_ii)
.addBody([
- kExprGetLocal, 0,
- kExprGetLocal, 1,
+ kExprLocalGet, 0,
+ kExprLocalGet, 1,
kExprBlock, sig_i_ii,
kExprI32Add,
kExprEnd])
@@ -54,8 +54,8 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
builder.addFunction("main", kSig_i_ii)
.addBody([
kExprBlock, sig_ii_v,
- kExprGetLocal, 0,
- kExprGetLocal, 1,
+ kExprLocalGet, 0,
+ kExprLocalGet, 1,
kExprBr, 0,
kExprEnd,
kExprI32Add])
@@ -76,8 +76,8 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
builder.addFunction("main", kSig_i_ii)
.addBody([
kExprLoop, sig_ii_v,
- kExprGetLocal, 0,
- kExprGetLocal, 1,
+ kExprLocalGet, 0,
+ kExprLocalGet, 1,
kExprEnd,
kExprI32Add])
.exportAs("main");
@@ -94,8 +94,8 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
builder.addFunction("main", kSig_i_ii)
.addBody([
- kExprGetLocal, 0,
- kExprGetLocal, 1,
+ kExprLocalGet, 0,
+ kExprLocalGet, 1,
kExprLoop, sig_i_ii,
kExprI32Add,
kExprEnd])
@@ -114,13 +114,13 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
let sig_ii_ii = builder.addType(kSig_ii_ii);
builder.addFunction("dup", kSig_ii_i)
- .addBody([kExprGetLocal, 0, kExprGetLocal, 0]);
+ .addBody([kExprLocalGet, 0, kExprLocalGet, 0]);
builder.addFunction("swap", kSig_ii_ii)
- .addBody([kExprGetLocal, 1, kExprGetLocal, 0]);
+ .addBody([kExprLocalGet, 1, kExprLocalGet, 0]);
builder.addFunction("main", kSig_i_ii)
.addBody([
- kExprGetLocal, 0,
- kExprGetLocal, 1,
+ kExprLocalGet, 0,
+ kExprLocalGet, 1,
kExprLoop, sig_ii_ii,
kExprCallFunction, 1, // swap
kExprCallFunction, 0, // dup
@@ -164,13 +164,13 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
builder.addFunction("main", kSig_i_ii)
.addBody([
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
kExprIf, sig_ii_v,
- kExprGetLocal, 0,
- kExprGetLocal, 1,
+ kExprLocalGet, 0,
+ kExprLocalGet, 1,
kExprElse,
- kExprGetLocal, 1,
- kExprGetLocal, 0,
+ kExprLocalGet, 1,
+ kExprLocalGet, 0,
kExprEnd,
kExprI32Sub])
.exportAs("main");
@@ -188,9 +188,9 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
builder.addFunction("main", kSig_i_ii)
.addBody([
- kExprGetLocal, 0,
- kExprGetLocal, 1,
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
+ kExprLocalGet, 1,
+ kExprLocalGet, 0,
kExprIf, sig_i_ii,
kExprI32Add,
kExprElse,
@@ -212,14 +212,14 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
builder.addFunction("main", kSig_i_ii)
.addBody([
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
kExprIf, sig_ii_v,
- kExprGetLocal, 0,
- kExprGetLocal, 1,
+ kExprLocalGet, 0,
+ kExprLocalGet, 1,
kExprBr, 0,
kExprElse,
- kExprGetLocal, 1,
- kExprGetLocal, 0,
+ kExprLocalGet, 1,
+ kExprLocalGet, 0,
kExprBr, 0,
kExprEnd,
kExprI32Sub])
@@ -231,6 +231,27 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
assertEquals(instance.exports.main(0, 3), 3);
})();
+(function MultiIfParamOneArmedTest() {
+ print("MultiIfParamOneArmedTest");
+ let builder = new WasmModuleBuilder();
+ let sig_i_i = builder.addType(kSig_i_i);
+
+ builder.addFunction("main", kSig_i_i)
+ .addBody([
+ kExprLocalGet, 0,
+ kExprLocalGet, 0,
+ kExprIf, sig_i_i,
+ kExprI32Const, 5,
+ kExprI32Add,
+ kExprEnd])
+ .exportAs("main");
+
+ let module = new WebAssembly.Module(builder.toBuffer());
+ let instance = new WebAssembly.Instance(module);
+ assertEquals(instance.exports.main(0), 0);
+ assertEquals(instance.exports.main(1), 6);
+})();
+
(function MultiResultTest() {
print("MultiResultTest");
let builder = new WasmModuleBuilder();
@@ -239,15 +260,15 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
builder.addFunction("callee", kSig_iii_ii)
.addBody([
- kExprGetLocal, 0,
- kExprGetLocal, 1,
- kExprGetLocal, 0,
- kExprGetLocal, 1,
+ kExprLocalGet, 0,
+ kExprLocalGet, 1,
+ kExprLocalGet, 0,
+ kExprLocalGet, 1,
kExprI32Sub]);
builder.addFunction("main", kSig_i_ii)
.addBody([
- kExprGetLocal, 0,
- kExprGetLocal, 1,
+ kExprLocalGet, 0,
+ kExprLocalGet, 1,
kExprCallFunction, 0,
kExprI32Mul,
kExprI32Add])
@@ -272,14 +293,14 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
builder.addFunction("callee", kSig_ii_i)
.addBody([
- kExprGetLocal, 0,
- kExprGetLocal, 0,
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
+ kExprLocalGet, 0,
+ kExprLocalGet, 0,
kExprI32Add,
kExprReturn]);
builder.addFunction("main", kSig_i_i)
.addBody([
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
kExprCallFunction, 0,
kExprI32Mul])
.exportAs("main");
@@ -300,14 +321,14 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
builder.addFunction("callee", kSig_ii_i)
.addBody([
- kExprGetLocal, 0,
- kExprGetLocal, 0,
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
+ kExprLocalGet, 0,
+ kExprLocalGet, 0,
kExprI32Add,
kExprBr, 0]);
builder.addFunction("main", kSig_i_i)
.addBody([
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
kExprCallFunction, 0,
kExprI32Mul])
.exportAs("main");
@@ -320,26 +341,26 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
assertEquals(instance.exports.main(10), 200);
})();
-(function MultiJSReturnTest() {
+(function MultiWasmToJSReturnTest() {
print(arguments.callee.name);
let builder = new WasmModuleBuilder();
let sig_fi_if = makeSig([kWasmI32, kWasmF32], [kWasmF32, kWasmI32]);
builder.addFunction("swap", sig_fi_if)
.addBody([
- kExprGetLocal, 1,
- kExprGetLocal, 0])
+ kExprLocalGet, 1,
+ kExprLocalGet, 0])
.exportAs("swap");
builder.addFunction("addsubmul", kSig_iii_i)
.addBody([
- kExprGetLocal, 0,
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
+ kExprLocalGet, 0,
kExprI32Add,
- kExprGetLocal, 0,
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
+ kExprLocalGet, 0,
kExprI32Sub,
- kExprGetLocal, 0,
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
+ kExprLocalGet, 0,
kExprI32Mul])
.exportAs("addsubmul");
@@ -350,3 +371,75 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
assertEquals(instance.exports.addsubmul(4), [8, 0, 16]);
assertEquals(instance.exports.addsubmul(5), [10, 0, 25]);
})();
+
+(function MultiJSToWasmReturnTest() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ function swap(x, y) { return [y, x]; }
+ function swap_proxy(x, y) {
+ return new Proxy([y, x], {
+ get: function(obj, prop) { return Reflect.get(obj, prop); },
+ });
+ }
+ function proxy_throw(x, y) {
+ return new Proxy([y, x], {
+ get: function(obj, prop) {
+ if (prop == 1) {
+ throw new Error("abc");
+ }
+ return Reflect.get(obj, prop); },
+ });
+ }
+ function drop_first(x, y) {
+ return [y];
+ }
+ function repeat(x, y) {
+ return [x, y, x, y];
+ }
+ function not_receiver(x, y) {
+ return 0;
+ }
+ function not_iterable(x, y) {
+ let a = [x, y];
+ a[Symbol.iterator] = undefined;
+ return a;
+ }
+ function* generator(x, y) {
+ yield x;
+ yield y;
+ }
+ function* generator_throw(x, y) {
+ yield x;
+ throw new Error("def");
+ }
+
+ builder.addImport('imports', 'f', kSig_ii_ii);
+ builder.addFunction("main", kSig_ii_ii)
+ .addBody([
+ kExprLocalGet, 0,
+ kExprLocalGet, 1,
+ kExprCallFunction, 0])
+ .exportAs("main")
+
+ let module = new WebAssembly.Module(builder.toBuffer());
+
+ var instance = new WebAssembly.Instance(module, { 'imports' : { 'f' : swap } });
+ assertEquals(instance.exports.main(1, 2), [2, 1]);
+ instance = new WebAssembly.Instance(module, { 'imports' : { 'f' : swap_proxy } });
+ assertEquals(instance.exports.main(1, 2), [2, 1]);
+ instance = new WebAssembly.Instance(module, { 'imports' : { 'f' : generator } });
+ assertEquals(instance.exports.main(1, 2), [1, 2]);
+
+ instance = new WebAssembly.Instance(module, { 'imports' : { 'f' : drop_first } });
+ assertThrows(() => instance.exports.main(1, 2), TypeError, "multi-return length mismatch");
+ instance = new WebAssembly.Instance(module, { 'imports' : { 'f' : repeat } });
+ assertThrows(() => instance.exports.main(1, 2), TypeError, "multi-return length mismatch");
+ instance = new WebAssembly.Instance(module, { 'imports' : { 'f' : proxy_throw } });
+ assertThrows(() => instance.exports.main(1, 2), Error, "abc");
+ instance = new WebAssembly.Instance(module, { 'imports' : { 'f' : not_receiver } });
+ assertThrows(() => instance.exports.main(1, 2), TypeError, /not iterable/);
+ instance = new WebAssembly.Instance(module, { 'imports' : { 'f' : not_iterable } });
+ assertThrows(() => instance.exports.main(1, 2), TypeError, /not iterable/);
+ instance = new WebAssembly.Instance(module, { 'imports' : { 'f' : generator_throw } });
+ assertThrows(() => instance.exports.main(1, 2), Error, "def");
+})();
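The error cases above all follow one rule: when a JS import returns to a wasm function with a multi-value signature, the engine iterates the returned value and requires exactly as many elements as the signature has results. Roughly, and only as a sketch of the observable behavior (unpackMultiReturn is an illustrative name, not an engine API):

    function unpackMultiReturn(result, expectedLength) {
      // Spreading uses the iterable protocol, so a plain number or an object
      // with Symbol.iterator removed throws a "not iterable" TypeError here.
      const values = [...result];
      if (values.length !== expectedLength) {
        throw new TypeError('multi-return length mismatch');
      }
      return values;
    }

    console.log(unpackMultiReturn([2, 1], 2));  // [2, 1], like `swap` above
    // unpackMultiReturn(0, 2)         -> TypeError (not iterable)
    // unpackMultiReturn([1, 2, 3], 2) -> TypeError (length mismatch)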
diff --git a/deps/v8/test/mjsunit/wasm/multiple-code-spaces.js b/deps/v8/test/mjsunit/wasm/multiple-code-spaces.js
new file mode 100644
index 0000000000..f180cf6234
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/multiple-code-spaces.js
@@ -0,0 +1,54 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --randomize-all-allocations
+// Flags: --wasm-far-jump-table --wasm-max-initial-code-space-reservation=1
+
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+// Instantiate bigger modules, until at least four separate code spaces have
+// been allocated.
+// Each function calls through many of the previous functions to execute the
+// jump table(s) sufficiently.
+
+let num_functions = 50;
+while (true) {
+ print(`Trying ${num_functions} functions...`);
+ if (num_functions > 1e6) {
+ throw new Error('We should have hit four code spaces by now');
+ }
+ const builder = new WasmModuleBuilder();
+ builder.addMemory(1, 1, false);
+ builder.addFunction('f0', kSig_i_i).addBody([kExprLocalGet, 0]);
+ // Generate some code per function to fill the code space.
+ // Each function contains a number of loads that will not be executed
+ // (inside an "if (i == 0)" block). They increase the code size a bit so we
+ // do not need too many functions.
+ // Each function f<n> with argument {i} then calls f<n/10> with argument
+ // {i + 1} and returns whatever that function returns.
+ const body_template = [
+ kExprLocalGet, 0, kExprI32Eqz, kExprIf, kWasmStmt, // if (i == 0)
+ kExprLocalGet, 0 // get i
+ ];
+ for (let i = 0; i < 1000; ++i) body_template.push(kExprI32LoadMem, 0, 0);
+ body_template.push(
+ kExprDrop, kExprEnd, // end if
+ kExprLocalGet, 0, kExprI32Const, 1, kExprI32Add, // i + 1
+ kExprCallFunction // call f<?>
+ );
+ for (let i = 1; i < num_functions; ++i) {
+ const body = body_template.slice();
+ body.push(...wasmSignedLeb(Math.floor(i / 10)));
+ builder.addFunction('f' + i, kSig_i_i).addBody(body);
+ }
+ builder.addExport('f', num_functions - 1);
+ const instance = builder.instantiate();
+ let expected = 17;
+ for (let i = num_functions - 1; i > 0; i = Math.floor(i / 10)) ++expected;
+ assertEquals(expected, instance.exports.f(17));
+ const num_code_spaces = %WasmNumCodeSpaces(instance);
+ print(`--> ${num_code_spaces} code spaces.`);
+ if (num_code_spaces >= 4) break;
+ num_functions *= 2;
+}
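To make the expected-value arithmetic above concrete: each f&lt;n&gt; with n > 0 adds 1 to its argument before calling f&lt;floor(n/10)&gt;, and f0 returns its argument unchanged. A small plain-JS sketch of the same computation (expectedResult is an illustrative name):

    // One increment per function in the chain f<top> -> ... -> f1, matching
    // the `expected` loop in the test above.
    function expectedResult(arg, topIndex) {
      for (let i = topIndex; i > 0; i = Math.floor(i / 10)) ++arg;
      return arg;
    }

    console.log(expectedResult(17, 49));  // 19: f49(17) -> f4(18) -> f0(19)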
diff --git a/deps/v8/test/mjsunit/wasm/origin-trial-flags.js b/deps/v8/test/mjsunit/wasm/origin-trial-flags.js
index b9ce6f7f94..eae8ceb58c 100644
--- a/deps/v8/test/mjsunit/wasm/origin-trial-flags.js
+++ b/deps/v8/test/mjsunit/wasm/origin-trial-flags.js
@@ -13,7 +13,7 @@ function instantiateModuleWithThreads() {
builder.addMemory(2, 10, false, shared);
builder.addFunction('main', kSig_i_ii)
.addBody([
- kExprGetLocal, 0, kExprGetLocal, 1, kAtomicPrefix, kExprI32AtomicAdd, 2,
+ kExprLocalGet, 0, kExprLocalGet, 1, kAtomicPrefix, kExprI32AtomicAdd, 2,
0
])
.exportFunc();
diff --git a/deps/v8/test/mjsunit/wasm/parallel_compilation.js b/deps/v8/test/mjsunit/wasm/parallel_compilation.js
index 6eca124bc4..7a1da58e8f 100644
--- a/deps/v8/test/mjsunit/wasm/parallel_compilation.js
+++ b/deps/v8/test/mjsunit/wasm/parallel_compilation.js
@@ -50,7 +50,7 @@ function assertFunction(module, func) {
for (i = 0; i < 1000; i++) {
builder.addFunction("sub" + i, kSig_i_i)
.addBody([ // --
- kExprGetLocal, 0, // --
+ kExprLocalGet, 0, // --
kExprI32Const, i % 61, // --
kExprI32Sub]) // --
.exportFunc()
@@ -74,8 +74,8 @@ function assertFunction(module, func) {
f[0] = builder.addFunction("add0", kSig_i_ii)
.addBody([
- kExprGetLocal, 0, // --
- kExprGetLocal, 1, // --
+ kExprLocalGet, 0, // --
+ kExprLocalGet, 1, // --
kExprI32Add, // --
])
.exportFunc()
@@ -84,8 +84,8 @@ function assertFunction(module, func) {
for (i = 1; i < 256; i++) {
f[i] = builder.addFunction("add" + i, kSig_i_ii)
.addBody([ // --
- kExprGetLocal, 0, // --
- kExprGetLocal, 1, // --
+ kExprLocalGet, 0, // --
+ kExprLocalGet, 1, // --
kExprCallFunction, f[i >>> 1].index]) // --
.exportFunc()
}
diff --git a/deps/v8/test/mjsunit/wasm/params.js b/deps/v8/test/mjsunit/wasm/params.js
index 33858429c4..eed893fd57 100644
--- a/deps/v8/test/mjsunit/wasm/params.js
+++ b/deps/v8/test/mjsunit/wasm/params.js
@@ -17,7 +17,7 @@ function testSelect2(type) {
var builder = new WasmModuleBuilder();
builder.addFunction("select", makeSig_r_xx(type, type))
- .addBody([kExprGetLocal, which])
+ .addBody([kExprLocalGet, which])
.exportFunc()
var select = builder.instantiate().exports.select;
@@ -79,7 +79,7 @@ function testSelect10(t) {
var builder = new WasmModuleBuilder();
builder.addFunction("select", makeSig([t,t,t,t,t,t,t,t,t,t], [t]))
- .addBody([kExprGetLocal, which])
+ .addBody([kExprLocalGet, which])
.exportFunc();
var select = builder.instantiate().exports.select;
diff --git a/deps/v8/test/mjsunit/wasm/receiver.js b/deps/v8/test/mjsunit/wasm/receiver.js
index 10e8855927..de8954ff98 100644
--- a/deps/v8/test/mjsunit/wasm/receiver.js
+++ b/deps/v8/test/mjsunit/wasm/receiver.js
@@ -13,8 +13,8 @@ function testCallImport(func, expected, a, b) {
builder.addImport("mod", "func", sig_index);
builder.addFunction("main", sig_index)
.addBody([
- kExprGetLocal, 0, // --
- kExprGetLocal, 1, // --
+ kExprLocalGet, 0, // --
+ kExprLocalGet, 1, // --
kExprCallFunction, 0]) // --
.exportAs("main");
diff --git a/deps/v8/test/mjsunit/wasm/return-calls.js b/deps/v8/test/mjsunit/wasm/return-calls.js
index 22d2860df1..7dd56ef02f 100644
--- a/deps/v8/test/mjsunit/wasm/return-calls.js
+++ b/deps/v8/test/mjsunit/wasm/return-calls.js
@@ -18,15 +18,15 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
// f_aux(N,X) => f_aux(N-1,X*N)
let fact_aux = builder.addFunction("fact_aux",kSig_i_ii);
fact_aux.addBody([
- kExprGetLocal, 0, kExprI32Const, 1, kExprI32LeS,
+ kExprLocalGet, 0, kExprI32Const, 1, kExprI32LeS,
kExprIf, kWasmI32,
- kExprGetLocal, 1,
+ kExprLocalGet, 1,
kExprElse,
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
kExprI32Const, 1,
kExprI32Sub,
- kExprGetLocal, 0,
- kExprGetLocal, 1,
+ kExprLocalGet, 0,
+ kExprLocalGet, 1,
kExprI32Mul,
kExprReturnCall, fact_aux.index,
kExprEnd
@@ -35,7 +35,7 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
//main(N)=>fact_aux(N,1)
let main = builder.addFunction("main", kSig_i_i)
.addBody([
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
kExprI32Const, 1,
kExprReturnCall,0
]).exportFunc();
@@ -63,18 +63,18 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
let f_ind = builder.addFunction("f_ind",kSig_i_iii).
addBody([
- kExprGetLocal, 0, kExprI32Const, 1, kExprI32LeS,
+ kExprLocalGet, 0, kExprI32Const, 1, kExprI32LeS,
kExprIf, kWasmI32,
- kExprGetLocal, 1,
+ kExprLocalGet, 1,
kExprElse,
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
kExprI32Const, 1,
kExprI32Sub,
- kExprGetLocal, 0,
- kExprGetLocal, 1,
+ kExprLocalGet, 0,
+ kExprLocalGet, 1,
kExprI32Mul,
- kExprGetLocal, 2,
- kExprGetLocal, 2,
+ kExprLocalGet, 2,
+ kExprLocalGet, 2,
kExprReturnCallIndirect, sig_i_iii, kTableZero,
kExprEnd
]);
@@ -82,7 +82,7 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
//main(N)=>fact_aux(N,1)
let main = builder.addFunction("main", kSig_i_i)
.addBody([
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
kExprI32Const, 1,
kExprI32Const, f_ind.index,
kExprReturnCall, f_ind.index
@@ -109,9 +109,9 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
let main = builder.addFunction("main", kSig_i_iii)
.addBody([
- kExprGetLocal, 1,
- kExprGetLocal, 2,
- kExprGetLocal, 0,
+ kExprLocalGet, 1,
+ kExprLocalGet, 2,
+ kExprLocalGet, 0,
kExprReturnCall, pick
])
.exportFunc();
@@ -141,9 +141,9 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
let main = builder.addFunction("main", kSig_i_iii)
.addBody([
- kExprGetLocal, 1,
- kExprGetLocal, 2,
- kExprGetLocal, 0,
+ kExprLocalGet, 1,
+ kExprLocalGet, 2,
+ kExprLocalGet, 0,
kExprI32Const, tableIndex,
kExprReturnCallIndirect, sig_i_iii, kTableZero
])
diff --git a/deps/v8/test/mjsunit/wasm/shared-arraybuffer-worker-simple-gc.js b/deps/v8/test/mjsunit/wasm/shared-arraybuffer-worker-simple-gc.js
new file mode 100644
index 0000000000..a32e6f4d15
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/shared-arraybuffer-worker-simple-gc.js
@@ -0,0 +1,84 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-gc
+
+const kNumIterations = 10;
+
+function NewWorker() {
+ let script =
+`onmessage = (msg) => {
+ if (msg.memory) postMessage("ack");
+ if (msg.quit) postMessage("bye");
+ gc();
+}`;
+ return new Worker(script, {type: 'string'});
+}
+
+function PingWorker(worker, memory) {
+ worker.postMessage({memory: memory});
+ assertEquals("ack", worker.getMessage());
+ worker.postMessage({quit: true});
+ assertEquals("bye", worker.getMessage());
+}
+
+function AllocMemory() {
+ return new SharedArrayBuffer(1024);
+}
+
+function RunSingleWorkerSingleMemoryTest() {
+ print(arguments.callee.name);
+ let worker = NewWorker();
+ let first = AllocMemory();
+ for (let i = 0; i < kNumIterations; i++) {
+ print(`iteration ${i}`);
+ PingWorker(worker, first);
+ gc();
+ }
+ worker.terminate();
+}
+
+function RunSingleWorkerTwoMemoryTest() {
+ print(arguments.callee.name);
+ let worker = NewWorker();
+ let first = AllocMemory(), second = AllocMemory();
+ for (let i = 0; i < kNumIterations; i++) {
+ print(`iteration ${i}`);
+ PingWorker(worker, first);
+ PingWorker(worker, second);
+ gc();
+ }
+ worker.terminate();
+}
+
+function RunSingleWorkerMultipleMemoryTest() {
+ print(arguments.callee.name);
+ let worker = NewWorker();
+ let first = AllocMemory();
+ for (let i = 0; i < kNumIterations; i++) {
+ print(`iteration ${i}`);
+ PingWorker(worker, first);
+ PingWorker(worker, AllocMemory());
+ gc();
+ }
+ worker.terminate();
+}
+
+function RunMultipleWorkerMultipleMemoryTest() {
+ print(arguments.callee.name);
+ let first = AllocMemory();
+ for (let i = 0; i < kNumIterations; i++) {
+ print(`iteration ${i}`);
+ let worker = NewWorker();
+ PingWorker(worker, first);
+ PingWorker(worker, AllocMemory());
+ worker.terminate();
+ gc();
+ }
+}
+
+RunSingleWorkerSingleMemoryTest();
+RunSingleWorkerTwoMemoryTest();
+RunSingleWorkerMultipleMemoryTest();
+RunMultipleWorkerMultipleMemoryTest();
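
A note on the harness used above: this Worker is d8's shell worker, not the DOM API. With {type: 'string'} the first argument is the worker's source text, and worker.getMessage() blocks until the worker posts a reply, which is what lets PingWorker assert synchronously. A minimal sketch of that round trip (d8 only):

// d8 shell worker round trip; getMessage() blocks until a reply arrives.
let w = new Worker(`onmessage = (msg) => postMessage("ack")`, {type: 'string'});
w.postMessage({});
assertEquals("ack", w.getMessage());
w.terminate();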
diff --git a/deps/v8/test/mjsunit/wasm/shared-memory-gc-stress.js b/deps/v8/test/mjsunit/wasm/shared-memory-gc-stress.js
index 8721d8d066..1dbbcb9ff6 100644
--- a/deps/v8/test/mjsunit/wasm/shared-memory-gc-stress.js
+++ b/deps/v8/test/mjsunit/wasm/shared-memory-gc-stress.js
@@ -10,11 +10,11 @@ function AllocMemory(pages, max = pages) {
}
function RunSomeAllocs(total, retained, pages, max = pages) {
- print(`-------iterations = ${total}, retained = $ { retained } -------`);
+ print(`-------iterations = ${total}, retained = ${retained} -------`);
var array = new Array(retained);
for (var i = 0; i < total; i++) {
if ((i % 25) == 0)
- print(`iteration $ { i }`);
+ print(`iteration ${i}`);
let pair = AllocMemory(pages, max);
// For some iterations, retain the memory, view, or both.
switch (i % 3) {
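
The hunk above is a real bug fix rather than a rename: `$ { i }` with spaces is not a template-literal placeholder, so the old code printed the text verbatim. For comparison:

let i = 3;
print(`iteration $ { i }`);  // prints: iteration $ { i }
print(`iteration ${i}`);     // prints: iteration 3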
diff --git a/deps/v8/test/mjsunit/wasm/shared-memory-worker-gc.js b/deps/v8/test/mjsunit/wasm/shared-memory-worker-gc.js
index 376917b6ee..6afc6115f8 100644
--- a/deps/v8/test/mjsunit/wasm/shared-memory-worker-gc.js
+++ b/deps/v8/test/mjsunit/wasm/shared-memory-worker-gc.js
@@ -2,9 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --experimental-wasm-threads
+// Flags: --experimental-wasm-threads --expose-gc
-const kNumMessages = 5000;
+const kNumMessages = 1000;
function AllocMemory(pages = 1, max = pages) {
return new WebAssembly.Memory({initial : pages, maximum : max, shared : true});
@@ -15,6 +15,7 @@ function AllocMemory(pages = 1, max = pages) {
`onmessage =
function(msg) {
if (msg.memory) postMessage({memory : msg.memory});
+ gc();
}`, {type : 'string'});
let time = performance.now();
@@ -30,5 +31,6 @@ function AllocMemory(pages = 1, max = pages) {
if (msg.memory) {
assertInstanceof(msg.memory, WebAssembly.Memory);
}
+ gc();
}
})();
diff --git a/deps/v8/test/mjsunit/wasm/shared-memory-worker-simple-gc.js b/deps/v8/test/mjsunit/wasm/shared-memory-worker-simple-gc.js
new file mode 100644
index 0000000000..53229861cc
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/shared-memory-worker-simple-gc.js
@@ -0,0 +1,85 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --experimental-wasm-threads --expose-gc
+
+const kNumIterations = 10;
+
+function NewWorker() {
+ let script =
+`onmessage = (msg) => {
+ if (msg.memory) postMessage("ack");
+ if (msg.quit) postMessage("bye");
+ gc();
+}`;
+ return new Worker(script, {type: 'string'});
+}
+
+function PingWorker(worker, memory) {
+ worker.postMessage({memory: memory});
+ assertEquals("ack", worker.getMessage());
+ worker.postMessage({quit: true});
+ assertEquals("bye", worker.getMessage());
+}
+
+function AllocMemory() {
+ let pages = 1, max = 1;
+ return new WebAssembly.Memory({initial : pages, maximum : max, shared : true});
+}
+
+function RunSingleWorkerSingleMemoryTest() {
+ print(arguments.callee.name);
+ let worker = NewWorker();
+ let first = AllocMemory();
+ for (let i = 0; i < kNumIterations; i++) {
+ print(`iteration ${i}`);
+ PingWorker(worker, first);
+ gc();
+ }
+ worker.terminate();
+}
+
+function RunSingleWorkerTwoMemoryTest() {
+ print(arguments.callee.name);
+ let worker = NewWorker();
+ let first = AllocMemory(), second = AllocMemory();
+ for (let i = 0; i < kNumIterations; i++) {
+ print(`iteration ${i}`);
+ PingWorker(worker, first);
+ PingWorker(worker, second);
+ gc();
+ }
+ worker.terminate();
+}
+
+function RunSingleWorkerMultipleMemoryTest() {
+ print(arguments.callee.name);
+ let worker = NewWorker();
+ let first = AllocMemory();
+ for (let i = 0; i < kNumIterations; i++) {
+ print(`iteration ${i}`);
+ PingWorker(worker, first);
+ PingWorker(worker, AllocMemory());
+ gc();
+ }
+ worker.terminate();
+}
+
+function RunMultipleWorkerMultipleMemoryTest() {
+ print(arguments.callee.name);
+ let first = AllocMemory();
+ for (let i = 0; i < kNumIterations; i++) {
+ print(`iteration ${i}`);
+ let worker = NewWorker();
+ PingWorker(worker, first);
+ PingWorker(worker, AllocMemory());
+ worker.terminate();
+ gc();
+ }
+}
+
+RunSingleWorkerSingleMemoryTest();
+RunSingleWorkerTwoMemoryTest();
+RunSingleWorkerMultipleMemoryTest();
+RunMultipleWorkerMultipleMemoryTest();
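
This file mirrors the SharedArrayBuffer test above, but allocates a shared WebAssembly.Memory instead. With {shared: true} (behind --experimental-wasm-threads at this version) the memory's buffer is a SharedArrayBuffer, so posting it to a worker shares the underlying pages rather than copying them:

// Sketch: a shared wasm memory exposes a SharedArrayBuffer buffer.
let mem = new WebAssembly.Memory({initial: 1, maximum: 1, shared: true});
assertInstanceof(mem.buffer, SharedArrayBuffer);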
diff --git a/deps/v8/test/mjsunit/wasm/shared-memory.js b/deps/v8/test/mjsunit/wasm/shared-memory.js
index 80e894b28f..696b0412ae 100644
--- a/deps/v8/test/mjsunit/wasm/shared-memory.js
+++ b/deps/v8/test/mjsunit/wasm/shared-memory.js
@@ -73,8 +73,8 @@ function assertMemoryIsValid(memory, shared) {
let builder = new WasmModuleBuilder();
builder.addFunction("main", kSig_i_ii)
.addBody([
- kExprGetLocal, 0,
- kExprGetLocal, 1,
+ kExprLocalGet, 0,
+ kExprLocalGet, 1,
kAtomicPrefix,
kExprI32AtomicAdd]);
builder.addImportedMemory("m", "imported_mem");
@@ -119,8 +119,8 @@ function assertMemoryIsValid(memory, shared) {
builder.addMemory(2, 10, false, "shared");
builder.addFunction("main", kSig_i_ii)
.addBody([
- kExprGetLocal, 0,
- kExprGetLocal, 1,
+ kExprLocalGet, 0,
+ kExprLocalGet, 1,
kAtomicPrefix,
kExprI32AtomicAdd, 2, 0])
.exportFunc();
diff --git a/deps/v8/test/mjsunit/wasm/stackwalk.js b/deps/v8/test/mjsunit/wasm/stackwalk.js
index 91951ff4c3..e1cd6522ec 100644
--- a/deps/v8/test/mjsunit/wasm/stackwalk.js
+++ b/deps/v8/test/mjsunit/wasm/stackwalk.js
@@ -13,8 +13,8 @@ function makeFFI(func) {
builder.addImport("mom", "func", sig_index);
builder.addFunction("main", sig_index)
.addBody([
- kExprGetLocal, 0, // --
- kExprGetLocal, 1, // --
+ kExprLocalGet, 0, // --
+ kExprLocalGet, 1, // --
kExprCallFunction, 0, // --
])
.exportFunc()
diff --git a/deps/v8/test/mjsunit/wasm/start-function.js b/deps/v8/test/mjsunit/wasm/start-function.js
index e17c8f1785..bda9d085c5 100644
--- a/deps/v8/test/mjsunit/wasm/start-function.js
+++ b/deps/v8/test/mjsunit/wasm/start-function.js
@@ -29,9 +29,9 @@ function assertVerifies(sig, body) {
assertVerifies(kSig_v_v, [kExprNop]);
// Arguments aren't allowed to start functions.
-assertThrows(() => {instantiate(kSig_i_i, [kExprGetLocal, 0]);});
-assertThrows(() => {instantiate(kSig_i_ii, [kExprGetLocal, 0]);});
-assertThrows(() => {instantiate(kSig_i_dd, [kExprGetLocal, 0]);});
+assertThrows(() => {instantiate(kSig_i_i, [kExprLocalGet, 0]);});
+assertThrows(() => {instantiate(kSig_i_ii, [kExprLocalGet, 0]);});
+assertThrows(() => {instantiate(kSig_i_dd, [kExprLocalGet, 0]);});
assertThrows(() => {instantiate(kSig_i_v, [kExprI32Const, 0]);});
(function testInvalidIndex() {
diff --git a/deps/v8/test/mjsunit/wasm/streaming-api.js b/deps/v8/test/mjsunit/wasm/streaming-api.js
index 3decc1a70c..01e6637b4d 100644
--- a/deps/v8/test/mjsunit/wasm/streaming-api.js
+++ b/deps/v8/test/mjsunit/wasm/streaming-api.js
@@ -10,7 +10,7 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
print(arguments.callee.name);
let builder = new WasmModuleBuilder();
builder.addFunction("main", kSig_i_i)
- .addBody([kExprGetLocal, 0])
+ .addBody([kExprLocalGet, 0])
.exportAs("main");
let bytes = builder.toBuffer();
assertPromiseResult(WebAssembly.compileStreaming(Promise.resolve(bytes)).then(
@@ -22,7 +22,7 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
print(arguments.callee.name);
let builder = new WasmModuleBuilder();
builder.addFunction("main", kSig_i_i)
- .addBody([kExprGetLocal, 0])
+ .addBody([kExprLocalGet, 0])
.exportAs("main");
let bytes = builder.toBuffer();
assertPromiseResult(WebAssembly.instantiateStreaming(Promise.resolve(bytes)).then(
@@ -47,8 +47,8 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
print(arguments.callee.name);
let builder = new WasmModuleBuilder();
builder.addFunction("main", kSig_i_i)
- .addBody([kExprGetLocal, 0,
- kExprGetLocal, 0,
+ .addBody([kExprLocalGet, 0,
+ kExprLocalGet, 0,
kExprF32Mul])
.exportAs("main");
let bytes = builder.toBuffer();
diff --git a/deps/v8/test/mjsunit/wasm/streaming-error-position.js b/deps/v8/test/mjsunit/wasm/streaming-error-position.js
index b6d008cd13..77c1b46e85 100644
--- a/deps/v8/test/mjsunit/wasm/streaming-error-position.js
+++ b/deps/v8/test/mjsunit/wasm/streaming-error-position.js
@@ -303,11 +303,11 @@ function testErrorPosition(bytes, pos, test_name) {
1, // functions count
4, // body size
0, // locals count
- kExprGetLocal, 0, // Access a non-existing local
+ kExprLocalGet, 0, // Access a non-existing local
kExprEnd // --
]);
- // Find error at the index of kExprGetLocal.
+ // Find error at the index of kExprLocalGet.
let pos = bytes.length - 1 - 1;
testErrorPosition(bytes, pos, 'testInvalidCode');
})();
@@ -334,7 +334,7 @@ function testErrorPosition(bytes, pos, test_name) {
0, // section length (too big)
]);
- // Find error at the index of kExprGetLocal.
+ // Find error at the index of kExprLocalGet.
let pos = bytes.length - 1;
testErrorPosition(bytes, pos, 'testCodeSectionSizeZero');
})();
diff --git a/deps/v8/test/mjsunit/wasm/table-access.js b/deps/v8/test/mjsunit/wasm/table-access.js
index b91934d949..3e718cf06c 100644
--- a/deps/v8/test/mjsunit/wasm/table-access.js
+++ b/deps/v8/test/mjsunit/wasm/table-access.js
@@ -11,14 +11,14 @@ function addTableWithAccessors(builder, type, size, name) {
const table = builder.addTable(type, size);
const set_sig = makeSig([kWasmI32, type], []);
builder.addFunction('set_' + name, set_sig)
- .addBody([kExprGetLocal, 0,
- kExprGetLocal, 1,
+ .addBody([kExprLocalGet, 0,
+ kExprLocalGet, 1,
kExprTableSet, table.index])
.exportFunc();
const get_sig = makeSig([kWasmI32], [type]);
builder.addFunction('get_' + name, get_sig)
- .addBody([kExprGetLocal, 0, kExprTableGet, table.index])
+ .addBody([kExprLocalGet, 0, kExprTableGet, table.index])
.exportFunc();
}
@@ -109,10 +109,10 @@ const dummy_func = exports.set_table_func1;
const f2 = builder.addFunction('f', kSig_i_v).addBody([kExprI32Const, value2]);
const f3 = builder.addFunction('f', kSig_i_v).addBody([kExprI32Const, value3]);
builder.addFunction('get_t1', kSig_a_i)
- .addBody([kExprGetLocal, 0, kExprTableGet, t1])
+ .addBody([kExprLocalGet, 0, kExprTableGet, t1])
.exportFunc();
builder.addFunction('get_t2', kSig_a_i)
- .addBody([kExprGetLocal, 0, kExprTableGet, t2])
+ .addBody([kExprLocalGet, 0, kExprTableGet, t2])
.exportFunc();
const offset1 = 3;
diff --git a/deps/v8/test/mjsunit/wasm/table-copy-anyref.js b/deps/v8/test/mjsunit/wasm/table-copy-anyref.js
index d5cddb3ed6..8b2546a594 100644
--- a/deps/v8/test/mjsunit/wasm/table-copy-anyref.js
+++ b/deps/v8/test/mjsunit/wasm/table-copy-anyref.js
@@ -17,7 +17,7 @@ builder.addTable(kWasmAnyFunc, 1000);
builder.addFunction('copy', kSig_v_iii)
.addBody([
- kExprGetLocal, 0, kExprGetLocal, 1, kExprGetLocal, 2, kNumericPrefix,
+ kExprLocalGet, 0, kExprLocalGet, 1, kExprLocalGet, 2, kNumericPrefix,
kExprTableCopy, kTableZero, kTableZero
])
.exportFunc();
diff --git a/deps/v8/test/mjsunit/wasm/table-copy.js b/deps/v8/test/mjsunit/wasm/table-copy.js
index db0dc83191..ead06f4f23 100644
--- a/deps/v8/test/mjsunit/wasm/table-copy.js
+++ b/deps/v8/test/mjsunit/wasm/table-copy.js
@@ -38,7 +38,7 @@ function assertCall(call, ...elems) {
for (let i = 0; i < kTableSize; i++) {
let f = builder.addFunction("", kSig_i_v)
.addBody([
- kExprGetGlobal, g,
+ kExprGlobalGet, g,
...wasmI32Const(i),
kExprI32Add
]);
@@ -47,15 +47,15 @@ function assertCall(call, ...elems) {
builder.addFunction("copy", sig_v_iii)
.addBody([
- kExprGetLocal, 0,
- kExprGetLocal, 1,
- kExprGetLocal, 2,
+ kExprLocalGet, 0,
+ kExprLocalGet, 1,
+ kExprLocalGet, 2,
kNumericPrefix, kExprTableCopy, kTableZero, kTableZero])
.exportAs("copy");
builder.addFunction("call", sig_i_i)
.addBody([
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
kExprCallIndirect, sig_i_v, kTableZero])
.exportAs("call");
diff --git a/deps/v8/test/mjsunit/wasm/table-fill.js b/deps/v8/test/mjsunit/wasm/table-fill.js
index ed5938f908..64c4d7732d 100644
--- a/deps/v8/test/mjsunit/wasm/table-fill.js
+++ b/deps/v8/test/mjsunit/wasm/table-fill.js
@@ -32,13 +32,13 @@ const internal_func = builder.addTable(kWasmAnyFunc, size, maximum).index;
for (index of [import_ref, internal_ref]) {
builder.addFunction(`fill${index}`, kSig_v_iri)
.addBody([
- kExprGetLocal, 0, kExprGetLocal, 1, kExprGetLocal, 2, kNumericPrefix,
+ kExprLocalGet, 0, kExprLocalGet, 1, kExprLocalGet, 2, kNumericPrefix,
kExprTableFill, index
])
.exportFunc();
builder.addFunction(`get${index}`, kSig_r_i)
- .addBody([kExprGetLocal, 0, kExprTableGet, index])
+ .addBody([kExprLocalGet, 0, kExprTableGet, index])
.exportFunc();
}
@@ -47,13 +47,13 @@ const sig_index = builder.addType(kSig_i_v);
for (index of [import_func, internal_func]) {
builder.addFunction(`fill${index}`, kSig_v_iai)
.addBody([
- kExprGetLocal, 0, kExprGetLocal, 1, kExprGetLocal, 2, kNumericPrefix,
+ kExprLocalGet, 0, kExprLocalGet, 1, kExprLocalGet, 2, kNumericPrefix,
kExprTableFill, index
])
.exportFunc();
builder.addFunction(`call${index}`, kSig_i_i)
- .addBody([kExprGetLocal, 0, kExprCallIndirect, sig_index, index])
+ .addBody([kExprLocalGet, 0, kExprCallIndirect, sig_index, index])
.exportFunc();
}
diff --git a/deps/v8/test/mjsunit/wasm/table-grow-from-wasm.js b/deps/v8/test/mjsunit/wasm/table-grow-from-wasm.js
index 5b37af32c2..8018407348 100644
--- a/deps/v8/test/mjsunit/wasm/table-grow-from-wasm.js
+++ b/deps/v8/test/mjsunit/wasm/table-grow-from-wasm.js
@@ -28,8 +28,8 @@ function testGrowInternalAnyRefTable(table_index) {
builder.addTable(kWasmAnyRef, initial_size).index;
}
builder.addFunction('grow', kSig_i_ri)
- .addBody([kExprGetLocal, 0,
- kExprGetLocal, 1,
+ .addBody([kExprLocalGet, 0,
+ kExprLocalGet, 1,
kNumericPrefix, kExprTableGrow, table_index])
.exportFunc();
@@ -38,7 +38,7 @@ function testGrowInternalAnyRefTable(table_index) {
.exportFunc();
builder.addFunction('get', kSig_r_i)
- .addBody([kExprGetLocal, 0, kExprTableGet, table_index])
+ .addBody([kExprLocalGet, 0, kExprTableGet, table_index])
.exportFunc();
const instance = builder.instantiate();
@@ -75,8 +75,8 @@ function testGrowInternalAnyFuncTable(table_index) {
builder.addTable(kWasmAnyFunc, size).index;
}
builder.addFunction('grow', kSig_i_ai)
- .addBody([kExprGetLocal, 0,
- kExprGetLocal, 1,
+ .addBody([kExprLocalGet, 0,
+ kExprLocalGet, 1,
kNumericPrefix, kExprTableGrow, table_index])
.exportFunc();
@@ -86,7 +86,7 @@ function testGrowInternalAnyFuncTable(table_index) {
const sig_index = builder.addType(kSig_i_v);
builder.addFunction('call', kSig_i_i)
- .addBody([kExprGetLocal, 0, kExprCallIndirect, sig_index, table_index])
+ .addBody([kExprLocalGet, 0, kExprCallIndirect, sig_index, table_index])
.exportFunc();
const instance = builder.instantiate();
@@ -118,8 +118,8 @@ testGrowInternalAnyFuncTable(9);
const builder = new WasmModuleBuilder();
const table_index = builder.addImportedTable("imp", "table", size, undefined, kWasmAnyRef);
builder.addFunction('grow', kSig_i_ri)
- .addBody([kExprGetLocal, 0,
- kExprGetLocal, 1,
+ .addBody([kExprLocalGet, 0,
+ kExprLocalGet, 1,
kNumericPrefix, kExprTableGrow, table_index])
.exportFunc();
@@ -164,26 +164,26 @@ testGrowInternalAnyFuncTable(9);
const internal_func = builder.addTable(kWasmAnyFunc, initial, maximum).index;
builder.addFunction('grow_imported_ref', kSig_i_ri)
- .addBody([kExprGetLocal, 0,
- kExprGetLocal, 1,
+ .addBody([kExprLocalGet, 0,
+ kExprLocalGet, 1,
kNumericPrefix, kExprTableGrow, import_ref])
.exportFunc();
builder.addFunction('grow_imported_func', kSig_i_ai)
- .addBody([kExprGetLocal, 0,
- kExprGetLocal, 1,
+ .addBody([kExprLocalGet, 0,
+ kExprLocalGet, 1,
kNumericPrefix, kExprTableGrow, import_func])
.exportFunc();
builder.addFunction('grow_internal_ref', kSig_i_ri)
- .addBody([kExprGetLocal, 0,
- kExprGetLocal, 1,
+ .addBody([kExprLocalGet, 0,
+ kExprLocalGet, 1,
kNumericPrefix, kExprTableGrow, internal_ref])
.exportFunc();
builder.addFunction('grow_internal_func', kSig_i_ai)
- .addBody([kExprGetLocal, 0,
- kExprGetLocal, 1,
+ .addBody([kExprLocalGet, 0,
+ kExprLocalGet, 1,
kNumericPrefix, kExprTableGrow, internal_func])
.exportFunc();
diff --git a/deps/v8/test/mjsunit/wasm/table-grow.js b/deps/v8/test/mjsunit/wasm/table-grow.js
index a8508b4bdd..d2b7970bfa 100644
--- a/deps/v8/test/mjsunit/wasm/table-grow.js
+++ b/deps/v8/test/mjsunit/wasm/table-grow.js
@@ -10,20 +10,20 @@ function addFunctions(builder) {
let sig_index = builder.addType(kSig_i_ii);
let mul = builder.addFunction("mul", sig_index)
.addBody([
- kExprGetLocal, 0, // --
- kExprGetLocal, 1, // --
+ kExprLocalGet, 0, // --
+ kExprLocalGet, 1, // --
kExprI32Mul // --
]);
let add = builder.addFunction("add", sig_index)
.addBody([
- kExprGetLocal, 0, // --
- kExprGetLocal, 1, // --
+ kExprLocalGet, 0, // --
+ kExprLocalGet, 1, // --
kExprI32Add // --
]);
let sub = builder.addFunction("sub", sig_index)
.addBody([
- kExprGetLocal, 0, // --
- kExprGetLocal, 1, // --
+ kExprLocalGet, 0, // --
+ kExprLocalGet, 1, // --
kExprI32Sub // --
]);
return {mul: mul, add: add, sub: sub};
@@ -45,7 +45,7 @@ function addMain(builder) {
builder.addFunction("main", kSig_i_i)
.addBody([
kExprI32Const, 0,
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
kExprCallIndirect, 0, kTableZero])
.exportAs("main");
}
@@ -53,7 +53,7 @@ function addMain(builder) {
let id = (() => { // identity exported function
let builder = new WasmModuleBuilder();
builder.addFunction("id", kSig_i_i)
- .addBody([kExprGetLocal, 0])
+ .addBody([kExprLocalGet, 0])
.exportAs("id");
let module = new WebAssembly.Module(builder.toBuffer());
return (new WebAssembly.Instance(builder.toModule())).exports.id;
@@ -125,8 +125,8 @@ let id = (() => { // identity exported function
builder.addFunction("main", kSig_i_ii)
.addBody([
kExprI32Const, 15, // --
- kExprGetLocal, 0, // --
- kExprGetLocal, 1, // --
+ kExprLocalGet, 0, // --
+ kExprLocalGet, 1, // --
kExprCallIndirect, 0, kTableZero]) // --
.exportAs("main");
@@ -254,14 +254,14 @@ let id = (() => { // identity exported function
builder.addImportedTable("x", "table", 1, kMaxTableSize);
builder.addFunction("add", index_i_ii)
.addBody([
- kExprGetLocal, 0,
- kExprGetLocal, 1,
+ kExprLocalGet, 0,
+ kExprLocalGet, 1,
kExprI32Add]);
builder.addFunction("main", index_i_i)
.addBody([
kExprI32Const, 5,
kExprI32Const, 5,
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
kExprCallIndirect, index_i_ii, kTableZero])
.exportAs("main");
builder.addElementSegment(0, 0, false, [0]);
diff --git a/deps/v8/test/mjsunit/wasm/test-wasm-module-builder.js b/deps/v8/test/mjsunit/wasm/test-wasm-module-builder.js
index 96d3a0bac5..e43eaf7258 100644
--- a/deps/v8/test/mjsunit/wasm/test-wasm-module-builder.js
+++ b/deps/v8/test/mjsunit/wasm/test-wasm-module-builder.js
@@ -44,7 +44,7 @@ function instantiate(buffer, ffi) {
let builder = new WasmModuleBuilder();
builder.addFunction(undefined, kSig_i_i)
.addLocals({i32_count: 1})
- .addBody([kExprGetLocal, 0, kExprSetLocal, 1, kExprGetLocal, 1])
+ .addBody([kExprLocalGet, 0, kExprLocalSet, 1, kExprLocalGet, 1])
.exportAs('main');
var buffer = builder.toBuffer(debug);
@@ -67,7 +67,7 @@ function instantiate(buffer, ffi) {
let builder = new WasmModuleBuilder();
builder.addFunction(undefined, makeSig_r_x(p.type, p.type))
.addLocals(p.locals)
- .addBody([kExprGetLocal, 0, kExprSetLocal, 1, kExprGetLocal, 1])
+ .addBody([kExprLocalGet, 0, kExprLocalSet, 1, kExprLocalGet, 1])
.exportAs('main');
var buffer = builder.toBuffer(debug);
@@ -81,10 +81,10 @@ function instantiate(buffer, ffi) {
print(arguments.callee.name);
let builder = new WasmModuleBuilder();
builder.addFunction('add', kSig_i_ii).addBody([
- kExprGetLocal, 0, kExprGetLocal, 1, kExprI32Add
+ kExprLocalGet, 0, kExprLocalGet, 1, kExprI32Add
]);
builder.addFunction('main', kSig_i_ii)
- .addBody([kExprGetLocal, 0, kExprGetLocal, 1, kExprCallFunction, 0])
+ .addBody([kExprLocalGet, 0, kExprLocalGet, 1, kExprCallFunction, 0])
.exportAs('main');
var instance = builder.instantiate();
@@ -96,11 +96,11 @@ function instantiate(buffer, ffi) {
print(arguments.callee.name);
let builder = new WasmModuleBuilder();
builder.addFunction('add', kSig_i_ii).addBody([
- kExprGetLocal, 0, kExprGetLocal, 1, kExprI32Add
+ kExprLocalGet, 0, kExprLocalGet, 1, kExprI32Add
]);
builder.addFunction('main', kSig_i_iii)
.addBody([
- kExprGetLocal, 1, kExprGetLocal, 2, kExprGetLocal, 0, kExprCallIndirect,
+ kExprLocalGet, 1, kExprLocalGet, 2, kExprLocalGet, 0, kExprCallIndirect,
0, kTableZero
])
.exportAs('main');
@@ -117,7 +117,7 @@ function instantiate(buffer, ffi) {
let builder = new WasmModuleBuilder();
builder.addMemory(1, 1, false);
builder.addFunction('load', kSig_i_i)
- .addBody([kExprGetLocal, 0, kExprI32LoadMem, 0, 0])
+ .addBody([kExprLocalGet, 0, kExprI32LoadMem, 0, 0])
.exportAs('load');
builder.addDataSegment(0, [9, 9, 9, 9]);
diff --git a/deps/v8/test/mjsunit/wasm/trap-location.js b/deps/v8/test/mjsunit/wasm/trap-location.js
index d893f97d62..91cb0d0721 100644
--- a/deps/v8/test/mjsunit/wasm/trap-location.js
+++ b/deps/v8/test/mjsunit/wasm/trap-location.js
@@ -54,27 +54,27 @@ builder.addFunction("main", kSig_i_i)
.addBody([
// offset 1
kExprBlock, kWasmI32,
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
kExprI32Const, 2,
kExprI32LtU,
kExprIf, kWasmStmt,
// offset 9
kExprI32Const, 0x7e /* -2 */,
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
kExprI32DivU,
// offset 15
kExprI32LoadMem, 0, 0,
kExprBr, 1,
kExprEnd,
// offset 21
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
kExprI32Const, 2,
kExprI32Eq,
kExprIf, kWasmStmt,
kExprUnreachable,
kExprEnd,
// offset 30
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
kExprCallIndirect, sig_index, kTableZero,
kExprEnd,
])
diff --git a/deps/v8/test/mjsunit/wasm/type-reflection-with-anyref.js b/deps/v8/test/mjsunit/wasm/type-reflection-with-anyref.js
index b7a7ee7969..863a59aaa4 100644
--- a/deps/v8/test/mjsunit/wasm/type-reflection-with-anyref.js
+++ b/deps/v8/test/mjsunit/wasm/type-reflection-with-anyref.js
@@ -57,13 +57,13 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
table.set(0, f1);
builder.addFunction('call0', kSig_i_i)
.addBody([
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
kExprCallIndirect, sig_index, table_index0
])
.exportFunc();
builder.addFunction('call1', kSig_i_i)
.addBody([
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
kExprCallIndirect, sig_index, table_index1
])
.exportFunc();
diff --git a/deps/v8/test/mjsunit/wasm/type-reflection-with-mv.js b/deps/v8/test/mjsunit/wasm/type-reflection-with-mv.js
new file mode 100644
index 0000000000..0a7e98492f
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/type-reflection-with-mv.js
@@ -0,0 +1,80 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --experimental-wasm-type-reflection --expose-gc --experimental-wasm-mv
+
+(function TestFunctionConstructedCoercions() {
+ let obj1 = { valueOf: _ => 123.45 };
+ let obj2 = { toString: _ => "456" };
+ let gcer = { valueOf: _ => gc() };
+ let testcases = [
+ { params: { sig: [],
+ val: [],
+ exp: [], },
+ result: { sig: ["i32", "f32"],
+ val: [42.7, "xyz"],
+ exp: [42, NaN] },
+ },
+ { params: { sig: [],
+ val: [],
+ exp: [], },
+ result: { sig: ["i32", "f32", "f64"],
+ val: (function* () { yield obj1; yield obj2; yield "789" })(),
+ exp: [123, 456, 789], },
+ },
+ { params: { sig: [],
+ val: [],
+ exp: [], },
+ result: { sig: ["i32", "f32", "f64"],
+ val: new Proxy([gcer, {}, "xyz"], {
+ get: function(obj, prop) { return Reflect.get(obj, prop); }
+ }),
+ exp: [0, NaN, NaN], },
+ },
+ ];
+ testcases.forEach(function({params, result}) {
+ let p = params.sig; let r = result.sig; var params_after;
+ function testFun() { params_after = arguments; return result.val; }
+ let fun = new WebAssembly.Function({parameters:p, results:r}, testFun);
+ let result_after = fun.apply(undefined, params.val);
+ assertArrayEquals(params.exp, params_after);
+ assertEquals(result.exp, result_after);
+ });
+})();
+
+(function TestFunctionConstructedCoercionsThrow() {
+ let proxy_throw = new Proxy([1, 2], {
+ get: function(obj, prop) {
+ if (prop == 1) {
+ throw new Error("abc");
+ }
+ return Reflect.get(obj, prop); },
+ });
+ function* generator_throw() {
+ yield 1;
+ throw new Error("def");
+ }
+ let testcases = [
+ { val: 0,
+ error: Error,
+ msg: /not iterable/ },
+ { val: [1],
+ error: TypeError,
+ msg: /multi-return length mismatch/ },
+ { val: [1, 2, 3],
+ error: TypeError,
+ msg: /multi-return length mismatch/ },
+ { val: proxy_throw,
+ error: Error,
+ msg: /abc/ },
+ { val: generator_throw(),
+ error: Error,
+ msg: /def/ },
+ ];
+ testcases.forEach(function({val, error, msg}) {
+ fun = new WebAssembly.Function({parameters:[], results:["i32", "i32"]},
+ () => val);
+ assertThrows(fun, error, msg);
+ })
+})();
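
The coercions exercised above follow the JS-to-wasm return path for multi-value signatures: the callee's return value is iterated, its length is checked against the declared result count (hence the "not iterable" and "multi-return length mismatch" errors asserted above), and each element is coerced to its wasm result type. A minimal sketch, flag-gated like the test:

// Requires --experimental-wasm-type-reflection --experimental-wasm-mv.
let fun = new WebAssembly.Function(
    {parameters: [], results: ["i32", "f32"]},
    () => [42.7, "xyz"]);        // return value is iterated and coerced
assertEquals([42, NaN], fun());  // 42.7 -> ToInt32 -> 42; "xyz" -> NaN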
diff --git a/deps/v8/test/mjsunit/wasm/type-reflection.js b/deps/v8/test/mjsunit/wasm/type-reflection.js
index a9a0b87143..bac877d187 100644
--- a/deps/v8/test/mjsunit/wasm/type-reflection.js
+++ b/deps/v8/test/mjsunit/wasm/type-reflection.js
@@ -533,7 +533,7 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
table.set(0, fun1);
builder.addFunction('main', kSig_i_i)
.addBody([
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
kExprCallIndirect, sig_index, table_index
])
.exportFunc();
@@ -554,7 +554,7 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
table.set(0, fun);
builder.addFunction('main', kSig_v_i)
.addBody([
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
kExprCallIndirect, sig_index, table_index,
kExprDrop
])
diff --git a/deps/v8/test/mjsunit/wasm/unicode.js b/deps/v8/test/mjsunit/wasm/unicode.js
index 2b5f5ce9fe..73dc0608c6 100644
--- a/deps/v8/test/mjsunit/wasm/unicode.js
+++ b/deps/v8/test/mjsunit/wasm/unicode.js
@@ -9,7 +9,7 @@ function checkImport(
var builder = new WasmModuleBuilder();
builder.addImport(imported_module_name, imported_function_name, kSig_i_i);
builder.addFunction('call_imp', kSig_i_i)
- .addBody([kExprGetLocal, 0, kExprCallFunction, 0])
+ .addBody([kExprLocalGet, 0, kExprCallFunction, 0])
.exportFunc();
let imp = i => i + 3;
@@ -29,10 +29,10 @@ function checkExports(
exported_name_add) {
var builder = new WasmModuleBuilder();
builder.addFunction(internal_name_mul, kSig_i_ii)
- .addBody([kExprGetLocal, 0, kExprGetLocal, 1, kExprI32Mul])
+ .addBody([kExprLocalGet, 0, kExprLocalGet, 1, kExprI32Mul])
.exportAs(exported_name_mul);
builder.addFunction(internal_name_add, kSig_i_ii)
- .addBody([kExprGetLocal, 0, kExprGetLocal, 1, kExprI32Add])
+ .addBody([kExprLocalGet, 0, kExprLocalGet, 1, kExprI32Add])
.exportAs(exported_name_add);
let instance = builder.instantiate();
diff --git a/deps/v8/test/mjsunit/wasm/user-properties-exported.js b/deps/v8/test/mjsunit/wasm/user-properties-exported.js
index 80f2077f3c..0b2f249e05 100644
--- a/deps/v8/test/mjsunit/wasm/user-properties-exported.js
+++ b/deps/v8/test/mjsunit/wasm/user-properties-exported.js
@@ -13,7 +13,7 @@ load("test/mjsunit/wasm/user-properties-common.js");
var builder = new WasmModuleBuilder();
builder.addFunction("exp", kSig_i_i)
.addBody([
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
kExprCallFunction, 0])
.exportAs("exp");
let module1 = builder.toModule();
diff --git a/deps/v8/test/mjsunit/wasm/user-properties-module.js b/deps/v8/test/mjsunit/wasm/user-properties-module.js
index 69a1f898d7..84727e1b53 100644
--- a/deps/v8/test/mjsunit/wasm/user-properties-module.js
+++ b/deps/v8/test/mjsunit/wasm/user-properties-module.js
@@ -14,7 +14,7 @@ load("test/mjsunit/wasm/user-properties-common.js");
builder.addImport("m", "f", kSig_i_i);
builder.addFunction("main", kSig_i_i)
.addBody([
- kExprGetLocal, 0,
+ kExprLocalGet, 0,
kExprCallFunction, 0])
.exportAs("main");
builder.addMemory(1, 1, false)
diff --git a/deps/v8/test/mjsunit/wasm/wasm-math-intrinsic.js b/deps/v8/test/mjsunit/wasm/wasm-math-intrinsic.js
index 3b1a333c7f..3d9512cf85 100644
--- a/deps/v8/test/mjsunit/wasm/wasm-math-intrinsic.js
+++ b/deps/v8/test/mjsunit/wasm/wasm-math-intrinsic.js
@@ -140,7 +140,7 @@ function genUnop(name, sig) {
builder.addImport('Math', name, sig_index);
builder.addFunction('main', sig_index)
.addBody([
- kExprGetLocal, 0, // --
+ kExprLocalGet, 0, // --
kExprCallFunction, 0
]) // --
.exportAs('main');
@@ -155,8 +155,8 @@ function genBinop(name, sig) {
builder.addImport('Math', name, sig_index);
builder.addFunction('main', sig_index)
.addBody([
- kExprGetLocal, 0, // --
- kExprGetLocal, 1, // --
+ kExprLocalGet, 0, // --
+ kExprLocalGet, 1, // --
kExprCallFunction, 0
]) // --
.exportAs('main');
diff --git a/deps/v8/test/mjsunit/wasm/wasm-module-builder.js b/deps/v8/test/mjsunit/wasm/wasm-module-builder.js
index 45af969d09..b4d7cae41b 100644
--- a/deps/v8/test/mjsunit/wasm/wasm-module-builder.js
+++ b/deps/v8/test/mjsunit/wasm/wasm-module-builder.js
@@ -84,6 +84,7 @@ let kSharedHasMaximumFlag = 3;
let kActiveNoIndex = 0;
let kPassive = 1;
let kActiveWithIndex = 2;
+let kPassiveWithElements = 5;
// Function declaration flags
let kDeclFunctionName = 0x01;
@@ -209,11 +210,11 @@ let kExprReturnCall = 0x12;
let kExprReturnCallIndirect = 0x13;
let kExprDrop = 0x1a;
let kExprSelect = 0x1b;
-let kExprGetLocal = 0x20;
-let kExprSetLocal = 0x21;
-let kExprTeeLocal = 0x22;
-let kExprGetGlobal = 0x23;
-let kExprSetGlobal = 0x24;
+let kExprLocalGet = 0x20;
+let kExprLocalSet = 0x21;
+let kExprLocalTee = 0x22;
+let kExprGlobalGet = 0x23;
+let kExprGlobalSet = 0x24;
let kExprTableGet = 0x25;
let kExprTableSet = 0x26;
let kExprI32LoadMem = 0x28;
@@ -464,6 +465,9 @@ let kExprI64AtomicCompareExchange16U = 0x4d;
let kExprI64AtomicCompareExchange32U = 0x4e;
// Simd opcodes.
+let kExprS128LoadMem = 0x00;
+let kExprS128StoreMem = 0x01;
+let kExprI32x4Splat = 0x0c;
let kExprF32x4Min = 0x9e;
// Compilation hint constants.
@@ -1093,7 +1097,7 @@ class WasmModuleBuilder {
}
} else {
// Emit a global-index initializer.
- section.emit_u8(kExprGetGlobal);
+ section.emit_u8(kExprGlobalGet);
section.emit_u32v(global.init_index);
}
section.emit_u8(kExprEnd); // end of init expression
@@ -1158,19 +1162,22 @@ class WasmModuleBuilder {
section.emit_u32v(init.table);
}
if (init.is_global) {
- section.emit_u8(kExprGetGlobal);
+ section.emit_u8(kExprGlobalGet);
} else {
section.emit_u8(kExprI32Const);
}
section.emit_u32v(init.base);
section.emit_u8(kExprEnd);
+ if (init.table != 0) {
+ section.emit_u8(kExternalFunction);
+ }
section.emit_u32v(init.array.length);
for (let index of init.array) {
section.emit_u32v(index);
}
} else {
// Passive segment.
- section.emit_u8(kPassive); // flags
+ section.emit_u8(kPassiveWithElements); // flags
section.emit_u8(kWasmAnyFunc);
section.emit_u32v(init.array.length);
for (let index of init.array) {
@@ -1290,7 +1297,7 @@ class WasmModuleBuilder {
section.emit_u8(0); // linear memory index 0 / flags
if (seg.is_global) {
// initializer is a global variable
- section.emit_u8(kExprGetGlobal);
+ section.emit_u8(kExprGlobalGet);
section.emit_u32v(seg.addr);
} else {
// initializer is a constant
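
The opcode-constant renames above track the current spec mnemonics (local.get, local.set, local.tee, global.get, global.set); the byte values (0x20..0x24) are unchanged, so builder bodies only need the new names:

// Same encoding as before, under the spec-aligned names.
builder.addFunction("add", kSig_i_ii)
    .addBody([kExprLocalGet, 0, kExprLocalGet, 1, kExprI32Add])
    .exportFunc();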
diff --git a/deps/v8/test/mjsunit/wasm/worker-interpreter.js b/deps/v8/test/mjsunit/wasm/worker-interpreter.js
index ccf6d279a0..9a7ab60756 100644
--- a/deps/v8/test/mjsunit/wasm/worker-interpreter.js
+++ b/deps/v8/test/mjsunit/wasm/worker-interpreter.js
@@ -9,7 +9,7 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
(function TestPostInterpretedModule() {
let builder = new WasmModuleBuilder();
let add = builder.addFunction("add", kSig_i_ii)
- .addBody([kExprGetLocal, 0, kExprGetLocal, 1, kExprI32Add])
+ .addBody([kExprLocalGet, 0, kExprLocalGet, 1, kExprI32Add])
.exportFunc();
let module = builder.toModule();
diff --git a/deps/v8/test/mjsunit/wasm/worker-module.js b/deps/v8/test/mjsunit/wasm/worker-module.js
index f626263b25..76d84daaba 100644
--- a/deps/v8/test/mjsunit/wasm/worker-module.js
+++ b/deps/v8/test/mjsunit/wasm/worker-module.js
@@ -9,7 +9,7 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
(function TestPostModule() {
let builder = new WasmModuleBuilder();
builder.addFunction("add", kSig_i_ii)
- .addBody([kExprGetLocal, 0, kExprGetLocal, 1, kExprI32Add])
+ .addBody([kExprLocalGet, 0, kExprLocalGet, 1, kExprI32Add])
.exportFunc();
let module = builder.toModule();
diff --git a/deps/v8/test/mkgrokdump/mkgrokdump.cc b/deps/v8/test/mkgrokdump/mkgrokdump.cc
index 8c07576d3a..5f39a063e4 100644
--- a/deps/v8/test/mkgrokdump/mkgrokdump.cc
+++ b/deps/v8/test/mkgrokdump/mkgrokdump.cc
@@ -26,10 +26,9 @@ static const char* kHeader =
"\n"
"# List of known V8 instance types.\n";
-// Non-snapshot builds allocate objects to different places.
// Debug builds emit debug code, affecting code object sizes.
// Embedded builtins cause objects to be allocated in different locations.
-#if defined(V8_EMBEDDED_BUILTINS) && defined(V8_USE_SNAPSHOT) && !defined(DEBUG)
+#if defined(V8_EMBEDDED_BUILTINS) && !defined(DEBUG)
static const char* kBuild = "shipping";
#else
static const char* kBuild = "non-shipping";
@@ -97,6 +96,14 @@ static void DumpKnownObject(FILE* out, i::Heap* heap, const char* space_name,
#undef RO_ROOT_LIST_CASE
}
+static void DumpSpaceFirstPageAddress(FILE* out, i::PagedSpace* space) {
+ const char* name = space->name();
+ i::Address first_page = reinterpret_cast<i::Address>(space->first_page());
+ i::Tagged_t compressed = i::CompressTagged(first_page);
+ uintptr_t unsigned_compressed = static_cast<uint32_t>(compressed);
+ i::PrintF(out, " 0x%08" V8PRIxPTR ": \"%s\",\n", unsigned_compressed, name);
+}
+
static int DumpHeapConstants(FILE* out, const char* argv0) {
// Start up V8.
std::unique_ptr<v8::Platform> platform = v8::platform::NewDefaultPlatform();
@@ -164,6 +171,29 @@ static int DumpHeapConstants(FILE* out, const char* argv0) {
i::PrintF(out, "}\n");
}
+ if (COMPRESS_POINTERS_BOOL) {
+ // Dump a list of addresses for the first page of each space that contains
+ // objects in the other tables above. This is only useful if two
+ // assumptions hold:
+ // 1. Those pages are positioned deterministically within the heap
+ // reservation block during snapshot deserialization.
+ // 2. Those pages cannot ever be moved (such as by compaction).
+ i::PrintF(out,
+ "\n# Lower 32 bits of first page addresses for various heap "
+ "spaces.\n");
+ i::PrintF(out, "HEAP_FIRST_PAGES = {\n");
+ i::PagedSpaceIterator it(heap);
+ for (i::PagedSpace* s = it.Next(); s != nullptr; s = it.Next()) {
+ // Code page is different on Windows vs Linux (bug v8:9844), so skip it.
+ if (s->identity() == i::CODE_SPACE) {
+ continue;
+ }
+ DumpSpaceFirstPageAddress(out, s);
+ }
+ DumpSpaceFirstPageAddress(out, read_only_heap->read_only_space());
+ i::PrintF(out, "}\n");
+ }
+
// Dump frame markers
i::PrintF(out, "\n# List of known V8 Frame Markers.\n");
#define DUMP_MARKER(T, class) i::PrintF(out, " \"%s\",\n", #T);
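
DumpSpaceFirstPageAddress records only the low 32 bits of each first-page address, i.e. the compressed-tagged form used when pointer compression is on. A rough sketch of that truncation in JS (illustrative only; the real i::CompressTagged is C++ internal):

// Illustrative: keep the low 32 bits of a 64-bit address, as the
// HEAP_FIRST_PAGES table does under pointer compression.
function compressTagged(addr) {        // addr: BigInt address
  return Number(addr & 0xFFFFFFFFn);   // lower 32 bits
}
print("0x" + compressTagged(0x7f1234560000n).toString(16));  // 0x34560000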
diff --git a/deps/v8/test/mozilla/mozilla.status b/deps/v8/test/mozilla/mozilla.status
index 5a1c89ac9e..78630381f9 100644
--- a/deps/v8/test/mozilla/mozilla.status
+++ b/deps/v8/test/mozilla/mozilla.status
@@ -1011,6 +1011,8 @@
#BUG(3837): Crashes due to C stack overflow.
'js1_5/extensions/regress-355497': [SKIP],
+ # Slow test
+ 'js1_5/Regress/regress-80981': [PASS, SLOW],
}], # 'arch == arm and simulator_run'
['arch == arm64 and simulator_run', {
@@ -1035,6 +1037,9 @@
#BUG(3152): Avoid C stack overflow.
'js1_5/extensions/regress-355497': [FAIL_OK, '--sim-stack-size=512'],
+
+ # Slow without pointer compression
+ 'js1_5/Regress/regress-80981': [PASS, ['not pointer_compression', SLOW]],
}], # 'arch == arm64 and simulator_run'
['system == android', {
diff --git a/deps/v8/test/test262/OWNERS b/deps/v8/test/test262/OWNERS
index 246672d0ff..56c71e23b1 100644
--- a/deps/v8/test/test262/OWNERS
+++ b/deps/v8/test/test262/OWNERS
@@ -1,2 +1,3 @@
adamk@chromium.org
gsathya@chromium.org
+syg@chromium.org
diff --git a/deps/v8/test/test262/test262.status b/deps/v8/test/test262/test262.status
index 7ccb304a0b..3f82c49d1e 100644
--- a/deps/v8/test/test262/test262.status
+++ b/deps/v8/test/test262/test262.status
@@ -65,7 +65,13 @@
# Intl tests which require flags.
# https://bugs.chromium.org/p/v8/issues/detail?id=9154
'intl402/NumberFormat/numbering-system-options': ['--harmony-intl-add-calendar-numbering-system'],
+ 'intl402/DateTimeFormat/constructor-calendar-numberingSystem-order': ['--harmony-intl-add-calendar-numbering-system'],
'intl402/DateTimeFormat/numbering-system-calendar-options': ['--harmony-intl-add-calendar-numbering-system'],
+ 'intl402/DateTimeFormat/constructor-options-throwing-getters': ['--harmony-intl-add-calendar-numbering-system'],
+ 'intl402/NumberFormat/constructor-options-throwing-getters': ['--harmony-intl-add-calendar-numbering-system'],
+ 'intl402/NumberFormat/constructor-numberingSystem-order': ['--harmony-intl-add-calendar-numbering-system'],
+ 'intl402/DateTimeFormat/prototype/formatToParts/pattern-on-calendar': ['--harmony-intl-other-calendars'],
+ 'intl402/DateTimeFormat/prototype/formatToParts/related-year': ['--harmony-intl-other-calendars'],
# https://bugs.chromium.org/p/v8/issues/detail?id=9084
'intl402/supportedLocalesOf-consistent-with-resolvedOptions': [FAIL],
@@ -457,9 +463,6 @@
'language/expressions/async-generator/generator-created-after-decl-inst': [FAIL],
'language/statements/async-generator/generator-created-after-decl-inst': [FAIL],
- # https://bugs.chromium.org/p/v8/issues/detail?id=9611
- 'language/statements/class/elements/private-field-is-visible-in-computed-properties': [SKIP],
-
# https://github.com/tc39/test262/issues/2034
'language/expressions/postfix-decrement/arguments': [SKIP],
'language/expressions/postfix-decrement/arguments-nostrict': [SKIP],
@@ -484,23 +487,14 @@
# https://bugs.chromium.org/p/v8/issues/detail?id=9049
'language/comments/hashbang/use-strict': [SKIP],
- # https://bugs.chromium.org/p/v8/issues/detail?id=9229
- 'language/expressions/class/elements/syntax/early-errors/grammar-private-environment-on-class-heritage': [FAIL],
- 'language/expressions/class/elements/syntax/early-errors/grammar-private-environment-on-class-heritage-chained-usage': [FAIL],
- 'language/expressions/class/elements/syntax/early-errors/grammar-private-environment-on-class-heritage-function-expression': [FAIL],
- 'language/expressions/class/elements/syntax/early-errors/grammar-private-environment-on-class-heritage-recursive': [FAIL],
- 'language/statements/class/elements/syntax/early-errors/grammar-private-environment-on-class-heritage': [FAIL],
- 'language/statements/class/elements/syntax/early-errors/grammar-private-environment-on-class-heritage-chained-usage': [FAIL],
- 'language/statements/class/elements/syntax/early-errors/grammar-private-environment-on-class-heritage-function-expression': [FAIL],
- 'language/statements/class/elements/syntax/early-errors/grammar-private-environment-on-class-heritage-recursive': [FAIL],
-
# https://bugs.chromium.org/p/v8/issues/detail?id=8179
+ # Temporarily removed pending https://github.com/tc39/test262/issues/2339
#
# These tests require exception handling support which is currently
# blocked on landing https://chromium-review.googlesource.com/c/v8/v8/+/1655655
'built-ins/FinalizationGroup/FinalizationGroupCleanupIteratorPrototype/next-job-not-active-throws': [FAIL],
'built-ins/FinalizationGroup/prototype/cleanupSome/poisoned-callback-throws': [FAIL],
- 'built-ins/FinalizationGroup/prototype/cleanupSome/poisoned-cleanup-callback-throws': [FAIL],
+ # 'built-ins/FinalizationGroup/prototype/cleanupSome/poisoned-cleanup-callback-throws': [FAIL],
# https://bugs.chromium.org/p/v8/issues/detail?id=8179
#
@@ -510,23 +504,28 @@
'built-ins/FinalizationGroup/prototype/register/return-undefined-register-itself': [FAIL],
# https://bugs.chromium.org/p/v8/issues/detail?id=8179
+ # Temporarily removed pending https://github.com/tc39/test262/issues/2339
#
# This test has target === holdings which throws, changing holdings to
# { y } makes the test pass.
- 'built-ins/FinalizationGroup/prototype/cleanupSome/cleanup-prevented-with-reference': [FAIL],
+ # 'built-ins/FinalizationGroup/prototype/cleanupSome/cleanup-prevented-with-reference': [FAIL],
# https://github.com/tc39/test262/issues/2256
- 'built-ins/FinalizationGroup/prototype/cleanupSome/cleanup-prevented-with-unregister': [FAIL],
+ # Temporarily removed pending https://github.com/tc39/test262/issues/2339
+ # 'built-ins/FinalizationGroup/prototype/cleanupSome/cleanup-prevented-with-unregister': [FAIL],
# https://github.com/tc39/test262/issues/2239
- 'built-ins/WeakRef/prototype/deref/gc-cleanup-not-prevented-with-wr-deref': [FAIL],
- 'built-ins/FinalizationGroup/prototype/cleanupSome/gc-cleanup-not-prevented-with-wr-deref': [FAIL],
+ # Temporarily removed pending https://github.com/tc39/test262/issues/2339
+ # 'built-ins/WeakRef/prototype/deref/gc-cleanup-not-prevented-with-wr-deref': [FAIL],
+ # 'built-ins/FinalizationGroup/prototype/cleanupSome/gc-cleanup-not-prevented-with-wr-deref': [FAIL],
# https://github.com/tc39/test262/issues/2255
- 'built-ins/FinalizationGroup/prototype/cleanupSome/iterator-holdings-multiple-values': [FAIL],
+ # Temporarily removed pending https://github.com/tc39/test262/issues/2339
+ # 'built-ins/FinalizationGroup/prototype/cleanupSome/iterator-holdings-multiple-values': [FAIL],
# https://github.com/tc39/test262/issues/2260
- 'built-ins/FinalizationGroup/prototype/cleanupSome/return-undefined-with-gc': [FAIL],
+ # Temporarily removed pending https://github.com/tc39/test262/issues/2339
+ # 'built-ins/FinalizationGroup/prototype/cleanupSome/return-undefined-with-gc': [FAIL],
# https://bugs.chromium.org/p/v8/issues/detail?id=9612
'intl402/DateTimeFormat/prototype/formatRange/fractionalSecondDigits': [FAIL],
@@ -534,267 +533,30 @@
# https://bugs.chromium.org/p/v8/issues/detail?id=9613
'intl402/Intl/getCanonicalLocales/canonicalized-tags': [FAIL],
'intl402/Intl/getCanonicalLocales/grandfathered': [FAIL],
- 'intl402/Intl/getCanonicalLocales/invalid-tags': [FAIL],
'intl402/Intl/getCanonicalLocales/non-iana-canon': [FAIL],
'intl402/Intl/getCanonicalLocales/preferred-grandfathered': [FAIL],
'intl402/Intl/getCanonicalLocales/preferred-variant': [FAIL],
- 'intl402/language-tags-invalid': [FAIL],
- 'intl402/ListFormat/constructor/constructor/locales-valid': [FAIL],
'intl402/Locale/constructor-non-iana-canon': [FAIL],
+ 'intl402/Locale/likely-subtags-grandfathered': [FAIL],
+
+ # https://bugs.chromium.org/p/v8/issues/detail?id=9742
+ 'intl402/Locale/getters': [FAIL],
+
+ # https://github.com/tc39/test262/pull/2349
'intl402/Locale/constructor-options-region-valid': [FAIL],
+
+ # https://bugs.chromium.org/p/v8/issues/detail?id=9741
'intl402/Locale/constructor-tag': [FAIL],
- 'intl402/Locale/getters': [FAIL],
- 'intl402/Locale/likely-subtags-grandfathered': [FAIL],
- 'intl402/PluralRules/prototype/resolvedOptions/order': [FAIL],
- 'intl402/RelativeTimeFormat/constructor/constructor/locales-valid': [FAIL],
- 'intl402/Segmenter/constructor/constructor/locales-valid': [FAIL],
-
- # https://bugs.chromium.org/p/v8/issues/detail?id=9647
- 'language/expressions/assignment/dstr/ident-name-prop-name-literal-break-escaped': [FAIL],
- 'language/expressions/assignment/dstr/ident-name-prop-name-literal-case-escaped': [FAIL],
- 'language/expressions/assignment/dstr/ident-name-prop-name-literal-catch-escaped': [FAIL],
- 'language/expressions/assignment/dstr/ident-name-prop-name-literal-class-escaped': [FAIL],
- 'language/expressions/assignment/dstr/ident-name-prop-name-literal-const-escaped': [FAIL],
- 'language/expressions/assignment/dstr/ident-name-prop-name-literal-continue-escaped': [FAIL],
- 'language/expressions/assignment/dstr/ident-name-prop-name-literal-debugger-escaped': [FAIL],
- 'language/expressions/assignment/dstr/ident-name-prop-name-literal-default-escaped-ext': [FAIL],
- 'language/expressions/assignment/dstr/ident-name-prop-name-literal-default-escaped': [FAIL],
- 'language/expressions/assignment/dstr/ident-name-prop-name-literal-delete-escaped': [FAIL],
- 'language/expressions/assignment/dstr/ident-name-prop-name-literal-do-escaped': [FAIL],
- 'language/expressions/assignment/dstr/ident-name-prop-name-literal-else-escaped': [FAIL],
- 'language/expressions/assignment/dstr/ident-name-prop-name-literal-enum-escaped': [FAIL],
- 'language/expressions/assignment/dstr/ident-name-prop-name-literal-export-escaped': [FAIL],
- 'language/expressions/assignment/dstr/ident-name-prop-name-literal-extends-escaped-ext': [FAIL],
- 'language/expressions/assignment/dstr/ident-name-prop-name-literal-extends-escaped': [FAIL],
- 'language/expressions/assignment/dstr/ident-name-prop-name-literal-finally-escaped': [FAIL],
- 'language/expressions/assignment/dstr/ident-name-prop-name-literal-for-escaped': [FAIL],
- 'language/expressions/assignment/dstr/ident-name-prop-name-literal-function-escaped': [FAIL],
- 'language/expressions/assignment/dstr/ident-name-prop-name-literal-if-escaped': [FAIL],
- 'language/expressions/assignment/dstr/ident-name-prop-name-literal-import-escaped': [FAIL],
- 'language/expressions/assignment/dstr/ident-name-prop-name-literal-in-escaped': [FAIL],
- 'language/expressions/assignment/dstr/ident-name-prop-name-literal-instanceof-escaped': [FAIL],
- 'language/expressions/assignment/dstr/ident-name-prop-name-literal-new-escaped': [FAIL],
- 'language/expressions/assignment/dstr/ident-name-prop-name-literal-return-escaped': [FAIL],
- 'language/expressions/assignment/dstr/ident-name-prop-name-literal-super-escaped': [FAIL],
- 'language/expressions/assignment/dstr/ident-name-prop-name-literal-switch-escaped': [FAIL],
- 'language/expressions/assignment/dstr/ident-name-prop-name-literal-this-escaped': [FAIL],
- 'language/expressions/assignment/dstr/ident-name-prop-name-literal-throw-escaped': [FAIL],
- 'language/expressions/assignment/dstr/ident-name-prop-name-literal-try-escaped': [FAIL],
- 'language/expressions/assignment/dstr/ident-name-prop-name-literal-typeof-escaped': [FAIL],
- 'language/expressions/assignment/dstr/ident-name-prop-name-literal-var-escaped': [FAIL],
- 'language/expressions/assignment/dstr/ident-name-prop-name-literal-void-escaped': [FAIL],
- 'language/expressions/assignment/dstr/ident-name-prop-name-literal-while-escaped': [FAIL],
- 'language/expressions/assignment/dstr/ident-name-prop-name-literal-with-escaped': [FAIL],
- 'language/expressions/assignment/member-expr-ident-name-break-escaped': [FAIL],
- 'language/expressions/assignment/member-expr-ident-name-case-escaped': [FAIL],
- 'language/expressions/assignment/member-expr-ident-name-catch-escaped': [FAIL],
- 'language/expressions/assignment/member-expr-ident-name-class-escaped': [FAIL],
- 'language/expressions/assignment/member-expr-ident-name-const-escaped': [FAIL],
- 'language/expressions/assignment/member-expr-ident-name-continue-escaped': [FAIL],
- 'language/expressions/assignment/member-expr-ident-name-debugger-escaped': [FAIL],
- 'language/expressions/assignment/member-expr-ident-name-default-escaped-ext': [FAIL],
- 'language/expressions/assignment/member-expr-ident-name-default-escaped': [FAIL],
- 'language/expressions/assignment/member-expr-ident-name-delete-escaped': [FAIL],
- 'language/expressions/assignment/member-expr-ident-name-do-escaped': [FAIL],
- 'language/expressions/assignment/member-expr-ident-name-else-escaped': [FAIL],
- 'language/expressions/assignment/member-expr-ident-name-enum-escaped': [FAIL],
- 'language/expressions/assignment/member-expr-ident-name-export-escaped': [FAIL],
- 'language/expressions/assignment/member-expr-ident-name-extends-escaped-ext': [FAIL],
- 'language/expressions/assignment/member-expr-ident-name-extends-escaped': [FAIL],
- 'language/expressions/assignment/member-expr-ident-name-finally-escaped': [FAIL],
- 'language/expressions/assignment/member-expr-ident-name-for-escaped': [FAIL],
- 'language/expressions/assignment/member-expr-ident-name-function-escaped': [FAIL],
- 'language/expressions/assignment/member-expr-ident-name-if-escaped': [FAIL],
- 'language/expressions/assignment/member-expr-ident-name-import-escaped': [FAIL],
- 'language/expressions/assignment/member-expr-ident-name-in-escaped': [FAIL],
- 'language/expressions/assignment/member-expr-ident-name-instanceof-escaped': [FAIL],
- 'language/expressions/assignment/member-expr-ident-name-new-escaped': [FAIL],
- 'language/expressions/assignment/member-expr-ident-name-return-escaped': [FAIL],
- 'language/expressions/assignment/member-expr-ident-name-super-escaped': [FAIL],
- 'language/expressions/assignment/member-expr-ident-name-switch-escaped': [FAIL],
- 'language/expressions/assignment/member-expr-ident-name-this-escaped': [FAIL],
- 'language/expressions/assignment/member-expr-ident-name-throw-escaped': [FAIL],
- 'language/expressions/assignment/member-expr-ident-name-try-escaped': [FAIL],
- 'language/expressions/assignment/member-expr-ident-name-typeof-escaped': [FAIL],
- 'language/expressions/assignment/member-expr-ident-name-var-escaped': [FAIL],
- 'language/expressions/assignment/member-expr-ident-name-void-escaped': [FAIL],
- 'language/expressions/assignment/member-expr-ident-name-while-escaped': [FAIL],
- 'language/expressions/assignment/member-expr-ident-name-with-escaped': [FAIL],
- 'language/expressions/class/ident-name-method-def-break-escaped': [FAIL],
- 'language/expressions/class/ident-name-method-def-case-escaped': [FAIL],
- 'language/expressions/class/ident-name-method-def-catch-escaped': [FAIL],
- 'language/expressions/class/ident-name-method-def-class-escaped': [FAIL],
- 'language/expressions/class/ident-name-method-def-const-escaped': [FAIL],
- 'language/expressions/class/ident-name-method-def-continue-escaped': [FAIL],
- 'language/expressions/class/ident-name-method-def-debugger-escaped': [FAIL],
- 'language/expressions/class/ident-name-method-def-default-escaped-ext': [FAIL],
- 'language/expressions/class/ident-name-method-def-default-escaped': [FAIL],
- 'language/expressions/class/ident-name-method-def-delete-escaped': [FAIL],
- 'language/expressions/class/ident-name-method-def-do-escaped': [FAIL],
- 'language/expressions/class/ident-name-method-def-else-escaped': [FAIL],
- 'language/expressions/class/ident-name-method-def-enum-escaped': [FAIL],
- 'language/expressions/class/ident-name-method-def-export-escaped': [FAIL],
- 'language/expressions/class/ident-name-method-def-extends-escaped-ext': [FAIL],
- 'language/expressions/class/ident-name-method-def-extends-escaped': [FAIL],
- 'language/expressions/class/ident-name-method-def-finally-escaped': [FAIL],
- 'language/expressions/class/ident-name-method-def-for-escaped': [FAIL],
- 'language/expressions/class/ident-name-method-def-function-escaped': [FAIL],
- 'language/expressions/class/ident-name-method-def-if-escaped': [FAIL],
- 'language/expressions/class/ident-name-method-def-import-escaped': [FAIL],
- 'language/expressions/class/ident-name-method-def-in-escaped': [FAIL],
- 'language/expressions/class/ident-name-method-def-instanceof-escaped': [FAIL],
- 'language/expressions/class/ident-name-method-def-new-escaped': [FAIL],
- 'language/expressions/class/ident-name-method-def-return-escaped': [FAIL],
- 'language/expressions/class/ident-name-method-def-super-escaped': [FAIL],
- 'language/expressions/class/ident-name-method-def-switch-escaped': [FAIL],
- 'language/expressions/class/ident-name-method-def-this-escaped': [FAIL],
- 'language/expressions/class/ident-name-method-def-throw-escaped': [FAIL],
- 'language/expressions/class/ident-name-method-def-try-escaped': [FAIL],
- 'language/expressions/class/ident-name-method-def-typeof-escaped': [FAIL],
- 'language/expressions/class/ident-name-method-def-var-escaped': [FAIL],
- 'language/expressions/class/ident-name-method-def-void-escaped': [FAIL],
- 'language/expressions/class/ident-name-method-def-while-escaped': [FAIL],
- 'language/expressions/class/ident-name-method-def-with-escaped': [FAIL],
- 'language/expressions/object/covered-ident-name-prop-name-literal-break-escaped': [FAIL],
- 'language/expressions/object/covered-ident-name-prop-name-literal-case-escaped': [FAIL],
- 'language/expressions/object/covered-ident-name-prop-name-literal-catch-escaped': [FAIL],
- 'language/expressions/object/covered-ident-name-prop-name-literal-class-escaped': [FAIL],
- 'language/expressions/object/covered-ident-name-prop-name-literal-const-escaped': [FAIL],
- 'language/expressions/object/covered-ident-name-prop-name-literal-continue-escaped': [FAIL],
- 'language/expressions/object/covered-ident-name-prop-name-literal-debugger-escaped': [FAIL],
- 'language/expressions/object/covered-ident-name-prop-name-literal-default-escaped-ext': [FAIL],
- 'language/expressions/object/covered-ident-name-prop-name-literal-default-escaped': [FAIL],
- 'language/expressions/object/covered-ident-name-prop-name-literal-delete-escaped': [FAIL],
- 'language/expressions/object/covered-ident-name-prop-name-literal-do-escaped': [FAIL],
- 'language/expressions/object/covered-ident-name-prop-name-literal-else-escaped': [FAIL],
- 'language/expressions/object/covered-ident-name-prop-name-literal-enum-escaped': [FAIL],
- 'language/expressions/object/covered-ident-name-prop-name-literal-export-escaped': [FAIL],
- 'language/expressions/object/covered-ident-name-prop-name-literal-extends-escaped-ext': [FAIL],
- 'language/expressions/object/covered-ident-name-prop-name-literal-extends-escaped': [FAIL],
- 'language/expressions/object/covered-ident-name-prop-name-literal-finally-escaped': [FAIL],
- 'language/expressions/object/covered-ident-name-prop-name-literal-for-escaped': [FAIL],
- 'language/expressions/object/covered-ident-name-prop-name-literal-function-escaped': [FAIL],
- 'language/expressions/object/covered-ident-name-prop-name-literal-if-escaped': [FAIL],
- 'language/expressions/object/covered-ident-name-prop-name-literal-import-escaped': [FAIL],
- 'language/expressions/object/covered-ident-name-prop-name-literal-in-escaped': [FAIL],
- 'language/expressions/object/covered-ident-name-prop-name-literal-instanceof-escaped': [FAIL],
- 'language/expressions/object/covered-ident-name-prop-name-literal-new-escaped': [FAIL],
- 'language/expressions/object/covered-ident-name-prop-name-literal-return-escaped': [FAIL],
- 'language/expressions/object/covered-ident-name-prop-name-literal-super-escaped': [FAIL],
- 'language/expressions/object/covered-ident-name-prop-name-literal-switch-escaped': [FAIL],
- 'language/expressions/object/covered-ident-name-prop-name-literal-this-escaped': [FAIL],
- 'language/expressions/object/covered-ident-name-prop-name-literal-throw-escaped': [FAIL],
- 'language/expressions/object/covered-ident-name-prop-name-literal-try-escaped': [FAIL],
- 'language/expressions/object/covered-ident-name-prop-name-literal-typeof-escaped': [FAIL],
- 'language/expressions/object/covered-ident-name-prop-name-literal-var-escaped': [FAIL],
- 'language/expressions/object/covered-ident-name-prop-name-literal-void-escaped': [FAIL],
- 'language/expressions/object/covered-ident-name-prop-name-literal-while-escaped': [FAIL],
- 'language/expressions/object/covered-ident-name-prop-name-literal-with-escaped': [FAIL],
- 'language/expressions/object/ident-name-method-def-break-escaped': [FAIL],
- 'language/expressions/object/ident-name-method-def-case-escaped': [FAIL],
- 'language/expressions/object/ident-name-method-def-catch-escaped': [FAIL],
- 'language/expressions/object/ident-name-method-def-class-escaped': [FAIL],
- 'language/expressions/object/ident-name-method-def-const-escaped': [FAIL],
- 'language/expressions/object/ident-name-method-def-continue-escaped': [FAIL],
- 'language/expressions/object/ident-name-method-def-debugger-escaped': [FAIL],
- 'language/expressions/object/ident-name-method-def-default-escaped-ext': [FAIL],
- 'language/expressions/object/ident-name-method-def-default-escaped': [FAIL],
- 'language/expressions/object/ident-name-method-def-delete-escaped': [FAIL],
- 'language/expressions/object/ident-name-method-def-do-escaped': [FAIL],
- 'language/expressions/object/ident-name-method-def-else-escaped': [FAIL],
- 'language/expressions/object/ident-name-method-def-enum-escaped': [FAIL],
- 'language/expressions/object/ident-name-method-def-export-escaped': [FAIL],
- 'language/expressions/object/ident-name-method-def-extends-escaped-ext': [FAIL],
- 'language/expressions/object/ident-name-method-def-extends-escaped': [FAIL],
- 'language/expressions/object/ident-name-method-def-finally-escaped': [FAIL],
- 'language/expressions/object/ident-name-method-def-for-escaped': [FAIL],
- 'language/expressions/object/ident-name-method-def-function-escaped': [FAIL],
- 'language/expressions/object/ident-name-method-def-if-escaped': [FAIL],
- 'language/expressions/object/ident-name-method-def-import-escaped': [FAIL],
- 'language/expressions/object/ident-name-method-def-in-escaped': [FAIL],
- 'language/expressions/object/ident-name-method-def-instanceof-escaped': [FAIL],
- 'language/expressions/object/ident-name-method-def-new-escaped': [FAIL],
- 'language/expressions/object/ident-name-method-def-return-escaped': [FAIL],
- 'language/expressions/object/ident-name-method-def-super-escaped': [FAIL],
- 'language/expressions/object/ident-name-method-def-switch-escaped': [FAIL],
- 'language/expressions/object/ident-name-method-def-this-escaped': [FAIL],
- 'language/expressions/object/ident-name-method-def-throw-escaped': [FAIL],
- 'language/expressions/object/ident-name-method-def-try-escaped': [FAIL],
- 'language/expressions/object/ident-name-method-def-typeof-escaped': [FAIL],
- 'language/expressions/object/ident-name-method-def-var-escaped': [FAIL],
- 'language/expressions/object/ident-name-method-def-void-escaped': [FAIL],
- 'language/expressions/object/ident-name-method-def-while-escaped': [FAIL],
- 'language/expressions/object/ident-name-method-def-with-escaped': [FAIL],
- 'language/expressions/object/ident-name-prop-name-literal-break-escaped': [FAIL],
- 'language/expressions/object/ident-name-prop-name-literal-case-escaped': [FAIL],
- 'language/expressions/object/ident-name-prop-name-literal-catch-escaped': [FAIL],
- 'language/expressions/object/ident-name-prop-name-literal-class-escaped': [FAIL],
- 'language/expressions/object/ident-name-prop-name-literal-const-escaped': [FAIL],
- 'language/expressions/object/ident-name-prop-name-literal-continue-escaped': [FAIL],
- 'language/expressions/object/ident-name-prop-name-literal-debugger-escaped': [FAIL],
- 'language/expressions/object/ident-name-prop-name-literal-default-escaped-ext': [FAIL],
- 'language/expressions/object/ident-name-prop-name-literal-default-escaped': [FAIL],
- 'language/expressions/object/ident-name-prop-name-literal-delete-escaped': [FAIL],
- 'language/expressions/object/ident-name-prop-name-literal-do-escaped': [FAIL],
- 'language/expressions/object/ident-name-prop-name-literal-else-escaped': [FAIL],
- 'language/expressions/object/ident-name-prop-name-literal-enum-escaped': [FAIL],
- 'language/expressions/object/ident-name-prop-name-literal-export-escaped': [FAIL],
- 'language/expressions/object/ident-name-prop-name-literal-extends-escaped-ext': [FAIL],
- 'language/expressions/object/ident-name-prop-name-literal-extends-escaped': [FAIL],
- 'language/expressions/object/ident-name-prop-name-literal-finally-escaped': [FAIL],
- 'language/expressions/object/ident-name-prop-name-literal-for-escaped': [FAIL],
- 'language/expressions/object/ident-name-prop-name-literal-function-escaped': [FAIL],
- 'language/expressions/object/ident-name-prop-name-literal-if-escaped': [FAIL],
- 'language/expressions/object/ident-name-prop-name-literal-import-escaped': [FAIL],
- 'language/expressions/object/ident-name-prop-name-literal-in-escaped': [FAIL],
- 'language/expressions/object/ident-name-prop-name-literal-instanceof-escaped': [FAIL],
- 'language/expressions/object/ident-name-prop-name-literal-new-escaped': [FAIL],
- 'language/expressions/object/ident-name-prop-name-literal-return-escaped': [FAIL],
- 'language/expressions/object/ident-name-prop-name-literal-super-escaped': [FAIL],
- 'language/expressions/object/ident-name-prop-name-literal-switch-escaped': [FAIL],
- 'language/expressions/object/ident-name-prop-name-literal-this-escaped': [FAIL],
- 'language/expressions/object/ident-name-prop-name-literal-throw-escaped': [FAIL],
- 'language/expressions/object/ident-name-prop-name-literal-try-escaped': [FAIL],
- 'language/expressions/object/ident-name-prop-name-literal-typeof-escaped': [FAIL],
- 'language/expressions/object/ident-name-prop-name-literal-var-escaped': [FAIL],
- 'language/expressions/object/ident-name-prop-name-literal-void-escaped': [FAIL],
- 'language/expressions/object/ident-name-prop-name-literal-while-escaped': [FAIL],
- 'language/expressions/object/ident-name-prop-name-literal-with-escaped': [FAIL],
- 'language/statements/class/ident-name-method-def-break-escaped': [FAIL],
- 'language/statements/class/ident-name-method-def-case-escaped': [FAIL],
- 'language/statements/class/ident-name-method-def-catch-escaped': [FAIL],
- 'language/statements/class/ident-name-method-def-class-escaped': [FAIL],
- 'language/statements/class/ident-name-method-def-const-escaped': [FAIL],
- 'language/statements/class/ident-name-method-def-continue-escaped': [FAIL],
- 'language/statements/class/ident-name-method-def-debugger-escaped': [FAIL],
- 'language/statements/class/ident-name-method-def-default-escaped-ext': [FAIL],
- 'language/statements/class/ident-name-method-def-default-escaped': [FAIL],
- 'language/statements/class/ident-name-method-def-delete-escaped': [FAIL],
- 'language/statements/class/ident-name-method-def-do-escaped': [FAIL],
- 'language/statements/class/ident-name-method-def-else-escaped': [FAIL],
- 'language/statements/class/ident-name-method-def-enum-escaped': [FAIL],
- 'language/statements/class/ident-name-method-def-export-escaped': [FAIL],
- 'language/statements/class/ident-name-method-def-extends-escaped-ext': [FAIL],
- 'language/statements/class/ident-name-method-def-extends-escaped': [FAIL],
- 'language/statements/class/ident-name-method-def-finally-escaped': [FAIL],
- 'language/statements/class/ident-name-method-def-for-escaped': [FAIL],
- 'language/statements/class/ident-name-method-def-function-escaped': [FAIL],
- 'language/statements/class/ident-name-method-def-if-escaped': [FAIL],
- 'language/statements/class/ident-name-method-def-import-escaped': [FAIL],
- 'language/statements/class/ident-name-method-def-in-escaped': [FAIL],
- 'language/statements/class/ident-name-method-def-instanceof-escaped': [FAIL],
- 'language/statements/class/ident-name-method-def-new-escaped': [FAIL],
- 'language/statements/class/ident-name-method-def-return-escaped': [FAIL],
- 'language/statements/class/ident-name-method-def-super-escaped': [FAIL],
- 'language/statements/class/ident-name-method-def-switch-escaped': [FAIL],
- 'language/statements/class/ident-name-method-def-this-escaped': [FAIL],
- 'language/statements/class/ident-name-method-def-throw-escaped': [FAIL],
- 'language/statements/class/ident-name-method-def-try-escaped': [FAIL],
- 'language/statements/class/ident-name-method-def-typeof-escaped': [FAIL],
- 'language/statements/class/ident-name-method-def-var-escaped': [FAIL],
- 'language/statements/class/ident-name-method-def-void-escaped': [FAIL],
- 'language/statements/class/ident-name-method-def-while-escaped': [FAIL],
- 'language/statements/class/ident-name-method-def-with-escaped': [FAIL],
+
+ # https://bugs.chromium.org/p/v8/issues/detail?id=9800
+ 'built-ins/String/prototype/matchAll/flags-undefined-throws': [FAIL],
+ 'built-ins/String/prototype/matchAll/flags-nonglobal-throws': [FAIL],
+
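For context on the two matchAll entries above: the revised spec text they test requires String.prototype.matchAll to throw a TypeError, before any matching happens, when the argument's flags are undefined or do not contain 'g'. A minimal sketch of that spec-mandated behavior in plain JavaScript (illustrative only, not copied from the test files):

    // Per the updated spec, matchAll must reject a regexp argument whose
    // flags do not include 'g'; V8 7.9 did not yet throw, hence the FAILs.
    const nonGlobal = /a/;           // flags === '' (no 'g')
    try {
      'aaa'.matchAll(nonGlobal);     // spec: TypeError
    } catch (e) {
      console.log(e instanceof TypeError); // true once v8:9800 is fixed
    }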
+ # https://bugs.chromium.org/p/v8/issues/detail?id=9818
+ 'built-ins/AsyncFunction/proto-from-ctor-realm': [FAIL],
+
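The proto-from-ctor-realm entry exercises GetPrototypeFromConstructor: when an async function is constructed with a new.target from another realm whose .prototype is not an object, the fallback %AsyncFunction.prototype% must be taken from new.target's realm, not the running realm's. A rough illustration using Node's vm module as a stand-in for test262's $262.createRealm() (the actual harness differs):

    const vm = require('vm');
    const ctx = vm.createContext();
    // A constructor from the other realm whose .prototype is not an object:
    const NewTarget = vm.runInContext(
        'const f = function () {}; f.prototype = null; f;', ctx);
    const otherAF = vm.runInContext(
        '(async function () {}).constructor', ctx);

    const AsyncFunction = (async function () {}).constructor;
    const instance = Reflect.construct(AsyncFunction, [], NewTarget);
    // Spec: the fallback prototype comes from NewTarget's realm.
    console.log(Object.getPrototypeOf(instance) === otherAF.prototype); // spec: true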
+ # https://bugs.chromium.org/p/v8/issues/detail?id=9819
+ 'built-ins/Array/prototype/flatMap/array-like-objects-nested': [FAIL],
######################## NEEDS INVESTIGATION ###########################
@@ -892,9 +654,10 @@
'intl402/String/prototype/toLocaleUpperCase/special_casing_Lithuanian': [FAIL],
'intl402/String/prototype/toLocaleUpperCase/special_casing_Turkish': [FAIL],
- # Unicode property escapes unavailable without i18n
+ # Unicode features unavailable without i18n, e.g. property escapes.
'built-ins/RegExp/property-escapes/*': [SKIP],
'built-ins/RegExp/named-groups/unicode-property-names': [SKIP],
+ 'built-ins/RegExp/match-indices/indices-array-unicode-property-names': [SKIP],
}], # no_i18n == True
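The three no_i18n skips above exist because Unicode property escapes depend on ICU's character-property tables, which no_i18n builds omit. For illustration (plain JavaScript; in no_i18n builds the pattern itself typically fails to compile with a SyntaxError):

    const greek = /\p{Script=Greek}/u; // requires ICU property data
    greek.test('α');                   // true in full builds; a SyntaxError
                                       // when V8 is built without i18n support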
['arch == arm or arch == mipsel or arch == mips or arch == arm64 or arch == mips64 or arch == mips64el', {
@@ -920,528 +683,6 @@
'built-ins/TypedArray/prototype/set/typedarray-arg-set-values-same-buffer-other-type': [SKIP],
}],
-['asan == True', {
- # BUG(v8:4653): Test262 tests which rely on quit() are not compatible with
- # asan's --omit-quit flag.
- 'built-ins/Promise/prototype/then/deferred-is-resolved-value': [SKIP],
- 'language/expressions/dynamic-import/always-create-new-promise': [SKIP],
- 'language/expressions/dynamic-import/assign-expr-get-value-abrupt-throws': [SKIP],
- 'language/expressions/dynamic-import/assignment-expression/additive-expr': [SKIP],
- 'language/expressions/dynamic-import/assignment-expression/array-literal': [SKIP],
- 'language/expressions/dynamic-import/assignment-expression/arrow-function': [SKIP],
- 'language/expressions/dynamic-import/assignment-expression/await-expr': [SKIP],
- 'language/expressions/dynamic-import/assignment-expression/await-identifier': [SKIP],
- 'language/expressions/dynamic-import/assignment-expression/call-expr-arguments': [SKIP],
- 'language/expressions/dynamic-import/assignment-expression/call-expr-expr': [SKIP],
- 'language/expressions/dynamic-import/assignment-expression/call-expr-identifier': [SKIP],
- 'language/expressions/dynamic-import/assignment-expression/cover-call-expr': [SKIP],
- 'language/expressions/dynamic-import/assignment-expression/cover-parenthesized-expr': [SKIP],
- 'language/expressions/dynamic-import/assignment-expression/identifier': [SKIP],
- 'language/expressions/dynamic-import/assignment-expression/import-meta': [SKIP],
- 'language/expressions/dynamic-import/assignment-expression/lhs-assign-operator-assign-expr': [SKIP],
- 'language/expressions/dynamic-import/assignment-expression/lhs-eq-assign-expr': [SKIP],
- 'language/expressions/dynamic-import/assignment-expression/lhs-eq-assign-expr-nostrict': [SKIP],
- 'language/expressions/dynamic-import/assignment-expression/logical-and-expr': [SKIP],
- 'language/expressions/dynamic-import/assignment-expression/logical-or-expr': [SKIP],
- 'language/expressions/dynamic-import/assignment-expression/member-expr': [SKIP],
- 'language/expressions/dynamic-import/assignment-expression/new-target': [SKIP],
- 'language/expressions/dynamic-import/assignment-expression/object-literal': [SKIP],
- 'language/expressions/dynamic-import/assignment-expression/tagged-function-call': [SKIP],
- 'language/expressions/dynamic-import/assignment-expression/ternary': [SKIP],
- 'language/expressions/dynamic-import/assignment-expression/this': [SKIP],
- 'language/expressions/dynamic-import/assignment-expression/unary-expr': [SKIP],
- 'language/expressions/dynamic-import/assignment-expression/yield-assign-expr': [SKIP],
- 'language/expressions/dynamic-import/assignment-expression/yield-expr': [SKIP],
- 'language/expressions/dynamic-import/assignment-expression/yield-identifier': [SKIP],
- 'language/expressions/dynamic-import/assignment-expression/yield-star': [SKIP],
- 'language/expressions/dynamic-import/await-import-evaluation': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-arrow-import-catch-eval-rqstd-abrupt-typeerror': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-arrow-import-catch-eval-rqstd-abrupt-urierror': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-arrow-import-catch-eval-script-code-target': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-arrow-import-catch-file-does-not-exist': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-arrow-import-catch-instn-iee-err-ambiguous-import': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-arrow-import-catch-instn-iee-err-circular': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-arrow-import-catch-specifier-tostring-abrupt-rejects': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-async-arrow-function-await-eval-rqstd-abrupt-typeerror': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-async-arrow-function-await-eval-rqstd-abrupt-urierror': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-async-arrow-function-await-eval-script-code-target': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-async-arrow-function-await-file-does-not-exist': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-async-arrow-function-await-instn-iee-err-ambiguous-import': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-async-arrow-function-await-instn-iee-err-circular': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-async-arrow-function-await-specifier-tostring-abrupt-rejects': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-async-arrow-function-return-await-eval-rqstd-abrupt-typeerror': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-async-arrow-function-return-await-eval-rqstd-abrupt-urierror': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-async-arrow-function-return-await-eval-script-code-target': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-async-arrow-function-return-await-file-does-not-exist': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-async-arrow-function-return-await-instn-iee-err-ambiguous-import': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-async-arrow-function-return-await-instn-iee-err-circular': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-async-arrow-function-return-await-specifier-tostring-abrupt-rejects': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-async-function-await-eval-rqstd-abrupt-typeerror': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-async-function-await-eval-rqstd-abrupt-urierror': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-async-function-await-eval-script-code-target': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-async-function-await-file-does-not-exist': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-async-function-await-instn-iee-err-ambiguous-import': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-async-function-await-instn-iee-err-circular': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-async-function-await-specifier-tostring-abrupt-rejects': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-async-function-eval-rqstd-abrupt-typeerror': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-async-function-eval-rqstd-abrupt-urierror': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-async-function-eval-script-code-target': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-async-function-file-does-not-exist': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-async-function-instn-iee-err-ambiguous-import': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-async-function-instn-iee-err-circular': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-async-function-return-await-eval-rqstd-abrupt-typeerror': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-async-function-return-await-eval-rqstd-abrupt-urierror': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-async-function-return-await-eval-script-code-target': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-async-function-return-await-file-does-not-exist': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-async-function-return-await-instn-iee-err-ambiguous-import': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-async-function-return-await-instn-iee-err-circular': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-async-function-return-await-specifier-tostring-abrupt-rejects': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-async-function-specifier-tostring-abrupt-rejects': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-async-gen-await-eval-rqstd-abrupt-typeerror': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-async-gen-await-eval-rqstd-abrupt-urierror': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-async-gen-await-eval-script-code-target': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-async-gen-await-file-does-not-exist': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-async-gen-await-instn-iee-err-ambiguous-import': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-async-gen-await-instn-iee-err-circular': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-async-gen-await-specifier-tostring-abrupt-rejects': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-async-gen-return-await-eval-rqstd-abrupt-typeerror': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-async-gen-return-await-eval-rqstd-abrupt-urierror': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-async-gen-return-await-eval-script-code-target': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-async-gen-return-await-file-does-not-exist': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-async-gen-return-await-instn-iee-err-ambiguous-import': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-async-gen-return-await-instn-iee-err-circular': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-async-gen-return-await-specifier-tostring-abrupt-rejects': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-block-import-catch-eval-rqstd-abrupt-typeerror': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-block-import-catch-eval-rqstd-abrupt-urierror': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-block-import-catch-eval-script-code-target': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-block-import-catch-file-does-not-exist': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-block-import-catch-instn-iee-err-ambiguous-import': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-block-import-catch-instn-iee-err-circular': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-block-import-catch-specifier-tostring-abrupt-rejects': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-block-labeled-eval-rqstd-abrupt-typeerror': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-block-labeled-eval-rqstd-abrupt-urierror': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-block-labeled-eval-script-code-target': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-block-labeled-file-does-not-exist': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-block-labeled-instn-iee-err-ambiguous-import': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-block-labeled-instn-iee-err-circular': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-block-labeled-specifier-tostring-abrupt-rejects': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-do-while-eval-rqstd-abrupt-typeerror': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-do-while-eval-rqstd-abrupt-urierror': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-do-while-eval-script-code-target': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-do-while-file-does-not-exist': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-do-while-instn-iee-err-ambiguous-import': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-do-while-instn-iee-err-circular': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-do-while-specifier-tostring-abrupt-rejects': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-else-import-catch-eval-rqstd-abrupt-typeerror': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-else-import-catch-eval-rqstd-abrupt-urierror': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-else-import-catch-eval-script-code-target': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-else-import-catch-file-does-not-exist': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-else-import-catch-instn-iee-err-ambiguous-import': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-else-import-catch-instn-iee-err-circular': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-else-import-catch-specifier-tostring-abrupt-rejects': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-function-import-catch-eval-rqstd-abrupt-typeerror': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-function-import-catch-eval-rqstd-abrupt-urierror': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-function-import-catch-eval-script-code-target': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-function-import-catch-file-does-not-exist': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-function-import-catch-instn-iee-err-ambiguous-import': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-function-import-catch-instn-iee-err-circular': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-function-import-catch-specifier-tostring-abrupt-rejects': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-if-import-catch-eval-rqstd-abrupt-typeerror': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-if-import-catch-eval-rqstd-abrupt-urierror': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-if-import-catch-eval-script-code-target': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-if-import-catch-file-does-not-exist': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-if-import-catch-instn-iee-err-ambiguous-import': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-if-import-catch-instn-iee-err-circular': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-if-import-catch-specifier-tostring-abrupt-rejects': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-while-import-catch-eval-rqstd-abrupt-typeerror': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-while-import-catch-eval-rqstd-abrupt-urierror': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-while-import-catch-eval-script-code-target': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-while-import-catch-file-does-not-exist': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-while-import-catch-instn-iee-err-ambiguous-import': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-while-import-catch-instn-iee-err-circular': [SKIP],
- 'language/expressions/dynamic-import/catch/nested-while-import-catch-specifier-tostring-abrupt-rejects': [SKIP],
- 'language/expressions/dynamic-import/catch/top-level-import-catch-eval-rqstd-abrupt-typeerror': [SKIP],
- 'language/expressions/dynamic-import/catch/top-level-import-catch-eval-rqstd-abrupt-urierror': [SKIP],
- 'language/expressions/dynamic-import/catch/top-level-import-catch-eval-script-code-target': [SKIP],
- 'language/expressions/dynamic-import/catch/top-level-import-catch-file-does-not-exist': [SKIP],
- 'language/expressions/dynamic-import/catch/top-level-import-catch-instn-iee-err-ambiguous-import': [SKIP],
- 'language/expressions/dynamic-import/catch/top-level-import-catch-instn-iee-err-circular': [SKIP],
- 'language/expressions/dynamic-import/catch/top-level-import-catch-specifier-tostring-abrupt-rejects': [SKIP],
- 'language/expressions/dynamic-import/custom-primitive': [SKIP],
- 'language/expressions/dynamic-import/escape-sequence-import': [SKIP],
- 'language/expressions/dynamic-import/eval-export-dflt-cls-anon': [SKIP],
- 'language/expressions/dynamic-import/eval-export-dflt-cls-named': [SKIP],
- 'language/expressions/dynamic-import/eval-export-dflt-cls-name-meth': [SKIP],
- 'language/expressions/dynamic-import/eval-export-dflt-expr-cls-anon': [SKIP],
- 'language/expressions/dynamic-import/eval-export-dflt-expr-cls-named': [SKIP],
- 'language/expressions/dynamic-import/eval-export-dflt-expr-cls-name-meth': [SKIP],
- 'language/expressions/dynamic-import/eval-export-dflt-expr-fn-anon': [SKIP],
- 'language/expressions/dynamic-import/eval-export-dflt-expr-fn-named': [SKIP],
- 'language/expressions/dynamic-import/eval-export-dflt-expr-gen-anon': [SKIP],
- 'language/expressions/dynamic-import/eval-export-dflt-expr-gen-named': [SKIP],
- 'language/expressions/dynamic-import/eval-export-dflt-expr-in': [SKIP],
- 'language/expressions/dynamic-import/eval-rqstd-once': [SKIP],
- 'language/expressions/dynamic-import/eval-self-once-module': [SKIP],
- 'language/expressions/dynamic-import/eval-self-once-script': [SKIP],
- 'language/expressions/dynamic-import/for-await-resolution-and-error-agen': [SKIP],
- 'language/expressions/dynamic-import/for-await-resolution-and-error-agen-yield': [SKIP],
- 'language/expressions/dynamic-import/for-await-resolution-and-error': [SKIP],
- 'language/expressions/dynamic-import/imported-self-update': [SKIP],
- 'language/expressions/dynamic-import/indirect-resolution': [SKIP],
- 'language/expressions/dynamic-import/namespace/await-ns-define-own-property': [SKIP],
- 'language/expressions/dynamic-import/namespace/await-ns-delete-exported-init-no-strict': [SKIP],
- 'language/expressions/dynamic-import/namespace/await-ns-delete-exported-init-strict': [SKIP],
- 'language/expressions/dynamic-import/namespace/await-ns-delete-non-exported-no-strict': [SKIP],
- 'language/expressions/dynamic-import/namespace/await-ns-delete-non-exported-strict': [SKIP],
- 'language/expressions/dynamic-import/namespace/await-ns-extensible': [SKIP],
- 'language/expressions/dynamic-import/namespace/await-ns-get-nested-namespace-dflt-direct': [SKIP],
- 'language/expressions/dynamic-import/namespace/await-ns-get-nested-namespace-dflt-indirect': [SKIP],
- 'language/expressions/dynamic-import/namespace/await-ns-get-nested-namespace-props-nrml': [SKIP],
- 'language/expressions/dynamic-import/namespace/await-ns-get-own-property-str-found-init': [SKIP],
- 'language/expressions/dynamic-import/namespace/await-ns-get-own-property-str-not-found': [SKIP],
- 'language/expressions/dynamic-import/namespace/await-ns-get-own-property-sym': [SKIP],
- 'language/expressions/dynamic-import/namespace/await-ns-get-str-found': [SKIP],
- 'language/expressions/dynamic-import/namespace/await-ns-get-str-not-found': [SKIP],
- 'language/expressions/dynamic-import/namespace/await-ns-get-sym-found': [SKIP],
- 'language/expressions/dynamic-import/namespace/await-ns-get-sym-not-found': [SKIP],
- 'language/expressions/dynamic-import/namespace/await-ns-has-property-str-found-init': [SKIP],
- 'language/expressions/dynamic-import/namespace/await-ns-has-property-str-not-found': [SKIP],
- 'language/expressions/dynamic-import/namespace/await-ns-has-property-sym-found': [SKIP],
- 'language/expressions/dynamic-import/namespace/await-ns-has-property-sym-not-found': [SKIP],
- 'language/expressions/dynamic-import/namespace/await-ns-no-iterator': [SKIP],
- 'language/expressions/dynamic-import/namespace/await-ns-own-property-keys-sort': [SKIP],
- 'language/expressions/dynamic-import/namespace/await-ns-prevent-extensions-object': [SKIP],
- 'language/expressions/dynamic-import/namespace/await-ns-prevent-extensions-reflect': [SKIP],
- 'language/expressions/dynamic-import/namespace/await-ns-prop-descs': [SKIP],
- 'language/expressions/dynamic-import/namespace/await-ns-prototype': [SKIP],
- 'language/expressions/dynamic-import/namespace/await-ns-set-no-strict': [SKIP],
- 'language/expressions/dynamic-import/namespace/await-ns-set-prototype-of': [SKIP],
- 'language/expressions/dynamic-import/namespace/await-ns-set-prototype-of-null': [SKIP],
- 'language/expressions/dynamic-import/namespace/await-ns-set-same-values-no-strict': [SKIP],
- 'language/expressions/dynamic-import/namespace/await-ns-set-same-values-strict': [SKIP],
- 'language/expressions/dynamic-import/namespace/await-ns-set-strict': [SKIP],
- 'language/expressions/dynamic-import/namespace/await-ns-Symbol-toStringTag': [SKIP],
- 'language/expressions/dynamic-import/namespace/default-property-not-set-own': [SKIP],
- 'language/expressions/dynamic-import/namespace/promise-then-ns-define-own-property': [SKIP],
- 'language/expressions/dynamic-import/namespace/promise-then-ns-delete-exported-init-no-strict': [SKIP],
- 'language/expressions/dynamic-import/namespace/promise-then-ns-delete-exported-init-strict': [SKIP],
- 'language/expressions/dynamic-import/namespace/promise-then-ns-delete-non-exported-no-strict': [SKIP],
- 'language/expressions/dynamic-import/namespace/promise-then-ns-delete-non-exported-strict': [SKIP],
- 'language/expressions/dynamic-import/namespace/promise-then-ns-extensible': [SKIP],
- 'language/expressions/dynamic-import/namespace/promise-then-ns-get-nested-namespace-dflt-direct': [SKIP],
- 'language/expressions/dynamic-import/namespace/promise-then-ns-get-nested-namespace-dflt-indirect': [SKIP],
- 'language/expressions/dynamic-import/namespace/promise-then-ns-get-nested-namespace-props-nrml': [SKIP],
- 'language/expressions/dynamic-import/namespace/promise-then-ns-get-own-property-str-found-init': [SKIP],
- 'language/expressions/dynamic-import/namespace/promise-then-ns-get-own-property-str-not-found': [SKIP],
- 'language/expressions/dynamic-import/namespace/promise-then-ns-get-own-property-sym': [SKIP],
- 'language/expressions/dynamic-import/namespace/promise-then-ns-get-str-found': [SKIP],
- 'language/expressions/dynamic-import/namespace/promise-then-ns-get-str-not-found': [SKIP],
- 'language/expressions/dynamic-import/namespace/promise-then-ns-get-sym-found': [SKIP],
- 'language/expressions/dynamic-import/namespace/promise-then-ns-get-sym-not-found': [SKIP],
- 'language/expressions/dynamic-import/namespace/promise-then-ns-has-property-str-found-init': [SKIP],
- 'language/expressions/dynamic-import/namespace/promise-then-ns-has-property-str-not-found': [SKIP],
- 'language/expressions/dynamic-import/namespace/promise-then-ns-has-property-sym-found': [SKIP],
- 'language/expressions/dynamic-import/namespace/promise-then-ns-has-property-sym-not-found': [SKIP],
- 'language/expressions/dynamic-import/namespace/promise-then-ns-no-iterator': [SKIP],
- 'language/expressions/dynamic-import/namespace/promise-then-ns-own-property-keys-sort': [SKIP],
- 'language/expressions/dynamic-import/namespace/promise-then-ns-prevent-extensions-object': [SKIP],
- 'language/expressions/dynamic-import/namespace/promise-then-ns-prevent-extensions-reflect': [SKIP],
- 'language/expressions/dynamic-import/namespace/promise-then-ns-prop-descs': [SKIP],
- 'language/expressions/dynamic-import/namespace/promise-then-ns-prototype': [SKIP],
- 'language/expressions/dynamic-import/namespace/promise-then-ns-set-no-strict': [SKIP],
- 'language/expressions/dynamic-import/namespace/promise-then-ns-set-prototype-of': [SKIP],
- 'language/expressions/dynamic-import/namespace/promise-then-ns-set-prototype-of-null': [SKIP],
- 'language/expressions/dynamic-import/namespace/promise-then-ns-set-same-values-no-strict': [SKIP],
- 'language/expressions/dynamic-import/namespace/promise-then-ns-set-same-values-strict': [SKIP],
- 'language/expressions/dynamic-import/namespace/promise-then-ns-set-strict': [SKIP],
- 'language/expressions/dynamic-import/namespace/promise-then-ns-Symbol-toStringTag': [SKIP],
- 'language/expressions/dynamic-import/returns-promise': [SKIP],
- 'language/expressions/dynamic-import/reuse-namespace-object': [SKIP],
- 'language/expressions/dynamic-import/reuse-namespace-object-from-import': [SKIP],
- 'language/expressions/dynamic-import/reuse-namespace-object-from-script': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/nested-arrow-assignment-expression-assignment-expr-not-optional': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/nested-arrow-assignment-expression-no-new-call-expression': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/nested-arrow-assignment-expression-no-rest-param': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/nested-arrow-assignment-expression-not-extensible-args': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/nested-arrow-assignment-expression-not-extensible-no-trailing-comma': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/nested-arrow-assignment-expr-not-optional': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/nested-arrow-no-new-call-expression': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/nested-arrow-no-rest-param': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/nested-arrow-not-extensible-args': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/nested-arrow-not-extensible-no-trailing-comma': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/nested-async-arrow-function-await-assignment-expr-not-optional': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/nested-async-arrow-function-await-no-new-call-expression': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/nested-async-arrow-function-await-no-rest-param': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/nested-async-arrow-function-await-not-extensible-args': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/nested-async-arrow-function-await-not-extensible-no-trailing-comma': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/nested-async-arrow-function-return-await-assignment-expr-not-optional': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/nested-async-arrow-function-return-await-no-new-call-expression': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/nested-async-arrow-function-return-await-no-rest-param': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/nested-async-arrow-function-return-await-not-extensible-args': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/nested-async-arrow-function-return-await-not-extensible-no-trailing-comma': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/nested-async-function-assignment-expr-not-optional': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/nested-async-function-await-assignment-expr-not-optional': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/nested-async-function-await-no-new-call-expression': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/nested-async-function-await-no-rest-param': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/nested-async-function-await-not-extensible-args': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/nested-async-function-await-not-extensible-no-trailing-comma': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/nested-async-function-no-new-call-expression': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/nested-async-function-no-rest-param': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/nested-async-function-not-extensible-args': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/nested-async-function-not-extensible-no-trailing-comma': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/nested-async-function-return-await-assignment-expr-not-optional': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/nested-async-function-return-await-no-new-call-expression': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/nested-async-function-return-await-no-rest-param': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/nested-async-function-return-await-not-extensible-args': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/nested-async-function-return-await-not-extensible-no-trailing-comma': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/nested-async-gen-await-assignment-expr-not-optional': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/nested-async-gen-await-no-new-call-expression': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/nested-async-gen-await-no-rest-param': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/nested-async-gen-await-not-extensible-args': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/nested-async-gen-await-not-extensible-no-trailing-comma': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/nested-block-assignment-expr-not-optional': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/nested-block-labeled-assignment-expr-not-optional': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/nested-block-labeled-no-new-call-expression': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/nested-block-labeled-no-rest-param': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/nested-block-labeled-not-extensible-args': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/nested-block-labeled-not-extensible-no-trailing-comma': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/nested-block-no-new-call-expression': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/nested-block-no-rest-param': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/nested-block-not-extensible-args': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/nested-block-not-extensible-no-trailing-comma': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/nested-do-while-assignment-expr-not-optional': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/nested-do-while-no-new-call-expression': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/nested-do-while-no-rest-param': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/nested-do-while-not-extensible-args': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/nested-do-while-not-extensible-no-trailing-comma': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/nested-else-assignment-expr-not-optional': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/nested-else-braceless-assignment-expr-not-optional': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/nested-else-braceless-no-new-call-expression': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/nested-else-braceless-no-rest-param': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/nested-else-braceless-not-extensible-args': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/nested-else-braceless-not-extensible-no-trailing-comma': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/nested-else-no-new-call-expression': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/nested-else-no-rest-param': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/nested-else-not-extensible-args': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/nested-else-not-extensible-no-trailing-comma': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/nested-function-assignment-expr-not-optional': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/nested-function-no-new-call-expression': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/nested-function-no-rest-param': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/nested-function-not-extensible-args': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/nested-function-not-extensible-no-trailing-comma': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/nested-function-return-assignment-expr-not-optional': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/nested-function-return-no-new-call-expression': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/nested-function-return-no-rest-param': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/nested-function-return-not-extensible-args': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/nested-function-return-not-extensible-no-trailing-comma': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/nested-if-assignment-expr-not-optional': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/nested-if-braceless-assignment-expr-not-optional': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/nested-if-braceless-no-new-call-expression': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/nested-if-braceless-no-rest-param': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/nested-if-braceless-not-extensible-args': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/nested-if-braceless-not-extensible-no-trailing-comma': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/nested-if-no-new-call-expression': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/nested-if-no-rest-param': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/nested-if-not-extensible-args': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/nested-if-not-extensible-no-trailing-comma': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/nested-while-assignment-expr-not-optional': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/nested-while-no-new-call-expression': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/nested-while-no-rest-param': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/nested-while-not-extensible-args': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/nested-while-not-extensible-no-trailing-comma': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/nested-with-assignment-expr-not-optional': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/nested-with-expression-assignment-expr-not-optional': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/nested-with-expression-no-new-call-expression': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/nested-with-expression-no-rest-param': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/nested-with-expression-not-extensible-args': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/nested-with-expression-not-extensible-no-trailing-comma': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/nested-with-no-new-call-expression': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/nested-with-no-rest-param': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/nested-with-not-extensible-args': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/nested-with-not-extensible-no-trailing-comma': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/top-level-assignment-expr-not-optional': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/top-level-no-new-call-expression': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/top-level-no-rest-param': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/top-level-not-extensible-args': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/top-level-not-extensible-no-trailing-comma': [SKIP],
- 'language/expressions/dynamic-import/syntax/valid/callexpression-arguments': [SKIP],
- 'language/expressions/dynamic-import/syntax/valid/callexpression-templateliteral': [SKIP],
- 'language/expressions/dynamic-import/syntax/valid/nested-arrow-assignment-expression-empty-str-is-valid-assign-expr': [SKIP],
- 'language/expressions/dynamic-import/syntax/valid/nested-arrow-assignment-expression-nested-imports': [SKIP],
- 'language/expressions/dynamic-import/syntax/valid/nested-arrow-assignment-expression-script-code-valid': [SKIP],
- 'language/expressions/dynamic-import/syntax/valid/nested-arrow-empty-str-is-valid-assign-expr': [SKIP],
- 'language/expressions/dynamic-import/syntax/valid/nested-arrow-nested-imports': [SKIP],
- 'language/expressions/dynamic-import/syntax/valid/nested-arrow-script-code-valid': [SKIP],
- 'language/expressions/dynamic-import/syntax/valid/nested-async-arrow-function-await-empty-str-is-valid-assign-expr': [SKIP],
- 'language/expressions/dynamic-import/syntax/valid/nested-async-arrow-function-await-nested-imports': [SKIP],
- 'language/expressions/dynamic-import/syntax/valid/nested-async-arrow-function-await-script-code-valid': [SKIP],
- 'language/expressions/dynamic-import/syntax/valid/nested-async-arrow-function-return-await-empty-str-is-valid-assign-expr': [SKIP],
- 'language/expressions/dynamic-import/syntax/valid/nested-async-arrow-function-return-await-nested-imports': [SKIP],
- 'language/expressions/dynamic-import/syntax/valid/nested-async-arrow-function-return-await-script-code-valid': [SKIP],
- 'language/expressions/dynamic-import/syntax/valid/nested-async-function-await-empty-str-is-valid-assign-expr': [SKIP],
- 'language/expressions/dynamic-import/syntax/valid/nested-async-function-await-nested-imports': [SKIP],
- 'language/expressions/dynamic-import/syntax/valid/nested-async-function-await-script-code-valid': [SKIP],
- 'language/expressions/dynamic-import/syntax/valid/nested-async-function-empty-str-is-valid-assign-expr': [SKIP],
- 'language/expressions/dynamic-import/syntax/valid/nested-async-function-nested-imports': [SKIP],
- 'language/expressions/dynamic-import/syntax/valid/nested-async-function-return-await-empty-str-is-valid-assign-expr': [SKIP],
- 'language/expressions/dynamic-import/syntax/valid/nested-async-function-return-await-nested-imports': [SKIP],
- 'language/expressions/dynamic-import/syntax/valid/nested-async-function-return-await-script-code-valid': [SKIP],
- 'language/expressions/dynamic-import/syntax/valid/nested-async-function-script-code-valid': [SKIP],
- 'language/expressions/dynamic-import/syntax/valid/nested-async-gen-await-empty-str-is-valid-assign-expr': [SKIP],
- 'language/expressions/dynamic-import/syntax/valid/nested-async-gen-await-nested-imports': [SKIP],
- 'language/expressions/dynamic-import/syntax/valid/nested-async-gen-await-script-code-valid': [SKIP],
- 'language/expressions/dynamic-import/syntax/valid/nested-block-empty-str-is-valid-assign-expr': [SKIP],
- 'language/expressions/dynamic-import/syntax/valid/nested-block-labeled-empty-str-is-valid-assign-expr': [SKIP],
- 'language/expressions/dynamic-import/syntax/valid/nested-block-labeled-nested-imports': [SKIP],
- 'language/expressions/dynamic-import/syntax/valid/nested-block-labeled-script-code-valid': [SKIP],
- 'language/expressions/dynamic-import/syntax/valid/nested-block-nested-imports': [SKIP],
- 'language/expressions/dynamic-import/syntax/valid/nested-block-script-code-valid': [SKIP],
- 'language/expressions/dynamic-import/syntax/valid/nested-do-while-empty-str-is-valid-assign-expr': [SKIP],
- 'language/expressions/dynamic-import/syntax/valid/nested-do-while-nested-imports': [SKIP],
- 'language/expressions/dynamic-import/syntax/valid/nested-do-while-script-code-valid': [SKIP],
- 'language/expressions/dynamic-import/syntax/valid/nested-else-braceless-empty-str-is-valid-assign-expr': [SKIP],
- 'language/expressions/dynamic-import/syntax/valid/nested-else-braceless-nested-imports': [SKIP],
- 'language/expressions/dynamic-import/syntax/valid/nested-else-braceless-script-code-valid': [SKIP],
- 'language/expressions/dynamic-import/syntax/valid/nested-else-empty-str-is-valid-assign-expr': [SKIP],
- 'language/expressions/dynamic-import/syntax/valid/nested-else-nested-imports': [SKIP],
- 'language/expressions/dynamic-import/syntax/valid/nested-else-script-code-valid': [SKIP],
- 'language/expressions/dynamic-import/syntax/valid/nested-function-empty-str-is-valid-assign-expr': [SKIP],
- 'language/expressions/dynamic-import/syntax/valid/nested-function-nested-imports': [SKIP],
- 'language/expressions/dynamic-import/syntax/valid/nested-function-return-empty-str-is-valid-assign-expr': [SKIP],
- 'language/expressions/dynamic-import/syntax/valid/nested-function-return-nested-imports': [SKIP],
- 'language/expressions/dynamic-import/syntax/valid/nested-function-return-script-code-valid': [SKIP],
- 'language/expressions/dynamic-import/syntax/valid/nested-function-script-code-valid': [SKIP],
- 'language/expressions/dynamic-import/syntax/valid/nested-if-braceless-empty-str-is-valid-assign-expr': [SKIP],
- 'language/expressions/dynamic-import/syntax/valid/nested-if-braceless-nested-imports': [SKIP],
- 'language/expressions/dynamic-import/syntax/valid/nested-if-braceless-script-code-valid': [SKIP],
- 'language/expressions/dynamic-import/syntax/valid/nested-if-empty-str-is-valid-assign-expr': [SKIP],
- 'language/expressions/dynamic-import/syntax/valid/nested-if-nested-imports': [SKIP],
- 'language/expressions/dynamic-import/syntax/valid/nested-if-script-code-valid': [SKIP],
- 'language/expressions/dynamic-import/syntax/valid/nested-while-empty-str-is-valid-assign-expr': [SKIP],
- 'language/expressions/dynamic-import/syntax/valid/nested-while-nested-imports': [SKIP],
- 'language/expressions/dynamic-import/syntax/valid/nested-while-script-code-valid': [SKIP],
- 'language/expressions/dynamic-import/syntax/valid/nested-with-empty-str-is-valid-assign-expr': [SKIP],
- 'language/expressions/dynamic-import/syntax/valid/nested-with-expression-empty-str-is-valid-assign-expr': [SKIP],
- 'language/expressions/dynamic-import/syntax/valid/nested-with-expression-nested-imports': [SKIP],
- 'language/expressions/dynamic-import/syntax/valid/nested-with-expression-script-code-valid': [SKIP],
- 'language/expressions/dynamic-import/syntax/valid/nested-with-nested-imports': [SKIP],
- 'language/expressions/dynamic-import/syntax/valid/nested-with-script-code-valid': [SKIP],
- 'language/expressions/dynamic-import/syntax/valid/new-covered-expression-is-valid': [SKIP],
- 'language/expressions/dynamic-import/syntax/valid/top-level-empty-str-is-valid-assign-expr': [SKIP],
- 'language/expressions/dynamic-import/syntax/valid/top-level-nested-imports': [SKIP],
- 'language/expressions/dynamic-import/syntax/valid/top-level-script-code-valid': [SKIP],
- 'language/expressions/dynamic-import/update-to-dynamic-import': [SKIP],
- 'language/expressions/dynamic-import/usage-from-eval': [SKIP],
- 'language/expressions/dynamic-import/usage/nested-arrow-assignment-expression-eval-gtbndng-indirect-update-dflt': [SKIP],
- 'language/expressions/dynamic-import/usage/nested-arrow-assignment-expression-eval-gtbndng-indirect-update': [SKIP],
- 'language/expressions/dynamic-import/usage/nested-arrow-assignment-expression-eval-script-code-host-resolves-module-code': [SKIP],
- 'language/expressions/dynamic-import/usage/nested-arrow-assignment-expression-is-call-expression-square-brackets': [SKIP],
- 'language/expressions/dynamic-import/usage/nested-arrow-assignment-expression-returns-thenable': [SKIP],
- 'language/expressions/dynamic-import/usage/nested-arrow-assignment-expression-specifier-tostring': [SKIP],
- 'language/expressions/dynamic-import/usage/nested-arrow-import-then-eval-gtbndng-indirect-update-dflt': [SKIP],
- 'language/expressions/dynamic-import/usage/nested-arrow-import-then-eval-gtbndng-indirect-update': [SKIP],
- 'language/expressions/dynamic-import/usage/nested-arrow-import-then-eval-script-code-host-resolves-module-code': [SKIP],
- 'language/expressions/dynamic-import/usage/nested-arrow-import-then-is-call-expression-square-brackets': [SKIP],
- 'language/expressions/dynamic-import/usage/nested-arrow-import-then-returns-thenable': [SKIP],
- 'language/expressions/dynamic-import/usage/nested-arrow-import-then-specifier-tostring': [SKIP],
- 'language/expressions/dynamic-import/usage/nested-async-arrow-function-await-eval-gtbndng-indirect-update-dflt': [SKIP],
- 'language/expressions/dynamic-import/usage/nested-async-arrow-function-await-eval-gtbndng-indirect-update': [SKIP],
- 'language/expressions/dynamic-import/usage/nested-async-arrow-function-await-eval-script-code-host-resolves-module-code': [SKIP],
- 'language/expressions/dynamic-import/usage/nested-async-arrow-function-await-is-call-expression-square-brackets': [SKIP],
- 'language/expressions/dynamic-import/usage/nested-async-arrow-function-await-returns-thenable': [SKIP],
- 'language/expressions/dynamic-import/usage/nested-async-arrow-function-await-specifier-tostring': [SKIP],
- 'language/expressions/dynamic-import/usage/nested-async-arrow-function-return-await-eval-gtbndng-indirect-update-dflt': [SKIP],
- 'language/expressions/dynamic-import/usage/nested-async-arrow-function-return-await-eval-gtbndng-indirect-update': [SKIP],
- 'language/expressions/dynamic-import/usage/nested-async-arrow-function-return-await-eval-script-code-host-resolves-module-code': [SKIP],
- 'language/expressions/dynamic-import/usage/nested-async-arrow-function-return-await-is-call-expression-square-brackets': [SKIP],
- 'language/expressions/dynamic-import/usage/nested-async-arrow-function-return-await-returns-thenable': [SKIP],
- 'language/expressions/dynamic-import/usage/nested-async-arrow-function-return-await-specifier-tostring': [SKIP],
- 'language/expressions/dynamic-import/usage/nested-async-function-await-eval-gtbndng-indirect-update-dflt': [SKIP],
- 'language/expressions/dynamic-import/usage/nested-async-function-await-eval-gtbndng-indirect-update': [SKIP],
- 'language/expressions/dynamic-import/usage/nested-async-function-await-eval-script-code-host-resolves-module-code': [SKIP],
- 'language/expressions/dynamic-import/usage/nested-async-function-await-is-call-expression-square-brackets': [SKIP],
- 'language/expressions/dynamic-import/usage/nested-async-function-await-returns-thenable': [SKIP],
- 'language/expressions/dynamic-import/usage/nested-async-function-await-specifier-tostring': [SKIP],
- 'language/expressions/dynamic-import/usage/nested-async-function-eval-gtbndng-indirect-update-dflt': [SKIP],
- 'language/expressions/dynamic-import/usage/nested-async-function-eval-gtbndng-indirect-update': [SKIP],
- 'language/expressions/dynamic-import/usage/nested-async-function-eval-script-code-host-resolves-module-code': [SKIP],
- 'language/expressions/dynamic-import/usage/nested-async-function-is-call-expression-square-brackets': [SKIP],
- 'language/expressions/dynamic-import/usage/nested-async-function-return-await-eval-gtbndng-indirect-update-dflt': [SKIP],
- 'language/expressions/dynamic-import/usage/nested-async-function-return-await-eval-gtbndng-indirect-update': [SKIP],
- 'language/expressions/dynamic-import/usage/nested-async-function-return-await-eval-script-code-host-resolves-module-code': [SKIP],
- 'language/expressions/dynamic-import/usage/nested-async-function-return-await-is-call-expression-square-brackets': [SKIP],
- 'language/expressions/dynamic-import/usage/nested-async-function-return-await-returns-thenable': [SKIP],
- 'language/expressions/dynamic-import/usage/nested-async-function-return-await-specifier-tostring': [SKIP],
- 'language/expressions/dynamic-import/usage/nested-async-function-returns-thenable': [SKIP],
- 'language/expressions/dynamic-import/usage/nested-async-function-specifier-tostring': [SKIP],
- 'language/expressions/dynamic-import/usage/nested-async-gen-await-eval-gtbndng-indirect-update-dflt': [SKIP],
- 'language/expressions/dynamic-import/usage/nested-async-gen-await-eval-gtbndng-indirect-update': [SKIP],
- 'language/expressions/dynamic-import/usage/nested-async-gen-await-eval-script-code-host-resolves-module-code': [SKIP],
- 'language/expressions/dynamic-import/usage/nested-async-gen-await-is-call-expression-square-brackets': [SKIP],
- 'language/expressions/dynamic-import/usage/nested-async-gen-await-returns-thenable': [SKIP],
- 'language/expressions/dynamic-import/usage/nested-async-gen-await-specifier-tostring': [SKIP],
- 'language/expressions/dynamic-import/usage/nested-async-gen-return-await-eval-gtbndng-indirect-update-dflt': [SKIP],
- 'language/expressions/dynamic-import/usage/nested-async-gen-return-await-eval-gtbndng-indirect-update': [SKIP],
- 'language/expressions/dynamic-import/usage/nested-async-gen-return-await-eval-script-code-host-resolves-module-code': [SKIP],
- 'language/expressions/dynamic-import/usage/nested-async-gen-return-await-is-call-expression-square-brackets': [SKIP],
- 'language/expressions/dynamic-import/usage/nested-async-gen-return-await-returns-thenable': [SKIP],
- 'language/expressions/dynamic-import/usage/nested-async-gen-return-await-specifier-tostring': [SKIP],
- 'language/expressions/dynamic-import/usage/nested-block-import-then-eval-gtbndng-indirect-update-dflt': [SKIP],
- 'language/expressions/dynamic-import/usage/nested-block-import-then-eval-gtbndng-indirect-update': [SKIP],
- 'language/expressions/dynamic-import/usage/nested-block-import-then-eval-script-code-host-resolves-module-code': [SKIP],
- 'language/expressions/dynamic-import/usage/nested-block-import-then-is-call-expression-square-brackets': [SKIP],
- 'language/expressions/dynamic-import/usage/nested-block-import-then-returns-thenable': [SKIP],
- 'language/expressions/dynamic-import/usage/nested-block-import-then-specifier-tostring': [SKIP],
- 'language/expressions/dynamic-import/usage/nested-do-while-eval-gtbndng-indirect-update-dflt': [SKIP],
- 'language/expressions/dynamic-import/usage/nested-do-while-eval-gtbndng-indirect-update': [SKIP],
- 'language/expressions/dynamic-import/usage/nested-do-while-eval-script-code-host-resolves-module-code': [SKIP],
- 'language/expressions/dynamic-import/usage/nested-do-while-is-call-expression-square-brackets': [SKIP],
- 'language/expressions/dynamic-import/usage/nested-do-while-returns-thenable': [SKIP],
- 'language/expressions/dynamic-import/usage/nested-do-while-specifier-tostring': [SKIP],
- 'language/expressions/dynamic-import/usage/nested-else-import-then-eval-gtbndng-indirect-update-dflt': [SKIP],
- 'language/expressions/dynamic-import/usage/nested-else-import-then-eval-gtbndng-indirect-update': [SKIP],
- 'language/expressions/dynamic-import/usage/nested-else-import-then-eval-script-code-host-resolves-module-code': [SKIP],
- 'language/expressions/dynamic-import/usage/nested-else-import-then-is-call-expression-square-brackets': [SKIP],
- 'language/expressions/dynamic-import/usage/nested-else-import-then-returns-thenable': [SKIP],
- 'language/expressions/dynamic-import/usage/nested-else-import-then-specifier-tostring': [SKIP],
- 'language/expressions/dynamic-import/usage/nested-function-import-then-eval-gtbndng-indirect-update-dflt': [SKIP],
- 'language/expressions/dynamic-import/usage/nested-function-import-then-eval-gtbndng-indirect-update': [SKIP],
- 'language/expressions/dynamic-import/usage/nested-function-import-then-eval-script-code-host-resolves-module-code': [SKIP],
- 'language/expressions/dynamic-import/usage/nested-function-import-then-is-call-expression-square-brackets': [SKIP],
- 'language/expressions/dynamic-import/usage/nested-function-import-then-returns-thenable': [SKIP],
- 'language/expressions/dynamic-import/usage/nested-function-import-then-specifier-tostring': [SKIP],
- 'language/expressions/dynamic-import/usage/nested-if-braceless-eval-gtbndng-indirect-update-dflt': [SKIP],
- 'language/expressions/dynamic-import/usage/nested-if-braceless-eval-gtbndng-indirect-update': [SKIP],
- 'language/expressions/dynamic-import/usage/nested-if-braceless-eval-script-code-host-resolves-module-code': [SKIP],
- 'language/expressions/dynamic-import/usage/nested-if-braceless-is-call-expression-square-brackets': [SKIP],
- 'language/expressions/dynamic-import/usage/nested-if-braceless-returns-thenable': [SKIP],
- 'language/expressions/dynamic-import/usage/nested-if-braceless-specifier-tostring': [SKIP],
- 'language/expressions/dynamic-import/usage/nested-if-import-then-eval-gtbndng-indirect-update-dflt': [SKIP],
- 'language/expressions/dynamic-import/usage/nested-if-import-then-eval-gtbndng-indirect-update': [SKIP],
- 'language/expressions/dynamic-import/usage/nested-if-import-then-eval-script-code-host-resolves-module-code': [SKIP],
- 'language/expressions/dynamic-import/usage/nested-if-import-then-is-call-expression-square-brackets': [SKIP],
- 'language/expressions/dynamic-import/usage/nested-if-import-then-returns-thenable': [SKIP],
- 'language/expressions/dynamic-import/usage/nested-if-import-then-specifier-tostring': [SKIP],
- 'language/expressions/dynamic-import/usage/nested-while-import-then-eval-gtbndng-indirect-update-dflt': [SKIP],
- 'language/expressions/dynamic-import/usage/nested-while-import-then-eval-gtbndng-indirect-update': [SKIP],
- 'language/expressions/dynamic-import/usage/nested-while-import-then-eval-script-code-host-resolves-module-code': [SKIP],
- 'language/expressions/dynamic-import/usage/nested-while-import-then-is-call-expression-square-brackets': [SKIP],
- 'language/expressions/dynamic-import/usage/nested-while-import-then-returns-thenable': [SKIP],
- 'language/expressions/dynamic-import/usage/nested-while-import-then-specifier-tostring': [SKIP],
- 'language/expressions/dynamic-import/usage/syntax-nested-block-labeled-eval-gtbndng-indirect-update-dflt': [SKIP],
- 'language/expressions/dynamic-import/usage/syntax-nested-block-labeled-eval-gtbndng-indirect-update': [SKIP],
- 'language/expressions/dynamic-import/usage/syntax-nested-block-labeled-eval-script-code-host-resolves-module-code': [SKIP],
- 'language/expressions/dynamic-import/usage/syntax-nested-block-labeled-is-call-expression-square-brackets': [SKIP],
- 'language/expressions/dynamic-import/usage/syntax-nested-block-labeled-returns-thenable': [SKIP],
- 'language/expressions/dynamic-import/usage/syntax-nested-block-labeled-specifier-tostring': [SKIP],
- 'language/expressions/dynamic-import/usage/top-level-import-then-eval-gtbndng-indirect-update-dflt': [SKIP],
- 'language/expressions/dynamic-import/usage/top-level-import-then-eval-gtbndng-indirect-update': [SKIP],
- 'language/expressions/dynamic-import/usage/top-level-import-then-eval-script-code-host-resolves-module-code': [SKIP],
- 'language/expressions/dynamic-import/usage/top-level-import-then-is-call-expression-square-brackets': [SKIP],
- 'language/expressions/dynamic-import/usage/top-level-import-then-returns-thenable': [SKIP],
- 'language/expressions/dynamic-import/usage/top-level-import-then-specifier-tostring': [SKIP],
-}], # asan == True
-
['asan == True or msan == True or tsan == True', {
# https://bugs.chromium.org/p/v8/issues/detail?id=4639
# The failed allocation causes an asan/msan/tsan error
@@ -1451,6 +692,12 @@
'built-ins/SharedArrayBuffer/length-is-too-large-throws': [SKIP],
}], # asan == True or msan == True or tsan == True
+['system == android', {
+ # Android Arm64 failures
+ # https://bugs.chromium.org/p/v8/issues/detail?id=9845
+ 'intl402/DateTimeFormat/prototype/formatToParts/related-year': [FAIL],
+}], # system == android
+
##############################################################################
['variant == jitless', {
# https://crbug.com/v8/7777
diff --git a/deps/v8/test/test262/testcfg.py b/deps/v8/test/test262/testcfg.py
index 9aa91dfaef..d70e644d9b 100644
--- a/deps/v8/test/test262/testcfg.py
+++ b/deps/v8/test/test262/testcfg.py
@@ -44,26 +44,26 @@ from testrunner.outproc import test262
# TODO(littledan): move the flag mapping into the status file
FEATURE_FLAGS = {
- 'Intl.DateTimeFormat-datetimestyle': '--harmony-intl-datetime-style',
- 'Intl.DateTimeFormat-formatRange': '--harmony-intl-date-format-range',
- 'Intl.NumberFormat-unified': '--harmony-intl-numberformat-unified',
'Intl.Segmenter': '--harmony-intl-segmenter',
'Intl.DateTimeFormat-dayPeriod': '--harmony-intl-dateformat-day-period',
'Intl.DateTimeFormat-quarter': '--harmony-intl-dateformat-quarter',
'Intl.DateTimeFormat-fractionalSecondDigits': '--harmony-intl-dateformat-fractional-second-digits',
'Symbol.prototype.description': '--harmony-symbol-description',
'export-star-as-namespace-from-module': '--harmony-namespace-exports',
- 'BigInt': '--harmony-intl-bigint',
'Promise.allSettled': '--harmony-promise-all-settled',
'FinalizationGroup': '--harmony-weak-refs',
'WeakRef': '--harmony-weak-refs',
'host-gc-required': '--expose-gc-as=v8GC',
'optional-chaining': '--harmony-optional-chaining',
+ 'top-level-await': '--harmony-top-level-await',
+ 'regexp-match-indices': '--harmony-regexp-match-indices',
+ # https://github.com/tc39/test262/pull/2395
+ 'regexp-named-groups': '--harmony-regexp-match-indices',
+ 'class-methods-private': '--harmony-private-methods',
+ 'class-static-methods-private': '--harmony-private-methods',
}
-SKIPPED_FEATURES = set(['class-methods-private',
- 'class-static-methods-private',
- 'top-level-await'])
+SKIPPED_FEATURES = set([])
DATA = os.path.join(os.path.dirname(os.path.abspath(__file__)), "data")
diff --git a/deps/v8/test/unittests/BUILD.gn b/deps/v8/test/unittests/BUILD.gn
index 7a379f77e8..4ae76e1543 100644
--- a/deps/v8/test/unittests/BUILD.gn
+++ b/deps/v8/test/unittests/BUILD.gn
@@ -188,7 +188,9 @@ v8_source_set("unittests_sources") {
"logging/counters-unittest.cc",
"numbers/bigint-unittest.cc",
"numbers/conversions-unittest.cc",
+ "objects/backing-store-unittest.cc",
"objects/object-unittest.cc",
+ "objects/osr-optimized-code-cache-unittest.cc",
"objects/value-serializer-unittest.cc",
"parser/ast-value-unittest.cc",
"parser/preparser-unittest.cc",
diff --git a/deps/v8/test/unittests/api/isolate-unittest.cc b/deps/v8/test/unittests/api/isolate-unittest.cc
index 8d1a5dd84f..cda251f775 100644
--- a/deps/v8/test/unittests/api/isolate-unittest.cc
+++ b/deps/v8/test/unittests/api/isolate-unittest.cc
@@ -9,7 +9,6 @@
#include "include/v8.h"
#include "src/base/macros.h"
#include "src/base/platform/semaphore.h"
-#include "src/base/template-utils.h"
#include "src/execution/execution.h"
#include "src/execution/isolate.h"
#include "src/init/v8.h"
@@ -62,7 +61,7 @@ TEST_F(IsolateTest, MemoryPressureNotificationBackground) {
base::Semaphore semaphore(0);
internal::V8::GetCurrentPlatform()->CallOnWorkerThread(
- base::make_unique<MemoryPressureTask>(isolate(), &semaphore));
+ std::make_unique<MemoryPressureTask>(isolate(), &semaphore));
semaphore.Wait();
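
The make_unique migration in this and several later files swaps V8's
pre-C++14 base::make_unique shim for std::make_unique from <memory>. A
minimal standalone sketch of the pattern, using a hypothetical PrintTask
type rather than any real v8::Task:

    #include <memory>

    // Hypothetical stand-in for a task object handed to a worker thread.
    struct PrintTask {
      explicit PrintTask(int id) : id_(id) {}
      void Run() {}
      int id_;
    };

    int main() {
      // C++14's std::make_unique replaces the old base:: shim; the call
      // site is otherwise unchanged.
      std::unique_ptr<PrintTask> task = std::make_unique<PrintTask>(42);
      task->Run();
      return 0;
    }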
diff --git a/deps/v8/test/unittests/base/template-utils-unittest.cc b/deps/v8/test/unittests/base/template-utils-unittest.cc
index 0819b3de8c..4b1f3b834b 100644
--- a/deps/v8/test/unittests/base/template-utils-unittest.cc
+++ b/deps/v8/test/unittests/base/template-utils-unittest.cc
@@ -136,7 +136,7 @@ TEST(TemplateUtilsTest, FoldMoveOnlyType) {
str->push_back(c);
return str;
};
- std::unique_ptr<std::string> str = base::make_unique<std::string>("foo");
+ std::unique_ptr<std::string> str = std::make_unique<std::string>("foo");
std::unique_ptr<std::string> folded =
base::fold(fn, std::move(str), 'b', 'a', 'r');
CHECK_NULL(str);
diff --git a/deps/v8/test/unittests/base/utils/random-number-generator-unittest.cc b/deps/v8/test/unittests/base/utils/random-number-generator-unittest.cc
index 420b236432..453d788a6e 100644
--- a/deps/v8/test/unittests/base/utils/random-number-generator-unittest.cc
+++ b/deps/v8/test/unittests/base/utils/random-number-generator-unittest.cc
@@ -37,11 +37,10 @@ static void CheckSlowSample(const std::vector<uint64_t>& sample, uint64_t max,
}
}
-static void TestNextSample(
- RandomNumberGenerator& rng, // NOLINT(runtime/references)
- uint64_t max, size_t size, bool slow = false) {
+static void TestNextSample(RandomNumberGenerator* rng, uint64_t max,
+ size_t size, bool slow = false) {
std::vector<uint64_t> sample =
- slow ? rng.NextSampleSlow(max, size) : rng.NextSample(max, size);
+ slow ? rng->NextSampleSlow(max, size) : rng->NextSample(max, size);
CheckSample(sample, max, size);
}
@@ -100,14 +99,14 @@ TEST_P(RandomNumberGeneratorTest, NextSample0) {
size_t m = 1;
RandomNumberGenerator rng(GetParam());
- TestNextSample(rng, m, 0);
+ TestNextSample(&rng, m, 0);
}
TEST_P(RandomNumberGeneratorTest, NextSampleSlow0) {
size_t m = 1;
RandomNumberGenerator rng(GetParam());
- TestNextSample(rng, m, 0, true);
+ TestNextSample(&rng, m, 0, true);
}
TEST_P(RandomNumberGeneratorTest, NextSample1) {
@@ -115,7 +114,7 @@ TEST_P(RandomNumberGeneratorTest, NextSample1) {
RandomNumberGenerator rng(GetParam());
for (int k = 0; k < kMaxRuns; ++k) {
- TestNextSample(rng, m, 1);
+ TestNextSample(&rng, m, 1);
}
}
@@ -124,7 +123,7 @@ TEST_P(RandomNumberGeneratorTest, NextSampleSlow1) {
RandomNumberGenerator rng(GetParam());
for (int k = 0; k < kMaxRuns; ++k) {
- TestNextSample(rng, m, 1, true);
+ TestNextSample(&rng, m, 1, true);
}
}
@@ -133,7 +132,7 @@ TEST_P(RandomNumberGeneratorTest, NextSampleMax) {
RandomNumberGenerator rng(GetParam());
for (int k = 0; k < kMaxRuns; ++k) {
- TestNextSample(rng, m, m);
+ TestNextSample(&rng, m, m);
}
}
@@ -142,7 +141,7 @@ TEST_P(RandomNumberGeneratorTest, NextSampleSlowMax) {
RandomNumberGenerator rng(GetParam());
for (int k = 0; k < kMaxRuns; ++k) {
- TestNextSample(rng, m, m, true);
+ TestNextSample(&rng, m, m, true);
}
}
@@ -152,7 +151,7 @@ TEST_P(RandomNumberGeneratorTest, NextSampleHalf) {
RandomNumberGenerator rng(GetParam());
for (int k = 0; k < kMaxRuns; ++k) {
- TestNextSample(rng, m, n);
+ TestNextSample(&rng, m, n);
}
}
@@ -162,7 +161,7 @@ TEST_P(RandomNumberGeneratorTest, NextSampleSlowHalf) {
RandomNumberGenerator rng(GetParam());
for (int k = 0; k < kMaxRuns; ++k) {
- TestNextSample(rng, m, n, true);
+ TestNextSample(&rng, m, n, true);
}
}
@@ -172,7 +171,7 @@ TEST_P(RandomNumberGeneratorTest, NextSampleMoreThanHalf) {
RandomNumberGenerator rng(GetParam());
for (int k = 0; k < kMaxRuns; ++k) {
- TestNextSample(rng, m, n);
+ TestNextSample(&rng, m, n);
}
}
@@ -182,7 +181,7 @@ TEST_P(RandomNumberGeneratorTest, NextSampleSlowMoreThanHalf) {
RandomNumberGenerator rng(GetParam());
for (int k = 0; k < kMaxRuns; ++k) {
- TestNextSample(rng, m, n, true);
+ TestNextSample(&rng, m, n, true);
}
}
@@ -192,7 +191,7 @@ TEST_P(RandomNumberGeneratorTest, NextSampleLessThanHalf) {
RandomNumberGenerator rng(GetParam());
for (int k = 0; k < kMaxRuns; ++k) {
- TestNextSample(rng, m, n);
+ TestNextSample(&rng, m, n);
}
}
@@ -202,7 +201,7 @@ TEST_P(RandomNumberGeneratorTest, NextSampleSlowLessThanHalf) {
RandomNumberGenerator rng(GetParam());
for (int k = 0; k < kMaxRuns; ++k) {
- TestNextSample(rng, m, n, true);
+ TestNextSample(&rng, m, n, true);
}
}
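
The refactoring above replaces a non-const reference parameter with a
pointer, per the cpplint runtime/references rule the old code had to
suppress: a pointer makes mutation visible at the call site. A minimal
sketch of the shape of the change, with hypothetical names:

    #include <cstdint>

    struct Counter {
      uint64_t value = 0;
    };

    // Before: void Bump(Counter& c) { ++c.value; }  // mutation hidden
    // After: the pointer makes the mutation explicit where it is called.
    void Bump(Counter* c) { ++c->value; }

    int main() {
      Counter c;
      Bump(&c);  // the '&' signals that c may be modified
      return c.value == 1 ? 0 : 1;
    }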
diff --git a/deps/v8/test/unittests/codegen/code-stub-assembler-unittest.cc b/deps/v8/test/unittests/codegen/code-stub-assembler-unittest.cc
index df387d3d94..7be9f75868 100644
--- a/deps/v8/test/unittests/codegen/code-stub-assembler-unittest.cc
+++ b/deps/v8/test/unittests/codegen/code-stub-assembler-unittest.cc
@@ -14,7 +14,6 @@
using ::testing::_;
using v8::internal::compiler::Node;
-using v8::internal::compiler::TNode;
namespace c = v8::internal::compiler;
diff --git a/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-unittest.cc b/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-unittest.cc
index 8b15811d36..954bdd5065 100644
--- a/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-unittest.cc
+++ b/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-unittest.cc
@@ -12,7 +12,6 @@
#include "src/ast/ast.h"
#include "src/ast/scopes.h"
#include "src/base/platform/semaphore.h"
-#include "src/base/template-utils.h"
#include "src/codegen/compiler.h"
#include "src/flags/flags.h"
#include "src/handles/handles.h"
@@ -205,7 +204,7 @@ class MockPlatform : public v8::Platform {
tasks.swap(worker_tasks_);
}
platform->CallOnWorkerThread(
- base::make_unique<TaskWrapper>(this, std::move(tasks), true));
+ std::make_unique<TaskWrapper>(this, std::move(tasks), true));
sem_.Wait();
}
@@ -216,7 +215,7 @@ class MockPlatform : public v8::Platform {
tasks.swap(worker_tasks_);
}
platform->CallOnWorkerThread(
- base::make_unique<TaskWrapper>(this, std::move(tasks), false));
+ std::make_unique<TaskWrapper>(this, std::move(tasks), false));
}
void RunForegroundTasks() {
diff --git a/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc b/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
index b969d9a278..d7a3a92c96 100644
--- a/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
+++ b/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
@@ -41,16 +41,15 @@ std::ostream& operator<<(std::ostream& os, const Shift& shift) {
// Helper to build Int32Constant or Int64Constant depending on the given
// machine type.
-Node* BuildConstant(
- InstructionSelectorTest::StreamBuilder& m, // NOLINT(runtime/references)
- MachineType type, int64_t value) {
+Node* BuildConstant(InstructionSelectorTest::StreamBuilder* m, MachineType type,
+ int64_t value) {
switch (type.representation()) {
case MachineRepresentation::kWord32:
- return m.Int32Constant(static_cast<int32_t>(value));
+ return m->Int32Constant(static_cast<int32_t>(value));
break;
case MachineRepresentation::kWord64:
- return m.Int64Constant(value);
+ return m->Int64Constant(value);
break;
default:
@@ -373,8 +372,6 @@ const MachInst2 kCanElideChangeUint32ToUint64[] = {
MachineType::Uint32()},
};
-} // namespace
-
// -----------------------------------------------------------------------------
// Logical instructions.
@@ -464,9 +461,8 @@ TEST_P(InstructionSelectorLogicalTest, ShiftByImmediate) {
TRACED_FORRANGE(int, imm, 0, ((type == MachineType::Int32()) ? 31 : 63)) {
StreamBuilder m(this, type, type, type);
m.Return((m.*dpi.constructor)(
- m.Parameter(0),
- (m.*shift.mi.constructor)(m.Parameter(1),
- BuildConstant(m, type, imm))));
+ m.Parameter(0), (m.*shift.mi.constructor)(
+ m.Parameter(1), BuildConstant(&m, type, imm))));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
@@ -480,7 +476,7 @@ TEST_P(InstructionSelectorLogicalTest, ShiftByImmediate) {
StreamBuilder m(this, type, type, type);
m.Return((m.*dpi.constructor)(
(m.*shift.mi.constructor)(m.Parameter(1),
- BuildConstant(m, type, imm)),
+ BuildConstant(&m, type, imm)),
m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -521,7 +517,7 @@ TEST_P(InstructionSelectorAddSubTest, ImmediateOnRight) {
TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
StreamBuilder m(this, type, type);
m.Return(
- (m.*dpi.mi.constructor)(m.Parameter(0), BuildConstant(m, type, imm)));
+ (m.*dpi.mi.constructor)(m.Parameter(0), BuildConstant(&m, type, imm)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(dpi.mi.arch_opcode, s[0]->arch_opcode());
@@ -540,7 +536,7 @@ TEST_P(InstructionSelectorAddSubTest, NegImmediateOnRight) {
if (imm == 0) continue;
StreamBuilder m(this, type, type);
m.Return(
- (m.*dpi.mi.constructor)(m.Parameter(0), BuildConstant(m, type, -imm)));
+ (m.*dpi.mi.constructor)(m.Parameter(0), BuildConstant(&m, type, -imm)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(dpi.negate_arch_opcode, s[0]->arch_opcode());
@@ -568,9 +564,8 @@ TEST_P(InstructionSelectorAddSubTest, ShiftByImmediateOnRight) {
TRACED_FORRANGE(int, imm, 0, ((type == MachineType::Int32()) ? 31 : 63)) {
StreamBuilder m(this, type, type, type);
m.Return((m.*dpi.mi.constructor)(
- m.Parameter(0),
- (m.*shift.mi.constructor)(m.Parameter(1),
- BuildConstant(m, type, imm))));
+ m.Parameter(0), (m.*shift.mi.constructor)(
+ m.Parameter(1), BuildConstant(&m, type, imm))));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(dpi.mi.arch_opcode, s[0]->arch_opcode());
@@ -1156,7 +1151,7 @@ TEST_F(InstructionSelectorTest, AddBranchWithImmediateOnLeft) {
struct TestAndBranch {
MachInst<std::function<Node*(InstructionSelectorTest::StreamBuilder&, Node*,
- uint32_t mask)>>
+ uint64_t mask)>>
mi;
FlagsCondition cond;
};
@@ -1275,6 +1270,92 @@ INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
InstructionSelectorTestAndBranchTest,
::testing::ValuesIn(kTestAndBranchMatchers32));
+// TODO(arm64): Add the missing Word32BinaryNot test cases from the 32-bit
+// version.
+const TestAndBranch kTestAndBranchMatchers64[] = {
+ // Branch on the result of Word64And directly.
+ {{[](InstructionSelectorTest::StreamBuilder& m, Node* x, uint64_t mask)
+ -> Node* { return m.Word64And(x, m.Int64Constant(mask)); },
+ "if (x and mask)", kArm64TestAndBranch, MachineType::Int64()},
+ kNotEqual},
+ {{[](InstructionSelectorTest::StreamBuilder& m, Node* x,
+ uint64_t mask) -> Node* {
+ return m.Word64Equal(m.Word64And(x, m.Int64Constant(mask)),
+ m.Int64Constant(0));
+ },
+ "if not (x and mask)", kArm64TestAndBranch, MachineType::Int64()},
+ kEqual},
+ {{[](InstructionSelectorTest::StreamBuilder& m, Node* x, uint64_t mask)
+ -> Node* { return m.Word64And(m.Int64Constant(mask), x); },
+ "if (mask and x)", kArm64TestAndBranch, MachineType::Int64()},
+ kNotEqual},
+ {{[](InstructionSelectorTest::StreamBuilder& m, Node* x,
+ uint64_t mask) -> Node* {
+ return m.Word64Equal(m.Word64And(m.Int64Constant(mask), x),
+ m.Int64Constant(0));
+ },
+ "if not (mask and x)", kArm64TestAndBranch, MachineType::Int64()},
+ kEqual},
+    // Branch on the result of '(x and mask) == mask'. This tests that a bit
+    // is set rather than cleared, which is why the conditions are inverted.
+ {{[](InstructionSelectorTest::StreamBuilder& m, Node* x,
+ uint64_t mask) -> Node* {
+ return m.Word64Equal(m.Word64And(x, m.Int64Constant(mask)),
+ m.Int64Constant(mask));
+ },
+ "if ((x and mask) == mask)", kArm64TestAndBranch, MachineType::Int64()},
+ kNotEqual},
+ {{[](InstructionSelectorTest::StreamBuilder& m, Node* x,
+ uint64_t mask) -> Node* {
+ return m.Word64Equal(m.Int64Constant(mask),
+ m.Word64And(x, m.Int64Constant(mask)));
+ },
+ "if (mask == (x and mask))", kArm64TestAndBranch, MachineType::Int64()},
+ kNotEqual},
+ // Same as above but swap 'mask' and 'x'.
+ {{[](InstructionSelectorTest::StreamBuilder& m, Node* x,
+ uint64_t mask) -> Node* {
+ return m.Word64Equal(m.Word64And(m.Int64Constant(mask), x),
+ m.Int64Constant(mask));
+ },
+ "if ((mask and x) == mask)", kArm64TestAndBranch, MachineType::Int64()},
+ kNotEqual},
+ {{[](InstructionSelectorTest::StreamBuilder& m, Node* x,
+ uint64_t mask) -> Node* {
+ return m.Word64Equal(m.Int64Constant(mask),
+ m.Word64And(m.Int64Constant(mask), x));
+ },
+ "if (mask == (mask and x))", kArm64TestAndBranch, MachineType::Int64()},
+ kNotEqual}};
+
+using InstructionSelectorTestAndBranchTest64 =
+ InstructionSelectorTestWithParam<TestAndBranch>;
+
+TEST_P(InstructionSelectorTestAndBranchTest64, TestAndBranch64) {
+ const TestAndBranch inst = GetParam();
+ TRACED_FORRANGE(int, bit, 0, 63) {
+ uint64_t mask = uint64_t{1} << bit;
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
+ RawMachineLabel a, b;
+ m.Branch(inst.mi.constructor(m, m.Parameter(0), mask), &a, &b);
+ m.Bind(&a);
+ m.Return(m.Int64Constant(1));
+ m.Bind(&b);
+ m.Return(m.Int64Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(inst.mi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(inst.cond, s[0]->flags_condition());
+ EXPECT_EQ(4U, s[0]->InputCount());
+ EXPECT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+ EXPECT_EQ(bit, s.ToInt64(s[0]->InputAt(1)));
+ }
+}
+
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
+ InstructionSelectorTestAndBranchTest64,
+ ::testing::ValuesIn(kTestAndBranchMatchers64));
+
TEST_F(InstructionSelectorTest, Word64AndBranchWithOneBitMaskOnRight) {
TRACED_FORRANGE(int, bit, 0, 63) {
uint64_t mask = uint64_t{1} << bit;
@@ -2035,7 +2116,7 @@ TEST_P(InstructionSelectorIntDPWithIntMulTest, NegativeMul) {
{
StreamBuilder m(this, type, type, type);
Node* n =
- (m.*mdpi.sub_constructor)(BuildConstant(m, type, 0), m.Parameter(0));
+ (m.*mdpi.sub_constructor)(BuildConstant(&m, type, 0), m.Parameter(0));
m.Return((m.*mdpi.mul_constructor)(n, m.Parameter(1)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -2046,7 +2127,7 @@ TEST_P(InstructionSelectorIntDPWithIntMulTest, NegativeMul) {
{
StreamBuilder m(this, type, type, type);
Node* n =
- (m.*mdpi.sub_constructor)(BuildConstant(m, type, 0), m.Parameter(1));
+ (m.*mdpi.sub_constructor)(BuildConstant(&m, type, 0), m.Parameter(1));
m.Return((m.*mdpi.mul_constructor)(m.Parameter(0), n));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -2578,6 +2659,22 @@ TEST_F(InstructionSelectorTest, ChangeInt32ToInt64AfterLoad) {
}
}
+TEST_F(InstructionSelectorTest, ChangeInt32ToInt64WithWord32Sar) {
+ // Test the mod 32 behaviour of Word32Sar by iterating up to 33.
+ TRACED_FORRANGE(int32_t, imm, 0, 33) {
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int32());
+ m.Return(m.ChangeInt32ToInt64(
+ m.Word32Sar(m.Parameter(0), m.Int32Constant(imm))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Sbfx, s[0]->arch_opcode());
+ EXPECT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(imm & 0x1f, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_EQ(32 - (imm & 0x1f), s.ToInt32(s[0]->InputAt(2)));
+ }
+}
+
// -----------------------------------------------------------------------------
// Memory access instructions.
@@ -2938,7 +3035,8 @@ TEST_P(InstructionSelectorComparisonTest, WithImmediate) {
    // Comparisons with 0 are turned into a tst instruction.
if (imm == 0) continue;
StreamBuilder m(this, type, type);
- m.Return((m.*cmp.constructor)(m.Parameter(0), BuildConstant(m, type, imm)));
+ m.Return(
+ (m.*cmp.constructor)(m.Parameter(0), BuildConstant(&m, type, imm)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(cmp.arch_opcode, s[0]->arch_opcode());
@@ -2953,7 +3051,8 @@ TEST_P(InstructionSelectorComparisonTest, WithImmediate) {
    // Comparisons with 0 are turned into a tst instruction.
if (imm == 0) continue;
StreamBuilder m(this, type, type);
- m.Return((m.*cmp.constructor)(BuildConstant(m, type, imm), m.Parameter(0)));
+ m.Return(
+ (m.*cmp.constructor)(BuildConstant(&m, type, imm), m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(cmp.arch_opcode, s[0]->arch_opcode());
@@ -3507,6 +3606,33 @@ const IntegerCmp kBinopCmpZeroRightInstructions[] = {
kNotEqual,
kNotEqual}};
+const IntegerCmp kBinop64CmpZeroRightInstructions[] = {
+ {{&RawMachineAssembler::Word64Equal, "Word64Equal", kArm64Cmp,
+ MachineType::Int64()},
+ kEqual,
+ kEqual},
+ {{&RawMachineAssembler::Word64NotEqual, "Word64NotEqual", kArm64Cmp,
+ MachineType::Int64()},
+ kNotEqual,
+ kNotEqual},
+ {{&RawMachineAssembler::Int64LessThan, "Int64LessThan", kArm64Cmp,
+ MachineType::Int64()},
+ kNegative,
+ kNegative},
+ {{&RawMachineAssembler::Int64GreaterThanOrEqual, "Int64GreaterThanOrEqual",
+ kArm64Cmp, MachineType::Int64()},
+ kPositiveOrZero,
+ kPositiveOrZero},
+ {{&RawMachineAssembler::Uint64LessThanOrEqual, "Uint64LessThanOrEqual",
+ kArm64Cmp, MachineType::Int64()},
+ kEqual,
+ kEqual},
+ {{&RawMachineAssembler::Uint64GreaterThan, "Uint64GreaterThan", kArm64Cmp,
+ MachineType::Int64()},
+ kNotEqual,
+ kNotEqual},
+};
+
const IntegerCmp kBinopCmpZeroLeftInstructions[] = {
{{&RawMachineAssembler::Word32Equal, "Word32Equal", kArm64Cmp32,
MachineType::Int32()},
@@ -4019,7 +4145,7 @@ TEST_F(InstructionSelectorTest, Word32AndWithImmediateWithWord32Shr) {
TRACED_FORRANGE(int32_t, shift, -32, 63) {
int32_t lsb = shift & 0x1F;
TRACED_FORRANGE(int32_t, width, 1, 31) {
- uint32_t msk = (1 << width) - 1;
+ uint32_t msk = (1u << width) - 1;
StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.Word32And(m.Word32Shr(m.Parameter(0), m.Int32Constant(shift)),
m.Int32Constant(msk)));
@@ -4035,7 +4161,7 @@ TEST_F(InstructionSelectorTest, Word32AndWithImmediateWithWord32Shr) {
TRACED_FORRANGE(int32_t, shift, -32, 63) {
int32_t lsb = shift & 0x1F;
TRACED_FORRANGE(int32_t, width, 1, 31) {
- uint32_t msk = (1 << width) - 1;
+ uint32_t msk = (1u << width) - 1;
StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(
m.Word32And(m.Int32Constant(msk),
@@ -4282,7 +4408,7 @@ TEST_F(InstructionSelectorTest, Word32ShlWithWord32And) {
StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const r =
- m.Word32Shl(m.Word32And(p0, m.Int32Constant((1 << (31 - shift)) - 1)),
+ m.Word32Shl(m.Word32And(p0, m.Int32Constant((1u << (31 - shift)) - 1)),
m.Int32Constant(shift + 1));
m.Return(r);
Stream s = m.Build();
@@ -4531,6 +4657,34 @@ TEST_F(InstructionSelectorTest, CompareAgainstZero32) {
}
}
+TEST_F(InstructionSelectorTest, CompareAgainstZero64) {
+ TRACED_FOREACH(IntegerCmp, cmp, kBinop64CmpZeroRightInstructions) {
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
+ Node* const param = m.Parameter(0);
+ RawMachineLabel a, b;
+ m.Branch((m.*cmp.mi.constructor)(param, m.Int64Constant(0)), &a, &b);
+ m.Bind(&a);
+ m.Return(m.Int64Constant(1));
+ m.Bind(&b);
+ m.Return(m.Int64Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(s.ToVreg(param), s.ToVreg(s[0]->InputAt(0)));
+ if (cmp.cond == kNegative || cmp.cond == kPositiveOrZero) {
+ EXPECT_EQ(kArm64TestAndBranch, s[0]->arch_opcode());
+ EXPECT_EQ(4U, s[0]->InputCount()); // The labels are also inputs.
+ EXPECT_EQ((cmp.cond == kNegative) ? kNotEqual : kEqual,
+ s[0]->flags_condition());
+ EXPECT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+ EXPECT_EQ(63, s.ToInt32(s[0]->InputAt(1)));
+ } else {
+ EXPECT_EQ(kArm64CompareAndBranch, s[0]->arch_opcode());
+ EXPECT_EQ(3U, s[0]->InputCount()); // The labels are also inputs.
+ EXPECT_EQ(cmp.cond, s[0]->flags_condition());
+ }
+ }
+}
+
TEST_F(InstructionSelectorTest, CompareFloat64HighLessThanZero64) {
StreamBuilder m(this, MachineType::Int32(), MachineType::Float64());
Node* const param = m.Parameter(0);
@@ -4615,18 +4769,18 @@ namespace {
// Then checks that the correct number of kArm64Poke and kArm64PokePair were
// generated.
void TestPokePair(
- InstructionSelectorTest::StreamBuilder& m, // NOLINT(runtime/references)
+    InstructionSelectorTest::StreamBuilder* m,
Zone* zone,
- MachineSignature::Builder& builder, // NOLINT(runtime/references)
+    MachineSignature::Builder* builder,
Node* nodes[], int num_nodes, int expected_poke_pair, int expected_poke) {
auto call_descriptor =
InstructionSelectorTest::StreamBuilder::MakeSimpleCallDescriptor(
- zone, builder.Build());
+ zone, builder->Build());
- m.CallN(call_descriptor, num_nodes, nodes);
- m.Return(m.UndefinedConstant());
+ m->CallN(call_descriptor, num_nodes, nodes);
+ m->Return(m->UndefinedConstant());
- auto s = m.Build();
+ auto s = m->Build();
int num_poke_pair = 0;
int num_poke = 0;
for (size_t i = 0; i < s.size(); ++i) {
@@ -4664,7 +4818,7 @@ TEST_F(InstructionSelectorTest, PokePairPrepareArgumentsInt32) {
// EmitPrepareArguments.
const int expected_poke = 1 + 1;
- TestPokePair(m, zone(), builder, nodes, arraysize(nodes),
+ TestPokePair(&m, zone(), &builder, nodes, arraysize(nodes),
expected_poke_pair, expected_poke);
}
@@ -4684,7 +4838,7 @@ TEST_F(InstructionSelectorTest, PokePairPrepareArgumentsInt32) {
const int expected_poke_pair = 2;
const int expected_poke = 0;
- TestPokePair(m, zone(), builder, nodes, arraysize(nodes),
+ TestPokePair(&m, zone(), &builder, nodes, arraysize(nodes),
expected_poke_pair, expected_poke);
}
}
@@ -4705,8 +4859,8 @@ TEST_F(InstructionSelectorTest, PokePairPrepareArgumentsInt64) {
const int expected_poke_pair = 2;
const int expected_poke = 0;
- TestPokePair(m, zone(), builder, nodes, arraysize(nodes), expected_poke_pair,
- expected_poke);
+ TestPokePair(&m, zone(), &builder, nodes, arraysize(nodes),
+ expected_poke_pair, expected_poke);
}
TEST_F(InstructionSelectorTest, PokePairPrepareArgumentsFloat32) {
@@ -4725,8 +4879,8 @@ TEST_F(InstructionSelectorTest, PokePairPrepareArgumentsFloat32) {
const int expected_poke_pair = 2;
const int expected_poke = 0;
- TestPokePair(m, zone(), builder, nodes, arraysize(nodes), expected_poke_pair,
- expected_poke);
+ TestPokePair(&m, zone(), &builder, nodes, arraysize(nodes),
+ expected_poke_pair, expected_poke);
}
TEST_F(InstructionSelectorTest, PokePairPrepareArgumentsFloat64) {
@@ -4745,8 +4899,8 @@ TEST_F(InstructionSelectorTest, PokePairPrepareArgumentsFloat64) {
const int expected_poke_pair = 2;
const int expected_poke = 0;
- TestPokePair(m, zone(), builder, nodes, arraysize(nodes), expected_poke_pair,
- expected_poke);
+ TestPokePair(&m, zone(), &builder, nodes, arraysize(nodes),
+ expected_poke_pair, expected_poke);
}
TEST_F(InstructionSelectorTest, PokePairPrepareArgumentsIntFloatMixed) {
@@ -4766,7 +4920,7 @@ TEST_F(InstructionSelectorTest, PokePairPrepareArgumentsIntFloatMixed) {
const int expected_poke_pair = 0;
const int expected_poke = 4;
- TestPokePair(m, zone(), builder, nodes, arraysize(nodes),
+ TestPokePair(&m, zone(), &builder, nodes, arraysize(nodes),
expected_poke_pair, expected_poke);
}
@@ -4792,7 +4946,7 @@ TEST_F(InstructionSelectorTest, PokePairPrepareArgumentsIntFloatMixed) {
// EmitPrepareArguments.
const int expected_poke = 3 + 1;
- TestPokePair(m, zone(), builder, nodes, arraysize(nodes),
+ TestPokePair(&m, zone(), &builder, nodes, arraysize(nodes),
expected_poke_pair, expected_poke);
}
}
@@ -4811,10 +4965,11 @@ TEST_F(InstructionSelectorTest, PokePairPrepareArgumentsSimd128) {
const int expected_poke = 2;
// Using kArm64PokePair is not currently supported for Simd128.
- TestPokePair(m, zone(), builder, nodes, arraysize(nodes), expected_poke_pair,
- expected_poke);
+ TestPokePair(&m, zone(), &builder, nodes, arraysize(nodes),
+ expected_poke_pair, expected_poke);
}
+} // namespace
} // namespace compiler
} // namespace internal
} // namespace v8
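
Several hunks in this file change (1 << width) to (1u << width). A
minimal sketch of why the unsigned literal matters, assuming a 32-bit
int:

    #include <cstdint>
    #include <cstdio>

    int main() {
      int width = 31;
      // uint32_t mask = (1 << width) - 1;  // undefined before C++20:
      //                                    // 1 << 31 overflows int
      uint32_t mask = (1u << width) - 1;    // well-defined unsigned shift
      std::printf("%08x\n", mask);          // prints 7fffffff
      return 0;
    }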
diff --git a/deps/v8/test/unittests/compiler/backend/instruction-sequence-unittest.h b/deps/v8/test/unittests/compiler/backend/instruction-sequence-unittest.h
index 82a8b3019d..b75da308f9 100644
--- a/deps/v8/test/unittests/compiler/backend/instruction-sequence-unittest.h
+++ b/deps/v8/test/unittests/compiler/backend/instruction-sequence-unittest.h
@@ -47,7 +47,6 @@ class InstructionSequenceTest : public TestWithIsolateAndZone {
kFixedRegister,
kSlot,
kFixedSlot,
- kExplicit,
kImmediate,
kNone,
kConstant,
@@ -75,17 +74,6 @@ class InstructionSequenceTest : public TestWithIsolateAndZone {
static TestOperand Same() { return TestOperand(kSameAsFirst); }
- static TestOperand ExplicitReg(int index) {
- TestOperandType type = kExplicit;
- return TestOperand(type, index);
- }
-
- static TestOperand ExplicitFPReg(int index,
- MachineRepresentation rep = kFloat64) {
- TestOperandType type = kExplicit;
- return TestOperand(type, index, rep);
- }
-
static TestOperand Reg(VReg vreg, int index = kNoValue) {
TestOperandType type = (index == kNoValue) ? kRegister : kFixedRegister;
return TestOperand(type, vreg, index);
diff --git a/deps/v8/test/unittests/compiler/common-operator-unittest.cc b/deps/v8/test/unittests/compiler/common-operator-unittest.cc
index 19e7c6c55f..365b3ea05b 100644
--- a/deps/v8/test/unittests/compiler/common-operator-unittest.cc
+++ b/deps/v8/test/unittests/compiler/common-operator-unittest.cc
@@ -62,8 +62,6 @@ class CommonSharedOperatorTest
: public TestWithZone,
public ::testing::WithParamInterface<SharedOperator> {};
-} // namespace
-
TEST_P(CommonSharedOperatorTest, InstancesAreGloballyShared) {
const SharedOperator& sop = GetParam();
@@ -387,6 +385,7 @@ TEST_F(CommonOperatorTest, Projection) {
}
}
+} // namespace
} // namespace common_operator_unittest
} // namespace compiler
} // namespace internal
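
As in the instruction-selector and register-allocator tests, the closing
brace of the unnamed namespace moves past the test bodies, giving the
fixtures and INSTANTIATE_TEST_SUITE_P boilerplate internal linkage. A
minimal sketch of the effect, with hypothetical names:

    namespace {

    // Internal linkage: another test file may define its own Twice()
    // without violating the one-definition rule.
    constexpr int kLocalConstant = 21;
    int Twice(int x) { return 2 * x; }

    }  // namespace

    int main() { return Twice(kLocalConstant) == 42 ? 0 : 1; }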
diff --git a/deps/v8/test/unittests/compiler/int64-lowering-unittest.cc b/deps/v8/test/unittests/compiler/int64-lowering-unittest.cc
index 52769b09de..7e927ea078 100644
--- a/deps/v8/test/unittests/compiler/int64-lowering-unittest.cc
+++ b/deps/v8/test/unittests/compiler/int64-lowering-unittest.cc
@@ -1015,7 +1015,7 @@ TEST_F(Int64LoweringTest, WasmBigIntSpecialCaseBigIntToI64) {
Operator::kNoProperties, // properties
StubCallMode::kCallCodeObject); // stub call mode
- auto lowering_special_case = base::make_unique<Int64LoweringSpecialCase>();
+ auto lowering_special_case = std::make_unique<Int64LoweringSpecialCase>();
lowering_special_case->bigint_to_i64_call_descriptor =
bigint_to_i64_call_descriptor;
lowering_special_case->bigint_to_i32_pair_call_descriptor =
@@ -1063,7 +1063,7 @@ TEST_F(Int64LoweringTest, WasmBigIntSpecialCaseI64ToBigInt) {
Operator::kNoProperties, // properties
StubCallMode::kCallCodeObject); // stub call mode
- auto lowering_special_case = base::make_unique<Int64LoweringSpecialCase>();
+ auto lowering_special_case = std::make_unique<Int64LoweringSpecialCase>();
lowering_special_case->i64_to_bigint_call_descriptor =
i64_to_bigint_call_descriptor;
lowering_special_case->i32_pair_to_bigint_call_descriptor =
diff --git a/deps/v8/test/unittests/compiler/js-call-reducer-unittest.cc b/deps/v8/test/unittests/compiler/js-call-reducer-unittest.cc
index 7c062698c4..10643ddc8b 100644
--- a/deps/v8/test/unittests/compiler/js-call-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-call-reducer-unittest.cc
@@ -11,6 +11,7 @@
#include "src/compiler/js-graph.h"
#include "src/compiler/simplified-operator.h"
#include "src/execution/isolate.h"
+#include "src/execution/protectors.h"
#include "src/heap/factory.h"
#include "src/objects/feedback-vector.h"
#include "test/unittests/compiler/graph-unittest.h"
@@ -175,12 +176,7 @@ TEST_F(JSCallReducerTest, PromiseConstructorBasic) {
context, frame_state, effect, control);
Reduction r = Reduce(construct);
-
- if (FLAG_experimental_inline_promise_constructor) {
- ASSERT_TRUE(r.Changed());
- } else {
- ASSERT_FALSE(r.Changed());
- }
+ ASSERT_TRUE(r.Changed());
}
// Exactly the same as PromiseConstructorBasic which expects a reduction,
@@ -198,7 +194,7 @@ TEST_F(JSCallReducerTest, PromiseConstructorWithHook) {
graph()->NewNode(javascript()->Construct(3), promise, executor, promise,
context, frame_state, effect, control);
- isolate()->InvalidatePromiseHookProtector();
+ Protectors::InvalidatePromiseHook(isolate());
Reduction r = Reduce(construct);
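
The hunk above replaces an ad-hoc isolate method with the centralized
Protectors interface. As a conceptual sketch only (not V8's
implementation), a protector is a one-way validity flag: optimized code
compiled while it is intact must be discarded once anything invalidates
it:

    #include <atomic>

    // Conceptual sketch, not V8's implementation: speculative fast
    // paths are only valid while the protector remains intact.
    class PromiseHookProtectorSketch {
     public:
      bool IsIntact() const { return intact_.load(std::memory_order_acquire); }
      void Invalidate() { intact_.store(false, std::memory_order_release); }

     private:
      std::atomic<bool> intact_{true};
    };

    int main() {
      PromiseHookProtectorSketch p;
      bool fast_path_ok = p.IsIntact();  // true: may compile the fast path
      p.Invalidate();                    // e.g. a promise hook was installed
      return (fast_path_ok && !p.IsIntact()) ? 0 : 1;
    }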
diff --git a/deps/v8/test/unittests/compiler/js-operator-unittest.cc b/deps/v8/test/unittests/compiler/js-operator-unittest.cc
index 082e81f27c..5a951b35ae 100644
--- a/deps/v8/test/unittests/compiler/js-operator-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-operator-unittest.cc
@@ -54,9 +54,6 @@ std::ostream& operator<<(std::ostream& os, const SharedOperator& sop) {
return os << IrOpcode::Mnemonic(sop.opcode);
}
-} // namespace
-
-
class JSSharedOperatorTest
: public TestWithZone,
public ::testing::WithParamInterface<SharedOperator> {};
@@ -111,6 +108,7 @@ TEST_P(JSSharedOperatorTest, Properties) {
INSTANTIATE_TEST_SUITE_P(JSOperatorTest, JSSharedOperatorTest,
::testing::ValuesIn(kSharedOperators));
+} // namespace
} // namespace js_operator_unittest
} // namespace compiler
} // namespace internal
diff --git a/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc b/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc
index 0d85253847..eed74f6181 100644
--- a/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc
@@ -52,13 +52,6 @@ class JSTypedLoweringTest : public TypedGraphTest {
return reducer.Reduce(node);
}
- Handle<JSArrayBuffer> NewArrayBuffer(void* bytes, size_t byte_length) {
- Handle<JSArrayBuffer> buffer =
- factory()->NewJSArrayBuffer(SharedFlag::kNotShared);
- JSArrayBuffer::Setup(buffer, isolate(), true, bytes, byte_length);
- return buffer;
- }
-
JSOperatorBuilder* javascript() { return &javascript_; }
private:
diff --git a/deps/v8/test/unittests/compiler/regalloc/OWNERS b/deps/v8/test/unittests/compiler/regalloc/OWNERS
deleted file mode 100644
index bfde831c20..0000000000
--- a/deps/v8/test/unittests/compiler/regalloc/OWNERS
+++ /dev/null
@@ -1,4 +0,0 @@
-set noparent
-
-bmeurer@chromium.org
-jarin@chromium.org
diff --git a/deps/v8/test/unittests/compiler/regalloc/move-optimizer-unittest.cc b/deps/v8/test/unittests/compiler/regalloc/move-optimizer-unittest.cc
index e72afd5601..344ea3dfad 100644
--- a/deps/v8/test/unittests/compiler/regalloc/move-optimizer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/regalloc/move-optimizer-unittest.cc
@@ -83,11 +83,6 @@ class MoveOptimizerTest : public InstructionSequenceTest {
CHECK(0 <= op.value_ && op.value_ < GetNumRegs(rep));
return AllocatedOperand(LocationOperand::REGISTER, rep, op.value_);
}
- case kExplicit: {
- MachineRepresentation rep = GetCanonicalRep(op);
- CHECK(0 <= op.value_ && op.value_ < GetNumRegs(rep));
- return ExplicitOperand(LocationOperand::REGISTER, rep, op.value_);
- }
default:
break;
}
@@ -123,45 +118,6 @@ TEST_F(MoveOptimizerTest, RemovesRedundant) {
CHECK(Contains(move, FPReg(kF32_1, kFloat32), FPReg(kF32_2, kFloat32)));
}
-TEST_F(MoveOptimizerTest, RemovesRedundantExplicit) {
- int index1 = GetAllocatableCode(0);
- int index2 = GetAllocatableCode(1);
- int s128_1 = GetAllocatableCode(kS128_1, kSimd128);
- int s128_2 = GetAllocatableCode(kS128_2, kSimd128);
- int f64_1 = GetAllocatableCode(kF64_1, kFloat64);
- int f64_2 = GetAllocatableCode(kF64_2, kFloat64);
- int f32_1 = GetAllocatableCode(kF32_1, kFloat32);
- int f32_2 = GetAllocatableCode(kF32_2, kFloat32);
-
- StartBlock();
- auto first_instr = EmitNop();
- auto last_instr = EmitNop();
-
- AddMove(first_instr, Reg(index1), ExplicitReg(index2));
- AddMove(last_instr, Reg(index2), Reg(index1));
-
- AddMove(first_instr, FPReg(s128_1, kSimd128),
- ExplicitFPReg(s128_2, kSimd128));
- AddMove(last_instr, FPReg(s128_2, kSimd128), FPReg(s128_1, kSimd128));
- AddMove(first_instr, FPReg(f64_1, kFloat64), ExplicitFPReg(f64_2, kFloat64));
- AddMove(last_instr, FPReg(f64_2, kFloat64), FPReg(f64_1, kFloat64));
- AddMove(first_instr, FPReg(f32_1, kFloat32), ExplicitFPReg(f32_2, kFloat32));
- AddMove(last_instr, FPReg(f32_2, kFloat32), FPReg(f32_1, kFloat32));
-
- EndBlock(Last());
-
- Optimize();
-
- CHECK_EQ(0, NonRedundantSize(first_instr->parallel_moves()[0]));
- auto move = last_instr->parallel_moves()[0];
- CHECK_EQ(4, NonRedundantSize(move));
- CHECK(Contains(move, Reg(index1), ExplicitReg(index2)));
- CHECK(
- Contains(move, FPReg(s128_1, kSimd128), ExplicitFPReg(s128_2, kSimd128)));
- CHECK(Contains(move, FPReg(f64_1, kFloat64), ExplicitFPReg(f64_2, kFloat64)));
- CHECK(Contains(move, FPReg(f32_1, kFloat32), ExplicitFPReg(f32_2, kFloat32)));
-}
-
TEST_F(MoveOptimizerTest, SplitsConstants) {
StartBlock();
EndBlock(Last());
diff --git a/deps/v8/test/unittests/compiler/regalloc/register-allocator-unittest.cc b/deps/v8/test/unittests/compiler/regalloc/register-allocator-unittest.cc
index 262c51d31e..f06b004d49 100644
--- a/deps/v8/test/unittests/compiler/regalloc/register-allocator-unittest.cc
+++ b/deps/v8/test/unittests/compiler/regalloc/register-allocator-unittest.cc
@@ -73,7 +73,6 @@ bool IsParallelMovePresent(int instr_index, Instruction::GapPosition gap_pos,
return found_match;
}
-} // namespace
class RegisterAllocatorTest : public InstructionSequenceTest {
public:
@@ -824,6 +823,7 @@ INSTANTIATE_TEST_SUITE_P(
::testing::Combine(::testing::ValuesIn(kParameterTypes),
::testing::Range(0, SlotConstraintTest::kMaxVariant)));
+} // namespace
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/heap/heap-unittest.cc b/deps/v8/test/unittests/heap/heap-unittest.cc
index 048ff5d0a6..76cab01a41 100644
--- a/deps/v8/test/unittests/heap/heap-unittest.cc
+++ b/deps/v8/test/unittests/heap/heap-unittest.cc
@@ -145,8 +145,7 @@ TEST_F(HeapWithPointerCompressionTest, HeapLayout) {
EXPECT_TRUE(IsAligned(isolate_root, size_t{4} * GB));
  // Check that all memory chunks belong to this region.
- base::AddressRegion heap_reservation(isolate_root - size_t{2} * GB,
- size_t{4} * GB);
+ base::AddressRegion heap_reservation(isolate_root, size_t{4} * GB);
OldGenerationMemoryChunkIterator iter(i_isolate()->heap());
for (;;) {
diff --git a/deps/v8/test/unittests/heap/slot-set-unittest.cc b/deps/v8/test/unittests/heap/slot-set-unittest.cc
index 54b60f55e8..fa635705b3 100644
--- a/deps/v8/test/unittests/heap/slot-set-unittest.cc
+++ b/deps/v8/test/unittests/heap/slot-set-unittest.cc
@@ -16,12 +16,11 @@ namespace internal {
TEST(SlotSet, InsertAndLookup1) {
SlotSet set;
- set.SetPageStart(0);
for (int i = 0; i < Page::kPageSize; i += kTaggedSize) {
EXPECT_FALSE(set.Lookup(i));
}
for (int i = 0; i < Page::kPageSize; i += kTaggedSize) {
- set.Insert(i);
+ set.Insert<AccessMode::ATOMIC>(i);
}
for (int i = 0; i < Page::kPageSize; i += kTaggedSize) {
EXPECT_TRUE(set.Lookup(i));
@@ -30,10 +29,9 @@ TEST(SlotSet, InsertAndLookup1) {
TEST(SlotSet, InsertAndLookup2) {
SlotSet set;
- set.SetPageStart(0);
for (int i = 0; i < Page::kPageSize; i += kTaggedSize) {
if (i % 7 == 0) {
- set.Insert(i);
+ set.Insert<AccessMode::ATOMIC>(i);
}
}
for (int i = 0; i < Page::kPageSize; i += kTaggedSize) {
@@ -47,14 +45,14 @@ TEST(SlotSet, InsertAndLookup2) {
TEST(SlotSet, Iterate) {
SlotSet set;
- set.SetPageStart(0);
for (int i = 0; i < Page::kPageSize; i += kTaggedSize) {
if (i % 7 == 0) {
- set.Insert(i);
+ set.Insert<AccessMode::ATOMIC>(i);
}
}
set.Iterate(
+ kNullAddress,
[](MaybeObjectSlot slot) {
if (slot.address() % 3 == 0) {
return KEEP_SLOT;
@@ -75,10 +73,9 @@ TEST(SlotSet, Iterate) {
TEST(SlotSet, Remove) {
SlotSet set;
- set.SetPageStart(0);
for (int i = 0; i < Page::kPageSize; i += kTaggedSize) {
if (i % 7 == 0) {
- set.Insert(i);
+ set.Insert<AccessMode::ATOMIC>(i);
}
}
@@ -99,13 +96,12 @@ TEST(SlotSet, Remove) {
void CheckRemoveRangeOn(uint32_t start, uint32_t end) {
SlotSet set;
- set.SetPageStart(0);
uint32_t first = start == 0 ? 0 : start - kTaggedSize;
uint32_t last = end == Page::kPageSize ? end - kTaggedSize : end;
for (const auto mode :
{SlotSet::FREE_EMPTY_BUCKETS, SlotSet::KEEP_EMPTY_BUCKETS}) {
for (uint32_t i = first; i <= last; i += kTaggedSize) {
- set.Insert(i);
+ set.Insert<AccessMode::ATOMIC>(i);
}
set.RemoveRange(start, end, mode);
if (first != start) {
@@ -137,10 +133,9 @@ TEST(SlotSet, RemoveRange) {
}
}
SlotSet set;
- set.SetPageStart(0);
for (const auto mode :
{SlotSet::FREE_EMPTY_BUCKETS, SlotSet::KEEP_EMPTY_BUCKETS}) {
- set.Insert(Page::kPageSize / 2);
+ set.Insert<AccessMode::ATOMIC>(Page::kPageSize / 2);
set.RemoveRange(0, Page::kPageSize, mode);
for (uint32_t i = 0; i < Page::kPageSize; i += kTaggedSize) {
EXPECT_FALSE(set.Lookup(i));
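
The tests now call Insert<AccessMode::ATOMIC>, selecting between an
atomic read-modify-write and a plain store at compile time via a
template parameter. A minimal sketch of that pattern (BitCell and its
members are hypothetical; this is not V8's SlotSet):

    #include <atomic>
    #include <cstdint>

    enum class AccessMode { NON_ATOMIC, ATOMIC };

    class BitCell {
     public:
      // Compile-time selection between an atomic RMW and a plain store.
      template <AccessMode mode>
      void Set(uint32_t bit) {
        const uint32_t mask = uint32_t{1} << bit;
        if (mode == AccessMode::ATOMIC) {
          cell_.fetch_or(mask, std::memory_order_relaxed);
        } else {
          cell_.store(cell_.load(std::memory_order_relaxed) | mask,
                      std::memory_order_relaxed);
        }
      }
      bool Lookup(uint32_t bit) const {
        return (cell_.load(std::memory_order_relaxed) >> bit) & 1u;
      }

     private:
      std::atomic<uint32_t> cell_{0};
    };

    int main() {
      BitCell c;
      c.Set<AccessMode::ATOMIC>(3);      // safe under concurrent access
      c.Set<AccessMode::NON_ATOMIC>(5);  // cheaper when single-threaded
      return c.Lookup(3) && c.Lookup(5) ? 0 : 1;
    }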
diff --git a/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
index a9c631f8d2..667bfee64b 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
@@ -96,6 +96,7 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
FeedbackSlot sloppy_store_global_slot =
feedback_spec.AddStoreGlobalICSlot(LanguageMode::kSloppy);
FeedbackSlot load_slot = feedback_spec.AddLoadICSlot();
+ FeedbackSlot call_slot = feedback_spec.AddCallICSlot();
FeedbackSlot keyed_load_slot = feedback_spec.AddKeyedLoadICSlot();
FeedbackSlot sloppy_store_slot =
feedback_spec.AddStoreICSlot(LanguageMode::kSloppy);
@@ -152,7 +153,7 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
.StoreInArrayLiteral(reg, reg, store_array_element_slot.ToInt());
// Emit Iterator-protocol operations
- builder.GetIterator(reg, load_slot.ToInt());
+ builder.GetIterator(reg, load_slot.ToInt(), call_slot.ToInt());
// Emit load / store lookup slots.
builder.LoadLookupSlot(name, TypeofMode::NOT_INSIDE_TYPEOF)
diff --git a/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc b/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc
index a8ff998107..09d21e3095 100644
--- a/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc
@@ -16,7 +16,6 @@
using ::testing::_;
using ::testing::Eq;
using v8::internal::compiler::Node;
-using v8::internal::compiler::TNode;
namespace c = v8::internal::compiler;
@@ -310,44 +309,6 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::IsLoadRegisterOperand(
LoadSensitivity::kCritical));
}
-TARGET_TEST_F(InterpreterAssemblerTest, Jump) {
- // If debug code is enabled we emit extra code in Jump.
- if (FLAG_debug_code) return;
-
- int jump_offsets[] = {-9710, -77, 0, +3, +97109};
- TRACED_FOREACH(int, jump_offset, jump_offsets) {
- TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
- if (!interpreter::Bytecodes::IsJump(bytecode)) return;
-
- InterpreterAssemblerTestState state(this, bytecode);
- InterpreterAssemblerForTest m(&state, bytecode);
- Node* tail_call_node = m.Jump(m.IntPtrConstant(jump_offset));
-
- Matcher<Node*> next_bytecode_offset_matcher = c::IsIntPtrAdd(
- c::IsParameter(InterpreterDispatchDescriptor::kBytecodeOffset),
- c::IsIntPtrConstant(jump_offset));
- Matcher<Node*> target_bytecode_matcher =
- m.IsLoad(MachineType::Uint8(), _, next_bytecode_offset_matcher);
- target_bytecode_matcher =
- c::IsChangeUint32ToWord(target_bytecode_matcher);
- Matcher<Node*> code_target_matcher = m.IsLoad(
- MachineType::Pointer(),
- c::IsParameter(InterpreterDispatchDescriptor::kDispatchTable),
- c::IsWordShl(target_bytecode_matcher,
- c::IsIntPtrConstant(kSystemPointerSizeLog2)));
-
- EXPECT_THAT(
- tail_call_node,
- c::IsTailCall(
- _, code_target_matcher,
- c::IsParameter(InterpreterDispatchDescriptor::kAccumulator),
- next_bytecode_offset_matcher, _,
- c::IsParameter(InterpreterDispatchDescriptor::kDispatchTable), _,
- _));
- }
- }
-}
-
TARGET_TEST_F(InterpreterAssemblerTest, BytecodeOperand) {
static const OperandScale kOperandScales[] = {
OperandScale::kSingle, OperandScale::kDouble, OperandScale::kQuadruple};
@@ -444,67 +405,70 @@ TARGET_TEST_F(InterpreterAssemblerTest, LoadConstantPoolEntry) {
InterpreterAssemblerForTest m(&state, bytecode);
{
TNode<IntPtrT> index = m.IntPtrConstant(2);
- Node* load_constant = m.LoadConstantPoolEntry(index);
-#ifdef V8_COMPRESS_POINTERS
- Matcher<Node*> constant_pool_matcher =
- IsChangeCompressedToTagged(m.IsLoadFromObject(
- MachineType::AnyCompressed(),
- c::IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
- c::IsIntPtrConstant(BytecodeArray::kConstantPoolOffset -
- kHeapObjectTag)));
- EXPECT_THAT(load_constant,
- IsChangeCompressedToTagged(m.IsLoad(
- MachineType::AnyCompressed(), constant_pool_matcher,
- c::IsIntPtrConstant(FixedArray::OffsetOfElementAt(2) -
- kHeapObjectTag),
- LoadSensitivity::kCritical)));
-#else
- Matcher<Node*> constant_pool_matcher = m.IsLoadFromObject(
- MachineType::AnyTagged(),
- c::IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
- c::IsIntPtrConstant(BytecodeArray::kConstantPoolOffset -
- kHeapObjectTag));
- EXPECT_THAT(
- load_constant,
- m.IsLoad(MachineType::AnyTagged(), constant_pool_matcher,
- c::IsIntPtrConstant(FixedArray::OffsetOfElementAt(2) -
- kHeapObjectTag),
- LoadSensitivity::kCritical));
-#endif
+ TNode<Object> load_constant = m.LoadConstantPoolEntry(index);
+ if (COMPRESS_POINTERS_BOOL && FLAG_turbo_decompression_elimination) {
+ Matcher<Node*> constant_pool_matcher =
+ IsChangeCompressedToTagged(m.IsLoadFromObject(
+ MachineType::AnyCompressed(),
+ c::IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
+ c::IsIntPtrConstant(BytecodeArray::kConstantPoolOffset -
+ kHeapObjectTag)));
+ EXPECT_THAT(load_constant,
+ IsChangeCompressedToTagged(m.IsLoad(
+ MachineType::AnyCompressed(), constant_pool_matcher,
+ c::IsIntPtrConstant(FixedArray::OffsetOfElementAt(2) -
+ kHeapObjectTag),
+ LoadSensitivity::kCritical)));
+ } else {
+ Matcher<Node*> constant_pool_matcher = m.IsLoadFromObject(
+ MachineType::AnyTagged(),
+ c::IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
+ c::IsIntPtrConstant(BytecodeArray::kConstantPoolOffset -
+ kHeapObjectTag));
+ EXPECT_THAT(
+ load_constant,
+ m.IsLoad(MachineType::AnyTagged(), constant_pool_matcher,
+ c::IsIntPtrConstant(FixedArray::OffsetOfElementAt(2) -
+ kHeapObjectTag),
+ LoadSensitivity::kCritical));
+ }
}
{
Node* index = m.Parameter(2);
- Node* load_constant = m.LoadConstantPoolEntry(index);
-#if V8_COMPRESS_POINTERS
- Matcher<Node*> constant_pool_matcher =
- IsChangeCompressedToTagged(m.IsLoadFromObject(
- MachineType::AnyCompressed(),
- c::IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
- c::IsIntPtrConstant(BytecodeArray::kConstantPoolOffset -
- kHeapObjectTag)));
- EXPECT_THAT(
- load_constant,
- IsChangeCompressedToTagged(m.IsLoad(
- MachineType::AnyCompressed(), constant_pool_matcher,
- c::IsIntPtrAdd(
- c::IsIntPtrConstant(FixedArray::kHeaderSize - kHeapObjectTag),
- c::IsWordShl(index, c::IsIntPtrConstant(kTaggedSizeLog2))),
- LoadSensitivity::kCritical)));
-#else
- Matcher<Node*> constant_pool_matcher = m.IsLoadFromObject(
- MachineType::AnyTagged(),
- c::IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
- c::IsIntPtrConstant(BytecodeArray::kConstantPoolOffset -
- kHeapObjectTag));
- EXPECT_THAT(
- load_constant,
- m.IsLoad(
- MachineType::AnyTagged(), constant_pool_matcher,
- c::IsIntPtrAdd(
- c::IsIntPtrConstant(FixedArray::kHeaderSize - kHeapObjectTag),
- c::IsWordShl(index, c::IsIntPtrConstant(kTaggedSizeLog2))),
- LoadSensitivity::kCritical));
-#endif
+ TNode<Object> load_constant =
+ m.LoadConstantPoolEntry(m.ReinterpretCast<IntPtrT>(index));
+ if (COMPRESS_POINTERS_BOOL && FLAG_turbo_decompression_elimination) {
+ Matcher<Node*> constant_pool_matcher =
+ IsChangeCompressedToTagged(m.IsLoadFromObject(
+ MachineType::AnyCompressed(),
+ c::IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
+ c::IsIntPtrConstant(BytecodeArray::kConstantPoolOffset -
+ kHeapObjectTag)));
+ EXPECT_THAT(
+ load_constant,
+ IsChangeCompressedToTagged(m.IsLoad(
+ MachineType::AnyCompressed(), constant_pool_matcher,
+ c::IsIntPtrAdd(
+ c::IsIntPtrConstant(FixedArray::kHeaderSize -
+ kHeapObjectTag),
+ c::IsWordShl(index, c::IsIntPtrConstant(kTaggedSizeLog2))),
+ LoadSensitivity::kCritical)));
+ } else {
+ Matcher<Node*> constant_pool_matcher = m.IsLoadFromObject(
+ MachineType::AnyTagged(),
+ c::IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
+ c::IsIntPtrConstant(BytecodeArray::kConstantPoolOffset -
+ kHeapObjectTag));
+ EXPECT_THAT(
+ load_constant,
+ m.IsLoad(
+ MachineType::AnyTagged(), constant_pool_matcher,
+ c::IsIntPtrAdd(
+ c::IsIntPtrConstant(FixedArray::kHeaderSize -
+ kHeapObjectTag),
+ c::IsWordShl(index, c::IsIntPtrConstant(kTaggedSizeLog2))),
+ LoadSensitivity::kCritical));
+ }
}
}
}
@@ -517,15 +481,17 @@ TARGET_TEST_F(InterpreterAssemblerTest, LoadObjectField) {
m.ReinterpretCast<HeapObject>(m.IntPtrConstant(0xDEADBEEF));
int offset = 16;
TNode<Object> load_field = m.LoadObjectField(object, offset);
-#ifdef V8_COMPRESS_POINTERS
- EXPECT_THAT(load_field, IsChangeCompressedToTagged(m.IsLoadFromObject(
- MachineType::AnyCompressed(), Eq(object),
- c::IsIntPtrConstant(offset - kHeapObjectTag))));
-#else
- EXPECT_THAT(load_field, m.IsLoadFromObject(
- MachineType::AnyTagged(), Eq(object),
- c::IsIntPtrConstant(offset - kHeapObjectTag)));
-#endif
+ if (COMPRESS_POINTERS_BOOL && FLAG_turbo_decompression_elimination) {
+ EXPECT_THAT(load_field,
+ IsChangeCompressedToTagged(m.IsLoadFromObject(
+ MachineType::AnyCompressed(), Eq(object),
+ c::IsIntPtrConstant(offset - kHeapObjectTag))));
+ } else {
+ EXPECT_THAT(
+ load_field,
+ m.IsLoadFromObject(MachineType::AnyTagged(), Eq(object),
+ c::IsIntPtrConstant(offset - kHeapObjectTag)));
+ }
}
}
@@ -554,10 +520,10 @@ TARGET_TEST_F(InterpreterAssemblerTest, CallRuntime) {
Callable builtin =
CodeFactory::InterpreterCEntry(isolate(), result_size);
- TNode<Int32T> function_id = m.Int32Constant(0);
+ TNode<Uint32T> function_id = m.Uint32Constant(0);
InterpreterAssembler::RegListNodePair registers(m.IntPtrConstant(1),
m.Int32Constant(2));
- TNode<Object> context = m.ReinterpretCast<Object>(m.Int32Constant(4));
+ TNode<Context> context = m.ReinterpretCast<Context>(m.Int32Constant(4));
Matcher<Node*> function_table = c::IsExternalConstant(
ExternalReference::runtime_function_table_address_for_unittests(
@@ -607,25 +573,28 @@ TARGET_TEST_F(InterpreterAssemblerTest, LoadFeedbackVector) {
m.IsLoad(MachineType::Pointer(), c::IsLoadParentFramePointer(),
c::IsIntPtrConstant(Register::function_closure().ToOperand() *
kSystemPointerSize)));
-#ifdef V8_COMPRESS_POINTERS
- Matcher<Node*> load_vector_cell_matcher =
- IsChangeCompressedPointerToTaggedPointer(m.IsLoadFromObject(
- MachineType::CompressedPointer(), load_function_matcher,
- c::IsIntPtrConstant(JSFunction::kFeedbackCellOffset -
- kHeapObjectTag)));
- EXPECT_THAT(load_feedback_vector,
- IsChangeCompressedPointerToTaggedPointer(m.IsLoadFromObject(
- MachineType::CompressedPointer(), load_vector_cell_matcher,
- c::IsIntPtrConstant(Cell::kValueOffset - kHeapObjectTag))));
-#else
- Matcher<Node*> load_vector_cell_matcher = m.IsLoadFromObject(
- MachineType::TaggedPointer(), load_function_matcher,
- c::IsIntPtrConstant(JSFunction::kFeedbackCellOffset - kHeapObjectTag));
- EXPECT_THAT(load_feedback_vector,
- m.IsLoadFromObject(
- MachineType::TaggedPointer(), load_vector_cell_matcher,
- c::IsIntPtrConstant(Cell::kValueOffset - kHeapObjectTag)));
-#endif
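+ // Under pointer compression, the feedback cell and the feedback vector are
+ // loaded as compressed pointers and then decompressed; otherwise both are
+ // loaded directly as tagged pointers.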
+ if (COMPRESS_POINTERS_BOOL && FLAG_turbo_decompression_elimination) {
+ Matcher<Node*> load_vector_cell_matcher =
+ IsChangeCompressedPointerToTaggedPointer(m.IsLoadFromObject(
+ MachineType::CompressedPointer(), load_function_matcher,
+ c::IsIntPtrConstant(JSFunction::kFeedbackCellOffset -
+ kHeapObjectTag)));
+ EXPECT_THAT(
+ load_feedback_vector,
+ IsChangeCompressedPointerToTaggedPointer(m.IsLoadFromObject(
+ MachineType::CompressedPointer(), load_vector_cell_matcher,
+ c::IsIntPtrConstant(Cell::kValueOffset - kHeapObjectTag))));
+ } else {
+ Matcher<Node*> load_vector_cell_matcher = m.IsLoadFromObject(
+ MachineType::TaggedPointer(), load_function_matcher,
+ c::IsIntPtrConstant(JSFunction::kFeedbackCellOffset -
+ kHeapObjectTag));
+ EXPECT_THAT(
+ load_feedback_vector,
+ m.IsLoadFromObject(
+ MachineType::TaggedPointer(), load_vector_cell_matcher,
+ c::IsIntPtrConstant(Cell::kValueOffset - kHeapObjectTag)));
+ }
}
}
diff --git a/deps/v8/test/unittests/libplatform/default-worker-threads-task-runner-unittest.cc b/deps/v8/test/unittests/libplatform/default-worker-threads-task-runner-unittest.cc
index 8d52e80e39..e9581cc80e 100644
--- a/deps/v8/test/unittests/libplatform/default-worker-threads-task-runner-unittest.cc
+++ b/deps/v8/test/unittests/libplatform/default-worker-threads-task-runner-unittest.cc
@@ -37,10 +37,10 @@ TEST(DefaultWorkerThreadsTaskRunnerUnittest, PostTaskOrder) {
base::Semaphore semaphore(0);
std::unique_ptr<TestTask> task1 =
- base::make_unique<TestTask>([&] { order.push_back(1); });
+ std::make_unique<TestTask>([&] { order.push_back(1); });
std::unique_ptr<TestTask> task2 =
- base::make_unique<TestTask>([&] { order.push_back(2); });
- std::unique_ptr<TestTask> task3 = base::make_unique<TestTask>([&] {
+ std::make_unique<TestTask>([&] { order.push_back(2); });
+ std::unique_ptr<TestTask> task3 = std::make_unique<TestTask>([&] {
order.push_back(3);
semaphore.Signal();
});
@@ -65,27 +65,27 @@ TEST(DefaultWorkerThreadsTaskRunnerUnittest, PostTaskOrderMultipleWorkers) {
std::vector<int> order;
std::atomic_int count{0};
- std::unique_ptr<TestTask> task1 = base::make_unique<TestTask>([&] {
+ std::unique_ptr<TestTask> task1 = std::make_unique<TestTask>([&] {
base::MutexGuard guard(&vector_lock);
order.push_back(1);
count++;
});
- std::unique_ptr<TestTask> task2 = base::make_unique<TestTask>([&] {
+ std::unique_ptr<TestTask> task2 = std::make_unique<TestTask>([&] {
base::MutexGuard guard(&vector_lock);
order.push_back(2);
count++;
});
- std::unique_ptr<TestTask> task3 = base::make_unique<TestTask>([&] {
+ std::unique_ptr<TestTask> task3 = std::make_unique<TestTask>([&] {
base::MutexGuard guard(&vector_lock);
order.push_back(3);
count++;
});
- std::unique_ptr<TestTask> task4 = base::make_unique<TestTask>([&] {
+ std::unique_ptr<TestTask> task4 = std::make_unique<TestTask>([&] {
base::MutexGuard guard(&vector_lock);
order.push_back(4);
count++;
});
- std::unique_ptr<TestTask> task5 = base::make_unique<TestTask>([&] {
+ std::unique_ptr<TestTask> task5 = std::make_unique<TestTask>([&] {
base::MutexGuard guard(&vector_lock);
order.push_back(5);
count++;
@@ -123,7 +123,7 @@ class FakeClock {
// PostTask will cause the condition variable WaitFor() call to be notified
// early, rather than waiting for the real amount of time. WaitFor() listens
// to the system clock and not our FakeClock.
- runner->PostTask(base::make_unique<TestTask>([] {}));
+ runner->PostTask(std::make_unique<TestTask>([] {}));
}
private:
@@ -140,13 +140,13 @@ TEST(DefaultWorkerThreadsTaskRunnerUnittest, PostDelayedTaskOrder) {
base::Semaphore task1_semaphore(0);
base::Semaphore task3_semaphore(0);
- std::unique_ptr<TestTask> task1 = base::make_unique<TestTask>([&] {
+ std::unique_ptr<TestTask> task1 = std::make_unique<TestTask>([&] {
order.push_back(1);
task1_semaphore.Signal();
});
std::unique_ptr<TestTask> task2 =
- base::make_unique<TestTask>([&] { order.push_back(2); });
- std::unique_ptr<TestTask> task3 = base::make_unique<TestTask>([&] {
+ std::make_unique<TestTask>([&] { order.push_back(2); });
+ std::unique_ptr<TestTask> task3 = std::make_unique<TestTask>([&] {
order.push_back(3);
task3_semaphore.Signal();
});
@@ -181,15 +181,15 @@ TEST(DefaultWorkerThreadsTaskRunnerUnittest, PostDelayedTaskOrder2) {
base::Semaphore task2_semaphore(0);
base::Semaphore task3_semaphore(0);
- std::unique_ptr<TestTask> task1 = base::make_unique<TestTask>([&] {
+ std::unique_ptr<TestTask> task1 = std::make_unique<TestTask>([&] {
order.push_back(1);
task1_semaphore.Signal();
});
- std::unique_ptr<TestTask> task2 = base::make_unique<TestTask>([&] {
+ std::unique_ptr<TestTask> task2 = std::make_unique<TestTask>([&] {
order.push_back(2);
task2_semaphore.Signal();
});
- std::unique_ptr<TestTask> task3 = base::make_unique<TestTask>([&] {
+ std::unique_ptr<TestTask> task3 = std::make_unique<TestTask>([&] {
order.push_back(3);
task3_semaphore.Signal();
});
@@ -230,15 +230,15 @@ TEST(DefaultWorkerThreadsTaskRunnerUnittest, PostAfterTerminate) {
base::Semaphore task2_semaphore(0);
base::Semaphore task3_semaphore(0);
- std::unique_ptr<TestTask> task1 = base::make_unique<TestTask>([&] {
+ std::unique_ptr<TestTask> task1 = std::make_unique<TestTask>([&] {
order.push_back(1);
task1_semaphore.Signal();
});
- std::unique_ptr<TestTask> task2 = base::make_unique<TestTask>([&] {
+ std::unique_ptr<TestTask> task2 = std::make_unique<TestTask>([&] {
order.push_back(2);
task2_semaphore.Signal();
});
- std::unique_ptr<TestTask> task3 = base::make_unique<TestTask>([&] {
+ std::unique_ptr<TestTask> task3 = std::make_unique<TestTask>([&] {
order.push_back(3);
task3_semaphore.Signal();
});
@@ -281,7 +281,7 @@ TEST(DefaultWorkerThreadsTaskRunnerUnittest, RunsTasksOnCurrentThread) {
EXPECT_FALSE(runner.RunsTasksOnCurrentThread());
- std::unique_ptr<TestTask> task1 = base::make_unique<TestTask>([&] {
+ std::unique_ptr<TestTask> task1 = std::make_unique<TestTask>([&] {
EXPECT_TRUE(runner.RunsTasksOnCurrentThread());
semaphore.Signal();
});
diff --git a/deps/v8/test/unittests/logging/counters-unittest.cc b/deps/v8/test/unittests/logging/counters-unittest.cc
index dd38d80ee4..67cc7df465 100644
--- a/deps/v8/test/unittests/logging/counters-unittest.cc
+++ b/deps/v8/test/unittests/logging/counters-unittest.cc
@@ -149,16 +149,11 @@ class SnapshotNativeCounterTest : public TestWithNativeContextAndCounters {
SnapshotNativeCounterTest() {}
bool SupportsNativeCounters() const {
-#ifdef V8_USE_SNAPSHOT
#ifdef V8_SNAPSHOT_NATIVE_CODE_COUNTERS
return true;
#else
return false;
#endif // V8_SNAPSHOT_NATIVE_CODE_COUNTERS
-#else
- // If we do not have a snapshot then we rely on the runtime option.
- return internal::FLAG_native_code_counters;
-#endif // V8_USE_SNAPSHOT
}
#define SC(name, caption) \
diff --git a/deps/v8/test/unittests/objects/backing-store-unittest.cc b/deps/v8/test/unittests/objects/backing-store-unittest.cc
new file mode 100644
index 0000000000..d00f5632fe
--- /dev/null
+++ b/deps/v8/test/unittests/objects/backing-store-unittest.cc
@@ -0,0 +1,128 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/objects/backing-store.h"
+#include "src/base/platform/platform.h"
+#include "test/unittests/test-utils.h"
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace v8 {
+namespace internal {
+
+class BackingStoreTest : public TestWithIsolate {};
+
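+// Growing within the reserved capacity (1 page used, 2 reserved) must succeed
+// and update the byte length in place.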
+TEST_F(BackingStoreTest, GrowWasmMemoryInPlace) {
+ auto backing_store =
+ BackingStore::AllocateWasmMemory(isolate(), 1, 2, SharedFlag::kNotShared);
+ CHECK(backing_store);
+ EXPECT_TRUE(backing_store->is_wasm_memory());
+ EXPECT_EQ(1 * wasm::kWasmPageSize, backing_store->byte_length());
+ EXPECT_EQ(2 * wasm::kWasmPageSize, backing_store->byte_capacity());
+
+ bool success = backing_store->GrowWasmMemoryInPlace(isolate(), 1, 2);
+ EXPECT_TRUE(success);
+ EXPECT_EQ(2 * wasm::kWasmPageSize, backing_store->byte_length());
+}
+
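+// Growing past the reserved capacity must fail and leave the length unchanged.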
+TEST_F(BackingStoreTest, GrowWasmMemoryInPlace_neg) {
+ auto backing_store =
+ BackingStore::AllocateWasmMemory(isolate(), 1, 2, SharedFlag::kNotShared);
+ CHECK(backing_store);
+ EXPECT_TRUE(backing_store->is_wasm_memory());
+ EXPECT_EQ(1 * wasm::kWasmPageSize, backing_store->byte_length());
+ EXPECT_EQ(2 * wasm::kWasmPageSize, backing_store->byte_capacity());
+
+ bool success = backing_store->GrowWasmMemoryInPlace(isolate(), 2, 2);
+ EXPECT_FALSE(success);
+ EXPECT_EQ(1 * wasm::kWasmPageSize, backing_store->byte_length());
+}
+
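+// Shared Wasm memories support the same in-place growth within their reserved
+// capacity.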
+TEST_F(BackingStoreTest, GrowSharedWasmMemoryInPlace) {
+ auto backing_store =
+ BackingStore::AllocateWasmMemory(isolate(), 2, 3, SharedFlag::kShared);
+ CHECK(backing_store);
+ EXPECT_TRUE(backing_store->is_wasm_memory());
+ EXPECT_EQ(2 * wasm::kWasmPageSize, backing_store->byte_length());
+ EXPECT_EQ(3 * wasm::kWasmPageSize, backing_store->byte_capacity());
+
+ bool success = backing_store->GrowWasmMemoryInPlace(isolate(), 1, 3);
+ EXPECT_TRUE(success);
+ EXPECT_EQ(3 * wasm::kWasmPageSize, backing_store->byte_length());
+}
+
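+// Copying a backing store produces an independent store with the requested
+// number of pages.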
+TEST_F(BackingStoreTest, CopyWasmMemory) {
+ auto bs1 =
+ BackingStore::AllocateWasmMemory(isolate(), 1, 2, SharedFlag::kNotShared);
+ CHECK(bs1);
+ EXPECT_TRUE(bs1->is_wasm_memory());
+ EXPECT_EQ(1 * wasm::kWasmPageSize, bs1->byte_length());
+ EXPECT_EQ(2 * wasm::kWasmPageSize, bs1->byte_capacity());
+
+ auto bs2 = bs1->CopyWasmMemory(isolate(), 3);
+ EXPECT_TRUE(bs2->is_wasm_memory());
+ EXPECT_EQ(3 * wasm::kWasmPageSize, bs2->byte_length());
+ EXPECT_EQ(3 * wasm::kWasmPageSize, bs2->byte_capacity());
+}
+
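+// Worker thread that repeatedly grows a shared backing store by increment_
+// pages until it reaches max_ pages, tolerating concurrent growth from other
+// threads: a failed grow is acceptable only once the maximum has been reached.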
+class GrowerThread : public base::Thread {
+ public:
+ GrowerThread(Isolate* isolate, uint32_t increment, uint32_t max,
+ std::shared_ptr<BackingStore> backing_store)
+ : base::Thread(base::Thread::Options("GrowerThread")),
+ isolate_(isolate),
+ increment_(increment),
+ max_(max),
+ backing_store_(backing_store) {}
+
+ void Run() override {
+ size_t max_length = max_ * wasm::kWasmPageSize;
+ while (true) {
+ size_t current_length = backing_store_->byte_length();
+ if (current_length >= max_length) break;
+ bool result =
+ backing_store_->GrowWasmMemoryInPlace(isolate_, increment_, max_);
+ size_t new_length = backing_store_->byte_length();
+ if (result) {
+ CHECK_GE(new_length, current_length + increment_);
+ } else {
+ CHECK_EQ(max_length, new_length);
+ }
+ }
+ }
+
+ private:
+ Isolate* isolate_;
+ uint32_t increment_;
+ uint32_t max_;
+ std::shared_ptr<BackingStore> backing_store_;
+};
+
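+// Ten threads race to grow the same shared memory one page at a time; the
+// store must end up at exactly kMaxPages regardless of interleaving.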
+TEST_F(BackingStoreTest, RacyGrowWasmMemoryInPlace) {
+ constexpr int kNumThreads = 10;
+ constexpr int kMaxPages = 1024;
+ GrowerThread* threads[kNumThreads];
+
+ std::shared_ptr<BackingStore> backing_store =
+ BackingStore::AllocateWasmMemory(isolate(), 0, kMaxPages,
+ SharedFlag::kShared);
+
+ for (int i = 0; i < kNumThreads; i++) {
+ threads[i] = new GrowerThread(isolate(), 1, kMaxPages, backing_store);
+ CHECK(threads[i]->Start());
+ }
+
+ for (int i = 0; i < kNumThreads; i++) {
+ threads[i]->Join();
+ }
+
+ EXPECT_EQ(kMaxPages * wasm::kWasmPageSize, backing_store->byte_length());
+
+ for (int i = 0; i < kNumThreads; i++) {
+ delete threads[i];
+ }
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/objects/object-unittest.cc b/deps/v8/test/unittests/objects/object-unittest.cc
index 67dfc0f9db..b1768e0923 100644
--- a/deps/v8/test/unittests/objects/object-unittest.cc
+++ b/deps/v8/test/unittests/objects/object-unittest.cc
@@ -72,25 +72,34 @@ TEST(Object, InstanceTypeListOrder) {
<< " vs. current = " << current_type; \
last = current;
- INSTANCE_TYPE_LIST(TEST_INSTANCE_TYPE)
+ // Only test the hand-written portion of the instance type list. The generated portion
+ // doesn't run the same risk of getting out of order, and it does emit type
+ // names out of numerical order in one case: JS_OBJECT_TYPE is emitted before
+ // its subclass types, because types are emitted in depth-first pre-order
+ // traversal order, and some of its subclass types are numerically earlier.
+ INSTANCE_TYPE_LIST_BASE(TEST_INSTANCE_TYPE)
#undef TEST_INSTANCE_TYPE
}
TEST(Object, StructListOrder) {
- int current = static_cast<int>(InstanceType::ACCESS_CHECK_INFO_TYPE);
+ int current = static_cast<int>(InstanceType::FIRST_STRUCT_TYPE);
int last = current - 1;
ASSERT_LT(0, last);
InstanceType current_type = static_cast<InstanceType>(current);
#define TEST_STRUCT(TYPE, class, name) \
current_type = InstanceType::TYPE; \
current = static_cast<int>(current_type); \
- EXPECT_EQ(last + 1, current) \
+ EXPECT_LE(last + 1, current) \
<< " STRUCT_LIST is not ordered: " \
<< " last = " << static_cast<InstanceType>(last) \
<< " vs. current = " << current_type; \
last = current;
- STRUCT_LIST(TEST_STRUCT)
+ // Only test the _BASE portion (the hand-coded part). Note that the values are
+ // not necessarily consecutive because some Structs that need special
+ // handling, such as those that have multiple associated Map instances, are
+ // omitted from this list.
+ STRUCT_LIST_GENERATOR_BASE(STRUCT_LIST_ADAPTER, TEST_STRUCT)
#undef TEST_STRUCT
}
diff --git a/deps/v8/test/unittests/objects/osr-optimized-code-cache-unittest.cc b/deps/v8/test/unittests/objects/osr-optimized-code-cache-unittest.cc
new file mode 100644
index 0000000000..225048de63
--- /dev/null
+++ b/deps/v8/test/unittests/objects/osr-optimized-code-cache-unittest.cc
@@ -0,0 +1,412 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <cmath>
+#include <iostream>
+#include <limits>
+
+#include "src/deoptimizer/deoptimizer.h"
+#include "src/objects/objects-inl.h"
+#include "src/objects/objects.h"
+#include "src/objects/osr-optimized-code-cache.h"
+#include "test/unittests/test-utils.h"
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace v8 {
+namespace internal {
+
+namespace {
+
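+// Source template for each test: defines a function f<n>, warms it up, and
+// forces its optimization via natives syntax so optimized code is available
+// to insert into the OSR cache.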
+const char* code_template_string =
+ "function f%d() { return 0; };"
+ "%%PrepareFunctionForOptimization(f%d);"
+ "f%d(); f%d();"
+ "%%OptimizeFunctionOnNextCall(f%d);"
+ "f%d(); f%d;";
+
+void GetSource(i::ScopedVector<char>* source, int index) {
+ i::SNPrintF(*source, code_template_string, index, index, index, index, index,
+ index, index);
+}
+
+const int kInitialLength = OSROptimizedCodeCache::kInitialLength;
+const int kInitialEntries =
+ kInitialLength / OSROptimizedCodeCache::kEntryLength;
+const int kMaxLength = OSROptimizedCodeCache::kMaxLength;
+const int kMaxEntries = kMaxLength / OSROptimizedCodeCache::kEntryLength;
+
+} // namespace
+
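+// Inserting into an empty cache allocates the initial-length array and stores
+// the (SharedFunctionInfo, code, OSR id) triple in the first entry.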
+TEST_F(TestWithNativeContext, AddCodeToEmptyCache) {
+ if (!i::FLAG_opt) return;
+
+ i::FLAG_allow_natives_syntax = true;
+
+ i::ScopedVector<char> source(1024);
+ GetSource(&source, 0);
+ Handle<JSFunction> function = RunJS<JSFunction>(source.begin());
+ Isolate* isolate = function->GetIsolate();
+ Handle<NativeContext> native_context(function->native_context(), isolate);
+ Handle<SharedFunctionInfo> shared(function->shared(), isolate);
+ Handle<Code> code(function->code(), isolate);
+ BailoutId bailout_id(1);
+ OSROptimizedCodeCache::AddOptimizedCode(native_context, shared, code,
+ bailout_id);
+
+ Handle<OSROptimizedCodeCache> osr_cache(
+ native_context->GetOSROptimizedCodeCache(), isolate);
+ EXPECT_EQ(osr_cache->length(), kInitialLength);
+
+ HeapObject sfi_entry;
+ osr_cache->Get(OSROptimizedCodeCache::kSharedOffset)
+ ->GetHeapObject(&sfi_entry);
+ EXPECT_EQ(sfi_entry, *shared);
+ HeapObject code_entry;
+ osr_cache->Get(OSROptimizedCodeCache::kCachedCodeOffset)
+ ->GetHeapObject(&code_entry);
+ EXPECT_EQ(code_entry, *code);
+ Smi osr_offset_entry;
+ osr_cache->Get(OSROptimizedCodeCache::kOsrIdOffset)->ToSmi(&osr_offset_entry);
+ EXPECT_EQ(osr_offset_entry.value(), bailout_id.ToInt());
+}
+
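+// Filling all initial entries and adding one more doubles the cache length;
+// the new entry is written at the start of the grown region.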
+TEST_F(TestWithNativeContext, GrowCodeCache) {
+ if (!i::FLAG_opt) return;
+
+ i::FLAG_allow_natives_syntax = true;
+
+ i::ScopedVector<char> source(1024);
+ GetSource(&source, 0);
+ Handle<JSFunction> function = RunJS<JSFunction>(source.begin());
+ Isolate* isolate = function->GetIsolate();
+ Handle<NativeContext> native_context(function->native_context(), isolate);
+ Handle<SharedFunctionInfo> shared(function->shared(), isolate);
+ Handle<Code> code(function->code(), isolate);
+
+ int bailout_id = 0;
+ for (bailout_id = 0; bailout_id < kInitialEntries; bailout_id++) {
+ OSROptimizedCodeCache::AddOptimizedCode(native_context, shared, code,
+ BailoutId(bailout_id));
+ }
+ Handle<OSROptimizedCodeCache> osr_cache(
+ native_context->GetOSROptimizedCodeCache(), isolate);
+ EXPECT_EQ(osr_cache->length(), kInitialLength);
+
+ OSROptimizedCodeCache::AddOptimizedCode(native_context, shared, code,
+ BailoutId(bailout_id));
+ osr_cache = Handle<OSROptimizedCodeCache>(
+ native_context->GetOSROptimizedCodeCache(), isolate);
+ EXPECT_EQ(osr_cache->length(), kInitialLength * 2);
+
+ int index = kInitialLength;
+ HeapObject sfi_entry;
+ osr_cache->Get(index + OSROptimizedCodeCache::kSharedOffset)
+ ->GetHeapObject(&sfi_entry);
+ EXPECT_EQ(sfi_entry, *shared);
+ HeapObject code_entry;
+ osr_cache->Get(index + OSROptimizedCodeCache::kCachedCodeOffset)
+ ->GetHeapObject(&code_entry);
+ EXPECT_EQ(code_entry, *code);
+ Smi osr_offset_entry;
+ osr_cache->Get(index + OSROptimizedCodeCache::kOsrIdOffset)
+ ->ToSmi(&osr_offset_entry);
+ EXPECT_EQ(osr_offset_entry.value(), bailout_id);
+}
+
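+// GetOptimizedCode must return cached code for live entries, and null once
+// the function has been deoptimized or the code slot has been cleared.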
+TEST_F(TestWithNativeContext, FindCachedEntry) {
+ if (!i::FLAG_opt) return;
+
+ i::FLAG_allow_natives_syntax = true;
+
+ i::ScopedVector<char> source(1024);
+ GetSource(&source, 0);
+ Handle<JSFunction> function = RunJS<JSFunction>(source.begin());
+ Isolate* isolate = function->GetIsolate();
+ Handle<NativeContext> native_context(function->native_context(), isolate);
+ Handle<SharedFunctionInfo> shared(function->shared(), isolate);
+ Handle<Code> code(function->code(), isolate);
+
+ int bailout_id = 0;
+ for (bailout_id = 0; bailout_id < kInitialEntries; bailout_id++) {
+ OSROptimizedCodeCache::AddOptimizedCode(native_context, shared, code,
+ BailoutId(bailout_id));
+ }
+
+ i::ScopedVector<char> source1(1024);
+ GetSource(&source1, 1);
+ Handle<JSFunction> function1 = RunJS<JSFunction>(source1.begin());
+ Handle<SharedFunctionInfo> shared1(function1->shared(), isolate);
+ Handle<Code> code1(function1->code(), isolate);
+ OSROptimizedCodeCache::AddOptimizedCode(native_context, shared1, code1,
+ BailoutId(bailout_id));
+
+ Handle<OSROptimizedCodeCache> osr_cache(
+ native_context->GetOSROptimizedCodeCache(), isolate);
+ EXPECT_EQ(osr_cache->GetOptimizedCode(shared, BailoutId(0), isolate), *code);
+ EXPECT_EQ(
+ osr_cache->GetOptimizedCode(shared1, BailoutId(bailout_id), isolate),
+ *code1);
+
+ RunJS("%DeoptimizeFunction(f1)");
+ EXPECT_TRUE(
+ osr_cache->GetOptimizedCode(shared1, BailoutId(bailout_id), isolate)
+ .is_null());
+
+ osr_cache->Set(OSROptimizedCodeCache::kCachedCodeOffset,
+ HeapObjectReference::ClearedValue(isolate));
+ EXPECT_TRUE(
+ osr_cache->GetOptimizedCode(shared, BailoutId(0), isolate).is_null());
+}
+
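+// Once the cache is at kMaxLength it stops growing; an insertion beyond
+// capacity keeps the length constant and claims the slot at index 0.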
+TEST_F(TestWithNativeContext, MaxCapacityCache) {
+ if (!i::FLAG_opt) return;
+
+ i::FLAG_allow_natives_syntax = true;
+
+ i::ScopedVector<char> source(1024);
+ GetSource(&source, 0);
+ Handle<JSFunction> function = RunJS<JSFunction>(source.begin());
+ Isolate* isolate = function->GetIsolate();
+ Handle<NativeContext> native_context(function->native_context(), isolate);
+ Handle<SharedFunctionInfo> shared(function->shared(), isolate);
+ Handle<Code> code(function->code(), isolate);
+
+ int bailout_id = 0;
+ // Add max_capacity - 1 entries.
+ for (bailout_id = 0; bailout_id < kMaxEntries - 1; bailout_id++) {
+ OSROptimizedCodeCache::AddOptimizedCode(native_context, shared, code,
+ BailoutId(bailout_id));
+ }
+ Handle<OSROptimizedCodeCache> osr_cache(
+ native_context->GetOSROptimizedCodeCache(), isolate);
+ EXPECT_EQ(osr_cache->length(), kMaxLength);
+
+ // Add an entry to reach max capacity.
+ i::ScopedVector<char> source1(1024);
+ GetSource(&source1, 1);
+ Handle<JSFunction> function1 = RunJS<JSFunction>(source1.begin());
+ Handle<SharedFunctionInfo> shared1(function1->shared(), isolate);
+ Handle<Code> code1(function1->code(), isolate);
+ OSROptimizedCodeCache::AddOptimizedCode(native_context, shared1, code1,
+ BailoutId(bailout_id));
+ osr_cache = Handle<OSROptimizedCodeCache>(
+ native_context->GetOSROptimizedCodeCache(), isolate);
+ EXPECT_EQ(osr_cache->length(), kMaxLength);
+
+ int index = (kMaxEntries - 1) * OSROptimizedCodeCache::kEntryLength;
+ HeapObject object;
+ Smi smi;
+ osr_cache->Get(index + OSROptimizedCodeCache::kSharedOffset)
+ ->GetHeapObject(&object);
+ EXPECT_EQ(object, *shared1);
+ osr_cache->Get(index + OSROptimizedCodeCache::kCachedCodeOffset)
+ ->GetHeapObject(&object);
+ EXPECT_EQ(object, *code1);
+ osr_cache->Get(index + OSROptimizedCodeCache::kOsrIdOffset)->ToSmi(&smi);
+ EXPECT_EQ(smi.value(), bailout_id);
+
+ // Add an entry beyond max capacity.
+ i::ScopedVector<char> source2(1024);
+ GetSource(&source2, 2);
+ Handle<JSFunction> function2 = RunJS<JSFunction>(source2.begin());
+ Handle<SharedFunctionInfo> shared2(function2->shared(), isolate);
+ Handle<Code> code2(function2->code(), isolate);
+ bailout_id++;
+ OSROptimizedCodeCache::AddOptimizedCode(native_context, shared2, code2,
+ BailoutId(bailout_id));
+ osr_cache = Handle<OSROptimizedCodeCache>(
+ native_context->GetOSROptimizedCodeCache(), isolate);
+ EXPECT_EQ(osr_cache->length(), kMaxLength);
+
+ index = 0;
+ osr_cache->Get(index + OSROptimizedCodeCache::kSharedOffset)
+ ->GetHeapObject(&object);
+ EXPECT_EQ(object, *shared2);
+ osr_cache->Get(index + OSROptimizedCodeCache::kCachedCodeOffset)
+ ->GetHeapObject(&object);
+ EXPECT_EQ(object, *code2);
+ osr_cache->Get(index + OSROptimizedCodeCache::kOsrIdOffset)->ToSmi(&smi);
+ EXPECT_EQ(smi.value(), bailout_id);
+}
+
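+// Cleared slots are reused before the cache grows: new entries land in the
+// cleared indices and the overall length stays the same.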
+TEST_F(TestWithNativeContext, ReuseClearedEntry) {
+ if (!i::FLAG_opt) return;
+
+ i::FLAG_allow_natives_syntax = true;
+
+ i::ScopedVector<char> source(1024);
+ GetSource(&source, 0);
+ Handle<JSFunction> function = RunJS<JSFunction>(source.begin());
+ Isolate* isolate = function->GetIsolate();
+ Handle<NativeContext> native_context(function->native_context(), isolate);
+ Handle<SharedFunctionInfo> shared(function->shared(), isolate);
+ Handle<Code> code(function->code(), isolate);
+
+ int num_entries = kInitialEntries * 2;
+ int expected_length = kInitialLength * 2;
+ int bailout_id = 0;
+ for (bailout_id = 0; bailout_id < num_entries; bailout_id++) {
+ OSROptimizedCodeCache::AddOptimizedCode(native_context, shared, code,
+ BailoutId(bailout_id));
+ }
+ Handle<OSROptimizedCodeCache> osr_cache(
+ native_context->GetOSROptimizedCodeCache(), isolate);
+ EXPECT_EQ(osr_cache->length(), expected_length);
+
+ int clear_index1 = 0;
+ int clear_index2 = (num_entries - 1) * OSROptimizedCodeCache::kEntryLength;
+ osr_cache->Set(clear_index1 + OSROptimizedCodeCache::kSharedOffset,
+ HeapObjectReference::ClearedValue(isolate));
+ osr_cache->Set(clear_index2 + OSROptimizedCodeCache::kCachedCodeOffset,
+ HeapObjectReference::ClearedValue(isolate));
+
+ i::ScopedVector<char> source1(1024);
+ GetSource(&source1, 1);
+ Handle<JSFunction> function1 = RunJS<JSFunction>(source1.begin());
+ Handle<SharedFunctionInfo> shared1(function1->shared(), isolate);
+ Handle<Code> code1(function1->code(), isolate);
+ OSROptimizedCodeCache::AddOptimizedCode(native_context, shared1, code1,
+ BailoutId(bailout_id));
+ osr_cache = Handle<OSROptimizedCodeCache>(
+ native_context->GetOSROptimizedCodeCache(), isolate);
+ EXPECT_EQ(osr_cache->length(), expected_length);
+
+ int index = clear_index1;
+ HeapObject object;
+ Smi smi;
+ osr_cache->Get(index + OSROptimizedCodeCache::kSharedOffset)
+ ->GetHeapObject(&object);
+ EXPECT_EQ(object, *shared1);
+ osr_cache->Get(index + OSROptimizedCodeCache::kCachedCodeOffset)
+ ->GetHeapObject(&object);
+ EXPECT_EQ(object, *code1);
+ osr_cache->Get(index + OSROptimizedCodeCache::kOsrIdOffset)->ToSmi(&smi);
+ EXPECT_EQ(smi.value(), bailout_id);
+
+ i::ScopedVector<char> source2(1024);
+ GetSource(&source2, 2);
+ Handle<JSFunction> function2 = RunJS<JSFunction>(source2.begin());
+ Handle<SharedFunctionInfo> shared2(function2->shared(), isolate);
+ Handle<Code> code2(function2->code(), isolate);
+ bailout_id++;
+ OSROptimizedCodeCache::AddOptimizedCode(native_context, shared2, code2,
+ BailoutId(bailout_id));
+ osr_cache = Handle<OSROptimizedCodeCache>(
+ native_context->GetOSROptimizedCodeCache(), isolate);
+ EXPECT_EQ(osr_cache->length(), expected_length);
+
+ index = clear_index2;
+ osr_cache->Get(index + OSROptimizedCodeCache::kSharedOffset)
+ ->GetHeapObject(&object);
+ EXPECT_EQ(object, *shared2);
+ osr_cache->Get(index + OSROptimizedCodeCache::kCachedCodeOffset)
+ ->GetHeapObject(&object);
+ EXPECT_EQ(object, *code2);
+ osr_cache->Get(index + OSROptimizedCodeCache::kOsrIdOffset)->ToSmi(&smi);
+ EXPECT_EQ(smi.value(), bailout_id);
+}
+
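+// Deoptimizing f1 evicts its two entries; with only a few entries evicted the
+// cache keeps its length, leaving cleared slots behind instead of compacting.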
+TEST_F(TestWithNativeContext, EvictDeoptedEntriesNoCompact) {
+ if (!i::FLAG_opt) return;
+
+ i::FLAG_allow_natives_syntax = true;
+
+ i::ScopedVector<char> source(1024);
+ GetSource(&source, 0);
+ Handle<JSFunction> function = RunJS<JSFunction>(source.begin());
+ Isolate* isolate = function->GetIsolate();
+ Handle<NativeContext> native_context(function->native_context(), isolate);
+ Handle<SharedFunctionInfo> shared(function->shared(), isolate);
+ Handle<Code> code(function->code(), isolate);
+
+ i::ScopedVector<char> source1(1024);
+ GetSource(&source1, 1);
+ Handle<JSFunction> deopt_function = RunJS<JSFunction>(source1.begin());
+ Handle<SharedFunctionInfo> deopt_shared(deopt_function->shared(), isolate);
+ Handle<Code> deopt_code(deopt_function->code(), isolate);
+
+ int num_entries = kInitialEntries * 2;
+ int expected_length = kInitialLength * 2;
+ int deopt_id1 = num_entries - 2;
+ int deopt_id2 = 0;
+ int bailout_id = 0;
+ for (bailout_id = 0; bailout_id < num_entries; bailout_id++) {
+ if (bailout_id == deopt_id1 || bailout_id == deopt_id2) {
+ OSROptimizedCodeCache::AddOptimizedCode(
+ native_context, deopt_shared, deopt_code, BailoutId(bailout_id));
+ } else {
+ OSROptimizedCodeCache::AddOptimizedCode(native_context, shared, code,
+ BailoutId(bailout_id));
+ }
+ }
+ Handle<OSROptimizedCodeCache> osr_cache(
+ native_context->GetOSROptimizedCodeCache(), isolate);
+ EXPECT_EQ(osr_cache->length(), expected_length);
+
+ RunJS("%DeoptimizeFunction(f1)");
+ osr_cache = Handle<OSROptimizedCodeCache>(
+ native_context->GetOSROptimizedCodeCache(), isolate);
+ EXPECT_EQ(osr_cache->length(), expected_length);
+
+ int index = (num_entries - 2) * OSROptimizedCodeCache::kEntryLength;
+ EXPECT_TRUE(osr_cache->Get(index + OSROptimizedCodeCache::kSharedOffset)
+ ->IsCleared());
+ EXPECT_TRUE(osr_cache->Get(index + OSROptimizedCodeCache::kCachedCodeOffset)
+ ->IsCleared());
+ EXPECT_TRUE(
+ osr_cache->Get(index + OSROptimizedCodeCache::kOsrIdOffset)->IsCleared());
+
+ index = (num_entries - 1) * OSROptimizedCodeCache::kEntryLength;
+ EXPECT_TRUE(osr_cache->Get(index + OSROptimizedCodeCache::kSharedOffset)
+ ->IsCleared());
+ EXPECT_TRUE(osr_cache->Get(index + OSROptimizedCodeCache::kCachedCodeOffset)
+ ->IsCleared());
+ EXPECT_TRUE(
+ osr_cache->Get(index + OSROptimizedCodeCache::kOsrIdOffset)->IsCleared());
+}
+
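+// When enough entries are evicted (more than half here), eviction compacts
+// the cache back down to its initial length.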
+TEST_F(TestWithNativeContext, EvictDeoptedEntriesCompact) {
+ if (!i::FLAG_opt) return;
+
+ i::FLAG_allow_natives_syntax = true;
+
+ i::ScopedVector<char> source(1024);
+ GetSource(&source, 0);
+ Handle<JSFunction> function = RunJS<JSFunction>(source.begin());
+ Isolate* isolate = function->GetIsolate();
+ Handle<NativeContext> native_context(function->native_context(), isolate);
+ Handle<SharedFunctionInfo> shared(function->shared(), isolate);
+ Handle<Code> code(function->code(), isolate);
+
+ i::ScopedVector<char> source1(1024);
+ GetSource(&source1, 1);
+ Handle<JSFunction> deopt_function = RunJS<JSFunction>(source1.begin());
+ Handle<SharedFunctionInfo> deopt_shared(deopt_function->shared(), isolate);
+ Handle<Code> deopt_code(deopt_function->code(), isolate);
+
+ int num_entries = kInitialEntries + 1;
+ int expected_length = kInitialLength * 2;
+ int bailout_id = 0;
+ for (bailout_id = 0; bailout_id < num_entries; bailout_id++) {
+ if (bailout_id % 2 == 0) {
+ OSROptimizedCodeCache::AddOptimizedCode(
+ native_context, deopt_shared, deopt_code, BailoutId(bailout_id));
+ } else {
+ OSROptimizedCodeCache::AddOptimizedCode(native_context, shared, code,
+ BailoutId(bailout_id));
+ }
+ }
+ Handle<OSROptimizedCodeCache> osr_cache(
+ native_context->GetOSROptimizedCodeCache(), isolate);
+ EXPECT_EQ(osr_cache->length(), expected_length);
+
+ RunJS("%DeoptimizeFunction(f1)");
+ osr_cache = Handle<OSROptimizedCodeCache>(
+ native_context->GetOSROptimizedCodeCache(), isolate);
+ EXPECT_EQ(osr_cache->length(), kInitialLength);
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/objects/value-serializer-unittest.cc b/deps/v8/test/unittests/objects/value-serializer-unittest.cc
index a3a6fb22a7..d5583d5a69 100644
--- a/deps/v8/test/unittests/objects/value-serializer-unittest.cc
+++ b/deps/v8/test/unittests/objects/value-serializer-unittest.cc
@@ -10,6 +10,7 @@
#include "include/v8.h"
#include "src/api/api-inl.h"
#include "src/base/build_config.h"
+#include "src/objects/backing-store.h"
#include "src/objects/objects-inl.h"
#include "src/wasm/wasm-objects.h"
#include "test/unittests/test-utils.h"
@@ -1729,7 +1730,7 @@ class ValueSerializerTestWithArrayBufferTransfer : public ValueSerializerTest {
Context::Scope scope(deserialization_context());
output_buffer_ = ArrayBuffer::New(isolate(), kTestByteLength);
const uint8_t data[kTestByteLength] = {0x00, 0x01, 0x80, 0xFF};
- memcpy(output_buffer_->GetContents().Data(), data, kTestByteLength);
+ memcpy(output_buffer_->GetBackingStore()->Data(), data, kTestByteLength);
}
}
@@ -1987,23 +1988,44 @@ class ValueSerializerTestWithSharedArrayBufferClone
ValueSerializerTestWithSharedArrayBufferClone()
: serializer_delegate_(this), deserializer_delegate_(this) {}
- void InitializeData(const std::vector<uint8_t>& data) {
+ void InitializeData(const std::vector<uint8_t>& data, bool is_wasm_memory) {
data_ = data;
{
Context::Scope scope(serialization_context());
input_buffer_ =
- SharedArrayBuffer::New(isolate(), data_.data(), data_.size());
+ NewSharedArrayBuffer(data_.data(), data_.size(), is_wasm_memory);
}
{
Context::Scope scope(deserialization_context());
output_buffer_ =
- SharedArrayBuffer::New(isolate(), data_.data(), data_.size());
+ NewSharedArrayBuffer(data_.data(), data_.size(), is_wasm_memory);
}
}
const Local<SharedArrayBuffer>& input_buffer() { return input_buffer_; }
const Local<SharedArrayBuffer>& output_buffer() { return output_buffer_; }
+ Local<SharedArrayBuffer> NewSharedArrayBuffer(void* data, size_t byte_length,
+ bool is_wasm_memory) {
+ if (is_wasm_memory) {
+ // TODO(titzer): there is no way to create Wasm memory backing stores
+ // through the API, or to create a shared array buffer whose backing
+ // store is wasm memory, so use the internal API.
+ DCHECK_EQ(0, byte_length % i::wasm::kWasmPageSize);
+ auto pages = byte_length / i::wasm::kWasmPageSize;
+ auto i_isolate = reinterpret_cast<i::Isolate*>(isolate());
+ auto backing_store = i::BackingStore::AllocateWasmMemory(
+ i_isolate, pages, pages, i::SharedFlag::kShared);
+ memcpy(backing_store->buffer_start(), data, byte_length);
+ i::Handle<i::JSArrayBuffer> buffer =
+ i_isolate->factory()->NewJSSharedArrayBuffer(
+ std::move(backing_store));
+ return Utils::ToLocalShared(buffer);
+ } else {
+ return SharedArrayBuffer::New(isolate(), data, byte_length);
+ }
+ }
+
static void SetUpTestCase() {
flag_was_enabled_ = i::FLAG_harmony_sharedarraybuffer;
i::FLAG_harmony_sharedarraybuffer = true;
@@ -2075,7 +2097,7 @@ bool ValueSerializerTestWithSharedArrayBufferClone::flag_was_enabled_ = false;
TEST_F(ValueSerializerTestWithSharedArrayBufferClone,
RoundTripSharedArrayBufferClone) {
- InitializeData({0x00, 0x01, 0x80, 0xFF});
+ InitializeData({0x00, 0x01, 0x80, 0xFF}, false);
EXPECT_CALL(serializer_delegate_,
GetSharedArrayBufferId(isolate(), input_buffer()))
@@ -2114,7 +2136,7 @@ TEST_F(ValueSerializerTestWithSharedArrayBufferClone,
std::vector<uint8_t> data = {0x00, 0x01, 0x80, 0xFF};
data.resize(65536);
- InitializeData(data);
+ InitializeData(data, true);
EXPECT_CALL(serializer_delegate_,
GetSharedArrayBufferId(isolate(), input_buffer()))
@@ -2487,35 +2509,32 @@ class ValueSerializerTestWithWasm : public ValueSerializerTest {
class SerializeToTransfer : public ValueSerializer::Delegate {
public:
- SerializeToTransfer(
- std::vector<WasmModuleObject::TransferrableModule>* modules)
+ explicit SerializeToTransfer(std::vector<CompiledWasmModule>* modules)
: modules_(modules) {}
Maybe<uint32_t> GetWasmModuleTransferId(
Isolate* isolate, Local<WasmModuleObject> module) override {
- modules_->push_back(module->GetTransferrableModule());
+ modules_->push_back(module->GetCompiledModule());
return Just(static_cast<uint32_t>(modules_->size()) - 1);
}
void ThrowDataCloneError(Local<String> message) override { UNREACHABLE(); }
private:
- std::vector<WasmModuleObject::TransferrableModule>* modules_;
+ std::vector<CompiledWasmModule>* modules_;
};
class DeserializeFromTransfer : public ValueDeserializer::Delegate {
public:
- DeserializeFromTransfer(
- std::vector<WasmModuleObject::TransferrableModule>* modules)
+ explicit DeserializeFromTransfer(std::vector<CompiledWasmModule>* modules)
: modules_(modules) {}
MaybeLocal<WasmModuleObject> GetWasmModuleFromId(Isolate* isolate,
uint32_t id) override {
- return WasmModuleObject::FromTransferrableModule(isolate,
- modules_->at(id));
+ return WasmModuleObject::FromCompiledModule(isolate, modules_->at(id));
}
private:
- std::vector<WasmModuleObject::TransferrableModule>* modules_;
+ std::vector<CompiledWasmModule>* modules_;
};
ValueSerializer::Delegate* GetSerializerDelegate() override {
@@ -2595,7 +2614,7 @@ class ValueSerializerTestWithWasm : public ValueSerializerTest {
private:
static bool g_saved_flag;
- std::vector<WasmModuleObject::TransferrableModule> transfer_modules_;
+ std::vector<CompiledWasmModule> transfer_modules_;
SerializeToTransfer serialize_delegate_;
DeserializeFromTransfer deserialize_delegate_;
ValueSerializer::Delegate* current_serializer_delegate_ = nullptr;
diff --git a/deps/v8/test/unittests/tasks/background-compile-task-unittest.cc b/deps/v8/test/unittests/tasks/background-compile-task-unittest.cc
index 8c3fb017a4..a9a0fac66b 100644
--- a/deps/v8/test/unittests/tasks/background-compile-task-unittest.cc
+++ b/deps/v8/test/unittests/tasks/background-compile-task-unittest.cc
@@ -9,7 +9,6 @@
#include "src/ast/ast.h"
#include "src/ast/scopes.h"
#include "src/base/platform/semaphore.h"
-#include "src/base/template-utils.h"
#include "src/codegen/compiler.h"
#include "src/execution/isolate-inl.h"
#include "src/flags/flags.h"
@@ -198,7 +197,7 @@ TEST_F(BackgroundCompileTaskTest, CompileOnBackgroundThread) {
NewBackgroundCompileTask(isolate(), shared));
base::Semaphore semaphore(0);
- auto background_task = base::make_unique<CompileTask>(task.get(), &semaphore);
+ auto background_task = std::make_unique<CompileTask>(task.get(), &semaphore);
V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(background_task));
semaphore.Wait();
diff --git a/deps/v8/test/unittests/tasks/cancelable-tasks-unittest.cc b/deps/v8/test/unittests/tasks/cancelable-tasks-unittest.cc
index 2a0e7d7f90..63ddaae758 100644
--- a/deps/v8/test/unittests/tasks/cancelable-tasks-unittest.cc
+++ b/deps/v8/test/unittests/tasks/cancelable-tasks-unittest.cc
@@ -74,7 +74,7 @@ class CancelableTaskManagerTest : public ::testing::Test {
std::unique_ptr<TestTask> NewTask(
ResultType* result, TestTask::Mode mode = TestTask::kDoNothing) {
- return base::make_unique<TestTask>(this, result, mode);
+ return std::make_unique<TestTask>(this, result, mode);
}
void CancelAndWait() {
diff --git a/deps/v8/test/unittests/test-helpers.cc b/deps/v8/test/unittests/test-helpers.cc
index 614ddba4f5..1011c39fc5 100644
--- a/deps/v8/test/unittests/test-helpers.cc
+++ b/deps/v8/test/unittests/test-helpers.cc
@@ -6,7 +6,6 @@
#include "include/v8.h"
#include "src/api/api.h"
-#include "src/base/template-utils.h"
#include "src/execution/isolate.h"
#include "src/handles/handles.h"
#include "src/objects/objects-inl.h"
@@ -59,7 +58,7 @@ std::unique_ptr<ParseInfo> OuterParseInfoForShared(
Handle<Script> script =
Handle<Script>::cast(handle(shared->script(), isolate));
std::unique_ptr<ParseInfo> result =
- base::make_unique<ParseInfo>(isolate, script);
+ std::make_unique<ParseInfo>(isolate, script);
// Create a character stream to simulate the parser having done so for the
// top-level ParseProgram.
diff --git a/deps/v8/test/unittests/unittests.status b/deps/v8/test/unittests/unittests.status
index def90fc3b5..08022d5689 100644
--- a/deps/v8/test/unittests/unittests.status
+++ b/deps/v8/test/unittests/unittests.status
@@ -14,19 +14,13 @@
'RandomNumberGenerator.NextSampleInvalidParam': [SKIP],
'RandomNumberGenerator.NextSampleSlowInvalidParam1': [SKIP],
'RandomNumberGenerator.NextSampleSlowInvalidParam2': [SKIP],
-}], # 'system == macos and asan'
-
-['(arch == arm or arch == mips) and not simulator_run', {
- # Uses too much memory.
- 'Parameterized/WasmCodeManagerTest.GrowingVsFixedModule/Fixed': [SKIP]
-}], # '(arch == arm or arch == mips) and not simulator_run'
+}], # system == macos and asan
##############################################################################
['lite_mode or variant == jitless', {
# TODO(v8:7777): Re-enable once wasm is supported in jitless mode.
'ValueSerializerTestWithSharedArrayBufferClone.RoundTripWebAssemblyMemory': [SKIP],
'ValueSerializerTestWithWasm.*': [SKIP],
- 'Parameterized/WasmCodeManagerTest.*': [SKIP],
}], # lite_mode or variant == jitless
##############################################################################
@@ -37,19 +31,18 @@
['system == windows and asan', {
# BUG(893437).
'Torque*': [SKIP],
-}], # 'system == windows and asan'
+}], # system == windows and asan
['system == windows and arch == x64 and mode == release', {
# BUG(992783).
'Torque.ConditionalFields': [SKIP],
'Torque.UsingUnderscorePrefixedIdentifierError': [SKIP],
-}], # 'system == windows and arch == x64 and mode == release'
+}], # system == windows and arch == x64 and mode == release
-##############################################################################
['tsan == True', {
# https://crbug.com/v8/9380
# The test is broken and needs to be fixed to use separate isolates.
'BackingStoreTest.RacyGrowWasmMemoryInPlace': [SKIP],
-}], # 'tsan == True'
+}], # tsan == True
]
diff --git a/deps/v8/test/unittests/wasm/OWNERS b/deps/v8/test/unittests/wasm/OWNERS
index dc68b39733..16b08f3b3b 100644
--- a/deps/v8/test/unittests/wasm/OWNERS
+++ b/deps/v8/test/unittests/wasm/OWNERS
@@ -1,5 +1,5 @@
ahaas@chromium.org
-clemensh@chromium.org
+clemensb@chromium.org
titzer@chromium.org
# COMPONENT: Blink>JavaScript>WebAssembly
diff --git a/deps/v8/test/unittests/wasm/control-transfer-unittest.cc b/deps/v8/test/unittests/wasm/control-transfer-unittest.cc
index 29cb176197..54f22135f1 100644
--- a/deps/v8/test/unittests/wasm/control-transfer-unittest.cc
+++ b/deps/v8/test/unittests/wasm/control-transfer-unittest.cc
@@ -87,8 +87,7 @@ class ControlTransferTest : public TestWithZone {
}
void CheckNoOtherTargets(
- const byte* start, const byte* end,
- ControlTransferMap& map, // NOLINT(runtime/references)
+ const byte* start, const byte* end, const ControlTransferMap& map,
std::initializer_list<ExpectedControlTransfer> targets) {
// Check there are no other control targets.
for (pc_t pc = 0; start + pc < end; pc++) {
diff --git a/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc b/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc
index 791770ee94..7e55283b61 100644
--- a/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc
+++ b/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc
@@ -33,8 +33,8 @@ namespace function_body_decoder_unittest {
#define WASM_IF_OP kExprIf, kLocalVoid
#define WASM_LOOP_OP kExprLoop, kLocalVoid
-static const byte kCodeGetLocal0[] = {kExprGetLocal, 0};
-static const byte kCodeGetLocal1[] = {kExprGetLocal, 1};
+static const byte kCodeGetLocal0[] = {kExprLocalGet, 0};
+static const byte kCodeGetLocal1[] = {kExprLocalGet, 1};
static const byte kCodeSetLocal0[] = {WASM_SET_LOCAL(0, WASM_ZERO)};
static const byte kCodeTeeLocal0[] = {WASM_TEE_LOCAL(0, WASM_ZERO)};
@@ -408,7 +408,7 @@ TEST_F(FunctionBodyDecoderTest, GetLocalN_local) {
for (byte i = 1; i < 8; i++) {
AddLocals(kWasmI32, 1);
for (byte j = 0; j < i; j++) {
- ExpectValidates(sigs.i_v(), {kExprGetLocal, j});
+ ExpectValidates(sigs.i_v(), {kExprLocalGet, j});
}
}
}
@@ -422,7 +422,7 @@ TEST_F(FunctionBodyDecoderTest, GetLocal1_fail_no_locals) {
}
TEST_F(FunctionBodyDecoderTest, GetLocal_off_end) {
- ExpectFailure(sigs.i_i(), {kExprGetLocal});
+ ExpectFailure(sigs.i_i(), {kExprLocalGet});
}
TEST_F(FunctionBodyDecoderTest, NumLocalBelowLimit) {
@@ -444,29 +444,29 @@ TEST_F(FunctionBodyDecoderTest, GetLocal_varint) {
const int kMaxLocals = kV8MaxWasmFunctionLocals - 1;
AddLocals(kWasmI32, kMaxLocals);
- ExpectValidates(sigs.i_i(), {kExprGetLocal, U32V_1(66)});
- ExpectValidates(sigs.i_i(), {kExprGetLocal, U32V_2(7777)});
- ExpectValidates(sigs.i_i(), {kExprGetLocal, U32V_3(8888)});
- ExpectValidates(sigs.i_i(), {kExprGetLocal, U32V_4(9999)});
+ ExpectValidates(sigs.i_i(), {kExprLocalGet, U32V_1(66)});
+ ExpectValidates(sigs.i_i(), {kExprLocalGet, U32V_2(7777)});
+ ExpectValidates(sigs.i_i(), {kExprLocalGet, U32V_3(8888)});
+ ExpectValidates(sigs.i_i(), {kExprLocalGet, U32V_4(9999)});
- ExpectValidates(sigs.i_i(), {kExprGetLocal, U32V_5(kMaxLocals - 1)});
+ ExpectValidates(sigs.i_i(), {kExprLocalGet, U32V_5(kMaxLocals - 1)});
- ExpectFailure(sigs.i_i(), {kExprGetLocal, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF});
+ ExpectFailure(sigs.i_i(), {kExprLocalGet, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF});
- ExpectValidates(sigs.i_i(), {kExprGetLocal, U32V_4(kMaxLocals - 1)});
- ExpectValidates(sigs.i_i(), {kExprGetLocal, U32V_4(kMaxLocals)});
- ExpectFailure(sigs.i_i(), {kExprGetLocal, U32V_4(kMaxLocals + 1)});
+ ExpectValidates(sigs.i_i(), {kExprLocalGet, U32V_4(kMaxLocals - 1)});
+ ExpectValidates(sigs.i_i(), {kExprLocalGet, U32V_4(kMaxLocals)});
+ ExpectFailure(sigs.i_i(), {kExprLocalGet, U32V_4(kMaxLocals + 1)});
- ExpectFailure(sigs.i_v(), {kExprGetLocal, U32V_4(kMaxLocals)});
- ExpectFailure(sigs.i_v(), {kExprGetLocal, U32V_4(kMaxLocals + 1)});
+ ExpectFailure(sigs.i_v(), {kExprLocalGet, U32V_4(kMaxLocals)});
+ ExpectFailure(sigs.i_v(), {kExprLocalGet, U32V_4(kMaxLocals + 1)});
}
TEST_F(FunctionBodyDecoderTest, GetLocal_toomany) {
AddLocals(kWasmI32, kV8MaxWasmFunctionLocals - 100);
AddLocals(kWasmI32, 100);
- ExpectValidates(sigs.i_v(), {kExprGetLocal, U32V_1(66)});
- ExpectFailure(sigs.i_i(), {kExprGetLocal, U32V_1(66)});
+ ExpectValidates(sigs.i_v(), {kExprLocalGet, U32V_1(66)});
+ ExpectFailure(sigs.i_i(), {kExprLocalGet, U32V_1(66)});
}
TEST_F(FunctionBodyDecoderTest, Binops_off_end) {
@@ -476,13 +476,13 @@ TEST_F(FunctionBodyDecoderTest, Binops_off_end) {
ExpectFailure(sigs.i_i(), code1);
}
- byte code3[] = {kExprGetLocal, 0, 0}; // [expr] [opcode]
+ byte code3[] = {kExprLocalGet, 0, 0}; // [expr] [opcode]
for (size_t i = 0; i < arraysize(kInt32BinopOpcodes); i++) {
code3[2] = kInt32BinopOpcodes[i];
ExpectFailure(sigs.i_i(), code3);
}
- byte code4[] = {kExprGetLocal, 0, 0, 0}; // [expr] [opcode] [opcode]
+ byte code4[] = {kExprLocalGet, 0, 0, 0}; // [expr] [opcode] [opcode]
for (size_t i = 0; i < arraysize(kInt32BinopOpcodes); i++) {
code4[2] = kInt32BinopOpcodes[i];
code4[3] = kInt32BinopOpcodes[i];
@@ -778,7 +778,7 @@ TEST_F(FunctionBodyDecoderTest, If_else_else) {
}
TEST_F(FunctionBodyDecoderTest, IfEmpty) {
- ExpectValidates(sigs.v_i(), {kExprGetLocal, 0, WASM_IF_OP, kExprEnd});
+ ExpectValidates(sigs.v_i(), {kExprLocalGet, 0, WASM_IF_OP, kExprEnd});
}
TEST_F(FunctionBodyDecoderTest, IfSet) {
@@ -852,15 +852,15 @@ TEST_F(FunctionBodyDecoderTest, IfNop) {
}
TEST_F(FunctionBodyDecoderTest, If_end) {
- ExpectValidates(sigs.v_i(), {kExprGetLocal, 0, WASM_IF_OP, kExprEnd});
- ExpectFailure(sigs.v_i(), {kExprGetLocal, 0, WASM_IF_OP, kExprEnd, kExprEnd});
+ ExpectValidates(sigs.v_i(), {kExprLocalGet, 0, WASM_IF_OP, kExprEnd});
+ ExpectFailure(sigs.v_i(), {kExprLocalGet, 0, WASM_IF_OP, kExprEnd, kExprEnd});
}
TEST_F(FunctionBodyDecoderTest, If_falloff1) {
- ExpectFailure(sigs.v_i(), {kExprGetLocal, 0, kExprIf});
- ExpectFailure(sigs.v_i(), {kExprGetLocal, 0, WASM_IF_OP});
+ ExpectFailure(sigs.v_i(), {kExprLocalGet, 0, kExprIf});
+ ExpectFailure(sigs.v_i(), {kExprLocalGet, 0, WASM_IF_OP});
ExpectFailure(sigs.v_i(),
- {kExprGetLocal, 0, WASM_IF_OP, kExprNop, kExprElse});
+ {kExprLocalGet, 0, WASM_IF_OP, kExprNop, kExprElse});
}
TEST_F(FunctionBodyDecoderTest, IfElseNop) {
@@ -1001,7 +1001,7 @@ TEST_F(FunctionBodyDecoderTest, ReturnVoid3) {
ExpectFailure(sigs.v_v(), {kExprRefNull});
ExpectFailure(sigs.v_v(), {kExprRefFunc, 0});
- ExpectFailure(sigs.v_i(), {kExprGetLocal, 0});
+ ExpectFailure(sigs.v_i(), {kExprLocalGet, 0});
}
TEST_F(FunctionBodyDecoderTest, Unreachable1) {
@@ -3485,10 +3485,10 @@ TEST_F(WasmOpcodeLengthTest, MiscExpressions) {
ExpectLength(5, kExprF32Const);
ExpectLength(9, kExprF64Const);
ExpectLength(1, kExprRefNull);
- ExpectLength(2, kExprGetLocal);
- ExpectLength(2, kExprSetLocal);
- ExpectLength(2, kExprGetGlobal);
- ExpectLength(2, kExprSetGlobal);
+ ExpectLength(2, kExprLocalGet);
+ ExpectLength(2, kExprLocalSet);
+ ExpectLength(2, kExprGlobalGet);
+ ExpectLength(2, kExprGlobalSet);
ExpectLength(2, kExprCallFunction);
ExpectLength(3, kExprCallIndirect);
}
@@ -3514,11 +3514,11 @@ TEST_F(WasmOpcodeLengthTest, I64Const) {
}
TEST_F(WasmOpcodeLengthTest, VariableLength) {
- ExpectLength(2, kExprGetGlobal, U32V_1(1));
- ExpectLength(3, kExprGetGlobal, U32V_2(33));
- ExpectLength(4, kExprGetGlobal, U32V_3(44));
- ExpectLength(5, kExprGetGlobal, U32V_4(66));
- ExpectLength(6, kExprGetGlobal, U32V_5(77));
+ ExpectLength(2, kExprGlobalGet, U32V_1(1));
+ ExpectLength(3, kExprGlobalGet, U32V_2(33));
+ ExpectLength(4, kExprGlobalGet, U32V_3(44));
+ ExpectLength(5, kExprGlobalGet, U32V_4(66));
+ ExpectLength(6, kExprGlobalGet, U32V_5(77));
ExpectLength(2, kExprRefFunc, U32V_1(1));
ExpectLength(3, kExprRefFunc, U32V_2(33));
diff --git a/deps/v8/test/unittests/wasm/loop-assignment-analysis-unittest.cc b/deps/v8/test/unittests/wasm/loop-assignment-analysis-unittest.cc
index 5f56da3a23..97e7dee27e 100644
--- a/deps/v8/test/unittests/wasm/loop-assignment-analysis-unittest.cc
+++ b/deps/v8/test/unittests/wasm/loop-assignment-analysis-unittest.cc
@@ -111,7 +111,7 @@ TEST_F(WasmLoopAssignmentAnalyzerTest, NestedIf) {
TEST_F(WasmLoopAssignmentAnalyzerTest, BigLocal) {
num_locals = 65000;
for (int i = 13; i < 65000; i = static_cast<int>(i * 1.5)) {
- byte code[] = {WASM_LOOP(WASM_I32V_1(11), kExprSetLocal, U32V_3(i))};
+ byte code[] = {WASM_LOOP(WASM_I32V_1(11), kExprLocalSet, U32V_3(i))};
BitVector* assigned = Analyze(code, code + arraysize(code));
for (int j = 0; j < assigned->length(); j++) {
@@ -185,7 +185,7 @@ TEST_F(WasmLoopAssignmentAnalyzerTest, Malformed) {
TEST_F(WasmLoopAssignmentAnalyzerTest, regress_642867) {
static const byte code[] = {
- WASM_LOOP(WASM_ZERO, kExprSetLocal, 0xFA, 0xFF, 0xFF, 0xFF,
+ WASM_LOOP(WASM_ZERO, kExprLocalSet, 0xFA, 0xFF, 0xFF, 0xFF,
0x0F)}; // local index LEB128 0xFFFFFFFA
// Just make sure that the analysis does not crash.
Analyze(code, code + arraysize(code));
diff --git a/deps/v8/test/unittests/wasm/module-decoder-unittest.cc b/deps/v8/test/unittests/wasm/module-decoder-unittest.cc
index 4493fcf1dd..25eb121074 100644
--- a/deps/v8/test/unittests/wasm/module-decoder-unittest.cc
+++ b/deps/v8/test/unittests/wasm/module-decoder-unittest.cc
@@ -343,6 +343,22 @@ TEST_F(WasmModuleVerifyTest, FuncRefGlobal) {
}
}
+TEST_F(WasmModuleVerifyTest, InvalidFuncRefGlobal) {
+ WASM_FEATURE_SCOPE(anyref);
+ static const byte data[] = {
+ // sig#0 ---------------------------------------------------------------
+ SIGNATURES_SECTION_VOID_VOID,
+ // funcs ---------------------------------------------------------------
+ TWO_EMPTY_FUNCTIONS(SIG_INDEX(0)),
+ SECTION(Global, // --
+ ENTRY_COUNT(1), // --
+ kLocalFuncRef, // local type
+ 0, // immutable
+ WASM_INIT_EXPR_REF_FUNC(7)), // invalid function index
+ TWO_EMPTY_BODIES};
+ EXPECT_FAILURE(data);
+}
+
TEST_F(WasmModuleVerifyTest, AnyRefGlobalWithGlobalInit) {
WASM_FEATURE_SCOPE(anyref);
static const byte data[] = {
@@ -439,17 +455,15 @@ TEST_F(WasmModuleVerifyTest, ExportMutableGlobal) {
}
}
-static void AppendUint32v(
- std::vector<byte>& buffer, // NOLINT(runtime/references)
- uint32_t val) {
+static void AppendUint32v(std::vector<byte>* buffer, uint32_t val) {
while (true) {
uint32_t next = val >> 7;
uint32_t out = val & 0x7F;
if (next) {
- buffer.push_back(static_cast<byte>(0x80 | out));
+ buffer->push_back(static_cast<byte>(0x80 | out));
val = next;
} else {
- buffer.push_back(static_cast<byte>(out));
+ buffer->push_back(static_cast<byte>(out));
break;
}
}
@@ -469,7 +483,7 @@ TEST_F(WasmModuleVerifyTest, NGlobals) {
for (size_t g = 0; g != sizeof(globals); ++g) {
buffer.push_back(globals[g]);
}
- AppendUint32v(buffer, i); // Number of globals.
+ AppendUint32v(&buffer, i); // Number of globals.
for (uint32_t j = 0; j < i; j++) {
buffer.insert(buffer.end(), data, data + sizeof(data));
}
@@ -1072,6 +1086,7 @@ TEST_F(WasmModuleVerifyTest, ElementSectionMultipleTables) {
FUNC_INDEX(0), // function
TABLE_INDEX(1), // element for table 1
WASM_INIT_EXPR_I32V_1(7), // index
+ kExternalFunction, // type
2, // elements count
FUNC_INDEX(0), // entry 0
FUNC_INDEX(0)), // entry 1
@@ -1118,15 +1133,18 @@ TEST_F(WasmModuleVerifyTest, ElementSectionMixedTables) {
FUNC_INDEX(0), // function
TABLE_INDEX(1), // element for table 1
WASM_INIT_EXPR_I32V_1(7), // index
+ kExternalFunction, // type
2, // elements count
FUNC_INDEX(0), // entry 0
FUNC_INDEX(0), // entry 1
TABLE_INDEX(2), // element for table 2
WASM_INIT_EXPR_I32V_1(12), // index
+ kExternalFunction, // type
1, // elements count
FUNC_INDEX(0), // function
TABLE_INDEX(3), // element for table 1
WASM_INIT_EXPR_I32V_1(17), // index
+ kExternalFunction, // type
2, // elements count
FUNC_INDEX(0), // entry 0
FUNC_INDEX(0)), // entry 1
@@ -1159,6 +1177,7 @@ TEST_F(WasmModuleVerifyTest, ElementSectionMultipleTablesArbitraryOrder) {
FUNC_INDEX(0), // function
TABLE_INDEX(1), // element for table 0
WASM_INIT_EXPR_I32V_1(7), // index
+ kExternalFunction, // type
2, // elements count
FUNC_INDEX(0), // entry 0
FUNC_INDEX(0), // entry 1
@@ -1205,10 +1224,12 @@ TEST_F(WasmModuleVerifyTest, ElementSectionMixedTablesArbitraryOrder) {
4, // entry count
TABLE_INDEX(2), // element for table 0
WASM_INIT_EXPR_I32V_1(10), // index
+ kExternalFunction, // type
1, // elements count
FUNC_INDEX(0), // function
TABLE_INDEX(3), // element for table 1
WASM_INIT_EXPR_I32V_1(17), // index
+ kExternalFunction, // type
2, // elements count
FUNC_INDEX(0), // entry 0
FUNC_INDEX(0), // entry 1
@@ -1218,6 +1239,7 @@ TEST_F(WasmModuleVerifyTest, ElementSectionMixedTablesArbitraryOrder) {
FUNC_INDEX(0), // function
TABLE_INDEX(1), // element for table 1
WASM_INIT_EXPR_I32V_1(7), // index
+ kExternalFunction, // type
2, // elements count
FUNC_INDEX(0), // entry 0
FUNC_INDEX(0)), // entry 1
@@ -1248,6 +1270,7 @@ TEST_F(WasmModuleVerifyTest, ElementSectionInitAnyRefTableWithFuncRef) {
FUNC_INDEX(0), // function
TABLE_INDEX(1), // element for table 1
WASM_INIT_EXPR_I32V_1(7), // index
+ kExternalFunction, // type
2, // elements count
FUNC_INDEX(0), // entry 0
FUNC_INDEX(0)), // entry 1
@@ -1295,6 +1318,7 @@ TEST_F(WasmModuleVerifyTest, ElementSectionDontInitAnyRefImportedTable) {
FUNC_INDEX(0), // function
TABLE_INDEX(1), // element for table 1
WASM_INIT_EXPR_I32V_1(17), // index
+ kExternalFunction, // type
2, // elements count
FUNC_INDEX(0), // entry 0
FUNC_INDEX(0)), // entry 1
@@ -2345,7 +2369,7 @@ TEST_F(WasmModuleVerifyTest, PassiveDataSegment) {
EXPECT_OFF_END_FAILURE(data, arraysize(data) - 5);
}
-TEST_F(WasmModuleVerifyTest, PassiveElementSegment) {
+TEST_F(WasmModuleVerifyTest, ActiveElementSegmentWithElements) {
static const byte data[] = {
// sig#0 -----------------------------------------------------------------
SIGNATURES_SECTION_VOID_VOID,
@@ -2354,7 +2378,8 @@ TEST_F(WasmModuleVerifyTest, PassiveElementSegment) {
// table declaration -----------------------------------------------------
SECTION(Table, ENTRY_COUNT(1), kLocalFuncRef, 0, 1),
// element segments -----------------------------------------------------
- SECTION(Element, ENTRY_COUNT(1), PASSIVE, kLocalFuncRef, U32V_1(3),
+ SECTION(Element, ENTRY_COUNT(1), ACTIVE_WITH_ELEMENTS, TABLE_INDEX0,
+ WASM_INIT_EXPR_I32V_1(0), kLocalFuncRef, U32V_1(3),
REF_FUNC_ELEMENT(0), REF_FUNC_ELEMENT(0), REF_NULL_ELEMENT),
// code ------------------------------------------------------------------
ONE_EMPTY_BODY};
@@ -2364,6 +2389,26 @@ TEST_F(WasmModuleVerifyTest, PassiveElementSegment) {
EXPECT_OFF_END_FAILURE(data, arraysize(data) - 5);
}
+TEST_F(WasmModuleVerifyTest, PassiveElementSegment) {
+ static const byte data[] = {
+ // sig#0 -----------------------------------------------------------------
+ SIGNATURES_SECTION_VOID_VOID,
+ // funcs -----------------------------------------------------------------
+ ONE_EMPTY_FUNCTION(SIG_INDEX(0)),
+ // table declaration -----------------------------------------------------
+ SECTION(Table, ENTRY_COUNT(1), kLocalFuncRef, 0, 1),
+ // element segments -----------------------------------------------------
+ SECTION(Element, ENTRY_COUNT(1), PASSIVE_WITH_ELEMENTS, kLocalFuncRef,
+ U32V_1(3), REF_FUNC_ELEMENT(0), REF_FUNC_ELEMENT(0),
+ REF_NULL_ELEMENT),
+ // code ------------------------------------------------------------------
+ ONE_EMPTY_BODY};
+ EXPECT_FAILURE(data);
+ WASM_FEATURE_SCOPE(bulk_memory);
+ EXPECT_VERIFIES(data);
+ EXPECT_OFF_END_FAILURE(data, arraysize(data) - 5);
+}
+
TEST_F(WasmModuleVerifyTest, PassiveElementSegmentAnyRef) {
static const byte data[] = {
// sig#0 -----------------------------------------------------------------
@@ -2373,13 +2418,33 @@ TEST_F(WasmModuleVerifyTest, PassiveElementSegmentAnyRef) {
// table declaration -----------------------------------------------------
SECTION(Table, ENTRY_COUNT(1), kLocalFuncRef, 0, 1),
// element segments -----------------------------------------------------
- SECTION(Element, ENTRY_COUNT(1), PASSIVE, kLocalAnyRef, U32V_1(0)),
+ SECTION(Element, ENTRY_COUNT(1), PASSIVE_WITH_ELEMENTS, kLocalAnyRef,
+ U32V_1(0)),
// code ------------------------------------------------------------------
ONE_EMPTY_BODY};
WASM_FEATURE_SCOPE(bulk_memory);
EXPECT_FAILURE(data);
}
+TEST_F(WasmModuleVerifyTest, PassiveElementSegmentWithIndices) {
+ static const byte data[] = {
+ // sig#0 -----------------------------------------------------------------
+ SIGNATURES_SECTION_VOID_VOID,
+ // funcs -----------------------------------------------------------------
+ ONE_EMPTY_FUNCTION(SIG_INDEX(0)),
+ // table declaration -----------------------------------------------------
+ SECTION(Table, ENTRY_COUNT(1), kLocalFuncRef, 0, 1),
+ // element segments -----------------------------------------------------
+ SECTION(Element, ENTRY_COUNT(1), PASSIVE, kExternalFunction,
+ ENTRY_COUNT(3), U32V_1(0), U32V_1(0), U32V_1(0)),
+ // code ------------------------------------------------------------------
+ ONE_EMPTY_BODY};
+ EXPECT_FAILURE(data);
+ WASM_FEATURE_SCOPE(bulk_memory);
+ EXPECT_VERIFIES(data);
+ EXPECT_OFF_END_FAILURE(data, arraysize(data) - 5);
+}
+
TEST_F(WasmModuleVerifyTest, DataCountSectionCorrectPlacement) {
static const byte data[] = {SECTION(Element, ENTRY_COUNT(0)),
SECTION(DataCount, ENTRY_COUNT(0)),
diff --git a/deps/v8/test/unittests/wasm/streaming-decoder-unittest.cc b/deps/v8/test/unittests/wasm/streaming-decoder-unittest.cc
index 5166b13628..7e8068095e 100644
--- a/deps/v8/test/unittests/wasm/streaming-decoder-unittest.cc
+++ b/deps/v8/test/unittests/wasm/streaming-decoder-unittest.cc
@@ -99,7 +99,7 @@ class WasmStreamingDecoderTest : public ::testing::Test {
for (int split = 0; split <= data.length(); ++split) {
MockStreamingResult result;
StreamingDecoder stream(
- base::make_unique<MockStreamingProcessor>(&result));
+ std::make_unique<MockStreamingProcessor>(&result));
stream.OnBytesReceived(data.SubVector(0, split));
stream.OnBytesReceived(data.SubVector(split, data.length()));
stream.Finish();
@@ -115,7 +115,7 @@ class WasmStreamingDecoderTest : public ::testing::Test {
for (int split = 0; split <= data.length(); ++split) {
MockStreamingResult result;
StreamingDecoder stream(
- base::make_unique<MockStreamingProcessor>(&result));
+ std::make_unique<MockStreamingProcessor>(&result));
stream.OnBytesReceived(data.SubVector(0, split));
stream.OnBytesReceived(data.SubVector(split, data.length()));
stream.Finish();
@@ -128,7 +128,7 @@ class WasmStreamingDecoderTest : public ::testing::Test {
TEST_F(WasmStreamingDecoderTest, EmptyStream) {
MockStreamingResult result;
- StreamingDecoder stream(base::make_unique<MockStreamingProcessor>(&result));
+ StreamingDecoder stream(std::make_unique<MockStreamingProcessor>(&result));
stream.Finish();
EXPECT_FALSE(result.ok());
}
@@ -137,7 +137,7 @@ TEST_F(WasmStreamingDecoderTest, IncompleteModuleHeader) {
const uint8_t data[] = {U32_LE(kWasmMagic), U32_LE(kWasmVersion)};
{
MockStreamingResult result;
- StreamingDecoder stream(base::make_unique<MockStreamingProcessor>(&result));
+ StreamingDecoder stream(std::make_unique<MockStreamingProcessor>(&result));
stream.OnBytesReceived(VectorOf(data, 1));
stream.Finish();
EXPECT_FALSE(result.ok());
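The only change in this file is mechanical: the standard std::make_unique, available since C++14, replaces V8's hand-rolled base::make_unique. A minimal sketch of the pattern (the Processor struct is a stand-in, not the real MockStreamingProcessor):

```cpp
#include <memory>

// Stand-in for the real MockStreamingProcessor (illustration only).
struct Processor {
  explicit Processor(int* result) : result(result) {}
  int* result;
};

int main() {
  int result = 0;
  // Before: base::make_unique<Processor>(&result);
  auto processor = std::make_unique<Processor>(&result);
  return processor->result == &result ? 0 : 1;
}
```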
diff --git a/deps/v8/test/unittests/wasm/trap-handler-x64-unittest.cc b/deps/v8/test/unittests/wasm/trap-handler-x64-unittest.cc
index 9f7cfc6b1d..d43ade2b0e 100644
--- a/deps/v8/test/unittests/wasm/trap-handler-x64-unittest.cc
+++ b/deps/v8/test/unittests/wasm/trap-handler-x64-unittest.cc
@@ -25,11 +25,11 @@
#include "src/codegen/assembler-inl.h"
#include "src/codegen/macro-assembler-inl.h"
#include "src/execution/simulator.h"
+#include "src/objects/backing-store.h"
#include "src/trap-handler/trap-handler.h"
#include "src/utils/allocation.h"
#include "src/utils/vector.h"
#include "src/wasm/wasm-engine.h"
-#include "src/wasm/wasm-memory.h"
#include "test/common/assembler-tester.h"
#include "test/unittests/test-utils.h"
@@ -80,19 +80,13 @@ class TrapHandlerTest : public TestWithIsolate,
public ::testing::WithParamInterface<TrapHandlerStyle> {
protected:
void SetUp() override {
- void* base = nullptr;
- size_t length = 0;
- accessible_memory_start_ =
- i_isolate()
- ->wasm_engine()
- ->memory_tracker()
- ->TryAllocateBackingStoreForTesting(
- i_isolate()->heap(), 1 * kWasmPageSize, &base, &length);
- memory_buffer_ =
- base::AddressRegion(reinterpret_cast<Address>(base), length);
-
- // The allocated memory buffer ends with a guard page.
- crash_address_ = memory_buffer_.end() - 32;
+ backing_store_ = BackingStore::AllocateWasmMemory(i_isolate(), 1, 1,
+ SharedFlag::kNotShared);
+ CHECK(backing_store_);
+ CHECK(backing_store_->has_guard_regions());
+ // The allocated backing store ends with a guard page.
+ crash_address_ = reinterpret_cast<Address>(backing_store_->buffer_start()) +
+ backing_store_->byte_length() + 32;
// Allocate a buffer for the generated code.
buffer_ = AllocateAssemblerBuffer(AssemblerBase::kMinimalBufferSize,
GetRandomMmapAddr());
@@ -122,10 +116,7 @@ class TrapHandlerTest : public TestWithIsolate,
CHECK(!GetThreadInWasmFlag());
buffer_.reset();
recovery_buffer_.reset();
-
- // Free the allocated backing store.
- i_isolate()->wasm_engine()->memory_tracker()->FreeBackingStoreForTesting(
- memory_buffer_, accessible_memory_start_);
+ backing_store_.reset();
// Clean up the trap handler
trap_handler::RemoveTrapHandler();
@@ -252,14 +243,12 @@ class TrapHandlerTest : public TestWithIsolate,
bool test_handler_executed() { return g_test_handler_executed; }
- // Allocated memory which corresponds to wasm memory with guard regions.
- base::AddressRegion memory_buffer_;
+ // The backing store used for testing the trap handler.
+ std::unique_ptr<BackingStore> backing_store_;
+
// Address within the guard region of the wasm memory. Accessing this memory
// address causes a signal or exception.
Address crash_address_;
- // The start of the accessible region in the allocated memory. This pointer is
- // needed to de-register the memory from the wasm memory tracker again.
- void* accessible_memory_start_;
// Buffer for generated code.
std::unique_ptr<TestingAssemblerBuffer> buffer_;
@@ -472,7 +461,7 @@ TEST_P(TrapHandlerTest, TestCrashInOtherThread) {
*trap_handler::GetThreadInWasmThreadLocalAddress() = 0;
}
-INSTANTIATE_TEST_SUITE_P(/* no prefix */, TrapHandlerTest,
+INSTANTIATE_TEST_SUITE_P(Traps, TrapHandlerTest,
::testing::Values(kDefault, kCallback),
PrintTrapHandlerTestParam);
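The rewritten SetUp() allocates one wasm page through the new BackingStore API and derives the faulting address from the buffer bounds instead of a tracked address region. A sketch of the arithmetic, assuming guard pages sit directly after the committed page (names hypothetical):

```cpp
#include <cstddef>
#include <cstdint>

constexpr size_t kWasmPageSize = 64 * 1024;  // one committed wasm page

// Picks an address 32 bytes into the guard region, mirroring the test above.
uintptr_t PickCrashAddress(uintptr_t buffer_start, size_t byte_length) {
  return buffer_start + byte_length + 32;
}

int main() {
  const uintptr_t start = 0x10000000;  // hypothetical mapping base
  return PickCrashAddress(start, kWasmPageSize) == start + kWasmPageSize + 32
             ? 0
             : 1;
}
```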
diff --git a/deps/v8/test/unittests/wasm/wasm-code-manager-unittest.cc b/deps/v8/test/unittests/wasm/wasm-code-manager-unittest.cc
index a6b29ffc6c..e0abf7adb4 100644
--- a/deps/v8/test/unittests/wasm/wasm-code-manager-unittest.cc
+++ b/deps/v8/test/unittests/wasm/wasm-code-manager-unittest.cc
@@ -9,7 +9,6 @@
#include "src/wasm/jump-table-assembler.h"
#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-engine.h"
-#include "src/wasm/wasm-memory.h"
namespace v8 {
namespace internal {
@@ -139,230 +138,6 @@ TEST_F(DisjointAllocationPoolTest, MergingSkipLargerSrcWithGap) {
CheckPool(a, {{10, 5}, {20, 15}, {36, 4}});
}
-enum ModuleStyle : int { Fixed = 0, Growable = 1 };
-
-std::string PrintWasmCodeManageTestParam(
- ::testing::TestParamInfo<ModuleStyle> info) {
- switch (info.param) {
- case Fixed:
- return "Fixed";
- case Growable:
- return "Growable";
- }
- UNREACHABLE();
-}
-
-class WasmCodeManagerTest : public TestWithContext,
- public ::testing::WithParamInterface<ModuleStyle> {
- public:
- static constexpr uint32_t kNumFunctions = 10;
- static size_t allocate_page_size;
- static size_t commit_page_size;
-
- WasmCodeManagerTest() {
- CHECK_EQ(allocate_page_size == 0, commit_page_size == 0);
- if (allocate_page_size == 0) {
- allocate_page_size = AllocatePageSize();
- commit_page_size = CommitPageSize();
- }
- CHECK_NE(0, allocate_page_size);
- CHECK_NE(0, commit_page_size);
- manager()->DisableImplicitAllocationsForTesting();
- }
-
- using NativeModulePtr = std::shared_ptr<NativeModule>;
-
- NativeModulePtr AllocModule(size_t size, ModuleStyle style) {
- std::shared_ptr<WasmModule> module(new WasmModule);
- module->num_declared_functions = kNumFunctions;
- bool can_request_more = style == Growable;
- return engine()->NewNativeModule(i_isolate(), kAllWasmFeatures, size,
- can_request_more, std::move(module));
- }
-
- WasmCode* AddCode(NativeModule* native_module, uint32_t index, size_t size) {
- CodeDesc desc;
- memset(reinterpret_cast<void*>(&desc), 0, sizeof(CodeDesc));
- std::unique_ptr<byte[]> exec_buff(new byte[size]);
- desc.buffer = exec_buff.get();
- desc.instr_size = static_cast<int>(size);
- std::unique_ptr<WasmCode> code = native_module->AddCode(
- index, desc, 0, 0, {}, {}, WasmCode::kFunction, ExecutionTier::kNone);
- return native_module->PublishCode(std::move(code));
- }
-
- WasmEngine* engine() { return i_isolate()->wasm_engine(); }
-
- WasmCodeManager* manager() { return engine()->code_manager(); }
-
- void SetMaxCommittedMemory(size_t limit) {
- manager()->SetMaxCommittedMemoryForTesting(limit);
- }
-};
-
-// static
-size_t WasmCodeManagerTest::allocate_page_size = 0;
-size_t WasmCodeManagerTest::commit_page_size = 0;
-
-INSTANTIATE_TEST_SUITE_P(Parameterized, WasmCodeManagerTest,
- ::testing::Values(Fixed, Growable),
- PrintWasmCodeManageTestParam);
-
-TEST_P(WasmCodeManagerTest, EmptyCase) {
- SetMaxCommittedMemory(0);
- CHECK_EQ(0, manager()->committed_code_space());
-
- NativeModulePtr native_module = AllocModule(allocate_page_size, GetParam());
- ASSERT_DEATH_IF_SUPPORTED(AddCode(native_module.get(), 0, kCodeAlignment),
- "OOM in wasm code commit");
-}
-
-TEST_P(WasmCodeManagerTest, AllocateAndGoOverLimit) {
- SetMaxCommittedMemory(allocate_page_size);
-
- CHECK_EQ(0, manager()->committed_code_space());
- NativeModulePtr native_module = AllocModule(allocate_page_size, GetParam());
- CHECK(native_module);
- CHECK_EQ(0, manager()->committed_code_space());
- WasmCodeRefScope code_ref_scope;
- uint32_t index = 0;
- WasmCode* code = AddCode(native_module.get(), index++, 1 * kCodeAlignment);
- CHECK_NOT_NULL(code);
- CHECK_EQ(commit_page_size, manager()->committed_code_space());
-
- code = AddCode(native_module.get(), index++, 3 * kCodeAlignment);
- CHECK_NOT_NULL(code);
- CHECK_EQ(commit_page_size, manager()->committed_code_space());
-
- code = AddCode(native_module.get(), index++,
- allocate_page_size - 4 * kCodeAlignment);
- CHECK_NOT_NULL(code);
- CHECK_EQ(allocate_page_size, manager()->committed_code_space());
-
- // This fails in "reservation" if we cannot extend the code space, or in
- // "commit" it we can (since we hit the allocation limit in the
- // WasmCodeManager). Hence don't check for that part of the OOM message.
- ASSERT_DEATH_IF_SUPPORTED(
- AddCode(native_module.get(), index++, 1 * kCodeAlignment),
- "OOM in wasm code");
-}
-
-TEST_P(WasmCodeManagerTest, TotalLimitIrrespectiveOfModuleCount) {
- SetMaxCommittedMemory(3 * allocate_page_size);
-
- NativeModulePtr nm1 = AllocModule(2 * allocate_page_size, GetParam());
- NativeModulePtr nm2 = AllocModule(2 * allocate_page_size, GetParam());
- CHECK(nm1);
- CHECK(nm2);
- WasmCodeRefScope code_ref_scope;
- WasmCode* code = AddCode(nm1.get(), 0, 2 * allocate_page_size);
- CHECK_NOT_NULL(code);
- ASSERT_DEATH_IF_SUPPORTED(AddCode(nm2.get(), 0, 2 * allocate_page_size),
- "OOM in wasm code commit");
-}
-
-TEST_P(WasmCodeManagerTest, GrowingVsFixedModule) {
- SetMaxCommittedMemory(3 * allocate_page_size);
-
- NativeModulePtr nm = AllocModule(allocate_page_size, GetParam());
- size_t module_size =
- GetParam() == Fixed ? kMaxWasmCodeMemory : allocate_page_size;
- size_t remaining_space_in_module = module_size;
- if (GetParam() == Fixed) {
- // Requesting more than the remaining space fails because the module cannot
- // grow.
- ASSERT_DEATH_IF_SUPPORTED(
- AddCode(nm.get(), 0, remaining_space_in_module + kCodeAlignment),
- "OOM in wasm code reservation");
- } else {
- // The module grows by one page. One page remains uncommitted.
- WasmCodeRefScope code_ref_scope;
- CHECK_NOT_NULL(
- AddCode(nm.get(), 0, remaining_space_in_module + kCodeAlignment));
- CHECK_EQ(commit_page_size + allocate_page_size,
- manager()->committed_code_space());
- }
-}
-
-TEST_P(WasmCodeManagerTest, CommitIncrements) {
- SetMaxCommittedMemory(10 * allocate_page_size);
-
- NativeModulePtr nm = AllocModule(3 * allocate_page_size, GetParam());
- WasmCodeRefScope code_ref_scope;
- WasmCode* code = AddCode(nm.get(), 0, kCodeAlignment);
- CHECK_NOT_NULL(code);
- CHECK_EQ(commit_page_size, manager()->committed_code_space());
- code = AddCode(nm.get(), 1, 2 * allocate_page_size);
- CHECK_NOT_NULL(code);
- CHECK_EQ(commit_page_size + 2 * allocate_page_size,
- manager()->committed_code_space());
- code = AddCode(nm.get(), 2, allocate_page_size - kCodeAlignment);
- CHECK_NOT_NULL(code);
- CHECK_EQ(3 * allocate_page_size, manager()->committed_code_space());
-}
-
-TEST_P(WasmCodeManagerTest, Lookup) {
- SetMaxCommittedMemory(2 * allocate_page_size);
-
- NativeModulePtr nm1 = AllocModule(allocate_page_size, GetParam());
- NativeModulePtr nm2 = AllocModule(allocate_page_size, GetParam());
- Address mid_code1_1;
- {
- // The {WasmCodeRefScope} needs to die before {nm1} dies.
- WasmCodeRefScope code_ref_scope;
- WasmCode* code1_0 = AddCode(nm1.get(), 0, kCodeAlignment);
- CHECK_EQ(nm1.get(), code1_0->native_module());
- WasmCode* code1_1 = AddCode(nm1.get(), 1, kCodeAlignment);
- WasmCode* code2_0 = AddCode(nm2.get(), 0, kCodeAlignment);
- WasmCode* code2_1 = AddCode(nm2.get(), 1, kCodeAlignment);
- CHECK_EQ(nm2.get(), code2_1->native_module());
-
- CHECK_EQ(0, code1_0->index());
- CHECK_EQ(1, code1_1->index());
- CHECK_EQ(0, code2_0->index());
- CHECK_EQ(1, code2_1->index());
-
- // we know the manager object is allocated here, so we shouldn't
- // find any WasmCode* associated with that ptr.
- WasmCode* not_found =
- manager()->LookupCode(reinterpret_cast<Address>(manager()));
- CHECK_NULL(not_found);
- WasmCode* found = manager()->LookupCode(code1_0->instruction_start());
- CHECK_EQ(found, code1_0);
- found = manager()->LookupCode(code2_1->instruction_start() +
- (code2_1->instructions().size() / 2));
- CHECK_EQ(found, code2_1);
- found = manager()->LookupCode(code2_1->instruction_start() +
- code2_1->instructions().size() - 1);
- CHECK_EQ(found, code2_1);
- found = manager()->LookupCode(code2_1->instruction_start() +
- code2_1->instructions().size());
- CHECK_NULL(found);
- mid_code1_1 =
- code1_1->instruction_start() + (code1_1->instructions().size() / 2);
- CHECK_EQ(code1_1, manager()->LookupCode(mid_code1_1));
- }
- nm1.reset();
- CHECK_NULL(manager()->LookupCode(mid_code1_1));
-}
-
-TEST_P(WasmCodeManagerTest, LookupWorksAfterRewrite) {
- SetMaxCommittedMemory(2 * allocate_page_size);
-
- NativeModulePtr nm1 = AllocModule(allocate_page_size, GetParam());
-
- WasmCodeRefScope code_ref_scope;
- WasmCode* code0 = AddCode(nm1.get(), 0, kCodeAlignment);
- WasmCode* code1 = AddCode(nm1.get(), 1, kCodeAlignment);
- CHECK_EQ(0, code0->index());
- CHECK_EQ(1, code1->index());
- CHECK_EQ(code1, manager()->LookupCode(code1->instruction_start()));
- WasmCode* code1_1 = AddCode(nm1.get(), 1, kCodeAlignment);
- CHECK_EQ(1, code1_1->index());
- CHECK_EQ(code1, manager()->LookupCode(code1->instruction_start()));
- CHECK_EQ(code1_1, manager()->LookupCode(code1_1->instruction_start()));
-}
-
} // namespace wasm_heap_unittest
} // namespace wasm
} // namespace internal
diff --git a/deps/v8/test/wasm-js/testcfg.py b/deps/v8/test/wasm-js/testcfg.py
index 197d9195f1..61e55477f9 100644
--- a/deps/v8/test/wasm-js/testcfg.py
+++ b/deps/v8/test/wasm-js/testcfg.py
@@ -26,7 +26,12 @@ proposal_flags = [{
'name': 'js-types',
'flags': ['--experimental-wasm-type-reflection',
'--no-experimental-wasm-bulk-memory']
- }]
+ },
+ {
+ 'name': 'JS-BigInt-integration',
+ 'flags': ['--experimental-wasm-bigint']
+ },
+ ]
class TestLoader(testsuite.JSTestLoader):
diff --git a/deps/v8/test/wasm-js/tests.tar.gz.sha1 b/deps/v8/test/wasm-js/tests.tar.gz.sha1
index ec8be70e2a..5001675b36 100644
--- a/deps/v8/test/wasm-js/tests.tar.gz.sha1
+++ b/deps/v8/test/wasm-js/tests.tar.gz.sha1
@@ -1 +1 @@
-26e59563060bd6de4adbb4021684e8cf38fe71c8 \ No newline at end of file
+2ecf4038f24fc08bd9da504f15942d3abb5ec685 \ No newline at end of file
diff --git a/deps/v8/test/wasm-js/wasm-js.status b/deps/v8/test/wasm-js/wasm-js.status
index 42ad2a4152..5d219f6eed 100644
--- a/deps/v8/test/wasm-js/wasm-js.status
+++ b/deps/v8/test/wasm-js/wasm-js.status
@@ -7,6 +7,9 @@
# https://bugs.chromium.org/p/v8/issues/detail?id=8633
'limits': [SKIP],
'proposals/reference-types/limits': [SKIP],
+ # TODO(v8:9673): Enable these spec tests once they exist, and the out-dated
+ # tests have been removed.
+ 'proposals/JS-BigInt-integration/*': [SKIP],
}], # ALWAYS
['arch == s390 or arch == s390x or system == aix', {
diff --git a/deps/v8/test/wasm-spec-tests/OWNERS b/deps/v8/test/wasm-spec-tests/OWNERS
index b347d0ae0c..8c66d12288 100644
--- a/deps/v8/test/wasm-spec-tests/OWNERS
+++ b/deps/v8/test/wasm-spec-tests/OWNERS
@@ -1,4 +1,4 @@
ahaas@chromium.org
-clemensh@chromium.org
+clemensb@chromium.org
# COMPONENT: Blink>JavaScript>WebAssembly
diff --git a/deps/v8/test/wasm-spec-tests/testcfg.py b/deps/v8/test/wasm-spec-tests/testcfg.py
index e0bd19f268..65131327b0 100644
--- a/deps/v8/test/wasm-spec-tests/testcfg.py
+++ b/deps/v8/test/wasm-spec-tests/testcfg.py
@@ -20,7 +20,12 @@ proposal_flags = [{
'name': 'js-types',
'flags': ['--experimental-wasm-type-reflection',
'--no-experimental-wasm-bulk-memory']
- }]
+ },
+ {
+ 'name': 'JS-BigInt-integration',
+ 'flags': ['--experimental-wasm-bigint']
+ },
+ ]
class TestLoader(testsuite.JSTestLoader):
pass
diff --git a/deps/v8/test/wasm-spec-tests/tests.tar.gz.sha1 b/deps/v8/test/wasm-spec-tests/tests.tar.gz.sha1
index 36c9e08ac1..c61881d072 100644
--- a/deps/v8/test/wasm-spec-tests/tests.tar.gz.sha1
+++ b/deps/v8/test/wasm-spec-tests/tests.tar.gz.sha1
@@ -1 +1 @@
-d9e649f4ea6da6bd18999795201c2bd138c0d786 \ No newline at end of file
+f9bd936c708402051f87f4ac8940d1916112a15a \ No newline at end of file
diff --git a/deps/v8/test/wasm-spec-tests/wasm-spec-tests.status b/deps/v8/test/wasm-spec-tests/wasm-spec-tests.status
index 16faaaed35..a0b96847c0 100644
--- a/deps/v8/test/wasm-spec-tests/wasm-spec-tests.status
+++ b/deps/v8/test/wasm-spec-tests/wasm-spec-tests.status
@@ -10,19 +10,16 @@
# the bulk-memory proposal. Since we've enabled bulk-memory by default, we
# need to update to use its testsuite.
'linking': [FAIL],
+ 'binary-leb128': [FAIL],
'elem': [FAIL],
'data': [FAIL],
- # TODO(v8:9658): The encoding of element segments changed in the bulk memory
- # proposal
- 'proposals/bulk-memory-operations/bulk': [FAIL],
- 'proposals/bulk-memory-operations/table_init': [FAIL],
- 'proposals/bulk-memory-operations/table_copy': [FAIL],
- 'proposals/bulk-memory-operations/elem': [FAIL],
- 'proposals/bulk-memory-operations/binary': [FAIL],
# TODO(mstarzinger): Roll newest tests into "js-types" repository.
'proposals/js-types/exports': [FAIL],
'proposals/js-types/globals': [FAIL],
'proposals/js-types/linking': [FAIL],
+ # TODO(v8:9673): Enable these spec tests once they exist, and the out-dated
+ # tests have been removed.
+ 'proposals/JS-BigInt-integration/*': [SKIP],
}], # ALWAYS
['arch == mipsel or arch == mips64el or arch == mips or arch == mips64', {
diff --git a/deps/v8/third_party/inspector_protocol/README.v8 b/deps/v8/third_party/inspector_protocol/README.v8
index a092e3e7a1..6a75bef675 100644
--- a/deps/v8/third_party/inspector_protocol/README.v8
+++ b/deps/v8/third_party/inspector_protocol/README.v8
@@ -2,7 +2,7 @@ Name: inspector protocol
Short Name: inspector_protocol
URL: https://chromium.googlesource.com/deps/inspector_protocol/
Version: 0
-Revision: d114a62e144cdfdae697fe0af6581ce39a31af37
+Revision: a14dad30f0e5b0fc05911856d5a20b1ffe89fd9b
License: BSD
License File: LICENSE
Security Critical: no
diff --git a/deps/v8/third_party/inspector_protocol/bindings/bindings.h b/deps/v8/third_party/inspector_protocol/bindings/bindings.h
index 0fae2032e4..b24b59b0e1 100644
--- a/deps/v8/third_party/inspector_protocol/bindings/bindings.h
+++ b/deps/v8/third_party/inspector_protocol/bindings/bindings.h
@@ -58,10 +58,9 @@ class ValueMaybe {
return is_just_ ? value_ : default_value;
}
bool isJust() const { return is_just_; }
- // TODO(johannes): |is_just_| isn't reset by this operation -
- // introduce && to ensure avoiding continued usage of |this|?
T takeJust() {
assert(is_just_);
+ is_just_ = false;
return std::move(value_);
}
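The added line resolves the TODO it replaces: takeJust() now marks the value as consumed, so isJust() reports the truth afterwards and a second take trips the assert. A self-contained sketch of the fixed behavior (a simplified stand-in for the real ValueMaybe):

```cpp
#include <cassert>
#include <string>
#include <utility>

// Simplified stand-in for the real ValueMaybe in bindings.h.
template <typename T>
class ValueMaybe {
 public:
  explicit ValueMaybe(T value) : is_just_(true), value_(std::move(value)) {}
  bool isJust() const { return is_just_; }
  T takeJust() {
    assert(is_just_);
    is_just_ = false;  // the fix: the value is now consumed
    return std::move(value_);
  }

 private:
  bool is_just_;
  T value_;
};

int main() {
  ValueMaybe<std::string> maybe("payload");
  std::string taken = maybe.takeJust();
  assert(!maybe.isJust());  // previously still true, inviting a double take
  return taken == "payload" ? 0 : 1;
}
```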
diff --git a/deps/v8/third_party/inspector_protocol/encoding/encoding.cc b/deps/v8/third_party/inspector_protocol/encoding/encoding.cc
index 6e5619d00e..fcefb8dbb9 100644
--- a/deps/v8/third_party/inspector_protocol/encoding/encoding.cc
+++ b/deps/v8/third_party/inspector_protocol/encoding/encoding.cc
@@ -85,8 +85,25 @@ std::string Status::ToASCIIString() const {
return ToASCIIString("CBOR: map start expected");
case Error::CBOR_MAP_STOP_EXPECTED:
return ToASCIIString("CBOR: map stop expected");
+ case Error::CBOR_ARRAY_START_EXPECTED:
+ return ToASCIIString("CBOR: array start expected");
case Error::CBOR_ENVELOPE_SIZE_LIMIT_EXCEEDED:
return ToASCIIString("CBOR: envelope size limit exceeded");
+
+ case Error::BINDINGS_MANDATORY_FIELD_MISSING:
+ return ToASCIIString("BINDINGS: mandatory field missing");
+ case Error::BINDINGS_BOOL_VALUE_EXPECTED:
+ return ToASCIIString("BINDINGS: bool value expected");
+ case Error::BINDINGS_INT32_VALUE_EXPECTED:
+ return ToASCIIString("BINDINGS: int32 value expected");
+ case Error::BINDINGS_DOUBLE_VALUE_EXPECTED:
+ return ToASCIIString("BINDINGS: double value expected");
+ case Error::BINDINGS_STRING_VALUE_EXPECTED:
+ return ToASCIIString("BINDINGS: string value expected");
+ case Error::BINDINGS_STRING8_VALUE_EXPECTED:
+ return ToASCIIString("BINDINGS: string8 value expected");
+ case Error::BINDINGS_BINARY_VALUE_EXPECTED:
+ return ToASCIIString("BINDINGS: binary value expected");
}
// Some compilers can't figure out that we can't get here.
return "INVALID ERROR CODE";
@@ -707,6 +724,12 @@ span<uint8_t> CBORTokenizer::GetBinary() const {
return bytes_.subspan(status_.pos + (token_byte_length_ - length), length);
}
+span<uint8_t> CBORTokenizer::GetEnvelope() const {
+ assert(token_tag_ == CBORTokenTag::ENVELOPE);
+ auto length = static_cast<size_t>(token_start_internal_value_);
+ return bytes_.subspan(status_.pos, length + kEncodedEnvelopeHeaderSize);
+}
+
span<uint8_t> CBORTokenizer::GetEnvelopeContents() const {
assert(token_tag_ == CBORTokenTag::ENVELOPE);
auto length = static_cast<size_t>(token_start_internal_value_);
diff --git a/deps/v8/third_party/inspector_protocol/encoding/encoding.h b/deps/v8/third_party/inspector_protocol/encoding/encoding.h
index c9ddd3a9be..47f6d22e9b 100644
--- a/deps/v8/third_party/inspector_protocol/encoding/encoding.h
+++ b/deps/v8/third_party/inspector_protocol/encoding/encoding.h
@@ -141,7 +141,16 @@ enum class Error {
CBOR_TRAILING_JUNK = 0x1e,
CBOR_MAP_START_EXPECTED = 0x1f,
CBOR_MAP_STOP_EXPECTED = 0x20,
- CBOR_ENVELOPE_SIZE_LIMIT_EXCEEDED = 0x21,
+ CBOR_ARRAY_START_EXPECTED = 0x21,
+ CBOR_ENVELOPE_SIZE_LIMIT_EXCEEDED = 0x22,
+
+ BINDINGS_MANDATORY_FIELD_MISSING = 0x23,
+ BINDINGS_BOOL_VALUE_EXPECTED = 0x24,
+ BINDINGS_INT32_VALUE_EXPECTED = 0x25,
+ BINDINGS_DOUBLE_VALUE_EXPECTED = 0x26,
+ BINDINGS_STRING_VALUE_EXPECTED = 0x27,
+ BINDINGS_STRING8_VALUE_EXPECTED = 0x28,
+ BINDINGS_BINARY_VALUE_EXPECTED = 0x29,
};
// A status value with position that can be copied. The default status
@@ -419,6 +428,17 @@ class CBORTokenizer {
span<uint8_t> GetBinary() const;
// To be called only if ::TokenTag() == CBORTokenTag::ENVELOPE.
+  // Returns the envelope including its payload; that is, a message which
+ // can be passed to the CBORTokenizer constructor, which will
+ // then see the envelope token first (looking at it a second time,
+ // basically).
+ span<uint8_t> GetEnvelope() const;
+
+ // To be called only if ::TokenTag() == CBORTokenTag::ENVELOPE.
+ // Returns only the payload inside the envelope, e.g., a map
+ // or an array. This is not a complete message by our
+ // IsCBORMessage definition, since it doesn't include the
+ // enclosing envelope (the header, basically).
span<uint8_t> GetEnvelopeContents() const;
private:
diff --git a/deps/v8/third_party/inspector_protocol/encoding/encoding_test.cc b/deps/v8/third_party/inspector_protocol/encoding/encoding_test.cc
index a36b200bad..005e9dcf17 100644
--- a/deps/v8/third_party/inspector_protocol/encoding/encoding_test.cc
+++ b/deps/v8/third_party/inspector_protocol/encoding/encoding_test.cc
@@ -688,6 +688,71 @@ TEST(EncodeDecodeDoubleTest, RoundtripsAdditionalExamples) {
}
}
+TEST(EncodeDecodeEnvelopesTest, MessageWithNestingAndEnvelopeContentsAccess) {
+ // This encodes and decodes the following message, which has some nesting
+ // and therefore envelopes.
+ // { "inner": { "foo" : "bar" } }
+ // The decoding is done with the Tokenizer,
+ // and we test both ::GetEnvelopeContents and GetEnvelope here.
+ std::vector<uint8_t> message;
+ EnvelopeEncoder envelope;
+ envelope.EncodeStart(&message);
+ size_t pos_after_header = message.size();
+ message.push_back(EncodeIndefiniteLengthMapStart());
+ EncodeString8(SpanFrom("inner"), &message);
+ size_t pos_inside_inner = message.size();
+ EnvelopeEncoder inner_envelope;
+ inner_envelope.EncodeStart(&message);
+ size_t pos_inside_inner_contents = message.size();
+ message.push_back(EncodeIndefiniteLengthMapStart());
+ EncodeString8(SpanFrom("foo"), &message);
+ EncodeString8(SpanFrom("bar"), &message);
+ message.push_back(EncodeStop());
+ size_t pos_after_inner = message.size();
+ inner_envelope.EncodeStop(&message);
+ message.push_back(EncodeStop());
+ envelope.EncodeStop(&message);
+
+ CBORTokenizer tokenizer(SpanFrom(message));
+ ASSERT_EQ(CBORTokenTag::ENVELOPE, tokenizer.TokenTag());
+ EXPECT_EQ(message.size(), tokenizer.GetEnvelope().size());
+ EXPECT_EQ(message.data(), tokenizer.GetEnvelope().data());
+ EXPECT_EQ(message.data() + pos_after_header,
+ tokenizer.GetEnvelopeContents().data());
+ EXPECT_EQ(message.size() - pos_after_header,
+ tokenizer.GetEnvelopeContents().size());
+ tokenizer.EnterEnvelope();
+ ASSERT_EQ(CBORTokenTag::MAP_START, tokenizer.TokenTag());
+ tokenizer.Next();
+ ASSERT_EQ(CBORTokenTag::STRING8, tokenizer.TokenTag());
+ EXPECT_EQ("inner", std::string(tokenizer.GetString8().begin(),
+ tokenizer.GetString8().end()));
+ tokenizer.Next();
+ ASSERT_EQ(CBORTokenTag::ENVELOPE, tokenizer.TokenTag());
+ EXPECT_EQ(message.data() + pos_inside_inner, tokenizer.GetEnvelope().data());
+ EXPECT_EQ(pos_after_inner - pos_inside_inner, tokenizer.GetEnvelope().size());
+ EXPECT_EQ(message.data() + pos_inside_inner_contents,
+ tokenizer.GetEnvelopeContents().data());
+ EXPECT_EQ(pos_after_inner - pos_inside_inner_contents,
+ tokenizer.GetEnvelopeContents().size());
+ tokenizer.EnterEnvelope();
+ ASSERT_EQ(CBORTokenTag::MAP_START, tokenizer.TokenTag());
+ tokenizer.Next();
+ ASSERT_EQ(CBORTokenTag::STRING8, tokenizer.TokenTag());
+ EXPECT_EQ("foo", std::string(tokenizer.GetString8().begin(),
+ tokenizer.GetString8().end()));
+ tokenizer.Next();
+ ASSERT_EQ(CBORTokenTag::STRING8, tokenizer.TokenTag());
+ EXPECT_EQ("bar", std::string(tokenizer.GetString8().begin(),
+ tokenizer.GetString8().end()));
+ tokenizer.Next();
+ ASSERT_EQ(CBORTokenTag::STOP, tokenizer.TokenTag());
+ tokenizer.Next();
+ ASSERT_EQ(CBORTokenTag::STOP, tokenizer.TokenTag());
+ tokenizer.Next();
+ ASSERT_EQ(CBORTokenTag::DONE, tokenizer.TokenTag());
+}
+
// =============================================================================
// cbor::NewCBOREncoder - for encoding from a streaming parser
// =============================================================================
diff --git a/deps/v8/third_party/inspector_protocol/lib/DispatcherBase_cpp.template b/deps/v8/third_party/inspector_protocol/lib/DispatcherBase_cpp.template
index 11843f4330..84c3efd3a0 100644
--- a/deps/v8/third_party/inspector_protocol/lib/DispatcherBase_cpp.template
+++ b/deps/v8/third_party/inspector_protocol/lib/DispatcherBase_cpp.template
@@ -302,15 +302,21 @@ void UberDispatcher::dispatch(int callId, const String& in_method, std::unique_p
UberDispatcher::~UberDispatcher() = default;
// static
-std::unique_ptr<InternalResponse> InternalResponse::createResponse(int callId, std::unique_ptr<Serializable> params)
+std::unique_ptr<Serializable> InternalResponse::createResponse(int callId, std::unique_ptr<Serializable> params)
{
- return std::unique_ptr<InternalResponse>(new InternalResponse(callId, String(), std::move(params)));
+ return std::unique_ptr<Serializable>(new InternalResponse(callId, String(), std::move(params)));
}
// static
-std::unique_ptr<InternalResponse> InternalResponse::createNotification(const String& notification, std::unique_ptr<Serializable> params)
+std::unique_ptr<Serializable> InternalResponse::createNotification(const String& notification, std::unique_ptr<Serializable> params)
{
- return std::unique_ptr<InternalResponse>(new InternalResponse(0, notification, std::move(params)));
+ return std::unique_ptr<Serializable>(new InternalResponse(0, notification, std::move(params)));
+}
+
+// static
+std::unique_ptr<Serializable> InternalResponse::createErrorResponse(int callId, DispatchResponse::ErrorCode code, const String& message)
+{
+ return ProtocolError::createErrorResponse(callId, code, message, nullptr);
}
String InternalResponse::serializeToJSON()
diff --git a/deps/v8/third_party/inspector_protocol/lib/DispatcherBase_h.template b/deps/v8/third_party/inspector_protocol/lib/DispatcherBase_h.template
index 4aa0688adb..3862fb9a73 100644
--- a/deps/v8/third_party/inspector_protocol/lib/DispatcherBase_h.template
+++ b/deps/v8/third_party/inspector_protocol/lib/DispatcherBase_h.template
@@ -128,8 +128,9 @@ private:
class InternalResponse : public Serializable {
PROTOCOL_DISALLOW_COPY(InternalResponse);
public:
- static std::unique_ptr<InternalResponse> createResponse(int callId, std::unique_ptr<Serializable> params);
- static std::unique_ptr<InternalResponse> createNotification(const String& notification, std::unique_ptr<Serializable> params = nullptr);
+ static std::unique_ptr<Serializable> createResponse(int callId, std::unique_ptr<Serializable> params);
+ static std::unique_ptr<Serializable> createNotification(const String& notification, std::unique_ptr<Serializable> params = nullptr);
+ static std::unique_ptr<Serializable> createErrorResponse(int callId, DispatchResponse::ErrorCode code, const String& message);
String serializeToJSON() override;
std::vector<uint8_t> serializeToBinary() override;
diff --git a/deps/v8/third_party/inspector_protocol/templates/TypeBuilder_cpp.template b/deps/v8/third_party/inspector_protocol/templates/TypeBuilder_cpp.template
index 982e2c61b8..b1c3ab74e3 100644
--- a/deps/v8/third_party/inspector_protocol/templates/TypeBuilder_cpp.template
+++ b/deps/v8/third_party/inspector_protocol/templates/TypeBuilder_cpp.template
@@ -385,7 +385,6 @@ void DispatcherImpl::{{command.name}}(int callId, const String& method, const Pr
{% endif %}
return;
{% else %}
- std::unique_ptr<DispatcherBase::WeakPtr> weak = weakPtr();
std::unique_ptr<{{command_name_title}}CallbackImpl> callback(new {{command.name | to_title_case}}CallbackImpl(weakPtr(), callId, method, message));
m_backend->{{command.name | to_method_case}}(
{%- for property in command.parameters -%}
diff --git a/deps/v8/third_party/v8/builtins/array-sort.tq b/deps/v8/third_party/v8/builtins/array-sort.tq
index 04184d9676..5a26bf0c64 100644
--- a/deps/v8/third_party/v8/builtins/array-sort.tq
+++ b/deps/v8/third_party/v8/builtins/array-sort.tq
@@ -297,7 +297,6 @@ namespace array {
transitioning builtin Delete<ElementsAccessor: type>(
context: Context, sortState: SortState, index: Smi): Smi {
const receiver = sortState.receiver;
- if (!HasProperty_Inline(receiver, index)) return kSuccess;
DeleteProperty(receiver, index, kStrict);
return kSuccess;
}
diff --git a/deps/v8/tools/clusterfuzz/toolchain/BUILD.gn b/deps/v8/tools/clusterfuzz/toolchain/BUILD.gn
new file mode 100644
index 0000000000..ddcb4e1ad2
--- /dev/null
+++ b/deps/v8/tools/clusterfuzz/toolchain/BUILD.gn
@@ -0,0 +1,15 @@
+# Copyright 2019 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import("//build/toolchain/gcc_toolchain.gni")
+
+# Fake toolchain to enable build output for a pointer-compression-comparison
+# build in a nested build sub-directory. We toggle pointer compression when
+# this toolchain is used in v8/BUILD.gn.
+clang_toolchain("clang_x64_pointer_compression") {
+ toolchain_args = {
+ current_cpu = "x64"
+ current_os = "linux"
+ }
+}
diff --git a/deps/v8/tools/clusterfuzz/v8_foozzie.py b/deps/v8/tools/clusterfuzz/v8_foozzie.py
index ff481e9370..3b05d3829c 100755
--- a/deps/v8/tools/clusterfuzz/v8_foozzie.py
+++ b/deps/v8/tools/clusterfuzz/v8_foozzie.py
@@ -213,14 +213,15 @@ def parse_args():
assert os.path.exists(options.first_d8)
assert os.path.exists(options.second_d8)
+ # Ensure we make a sane comparison.
+ if (options.first_d8 == options.second_d8 and
+ options.first_config == options.second_config):
+ parser.error('Need either executable or config difference.')
+
# Infer architecture from build artifacts.
options.first_arch = infer_arch(options.first_d8)
options.second_arch = infer_arch(options.second_d8)
- # Ensure we make a sane comparison.
- if (options.first_arch == options.second_arch and
- options.first_config == options.second_config):
- parser.error('Need either arch or config difference.')
assert options.first_arch in SUPPORTED_ARCHS
assert options.second_arch in SUPPORTED_ARCHS
assert options.first_config in CONFIGS
diff --git a/deps/v8/tools/clusterfuzz/v8_foozzie_test.py b/deps/v8/tools/clusterfuzz/v8_foozzie_test.py
index 43b65e850b..356f79a1c3 100755
--- a/deps/v8/tools/clusterfuzz/v8_foozzie_test.py
+++ b/deps/v8/tools/clusterfuzz/v8_foozzie_test.py
@@ -38,10 +38,11 @@ class ConfigTest(unittest.TestCase):
return 0.5
self.assertEqual(
[
- '--first-config=ignition',
+ '--first-config=ignition_no_ic',
'--second-config=ignition_turbo',
'--second-d8=d8',
'--second-config-extra-flags=--stress-scavenge=100',
+ '--second-config-extra-flags=--no-regexp-tier-up',
],
v8_fuzz_config.Config('foo', Rng(), 42).choose_foozzie_flags(),
)
diff --git a/deps/v8/tools/clusterfuzz/v8_fuzz_config.py b/deps/v8/tools/clusterfuzz/v8_fuzz_config.py
index 0dcacf216b..92bf0dd3aa 100644
--- a/deps/v8/tools/clusterfuzz/v8_fuzz_config.py
+++ b/deps/v8/tools/clusterfuzz/v8_fuzz_config.py
@@ -11,16 +11,18 @@ FOOZZIE_EXPERIMENTS = [
[10, 'ignition', 'jitless', 'd8'],
[10, 'ignition', 'slow_path', 'd8'],
[5, 'ignition', 'slow_path_opt', 'd8'],
- [26, 'ignition', 'ignition_turbo', 'd8'],
+ [25, 'ignition', 'ignition_turbo', 'd8'],
[2, 'ignition_no_ic', 'ignition_turbo', 'd8'],
[2, 'ignition', 'ignition_turbo_no_ic', 'd8'],
- [18, 'ignition', 'ignition_turbo_opt', 'd8'],
+ [15, 'ignition', 'ignition_turbo_opt', 'd8'],
[2, 'ignition_no_ic', 'ignition_turbo_opt', 'd8'],
- [5, 'ignition_turbo_opt', 'ignition_turbo_opt', 'clang_x86/d8'],
- [5, 'ignition_turbo', 'ignition_turbo', 'clang_x86/d8'],
- [5, 'ignition', 'ignition', 'clang_x86/d8'],
- [5, 'ignition', 'ignition', 'clang_x64_v8_arm64/d8'],
- [5, 'ignition', 'ignition', 'clang_x86_v8_arm/d8'],
+ [4, 'ignition_turbo_opt', 'ignition_turbo_opt', 'clang_x64_pointer_compression/d8'],
+ [5, 'ignition_turbo', 'ignition_turbo', 'clang_x64_pointer_compression/d8'],
+ [4, 'ignition_turbo_opt', 'ignition_turbo_opt', 'clang_x86/d8'],
+ [4, 'ignition_turbo', 'ignition_turbo', 'clang_x86/d8'],
+ [4, 'ignition', 'ignition', 'clang_x86/d8'],
+ [4, 'ignition', 'ignition', 'clang_x64_v8_arm64/d8'],
+ [4, 'ignition', 'ignition', 'clang_x86_v8_arm/d8'],
]
# Additional flag experiments. List of tuples like
@@ -37,6 +39,11 @@ ADDITIONAL_FLAGS = [
(0.01, '--thread-pool-size=4'),
(0.01, '--thread-pool-size=8'),
(0.1, '--interrupt-budget=1000'),
+ (0.25, '--future'),
+ (0.2, '--no-regexp-tier-up'),
+ (0.1, '--regexp-interpret-all'),
+ (0.1, '--regexp-tier-up-ticks=10'),
+ (0.1, '--regexp-tier-up-ticks=100'),
]
class Config(object):
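For context on the renumbered experiments: the first column of each FOOZZIE_EXPERIMENTS entry is a selection weight, and the rebalanced column still sums to 100. A minimal sketch of equivalent weighted sampling (the real selection logic is Python in this file; the C++ below only illustrates the mechanism):

```cpp
#include <cstdio>
#include <random>
#include <vector>

int main() {
  // Weights mirroring a subset of the entries above (illustrative).
  std::vector<int> weights = {10, 10, 5, 25, 2, 2, 15};
  std::mt19937 rng(42);  // the fuzzer derives randomness per test case instead
  std::discrete_distribution<int> pick_experiment(weights.begin(),
                                                  weights.end());
  std::printf("chosen experiment index: %d\n", pick_experiment(rng));
  return 0;
}
```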
diff --git a/deps/v8/tools/debug_helper/BUILD.gn b/deps/v8/tools/debug_helper/BUILD.gn
index c81fddc9e5..2fe5f0d8be 100644
--- a/deps/v8/tools/debug_helper/BUILD.gn
+++ b/deps/v8/tools/debug_helper/BUILD.gn
@@ -100,5 +100,8 @@ v8_component("v8_debug_helper") {
configs += [ "//third_party/icu:icu_config" ]
}
+ remove_configs = [ "//build/config/compiler:no_rtti" ]
+ configs += [ "//build/config/compiler:rtti" ]
+
public_configs = [ ":external_config" ]
}
diff --git a/deps/v8/tools/debug_helper/debug-helper-internal.cc b/deps/v8/tools/debug_helper/debug-helper-internal.cc
index ee5629b438..597ea7a639 100644
--- a/deps/v8/tools/debug_helper/debug-helper-internal.cc
+++ b/deps/v8/tools/debug_helper/debug-helper-internal.cc
@@ -12,15 +12,14 @@ namespace v8_debug_helper_internal {
bool IsPointerCompressed(uintptr_t address) {
#if COMPRESS_POINTERS_BOOL
- STATIC_ASSERT(i::kPtrComprHeapReservationSize == uintptr_t{1} << 32);
- intptr_t signed_address = static_cast<intptr_t>(address);
- return signed_address >= INT32_MIN && signed_address <= INT32_MAX;
+ return address < i::kPtrComprHeapReservationSize;
#else
return false;
#endif
}
-uintptr_t Decompress(uintptr_t address, uintptr_t any_uncompressed_ptr) {
+uintptr_t EnsureDecompressed(uintptr_t address,
+ uintptr_t any_uncompressed_ptr) {
if (!COMPRESS_POINTERS_BOOL || !IsPointerCompressed(address)) return address;
return i::DecompressTaggedAny(any_uncompressed_ptr,
static_cast<i::Tagged_t>(address));
@@ -55,4 +54,8 @@ void TqObject::Visit(TqObjectVisitor* visitor) const {
visitor->VisitObject(this);
}
+bool TqObject::IsSuperclassOf(const TqObject* other) const {
+ return GetName() != other->GetName();
+}
+
} // namespace v8_debug_helper_internal
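The new TqObject::IsSuperclassOf is a deliberately weak base implementation: anything with a different name is treated as potentially more specific, and subclasses are expected to override it with a real check. A self-contained sketch of how such overrides could behave (the class bodies and the dynamic_cast approach are assumptions, though the RTTI switch added to tools/debug_helper/BUILD.gn above points that way):

```cpp
// Simplified stand-ins for the generated Tq* hierarchy (illustration only).
class TqObject {
 public:
  virtual ~TqObject() = default;
  virtual const char* GetName() const { return "v8::internal::Object"; }
  // Weak base version, as added above: anything differently-named might be
  // a more specific type.
  virtual bool IsSuperclassOf(const TqObject* other) const {
    return GetName() != other->GetName();
  }
};

class TqHeapObject : public TqObject {
 public:
  const char* GetName() const override { return "v8::internal::HeapObject"; }
  bool IsSuperclassOf(const TqObject* other) const override {
    // Hypothetical precise override: |other| must be a HeapObject, but not
    // exactly a HeapObject. Needs RTTI, which BUILD.gn above turns on.
    return dynamic_cast<const TqHeapObject*>(other) != nullptr &&
           GetName() != other->GetName();
  }
};

class TqString : public TqHeapObject {
 public:
  const char* GetName() const override { return "v8::internal::String"; }
  bool IsSuperclassOf(const TqObject* other) const override {
    return dynamic_cast<const TqString*>(other) != nullptr &&
           GetName() != other->GetName();
  }
};

int main() {
  TqHeapObject heap_object;
  TqString string;
  // A String refines HeapObject, but not vice versa.
  bool ok = heap_object.IsSuperclassOf(&string) &&
            !string.IsSuperclassOf(&heap_object);
  return ok ? 0 : 1;
}
```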
diff --git a/deps/v8/tools/debug_helper/debug-helper-internal.h b/deps/v8/tools/debug_helper/debug-helper-internal.h
index 82506c0941..e2161e25ba 100644
--- a/deps/v8/tools/debug_helper/debug-helper-internal.h
+++ b/deps/v8/tools/debug_helper/debug-helper-internal.h
@@ -10,6 +10,7 @@
#ifndef V8_TOOLS_DEBUG_HELPER_DEBUG_HELPER_INTERNAL_H_
#define V8_TOOLS_DEBUG_HELPER_DEBUG_HELPER_INTERNAL_H_
+#include <memory>
#include <string>
#include <vector>
@@ -27,6 +28,7 @@ struct Value {
TValue value;
};
+// Internal version of API class v8::debug_helper::ObjectProperty.
class ObjectProperty {
public:
inline ObjectProperty(std::string name, std::string type,
@@ -68,15 +70,20 @@ struct ObjectPropertiesResultExtended : public d::ObjectPropertiesResult {
ObjectPropertiesResultInternal* base; // Back reference for cleanup
};
+// Internal version of API class v8::debug_helper::ObjectPropertiesResult.
class ObjectPropertiesResult {
public:
- inline ObjectPropertiesResult(
+ ObjectPropertiesResult(d::TypeCheckResult type_check_result,
+ std::string brief, std::string type)
+ : type_check_result_(type_check_result), brief_(brief), type_(type) {}
+ ObjectPropertiesResult(
d::TypeCheckResult type_check_result, std::string brief, std::string type,
- std::vector<std::unique_ptr<ObjectProperty>> properties)
- : type_check_result_(type_check_result),
- brief_(brief),
- type_(type),
- properties_(std::move(properties)) {}
+ std::vector<std::unique_ptr<ObjectProperty>> properties,
+ std::vector<std::string> guessed_types)
+ : ObjectPropertiesResult(type_check_result, brief, type) {
+ properties_ = std::move(properties);
+ guessed_types_ = std::move(guessed_types);
+ }
inline void Prepend(const char* prefix) { brief_ = prefix + brief_; }
@@ -85,11 +92,17 @@ class ObjectPropertiesResult {
public_view_.brief = brief_.c_str();
public_view_.type = type_.c_str();
public_view_.num_properties = properties_.size();
- properties_raw_.resize(0);
+ properties_raw_.clear();
for (const auto& property : properties_) {
properties_raw_.push_back(property->GetPublicView());
}
public_view_.properties = properties_raw_.data();
+ public_view_.num_guessed_types = guessed_types_.size();
+ guessed_types_raw_.clear();
+ for (const auto& guess : guessed_types_) {
+ guessed_types_raw_.push_back(guess.c_str());
+ }
+ public_view_.guessed_types = guessed_types_raw_.data();
public_view_.base = this;
return &public_view_;
}
@@ -99,9 +112,11 @@ class ObjectPropertiesResult {
std::string brief_;
std::string type_;
std::vector<std::unique_ptr<ObjectProperty>> properties_;
+ std::vector<std::string> guessed_types_;
ObjectPropertiesResultExtended public_view_;
std::vector<d::ObjectProperty*> properties_raw_;
+ std::vector<const char*> guessed_types_raw_;
};
class TqObjectVisitor;
@@ -116,13 +131,24 @@ class TqObject {
d::MemoryAccessor accessor) const;
virtual const char* GetName() const;
virtual void Visit(TqObjectVisitor* visitor) const;
+ virtual bool IsSuperclassOf(const TqObject* other) const;
protected:
uintptr_t address_;
};
+// In ptr-compr builds, returns whether the address looks like a compressed
+// pointer (zero-extended from 32 bits). Otherwise returns false because no
+// pointers can be compressed.
bool IsPointerCompressed(uintptr_t address);
-uintptr_t Decompress(uintptr_t address, uintptr_t any_uncompressed_address);
+
+// If the given address looks like a compressed pointer, returns a decompressed
+// representation of it. Otherwise returns the address unmodified.
+uintptr_t EnsureDecompressed(uintptr_t address,
+ uintptr_t any_uncompressed_address);
+
+// Converts the MemoryAccessResult from attempting to read an array's length
+// into the corresponding PropertyKind for the array.
d::PropertyKind GetArrayKind(d::MemoryAccessResult mem_result);
} // namespace v8_debug_helper_internal
diff --git a/deps/v8/tools/debug_helper/debug-helper.h b/deps/v8/tools/debug_helper/debug-helper.h
index 9bbec76c7c..7d75843bf6 100644
--- a/deps/v8/tools/debug_helper/debug-helper.h
+++ b/deps/v8/tools/debug_helper/debug-helper.h
@@ -46,6 +46,7 @@ enum class TypeCheckResult {
kSmi,
kWeakRef,
kUsedMap,
+ kKnownMapPointer,
kUsedTypeHint,
// Failure cases:
@@ -98,6 +99,16 @@ struct ObjectPropertiesResult {
const char* type; // Runtime type of the object.
size_t num_properties;
ObjectProperty** properties;
+
+ // If not all relevant memory is available, GetObjectProperties may respond
+ // with a technically correct but uninteresting type such as HeapObject, and
+ // use other heuristics to make reasonable guesses about what specific type
+ // the object actually is. You may request data about the same object again
+ // using any of these guesses as the type hint, but the results should be
+ // formatted to the user in a way that clearly indicates that they're only
+ // guesses.
+ size_t num_guessed_types;
+ const char** guessed_types;
};
// Copies byte_count bytes of memory from the given address in the debuggee to
@@ -109,7 +120,7 @@ typedef MemoryAccessResult (*MemoryAccessor)(uintptr_t address,
// Additional data that can help GetObjectProperties to be more accurate. Any
// fields you don't know can be set to zero and this library will do the best it
// can with the information available.
-struct Roots {
+struct HeapAddresses {
// Beginning of allocated space for various kinds of data. These can help us
// to detect certain common objects that are placed in memory during startup.
// These values might be provided via name-value pairs in CrashPad dumps.
@@ -119,9 +130,9 @@ struct Roots {
// key stored in v8::internal::Isolate::isolate_key_.
// 2. Get isolate->heap_.map_space_->memory_chunk_list_.front_ and similar for
// old_space_ and read_only_space_.
- uintptr_t map_space;
- uintptr_t old_space;
- uintptr_t read_only_space;
+ uintptr_t map_space_first_page;
+ uintptr_t old_space_first_page;
+ uintptr_t read_only_space_first_page;
// Any valid heap pointer address. On platforms where pointer compression is
// enabled, this can allow us to get data from compressed pointers even if the
@@ -139,7 +150,8 @@ extern "C" {
V8_DEBUG_HELPER_EXPORT v8::debug_helper::ObjectPropertiesResult*
_v8_debug_helper_GetObjectProperties(
uintptr_t object, v8::debug_helper::MemoryAccessor memory_accessor,
- const v8::debug_helper::Roots& heap_roots, const char* type_hint);
+ const v8::debug_helper::HeapAddresses& heap_addresses,
+ const char* type_hint);
V8_DEBUG_HELPER_EXPORT void _v8_debug_helper_Free_ObjectPropertiesResult(
v8::debug_helper::ObjectPropertiesResult* result);
}
@@ -159,16 +171,16 @@ using ObjectPropertiesResultPtr =
// Get information about the given object pointer, which could be:
// - A tagged pointer, strong or weak
// - A cleared weak pointer
-// - A compressed tagged pointer, sign-extended to 64 bits
+// - A compressed tagged pointer, zero-extended to 64 bits
// - A tagged small integer
// The type hint is only used if the object's Map is missing or corrupt. It
// should be the fully-qualified name of a class that inherits from
// v8::internal::Object.
inline ObjectPropertiesResultPtr GetObjectProperties(
uintptr_t object, v8::debug_helper::MemoryAccessor memory_accessor,
- const Roots& heap_roots, const char* type_hint = nullptr) {
+ const HeapAddresses& heap_addresses, const char* type_hint = nullptr) {
return ObjectPropertiesResultPtr(_v8_debug_helper_GetObjectProperties(
- object, memory_accessor, heap_roots, type_hint));
+ object, memory_accessor, heap_addresses, type_hint));
}
} // namespace debug_helper
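A hypothetical caller of the renamed API, tying together HeapAddresses, the type_hint parameter, and the new guessed_types fields (the include path and the accessor argument are assumptions; everything else is declared in this header):

```cpp
#include <cstddef>
#include <cstdint>
#include <cstdio>

#include "tools/debug_helper/debug-helper.h"  // assumed include path

namespace dh = v8::debug_helper;

void InspectObject(uintptr_t tagged_pointer, dh::MemoryAccessor accessor) {
  dh::HeapAddresses heap_addresses = {};  // unknown fields may stay zero
  auto result =
      dh::GetObjectProperties(tagged_pointer, accessor, heap_addresses);
  std::printf("type: %s\n", result->type);
  // When memory was only partially readable, re-query with each guess as the
  // type hint, and present those results clearly marked as guesses.
  for (size_t i = 0; i < result->num_guessed_types; ++i) {
    auto guess = dh::GetObjectProperties(
        tagged_pointer, accessor, heap_addresses, result->guessed_types[i]);
    std::printf("guess: %s\n", guess->type);
  }
}
```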
diff --git a/deps/v8/tools/debug_helper/gen-heap-constants.py b/deps/v8/tools/debug_helper/gen-heap-constants.py
index 0fd575a994..1d81f2e510 100644
--- a/deps/v8/tools/debug_helper/gen-heap-constants.py
+++ b/deps/v8/tools/debug_helper/gen-heap-constants.py
@@ -16,6 +16,9 @@ out = """
#include <cstdint>
#include <string>
+#include "src/common/ptr-compr-inl.h"
+#include "tools/debug_helper/debug-helper-internal.h"
+
namespace v8_debug_helper_internal {
"""
@@ -51,6 +54,22 @@ def iterate_maps(target_space, camel_space_name):
iterate_maps('map_space', 'MapSpace')
iterate_maps('read_only_space', 'ReadOnlySpace')
+out = out + '\nvoid FillInUnknownHeapAddresses(' + \
+ 'd::HeapAddresses* heap_addresses, uintptr_t any_uncompressed_ptr) {\n'
+if (hasattr(v8heapconst, 'HEAP_FIRST_PAGES')): # Only exists in ptr-compr builds.
+ out = out + ' if (heap_addresses->any_heap_pointer == 0) {\n'
+ out = out + ' heap_addresses->any_heap_pointer = any_uncompressed_ptr;\n'
+ out = out + ' }\n'
+ expected_spaces = set(['map_space', 'read_only_space', 'old_space'])
+ for offset, space_name in v8heapconst.HEAP_FIRST_PAGES.items():
+ if (space_name in expected_spaces):
+ out = out + ' if (heap_addresses->' + space_name + '_first_page == 0) {\n'
+ out = out + ' heap_addresses->' + space_name + \
+ '_first_page = i::DecompressTaggedPointer(any_uncompressed_ptr, ' + \
+ str(offset) + ');\n'
+ out = out + ' }\n'
+out = out + '}\n'
+
out = out + '\n}\n'
try:
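On a pointer-compression build, the generated FillInUnknownHeapAddresses fills in any first-page addresses the caller left at zero by decompressing known offsets against a caller-provided uncompressed pointer. A self-contained sketch of the generated shape (the offsets are illustrative placeholders for v8heapconst.HEAP_FIRST_PAGES, and DecompressTaggedPointer is stubbed; the real one lives in src/common/ptr-compr-inl.h):

```cpp
#include <cstdint>

// Mirror of d::HeapAddresses for a standalone sketch.
struct HeapAddresses {
  uintptr_t map_space_first_page = 0;
  uintptr_t old_space_first_page = 0;
  uintptr_t read_only_space_first_page = 0;
  uintptr_t any_heap_pointer = 0;
};

// Stub: combines the 4GB-aligned heap base with a 32-bit offset, roughly
// what the real i::DecompressTaggedPointer does on a 64-bit build.
uintptr_t DecompressTaggedPointer(uintptr_t any_uncompressed_ptr,
                                  uint32_t compressed) {
  return (any_uncompressed_ptr & ~uintptr_t{0xFFFFFFFF}) | compressed;
}

void FillInUnknownHeapAddresses(HeapAddresses* heap_addresses,
                                uintptr_t any_uncompressed_ptr) {
  if (heap_addresses->any_heap_pointer == 0) {
    heap_addresses->any_heap_pointer = any_uncompressed_ptr;
  }
  if (heap_addresses->map_space_first_page == 0) {
    heap_addresses->map_space_first_page =
        DecompressTaggedPointer(any_uncompressed_ptr, 0x40000);
  }
  if (heap_addresses->read_only_space_first_page == 0) {
    heap_addresses->read_only_space_first_page =
        DecompressTaggedPointer(any_uncompressed_ptr, 0x80000);
  }
}
```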
diff --git a/deps/v8/tools/debug_helper/get-object-properties.cc b/deps/v8/tools/debug_helper/get-object-properties.cc
index fbe992c40e..8eeeb84093 100644
--- a/deps/v8/tools/debug_helper/get-object-properties.cc
+++ b/deps/v8/tools/debug_helper/get-object-properties.cc
@@ -34,7 +34,7 @@ namespace v8_debug_helper_internal {
V(Foreign, FOREIGN_TYPE) \
V(FreeSpace, FREE_SPACE_TYPE) \
V(HeapNumber, HEAP_NUMBER_TYPE) \
- V(JSArgumentsObject, JS_ARGUMENTS_TYPE) \
+ V(JSArgumentsObject, JS_ARGUMENTS_OBJECT_TYPE) \
V(JSArray, JS_ARRAY_TYPE) \
V(JSArrayBuffer, JS_ARRAY_BUFFER_TYPE) \
V(JSArrayIterator, JS_ARRAY_ITERATOR_TYPE) \
@@ -52,8 +52,8 @@ namespace v8_debug_helper_internal {
V(JSModuleNamespace, JS_MODULE_NAMESPACE_TYPE) \
V(JSPromise, JS_PROMISE_TYPE) \
V(JSProxy, JS_PROXY_TYPE) \
- V(JSRegExp, JS_REGEXP_TYPE) \
- V(JSRegExpStringIterator, JS_REGEXP_STRING_ITERATOR_TYPE) \
+ V(JSRegExp, JS_REG_EXP_TYPE) \
+ V(JSRegExpStringIterator, JS_REG_EXP_STRING_ITERATOR_TYPE) \
V(JSSet, JS_SET_TYPE) \
V(JSStringIterator, JS_STRING_ITERATOR_TYPE) \
V(JSTypedArray, JS_TYPED_ARRAY_TYPE) \
@@ -71,27 +71,27 @@ namespace v8_debug_helper_internal {
V(PropertyCell, PROPERTY_CELL_TYPE) \
V(SharedFunctionInfo, SHARED_FUNCTION_INFO_TYPE) \
V(Symbol, SYMBOL_TYPE) \
- V(WasmExceptionObject, WASM_EXCEPTION_TYPE) \
- V(WasmGlobalObject, WASM_GLOBAL_TYPE) \
- V(WasmMemoryObject, WASM_MEMORY_TYPE) \
- V(WasmModuleObject, WASM_MODULE_TYPE) \
- V(WasmTableObject, WASM_TABLE_TYPE) \
+ V(WasmExceptionObject, WASM_EXCEPTION_OBJECT_TYPE) \
+ V(WasmGlobalObject, WASM_GLOBAL_OBJECT_TYPE) \
+ V(WasmMemoryObject, WASM_MEMORY_OBJECT_TYPE) \
+ V(WasmModuleObject, WASM_MODULE_OBJECT_TYPE) \
+ V(WasmTableObject, WASM_TABLE_OBJECT_TYPE) \
V(WeakArrayList, WEAK_ARRAY_LIST_TYPE) \
V(WeakCell, WEAK_CELL_TYPE)
#ifdef V8_INTL_SUPPORT
-#define TQ_INSTANCE_TYPES_SINGLE_NOSTRUCTS(V) \
- TQ_INSTANCE_TYPES_SINGLE_BASE(V) \
- V(JSV8BreakIterator, JS_INTL_V8_BREAK_ITERATOR_TYPE) \
- V(JSCollator, JS_INTL_COLLATOR_TYPE) \
- V(JSDateTimeFormat, JS_INTL_DATE_TIME_FORMAT_TYPE) \
- V(JSListFormat, JS_INTL_LIST_FORMAT_TYPE) \
- V(JSLocale, JS_INTL_LOCALE_TYPE) \
- V(JSNumberFormat, JS_INTL_NUMBER_FORMAT_TYPE) \
- V(JSPluralRules, JS_INTL_PLURAL_RULES_TYPE) \
- V(JSRelativeTimeFormat, JS_INTL_RELATIVE_TIME_FORMAT_TYPE) \
- V(JSSegmentIterator, JS_INTL_SEGMENT_ITERATOR_TYPE) \
- V(JSSegmenter, JS_INTL_SEGMENTER_TYPE)
+#define TQ_INSTANCE_TYPES_SINGLE_NOSTRUCTS(V) \
+ TQ_INSTANCE_TYPES_SINGLE_BASE(V) \
+ V(JSV8BreakIterator, JS_V8_BREAK_ITERATOR_TYPE) \
+ V(JSCollator, JS_COLLATOR_TYPE) \
+ V(JSDateTimeFormat, JS_DATE_TIME_FORMAT_TYPE) \
+ V(JSListFormat, JS_LIST_FORMAT_TYPE) \
+ V(JSLocale, JS_LOCALE_TYPE) \
+ V(JSNumberFormat, JS_NUMBER_FORMAT_TYPE) \
+ V(JSPluralRules, JS_PLURAL_RULES_TYPE) \
+ V(JSRelativeTimeFormat, JS_RELATIVE_TIME_FORMAT_TYPE) \
+ V(JSSegmentIterator, JS_SEGMENT_ITERATOR_TYPE) \
+ V(JSSegmenter, JS_SEGMENTER_TYPE)
#else
@@ -99,12 +99,14 @@ namespace v8_debug_helper_internal {
#endif // V8_INTL_SUPPORT
+// Used in the static assertion below.
enum class InstanceTypeCheckersSingle {
#define ENUM_VALUE(ClassName, INSTANCE_TYPE) k##ClassName = i::INSTANCE_TYPE,
INSTANCE_TYPE_CHECKERS_SINGLE(ENUM_VALUE)
#undef ENUM_VALUE
};
+// Verify that the instance type list above stays in sync with the truth.
#define CHECK_VALUE(ClassName, INSTANCE_TYPE) \
static_assert( \
static_cast<i::InstanceType>( \
@@ -117,6 +119,9 @@ TQ_INSTANCE_TYPES_SINGLE_NOSTRUCTS(CHECK_VALUE)
// Adapts one STRUCT_LIST_GENERATOR entry to (Name, NAME) format.
#define STRUCT_INSTANCE_TYPE_ADAPTER(V, NAME, Name, name) V(Name, NAME)
+// Pairs of (ClassName, CLASS_NAME_TYPE) for every instance type that
+// corresponds to a single Torque-defined class. Note that all Struct-derived
+// classes are defined in Torque.
#define TQ_INSTANCE_TYPES_SINGLE(V) \
TQ_INSTANCE_TYPES_SINGLE_NOSTRUCTS(V) \
STRUCT_LIST_GENERATOR(STRUCT_INSTANCE_TYPE_ADAPTER, V)
@@ -147,109 +152,154 @@ struct TypedObject {
TypedObject(d::TypeCheckResult type_check_result,
std::unique_ptr<TqObject> object)
: type_check_result(type_check_result), object(std::move(object)) {}
+
+ // How we discovered the object's type, or why we failed to do so.
d::TypeCheckResult type_check_result;
+
+ // Pointer to some TqObject subclass, representing the most specific known
+ // type for the object.
std::unique_ptr<TqObject> object;
+
+ // Collection of other guesses at more specific types than the one represented
+ // by |object|.
+ std::vector<TypedObject> possible_types;
};
TypedObject GetTypedObjectByHint(uintptr_t address,
std::string type_hint_string) {
-#define TYPE_NAME_CASE(ClassName, ...) \
- if (type_hint_string == "v8::internal::" #ClassName) { \
- return {d::TypeCheckResult::kUsedTypeHint, \
- v8::base::make_unique<Tq##ClassName>(address)}; \
+#define TYPE_NAME_CASE(ClassName, ...) \
+ if (type_hint_string == "v8::internal::" #ClassName) { \
+ return {d::TypeCheckResult::kUsedTypeHint, \
+ std::make_unique<Tq##ClassName>(address)}; \
}
TQ_INSTANCE_TYPES_SINGLE(TYPE_NAME_CASE)
TQ_INSTANCE_TYPES_RANGE(TYPE_NAME_CASE)
+ STRING_CLASS_TYPES(TYPE_NAME_CASE)
#undef TYPE_NAME_CASE
return {d::TypeCheckResult::kUnknownTypeHint,
- v8::base::make_unique<TqHeapObject>(address)};
+ std::make_unique<TqHeapObject>(address)};
}
-TypedObject GetTypedObjectForString(uintptr_t address, i::InstanceType type) {
+TypedObject GetTypedObjectForString(uintptr_t address, i::InstanceType type,
+ d::TypeCheckResult type_source) {
class StringGetDispatcher : public i::AllStatic {
public:
-#define DEFINE_METHOD(ClassName) \
- static inline TypedObject Handle##ClassName(uintptr_t address) { \
- return {d::TypeCheckResult::kUsedMap, \
- v8::base::make_unique<Tq##ClassName>(address)}; \
+#define DEFINE_METHOD(ClassName) \
+ static inline TypedObject Handle##ClassName( \
+ uintptr_t address, d::TypeCheckResult type_source) { \
+ return {type_source, std::make_unique<Tq##ClassName>(address)}; \
}
STRING_CLASS_TYPES(DEFINE_METHOD)
#undef DEFINE_METHOD
- static inline TypedObject HandleInvalidString(uintptr_t address) {
+ static inline TypedObject HandleInvalidString(
+ uintptr_t address, d::TypeCheckResult type_source) {
return {d::TypeCheckResult::kUnknownInstanceType,
- v8::base::make_unique<TqString>(address)};
+ std::make_unique<TqString>(address)};
}
};
return i::StringShape(type)
.DispatchToSpecificTypeWithoutCast<StringGetDispatcher, TypedObject>(
- address);
+ address, type_source);
+}
+
+TypedObject GetTypedObjectByInstanceType(uintptr_t address,
+ i::InstanceType type,
+ d::TypeCheckResult type_source) {
+ switch (type) {
+#define INSTANCE_TYPE_CASE(ClassName, INSTANCE_TYPE) \
+ case i::INSTANCE_TYPE: \
+ return {type_source, std::make_unique<Tq##ClassName>(address)};
+ TQ_INSTANCE_TYPES_SINGLE(INSTANCE_TYPE_CASE)
+#undef INSTANCE_TYPE_CASE
+
+ default:
+
+ // Special case: concrete subtypes of String are not included in the
+ // main instance type list because they use the low bits of the instance
+ // type enum as flags.
+ if (type <= i::LAST_STRING_TYPE) {
+ return GetTypedObjectForString(address, type, type_source);
+ }
+
+#define INSTANCE_RANGE_CASE(ClassName, FIRST_TYPE, LAST_TYPE) \
+ if (type >= i::FIRST_TYPE && type <= i::LAST_TYPE) { \
+ return {type_source, std::make_unique<Tq##ClassName>(address)}; \
+ }
+ TQ_INSTANCE_TYPES_RANGE(INSTANCE_RANGE_CASE)
+#undef INSTANCE_RANGE_CASE
+
+ return {d::TypeCheckResult::kUnknownInstanceType,
+ std::make_unique<TqHeapObject>(address)};
+ }
}
TypedObject GetTypedHeapObject(uintptr_t address, d::MemoryAccessor accessor,
- const char* type_hint) {
- auto heap_object = v8::base::make_unique<TqHeapObject>(address);
+ const char* type_hint,
+ const d::HeapAddresses& heap_addresses) {
+ auto heap_object = std::make_unique<TqHeapObject>(address);
Value<uintptr_t> map_ptr = heap_object->GetMapValue(accessor);
if (map_ptr.validity != d::MemoryAccessResult::kOk) {
+ // If we can't read the Map pointer from the object, then we likely can't
+    // read anything else, so there's no point in attempting to use the
+ // type hint. Just return a failure.
return {map_ptr.validity == d::MemoryAccessResult::kAddressNotValid
? d::TypeCheckResult::kObjectPointerInvalid
: d::TypeCheckResult::kObjectPointerValidButInaccessible,
std::move(heap_object)};
}
+
Value<i::InstanceType> type =
TqMap(map_ptr.value).GetInstanceTypeValue(accessor);
-
if (type.validity == d::MemoryAccessResult::kOk) {
- // Dispatch to the appropriate method for each instance type. After calling
- // the generated method to fetch properties, we can add custom properties.
- switch (type.value) {
-#define INSTANCE_TYPE_CASE(ClassName, INSTANCE_TYPE) \
- case i::INSTANCE_TYPE: \
- return {d::TypeCheckResult::kUsedMap, \
- v8::base::make_unique<Tq##ClassName>(address)};
- TQ_INSTANCE_TYPES_SINGLE(INSTANCE_TYPE_CASE)
-#undef INSTANCE_TYPE_CASE
-
- default:
+ return GetTypedObjectByInstanceType(address, type.value,
+ d::TypeCheckResult::kUsedMap);
+ }
- // Special case: concrete subtypes of String are not included in the
- // main instance type list because they use the low bits of the instance
- // type enum as flags.
- if (type.value <= i::LAST_STRING_TYPE) {
- return GetTypedObjectForString(address, type.value);
- }
+ // We can't read the Map, so check whether it is in the list of known Maps,
+ // as another way to get its instance type.
+ KnownInstanceType known_map_type =
+ FindKnownMapInstanceType(map_ptr.value, heap_addresses);
+ if (known_map_type.confidence == KnownInstanceType::Confidence::kHigh) {
+ DCHECK_EQ(known_map_type.types.size(), 1);
+ return GetTypedObjectByInstanceType(address, known_map_type.types[0],
+ d::TypeCheckResult::kKnownMapPointer);
+ }
-#define INSTANCE_RANGE_CASE(ClassName, FIRST_TYPE, LAST_TYPE) \
- if (type.value >= i::FIRST_TYPE && type.value <= i::LAST_TYPE) { \
- return {d::TypeCheckResult::kUsedMap, \
- v8::base::make_unique<Tq##ClassName>(address)}; \
+ // Create a basic result that says that the object is a HeapObject and we
+ // couldn't read its Map.
+ TypedObject result = {
+ type.validity == d::MemoryAccessResult::kAddressNotValid
+ ? d::TypeCheckResult::kMapPointerInvalid
+ : d::TypeCheckResult::kMapPointerValidButInaccessible,
+ std::move(heap_object)};
+
+ // If a type hint is available, it may give us something more specific than
+ // HeapObject. However, a type hint of Object would be even less specific, so
+ // we'll only use the type hint if it's a subclass of HeapObject.
+ if (type_hint != nullptr) {
+ TypedObject hint_result = GetTypedObjectByHint(address, type_hint);
+ if (result.object->IsSuperclassOf(hint_result.object.get())) {
+ result = std::move(hint_result);
+ }
}
- TQ_INSTANCE_TYPES_RANGE(INSTANCE_RANGE_CASE)
-#undef INSTANCE_RANGE_CASE
- return {d::TypeCheckResult::kUnknownInstanceType,
- std::move(heap_object)};
- break;
+ // If low-confidence results are available from known Maps, include them only
+ // if they don't contradict the primary type and would provide some additional
+ // specificity.
+ for (const i::InstanceType type_guess : known_map_type.types) {
+ TypedObject guess_result = GetTypedObjectByInstanceType(
+ address, type_guess, d::TypeCheckResult::kKnownMapPointer);
+ if (result.object->IsSuperclassOf(guess_result.object.get())) {
+ result.possible_types.push_back(std::move(guess_result));
}
- } else if (type_hint != nullptr) {
- // Try to use the provided type hint, since the real instance type is
- // unavailable.
- return GetTypedObjectByHint(address, type_hint);
- } else {
- // TODO(v8:9376): Use known maps here. If known map is just a guess (because
- // root pointers weren't provided), then create a synthetic property with
- // the more specific type. Then the caller could presumably ask us again
- // with the type hint we provided. Otherwise, just go ahead and use it to
- // generate properties.
- return {type.validity == d::MemoryAccessResult::kAddressNotValid
- ? d::TypeCheckResult::kMapPointerInvalid
- : d::TypeCheckResult::kMapPointerValidButInaccessible,
- std::move(heap_object)};
}
+
+ return result;
}
#undef STRUCT_INSTANCE_TYPE_ADAPTER
@@ -261,8 +311,13 @@ TypedObject GetTypedHeapObject(uintptr_t address, d::MemoryAccessor accessor,
// An object visitor that accumulates the first few characters of a string.
class ReadStringVisitor : public TqObjectVisitor {
public:
- ReadStringVisitor(d::MemoryAccessor accessor)
- : accessor_(accessor), index_(0), limit_(INT32_MAX), done_(false) {}
+ ReadStringVisitor(d::MemoryAccessor accessor,
+ const d::HeapAddresses& heap_addresses)
+ : accessor_(accessor),
+ heap_addresses_(heap_addresses),
+ index_(0),
+ limit_(INT32_MAX),
+ done_(false) {}
// Returns the result as UTF-8 once visiting is complete.
std::string GetString() {
@@ -301,7 +356,9 @@ class ReadStringVisitor : public TqObjectVisitor {
void VisitConsString(const TqConsString* object) override {
uintptr_t first_address = GetOrFinish(object->GetFirstValue(accessor_));
if (done_) return;
- auto first = GetTypedHeapObject(first_address, accessor_, nullptr).object;
+ auto first =
+ GetTypedHeapObject(first_address, accessor_, nullptr, heap_addresses_)
+ .object;
first->Visit(this);
if (done_) return;
int32_t first_length = GetOrFinish(
@@ -309,7 +366,8 @@ class ReadStringVisitor : public TqObjectVisitor {
uintptr_t second = GetOrFinish(object->GetSecondValue(accessor_));
if (done_) return;
IndexModifier modifier(this, -first_length, -first_length);
- GetTypedHeapObject(second, accessor_, nullptr).object->Visit(this);
+ GetTypedHeapObject(second, accessor_, nullptr, heap_addresses_)
+ .object->Visit(this);
}
void VisitSlicedString(const TqSlicedString* object) override {
@@ -320,13 +378,15 @@ class ReadStringVisitor : public TqObjectVisitor {
if (done_) return;
int32_t limit_adjust = offset + length - limit_;
IndexModifier modifier(this, offset, limit_adjust < 0 ? limit_adjust : 0);
- GetTypedHeapObject(parent, accessor_, nullptr).object->Visit(this);
+ GetTypedHeapObject(parent, accessor_, nullptr, heap_addresses_)
+ .object->Visit(this);
}
void VisitThinString(const TqThinString* object) override {
uintptr_t actual = GetOrFinish(object->GetActualValue(accessor_));
if (done_) return;
- GetTypedHeapObject(actual, accessor_, nullptr).object->Visit(this);
+ GetTypedHeapObject(actual, accessor_, nullptr, heap_addresses_)
+ .object->Visit(this);
}
void VisitExternalString(const TqExternalString* object) override {
@@ -398,6 +458,7 @@ class ReadStringVisitor : public TqObjectVisitor {
std::u16string string_; // Result string.
d::MemoryAccessor accessor_;
+ const d::HeapAddresses& heap_addresses_;
int32_t index_; // Index of next char to read.
int32_t limit_; // Don't read past this index (set by SlicedString).
bool done_; // Whether to stop further work.
@@ -406,14 +467,15 @@ class ReadStringVisitor : public TqObjectVisitor {
// An object visitor that adds extra debugging information for some types.
class AddInfoVisitor : public TqObjectVisitor {
public:
- AddInfoVisitor(const std::string& brief, d::MemoryAccessor accessor)
- : accessor_(accessor), brief_(brief) {}
+ AddInfoVisitor(const std::string& brief, d::MemoryAccessor accessor,
+ const d::HeapAddresses& heap_addresses)
+ : accessor_(accessor), brief_(brief), heap_addresses_(heap_addresses) {}
// Returns the brief object description, once visiting is complete.
const std::string& GetBrief() { return brief_; }
void VisitString(const TqString* object) override {
- ReadStringVisitor visitor(accessor_);
+ ReadStringVisitor visitor(accessor_, heap_addresses_);
object->Visit(&visitor);
if (!brief_.empty()) brief_ += " ";
brief_ += "\"" + visitor.GetString() + "\"";
@@ -422,12 +484,18 @@ class AddInfoVisitor : public TqObjectVisitor {
private:
d::MemoryAccessor accessor_;
std::string brief_;
+ const d::HeapAddresses& heap_addresses_;
};
-std::unique_ptr<ObjectPropertiesResult> GetHeapObjectProperties(
+std::unique_ptr<ObjectPropertiesResult> GetHeapObjectPropertiesNotCompressed(
uintptr_t address, d::MemoryAccessor accessor, const char* type_hint,
- std::string brief) {
- TypedObject typed = GetTypedHeapObject(address, accessor, type_hint);
+ const d::HeapAddresses& heap_addresses) {
+ // Regardless of whether we can read the object itself, maybe we can find its
+ // pointer in the list of known objects.
+ std::string brief = FindKnownObject(address, heap_addresses);
+
+ TypedObject typed =
+ GetTypedHeapObject(address, accessor, type_hint, heap_addresses);
// TODO(v8:9376): Many object types need additional data that is not included
// in their Torque layout definitions. For example, JSObject has an array of
@@ -435,59 +503,62 @@ std::unique_ptr<ObjectPropertiesResult> GetHeapObjectProperties(
// should be represented as an array in this response. If the relevant memory
// is available, we should instead represent those properties (and any out-of-
// object properties) using their JavaScript property names.
- AddInfoVisitor visitor(brief, accessor);
+ AddInfoVisitor visitor(brief, accessor, heap_addresses);
typed.object->Visit(&visitor);
brief = visitor.GetBrief();
brief = AppendAddressAndType(brief, address, typed.object->GetName());
- return v8::base::make_unique<ObjectPropertiesResult>(
+ // Convert the low-confidence guessed types to a list of strings as expected
+ // for the response.
+ std::vector<std::string> guessed_types;
+ for (const auto& guess : typed.possible_types) {
+ guessed_types.push_back(guess.object->GetName());
+ }
+
+ return std::make_unique<ObjectPropertiesResult>(
typed.type_check_result, brief, typed.object->GetName(),
- typed.object->GetProperties(accessor));
+ typed.object->GetProperties(accessor), std::move(guessed_types));
}
-std::unique_ptr<ObjectPropertiesResult> GetHeapObjectProperties(
- uintptr_t address, d::MemoryAccessor memory_accessor, const d::Roots& roots,
- const char* type_hint) {
+std::unique_ptr<ObjectPropertiesResult> GetHeapObjectPropertiesMaybeCompressed(
+ uintptr_t address, d::MemoryAccessor memory_accessor,
+ d::HeapAddresses heap_addresses, const char* type_hint) {
// Try to figure out the heap range, for pointer compression (this is unused
// if pointer compression is disabled).
uintptr_t any_uncompressed_ptr = 0;
if (!IsPointerCompressed(address)) any_uncompressed_ptr = address;
- if (any_uncompressed_ptr == 0) any_uncompressed_ptr = roots.any_heap_pointer;
- if (any_uncompressed_ptr == 0) any_uncompressed_ptr = roots.map_space;
- if (any_uncompressed_ptr == 0) any_uncompressed_ptr = roots.old_space;
- if (any_uncompressed_ptr == 0) any_uncompressed_ptr = roots.read_only_space;
+ if (any_uncompressed_ptr == 0)
+ any_uncompressed_ptr = heap_addresses.any_heap_pointer;
+ if (any_uncompressed_ptr == 0)
+ any_uncompressed_ptr = heap_addresses.map_space_first_page;
+ if (any_uncompressed_ptr == 0)
+ any_uncompressed_ptr = heap_addresses.old_space_first_page;
+ if (any_uncompressed_ptr == 0)
+ any_uncompressed_ptr = heap_addresses.read_only_space_first_page;
+ FillInUnknownHeapAddresses(&heap_addresses, any_uncompressed_ptr);
if (any_uncompressed_ptr == 0) {
// We can't figure out the heap range. Just check for known objects.
- std::string brief = FindKnownObject(address, roots);
+ std::string brief = FindKnownObject(address, heap_addresses);
brief = AppendAddressAndType(brief, address, "v8::internal::TaggedValue");
- return v8::base::make_unique<ObjectPropertiesResult>(
+ return std::make_unique<ObjectPropertiesResult>(
d::TypeCheckResult::kUnableToDecompress, brief,
- "v8::internal::TaggedValue",
- std::vector<std::unique_ptr<ObjectProperty>>());
+ "v8::internal::TaggedValue");
}
- // TODO(v8:9376): It seems that the space roots are at predictable offsets
- // within the heap reservation block when pointer compression is enabled, so
- // we should be able to set those here.
-
- address = Decompress(address, any_uncompressed_ptr);
- // From here on all addresses should be decompressed.
+ address = EnsureDecompressed(address, any_uncompressed_ptr);
- // Regardless of whether we can read the object itself, maybe we can find its
- // pointer in the list of known objects.
- std::string brief = FindKnownObject(address, roots);
- return GetHeapObjectProperties(address, memory_accessor, type_hint, brief);
+ return GetHeapObjectPropertiesNotCompressed(address, memory_accessor,
+ type_hint, heap_addresses);
}
-std::unique_ptr<ObjectPropertiesResult> GetObjectPropertiesImpl(
- uintptr_t address, d::MemoryAccessor memory_accessor, const d::Roots& roots,
- const char* type_hint) {
- std::vector<std::unique_ptr<ObjectProperty>> props;
+std::unique_ptr<ObjectPropertiesResult> GetObjectProperties(
+ uintptr_t address, d::MemoryAccessor memory_accessor,
+ const d::HeapAddresses& heap_addresses, const char* type_hint) {
if (static_cast<uint32_t>(address) == i::kClearedWeakHeapObjectLower32) {
- return v8::base::make_unique<ObjectPropertiesResult>(
+ return std::make_unique<ObjectPropertiesResult>(
d::TypeCheckResult::kWeakRef, "cleared weak ref",
- "v8::internal::HeapObject", std::move(props));
+ "v8::internal::HeapObject");
}
bool is_weak = (address & i::kHeapObjectTagMask) == i::kWeakHeapObjectTag;
if (is_weak) {
@@ -495,7 +566,8 @@ std::unique_ptr<ObjectPropertiesResult> GetObjectPropertiesImpl(
}
if (i::Internals::HasHeapObjectTag(address)) {
std::unique_ptr<ObjectPropertiesResult> result =
- GetHeapObjectProperties(address, memory_accessor, roots, type_hint);
+ GetHeapObjectPropertiesMaybeCompressed(address, memory_accessor,
+ heap_addresses, type_hint);
if (is_weak) {
result->Prepend("weak ref to ");
}
@@ -507,9 +579,8 @@ std::unique_ptr<ObjectPropertiesResult> GetObjectPropertiesImpl(
int32_t value = i::PlatformSmiTagging::SmiToInt(address);
std::stringstream stream;
stream << value << " (0x" << std::hex << value << ")";
- return v8::base::make_unique<ObjectPropertiesResult>(
- d::TypeCheckResult::kSmi, stream.str(), "v8::internal::Smi",
- std::move(props));
+ return std::make_unique<ObjectPropertiesResult>(
+ d::TypeCheckResult::kSmi, stream.str(), "v8::internal::Smi");
}
} // namespace v8_debug_helper_internal
@@ -520,10 +591,10 @@ extern "C" {
V8_DEBUG_HELPER_EXPORT d::ObjectPropertiesResult*
_v8_debug_helper_GetObjectProperties(uintptr_t object,
d::MemoryAccessor memory_accessor,
- const d::Roots& heap_roots,
+ const d::HeapAddresses& heap_addresses,
const char* type_hint) {
- return di::GetObjectPropertiesImpl(object, memory_accessor, heap_roots,
- type_hint)
+ return di::GetObjectProperties(object, memory_accessor, heap_addresses,
+ type_hint)
.release()
->GetPublicView();
}
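
A rough sketch of the decompression fallback implemented above: on pointer-compressed builds, EnsureDecompressed widens a 32-bit tagged value by borrowing the upper half from any known uncompressed heap pointer, which is why the code tries several heap_addresses fields in turn. The Python below is illustrative only (it assumes a 4 GB-aligned heap base, not V8's exact arithmetic):

    def is_pointer_compressed(address):
        # Assumption: a compressed tagged value fits in the low 32 bits.
        return address < (1 << 32)

    def ensure_decompressed(address, any_uncompressed_ptr):
        if not is_pointer_compressed(address):
            return address
        # Borrow the heap base (upper 32 bits) from a known full pointer.
        heap_base = any_uncompressed_ptr & ~0xFFFFFFFF
        return heap_base | (address & 0xFFFFFFFF)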
diff --git a/deps/v8/tools/debug_helper/heap-constants.cc b/deps/v8/tools/debug_helper/heap-constants.cc
index 2bd0420690..9b9ed04cc1 100644
--- a/deps/v8/tools/debug_helper/heap-constants.cc
+++ b/deps/v8/tools/debug_helper/heap-constants.cc
@@ -9,36 +9,37 @@ namespace d = v8::debug_helper;
namespace v8_debug_helper_internal {
-std::string FindKnownObject(uintptr_t address, const d::Roots& roots) {
+std::string FindKnownObject(uintptr_t address,
+ const d::HeapAddresses& heap_addresses) {
uintptr_t containing_page = address & ~i::kPageAlignmentMask;
uintptr_t offset_in_page = address & i::kPageAlignmentMask;
- // If there's a match with a known root, then search only that page.
- if (containing_page == roots.map_space) {
+ // If there's a match with a known page, then search only that page.
+ if (containing_page == heap_addresses.map_space_first_page) {
return FindKnownObjectInMapSpace(offset_in_page);
}
- if (containing_page == roots.old_space) {
+ if (containing_page == heap_addresses.old_space_first_page) {
return FindKnownObjectInOldSpace(offset_in_page);
}
- if (containing_page == roots.read_only_space) {
+ if (containing_page == heap_addresses.read_only_space_first_page) {
return FindKnownObjectInReadOnlySpace(offset_in_page);
}
- // For any unknown roots, compile a list of things this object might be.
+ // For any unknown pages, compile a list of things this object might be.
std::string result;
- if (roots.map_space == 0) {
+ if (heap_addresses.map_space_first_page == 0) {
std::string sub_result = FindKnownObjectInMapSpace(offset_in_page);
if (!sub_result.empty()) {
result += "maybe " + sub_result;
}
}
- if (roots.old_space == 0) {
+ if (heap_addresses.old_space_first_page == 0) {
std::string sub_result = FindKnownObjectInOldSpace(offset_in_page);
if (!sub_result.empty()) {
result = (result.empty() ? "" : result + ", ") + "maybe " + sub_result;
}
}
- if (roots.read_only_space == 0) {
+ if (heap_addresses.read_only_space_first_page == 0) {
std::string sub_result = FindKnownObjectInReadOnlySpace(offset_in_page);
if (!sub_result.empty()) {
result = (result.empty() ? "" : result + ", ") + "maybe " + sub_result;
@@ -48,4 +49,37 @@ std::string FindKnownObject(uintptr_t address, const d::Roots& roots) {
return result;
}
+KnownInstanceType FindKnownMapInstanceType(
+ uintptr_t address, const d::HeapAddresses& heap_addresses) {
+ uintptr_t containing_page = address & ~i::kPageAlignmentMask;
+ uintptr_t offset_in_page = address & i::kPageAlignmentMask;
+
+ // If there's a match with a known page, then search only that page.
+ if (containing_page == heap_addresses.map_space_first_page) {
+ return KnownInstanceType(
+ FindKnownMapInstanceTypeInMapSpace(offset_in_page));
+ }
+ if (containing_page == heap_addresses.read_only_space_first_page) {
+ return KnownInstanceType(
+ FindKnownMapInstanceTypeInReadOnlySpace(offset_in_page));
+ }
+
+ // For any unknown pages, compile a list of things this object might be.
+ KnownInstanceType result;
+ if (heap_addresses.map_space_first_page == 0) {
+ int sub_result = FindKnownMapInstanceTypeInMapSpace(offset_in_page);
+ if (sub_result >= 0) {
+ result.types.push_back(static_cast<i::InstanceType>(sub_result));
+ }
+ }
+ if (heap_addresses.read_only_space_first_page == 0) {
+ int sub_result = FindKnownMapInstanceTypeInReadOnlySpace(offset_in_page);
+ if (sub_result >= 0) {
+ result.types.push_back(static_cast<i::InstanceType>(sub_result));
+ }
+ }
+
+ return result;
+}
+
} // namespace v8_debug_helper_internal
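
Both lookups above share the same page-masking scheme: mask the address down to its containing page, and only trust an offset-based match when that page is a known first page; otherwise the match is reported as a guess. A minimal sketch, assuming 256 KiB page alignment and a stand-in lookup table (both are assumptions; the real constants are generated):

    PAGE_ALIGNMENT_MASK = (1 << 18) - 1  # assumed page size; V8's is build-dependent

    def find_known_object(address, map_space_first_page, known_map_space_objects):
        containing_page = address & ~PAGE_ALIGNMENT_MASK
        offset_in_page = address & PAGE_ALIGNMENT_MASK
        if containing_page == map_space_first_page:
            # Exact page match: the result, if any, is definitive.
            return known_map_space_objects.get(offset_in_page, "")
        if map_space_first_page == 0:
            # Page unknown: a matching offset is only a guess.
            guess = known_map_space_objects.get(offset_in_page)
            return "maybe " + guess if guess else ""
        return ""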
diff --git a/deps/v8/tools/debug_helper/heap-constants.h b/deps/v8/tools/debug_helper/heap-constants.h
index f3149bbb47..6c1f17dc82 100644
--- a/deps/v8/tools/debug_helper/heap-constants.h
+++ b/deps/v8/tools/debug_helper/heap-constants.h
@@ -7,21 +7,60 @@
#include <cstdint>
#include <string>
+#include <vector>
#include "debug-helper.h"
+#include "src/objects/instance-type.h"
namespace d = v8::debug_helper;
namespace v8_debug_helper_internal {
-// Functions generated by mkgrokdump:
+// ===== Functions generated by gen-heap-constants.py: =========================
+
+// Returns the name of a known object, given its offset within the first page of
+// the space, or empty string on failure.
std::string FindKnownObjectInOldSpace(uintptr_t offset);
std::string FindKnownObjectInReadOnlySpace(uintptr_t offset);
std::string FindKnownObjectInMapSpace(uintptr_t offset);
-std::string FindKnownMapInstanceTypeInMapSpace(uintptr_t offset);
-std::string FindKnownMapInstanceTypeInReadOnlySpace(uintptr_t offset);
-std::string FindKnownObject(uintptr_t address, const d::Roots& roots);
+// In builds with pointer compression enabled, sets the *_first_page members in
+// the HeapAddresses object. In other builds, does nothing.
+void FillInUnknownHeapAddresses(d::HeapAddresses* heap_addresses,
+ uintptr_t any_uncompressed_ptr);
+
+// Returns the instance type for the known Map, given its offset within the
+// first page of the space, or -1 on failure.
+int FindKnownMapInstanceTypeInMapSpace(uintptr_t offset);
+int FindKnownMapInstanceTypeInReadOnlySpace(uintptr_t offset);
+
+// ===== End of generated functions. ===========================================
+
+// Returns a descriptive string if the given address matches a known object, or
+// an empty string otherwise.
+std::string FindKnownObject(uintptr_t address,
+ const d::HeapAddresses& heap_addresses);
+
+struct KnownInstanceType {
+ enum class Confidence {
+ kLow,
+ kHigh,
+ };
+ KnownInstanceType() : confidence(Confidence::kLow) {}
+ KnownInstanceType(int type) : KnownInstanceType() {
+ if (type >= 0) {
+ confidence = Confidence::kHigh;
+ types.push_back(static_cast<v8::internal::InstanceType>(type));
+ }
+ }
+ Confidence confidence;
+ std::vector<v8::internal::InstanceType> types;
+};
+
+// Returns information about the instance type of the Map at the given address,
+// based on the list of known Maps.
+KnownInstanceType FindKnownMapInstanceType(
+ uintptr_t address, const d::HeapAddresses& heap_addresses);
} // namespace v8_debug_helper_internal
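
The confidence field encodes how a caller should treat the result, mirroring the DCHECK in GetTypedHeapObject earlier in this patch: kHigh implies exactly one type (the Map's page matched exactly) and may be used as the primary answer, while kLow guesses are only attached as possible types. Roughly:

    def classify(known_types, confidence):
        # confidence is "high" only when the Map's containing page matched a
        # known first page, in which case there is exactly one candidate.
        if confidence == "high":
            assert len(known_types) == 1
            return known_types[0], []       # definitive instance type
        return None, list(known_types)      # low-confidence guesses only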
diff --git a/deps/v8/tools/dev/gm.py b/deps/v8/tools/dev/gm.py
index 0e01f4f8d7..8269b94cc2 100755
--- a/deps/v8/tools/dev/gm.py
+++ b/deps/v8/tools/dev/gm.py
@@ -30,7 +30,8 @@ USE_PTY = "linux" in sys.platform
if USE_PTY:
import pty
-BUILD_TARGETS_TEST = ["d8", "cctest", "unittests"]
+BUILD_TARGETS_TEST = ["d8", "cctest", "inspector-test", "unittests",
+ "wasm_api_tests"]
BUILD_TARGETS_ALL = ["all"]
# All arches that this script understands.
diff --git a/deps/v8/tools/gcmole/BUILD.gn b/deps/v8/tools/gcmole/BUILD.gn
index 51b9ef527f..ba2d67fd79 100644
--- a/deps/v8/tools/gcmole/BUILD.gn
+++ b/deps/v8/tools/gcmole/BUILD.gn
@@ -11,6 +11,7 @@ group("v8_run_gcmole") {
"gccause.lua",
"GCMOLE.gn",
"gcmole.lua",
+ "gcmole-test.cc",
"gcmole-tools/",
"parallel.py",
"run-gcmole.py",
diff --git a/deps/v8/tools/gcmole/gcmole.lua b/deps/v8/tools/gcmole/gcmole.lua
index 6758973457..305a498b07 100644
--- a/deps/v8/tools/gcmole/gcmole.lua
+++ b/deps/v8/tools/gcmole/gcmole.lua
@@ -102,11 +102,12 @@ local function MakeClangCommandLine(
end
plugin_args = " " .. table.concat(plugin_args, " ")
end
- return CLANG_BIN .. "/clang++ -std=c++11 -c "
+ return CLANG_BIN .. "/clang++ -std=c++14 -c"
.. " -Xclang -load -Xclang " .. CLANG_PLUGINS .. "/libgcmole.so"
.. " -Xclang -plugin -Xclang " .. plugin
.. (plugin_args or "")
.. " -Xclang -triple -Xclang " .. triple
+ .. " -fno-exceptions"
.. " -D" .. arch_define
.. " -DENABLE_DEBUGGER_SUPPORT"
.. " -DV8_INTL_SUPPORT"
@@ -466,6 +467,10 @@ end
local function TestRun()
local errors, output = SafeCheckCorrectnessForArch('x64', true)
+ if not errors then
+ log("** Test file should produce errors, but none were found.")
+ return false
+ end
local filename = "tools/gcmole/test-expectations.txt"
local exp_file = assert(io.open(filename), "failed to open test expectations file")
@@ -473,18 +478,18 @@ local function TestRun()
if output ~= expectations then
log("** Output mismatch from running tests. Please run them manually.")
- else
- log("** Tests ran successfully")
+ return false
end
-end
-TestRun()
+ log("** Tests ran successfully")
+ return true
+end
-local errors = false
+local errors = not TestRun()
for _, arch in ipairs(ARCHS) do
if not ARCHITECTURES[arch] then
- error ("Unknown arch: " .. arch)
+ error("Unknown arch: " .. arch)
end
errors = SafeCheckCorrectnessForArch(arch, false) or errors
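
The reworked TestRun follows a self-check pattern: the checker is first run on a file known to contain errors, and the whole run fails if nothing is flagged or if the diagnostics differ from the recorded expectations. The same pattern, sketched in Python for illustration:

    def self_test(run_checker, expectations_path):
        errors, output = run_checker()
        if not errors:
            print("** Test file should produce errors, but none were found.")
            return False
        with open(expectations_path) as f:
            if output != f.read():
                print("** Output mismatch from running tests.")
                return False
        print("** Tests ran successfully")
        return True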
diff --git a/deps/v8/tools/gdbinit b/deps/v8/tools/gdbinit
index ad7847df31..53ead3e881 100644
--- a/deps/v8/tools/gdbinit
+++ b/deps/v8/tools/gdbinit
@@ -138,7 +138,13 @@ Find the location of a given address in V8 pages.
Usage: heap_find address
end
-set disassembly-flavor intel
+# The 'disassembly-flavor' command is only available on i386 and x86_64.
+python
+try:
+ gdb.execute("set disassembly-flavor intel")
+except gdb.error:
+ pass
+end
set disable-randomization off
# Install a handler whenever the debugger stops due to a signal. It walks up the
diff --git a/deps/v8/tools/gen-postmortem-metadata.py b/deps/v8/tools/gen-postmortem-metadata.py
index ceaf7798fd..725af52c7d 100644
--- a/deps/v8/tools/gen-postmortem-metadata.py
+++ b/deps/v8/tools/gen-postmortem-metadata.py
@@ -49,7 +49,6 @@
# for py2/py3 compatibility
from __future__ import print_function
-import io
import re
import sys
@@ -286,6 +285,7 @@ extras_accessors = [
'Code, instruction_start, uintptr_t, kHeaderSize',
'Code, instruction_size, int, kInstructionSizeOffset',
'String, length, int32_t, kLengthOffset',
+ 'DescriptorArray, header_size, uintptr_t, kHeaderSize',
];
#
@@ -296,7 +296,8 @@ extras_accessors = [
expected_classes = [
'ConsString', 'FixedArray', 'HeapNumber', 'JSArray', 'JSFunction',
'JSObject', 'JSRegExp', 'JSPrimitiveWrapper', 'Map', 'Oddball', 'Script',
- 'SeqOneByteString', 'SharedFunctionInfo', 'ScopeInfo', 'JSPromise'
+ 'SeqOneByteString', 'SharedFunctionInfo', 'ScopeInfo', 'JSPromise',
+ 'DescriptorArray'
];
@@ -383,10 +384,12 @@ def load_objects():
def load_objects_from_file(objfilename, checktypes):
- objfile = io.open(objfilename, 'r', encoding='utf-8');
+ objfile = open(objfilename, 'r');
in_insttype = False;
+ in_torque_insttype = False
typestr = '';
+ torque_typestr = ''
uncommented_file = ''
#
@@ -400,16 +403,28 @@ def load_objects_from_file(objfilename, checktypes):
in_insttype = True;
continue;
+ if (line.startswith('#define TORQUE_ASSIGNED_INSTANCE_TYPE_LIST')):
+ in_torque_insttype = True
+ continue
+
if (in_insttype and line.startswith('};')):
in_insttype = False;
continue;
+ if (in_torque_insttype and (not line or line.isspace())):
+ in_torque_insttype = False
+ continue
+
line = re.sub('//.*', '', line.strip());
if (in_insttype):
typestr += line;
continue;
+ if (in_torque_insttype):
+ torque_typestr += line
+ continue
+
uncommented_file += '\n' + line
for match in re.finditer(r'\nclass(?:\s+V8_EXPORT(?:_PRIVATE)?)?'
@@ -437,6 +452,9 @@ def load_objects_from_file(objfilename, checktypes):
entries = typestr.split(',');
for entry in entries:
types[re.sub('\s*=.*', '', entry).lstrip()] = True;
+ entries = torque_typestr.split('\\')
+ for entry in entries:
+ types[re.sub(r' *V\(|\) *', '', entry)] = True
#
# Infer class names for each type based on a systematic transformation.
@@ -446,10 +464,7 @@ def load_objects_from_file(objfilename, checktypes):
# way around.
#
for type in types:
- #
- # REGEXP behaves like REG_EXP, as in JS_REGEXP_TYPE => JSRegExp.
- #
- usetype = re.sub('_REGEXP_', '_REG_EXP_', type);
+ usetype = type
#
# Remove the "_TYPE" suffix and then convert to camel case,
@@ -578,7 +593,7 @@ def load_fields():
def load_fields_from_file(filename):
- inlfile = io.open(filename, 'r', encoding='utf-8');
+ inlfile = open(filename, 'r');
#
# Each class's fields and the corresponding offsets are described in the
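
The new Torque branch accumulates the backslash-continued lines of the TORQUE_ASSIGNED_INSTANCE_TYPE_LIST macro, then splits on the backslashes and strips the V(...) wrapper from each entry. For illustration (the input string here is made up, though the type names appear elsewhere in this patch):

    import re

    torque_typestr = r'V(INTERNAL_CLASS_TYPE) \ V(SMI_PAIR_TYPE) \ V(SMI_BOX_TYPE)'
    types = {}
    for entry in torque_typestr.split('\\'):
        name = re.sub(r' *V\(|\) *', '', entry)
        if name:
            types[name] = True
    # types now maps INTERNAL_CLASS_TYPE, SMI_PAIR_TYPE, SMI_BOX_TYPE to True.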
diff --git a/deps/v8/tools/generate-header-include-checks.py b/deps/v8/tools/generate-header-include-checks.py
index fa18d85bf5..909dafe74d 100755
--- a/deps/v8/tools/generate-header-include-checks.py
+++ b/deps/v8/tools/generate-header-include-checks.py
@@ -22,7 +22,7 @@ import os.path
import re
import sys
-# TODO(clemensh): Extend to tests.
+# TODO(clemensb): Extend to tests.
DEFAULT_INPUT = ['base', 'src']
DEFAULT_GN_FILE = 'BUILD.gn'
MY_DIR = os.path.dirname(os.path.realpath(__file__))
diff --git a/deps/v8/tools/heap-stats/categories.js b/deps/v8/tools/heap-stats/categories.js
index 6560758f3e..0e87662488 100644
--- a/deps/v8/tools/heap-stats/categories.js
+++ b/deps/v8/tools/heap-stats/categories.js
@@ -32,7 +32,7 @@ const CATEGORIES = new Map([
'GLOBAL_PROPERTIES_TYPE',
'HEAP_NUMBER_TYPE',
'INTERNALIZED_STRING_TYPE',
- 'JS_ARGUMENTS_TYPE',
+ 'JS_ARGUMENTS_OBJECT_TYPE',
'JS_ARRAY_BUFFER_TYPE',
'JS_ARRAY_ITERATOR_TYPE',
'JS_ARRAY_TYPE',
@@ -50,16 +50,16 @@ const CATEGORIES = new Map([
'JS_GENERIC_ARRAY_VALUE_ITERATOR_TYPE',
'JS_GLOBAL_OBJECT_TYPE',
'JS_GLOBAL_PROXY_TYPE',
- 'JS_INTL_COLLATOR_TYPE',
- 'JS_INTL_DATE_TIME_FORMAT_TYPE',
- 'JS_INTL_LIST_FORMAT_TYPE',
- 'JS_INTL_LOCALE_TYPE',
- 'JS_INTL_NUMBER_FORMAT_TYPE',
- 'JS_INTL_PLURAL_RULES_TYPE',
- 'JS_INTL_RELATIVE_TIME_FORMAT_TYPE',
- 'JS_INTL_SEGMENT_ITERATOR_TYPE',
- 'JS_INTL_SEGMENTER_TYPE',
- 'JS_INTL_V8_BREAK_ITERATOR_TYPE',
+ 'JS_COLLATOR_TYPE',
+ 'JS_DATE_TIME_FORMAT_TYPE',
+ 'JS_LIST_FORMAT_TYPE',
+ 'JS_LOCALE_TYPE',
+ 'JS_NUMBER_FORMAT_TYPE',
+ 'JS_PLURAL_RULES_TYPE',
+ 'JS_RELATIVE_TIME_FORMAT_TYPE',
+ 'JS_SEGMENT_ITERATOR_TYPE',
+ 'JS_SEGMENTER_TYPE',
+ 'JS_V8_BREAK_ITERATOR_TYPE',
'JS_MAP_KEY_ITERATOR_TYPE',
'JS_MAP_KEY_VALUE_ITERATOR_TYPE',
'JS_MAP_TYPE',
@@ -69,7 +69,7 @@ const CATEGORIES = new Map([
'JS_PRIMITIVE_WRAPPER_TYPE',
'JS_PROMISE_TYPE',
'JS_PROXY_TYPE',
- 'JS_REGEXP_TYPE',
+ 'JS_REG_EXP_TYPE',
'JS_SET_KEY_VALUE_ITERATOR_TYPE',
'JS_SET_TYPE',
'JS_SET_VALUE_ITERATOR_TYPE',
@@ -96,9 +96,9 @@ const CATEGORIES = new Map([
'UNCACHED_EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE',
'UNCACHED_EXTERNAL_ONE_BYTE_STRING_TYPE',
'UNCACHED_EXTERNAL_STRING_TYPE',
- 'WASM_INSTANCE_TYPE',
- 'WASM_MEMORY_TYPE',
- 'WASM_MODULE_TYPE',
+ 'WASM_INSTANCE_OBJECT_TYPE',
+ 'WASM_MEMORY_OBJECT_TYPE',
+ 'WASM_MODULE_OBJECT_TYPE',
])
],
[
diff --git a/deps/v8/tools/ic-explorer.html b/deps/v8/tools/ic-explorer.html
index aede91e0d0..4c725163c5 100644
--- a/deps/v8/tools/ic-explorer.html
+++ b/deps/v8/tools/ic-explorer.html
@@ -362,7 +362,7 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
<div id="legend" style="padding-right: 200px">
<div style="float:right; border-style: solid; border-width: 1px; padding:20px">
0 uninitialized<br>
- . premonomorphic<br>
+ X no feedback<br>
1 monomorphic<br>
^ recompute handler<br>
P polymorphic<br>
diff --git a/deps/v8/tools/inspect-d8.js b/deps/v8/tools/inspect-d8.js
new file mode 100644
index 0000000000..b87a7586b2
--- /dev/null
+++ b/deps/v8/tools/inspect-d8.js
@@ -0,0 +1,30 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This helper allows debugging d8 using Chrome DevTools.
+//
+// It runs a simple REPL for inspector messages and relies on
+// websocketd (https://github.com/joewalnes/websocketd) for the WebSocket
+// communication.
+//
+// You can start a session with a debug build of d8 like:
+//
+// $ websocketd out/x64.debug/d8 YOUR_SCRIPT.js tools/inspect-d8.js
+//
+// After that, copy the URL from the console and pass it as the `ws=`
+// parameter to the Chrome DevTools frontend like:
+//
+// chrome-devtools://devtools/bundled/js_app.html?ws=localhost:80
+
+function receive(msg) {
+ print(msg);
+}
+
+function handleInspectorMessage() {
+ send(readline());
+}
+
+while (true) {
+ handleInspectorMessage();
+}
diff --git a/deps/v8/tools/js2c.py b/deps/v8/tools/js2c.py
index b94f3add23..43a876e236 100755
--- a/deps/v8/tools/js2c.py
+++ b/deps/v8/tools/js2c.py
@@ -202,19 +202,11 @@ def PrepareSources(source_files, native_type, emit_js):
Returns:
An instance of Sources.
"""
+ result = Sources()
filters = BuildFilterChain()
source_files_and_contents = [(f, ReadFile(f)) for f in source_files]
- # Have a single not-quite-empty source file if there are none present;
- # otherwise you get errors trying to compile an empty C++ array.
- # It cannot be empty (or whitespace, which gets trimmed to empty), as
- # the deserialization code assumes each file is nonempty.
- if not source_files_and_contents:
- source_files_and_contents = [("dummy.js", "(function() {})")]
-
- result = Sources()
-
for (source, contents) in source_files_and_contents:
try:
lines = filters(contents)
@@ -273,7 +265,9 @@ def BuildMetadata(sources, source_bytes, native_type):
metadata = {
"builtin_count": len(sources.modules),
- "sources_declaration": SOURCES_DECLARATION % ToCArray(source_bytes),
+ "sources_declaration":
+ SOURCES_DECLARATION % ToCArray(
+ source_bytes if len(source_bytes) != 0 else "\0"),
"total_length": total_length,
"get_index_cases": "".join(get_index_cases),
"get_script_source_cases": "".join(get_script_source_cases),
diff --git a/deps/v8/tools/mips_toolchain.tar.gz.sha1 b/deps/v8/tools/mips_toolchain.tar.gz.sha1
deleted file mode 100644
index 8d4572336a..0000000000
--- a/deps/v8/tools/mips_toolchain.tar.gz.sha1
+++ /dev/null
@@ -1 +0,0 @@
-d51b5d903340262d8d13ecd51054c16a901b3cf3 \ No newline at end of file
diff --git a/deps/v8/tools/regexp-sequences.py b/deps/v8/tools/regexp-sequences.py
new file mode 100755
index 0000000000..56e83679a9
--- /dev/null
+++ b/deps/v8/tools/regexp-sequences.py
@@ -0,0 +1,67 @@
+#!/usr/bin/env python
+# Copyright 2019 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+python %prog trace-file
+
+Parses output generated by v8 with flag --trace-regexp-bytecodes and generates
+a list of the most common sequences.
+"""
+
+from __future__ import print_function
+
+import sys
+import re
+import collections
+
+def parse(file, seqlen):
+ # example:
+ # pc = 00, sp = 0, curpos = 0, curchar = 0000000a ..., bc = PUSH_BT, 02, 00, 00, 00, e8, 00, 00, 00 .......
+ rx = re.compile(r'pc = (?P<pc>[0-9a-f]+), sp = (?P<sp>\d+), '
+ r'curpos = (?P<curpos>\d+), curchar = (?P<char_hex>[0-9a-f]+) '
+ r'(:?\.|\()(?P<char>\.|\w)(:?\.|\)), bc = (?P<bc>\w+), .*')
+ total = 0
+ bc_cnt = [None] * seqlen
+ for i in xrange(seqlen):
+ bc_cnt[i] = {}
+ last = [None] * seqlen
+ with open(file) as f:
+ l = f.readline()
+ while l:
+ l = l.strip()
+ if l.startswith("Start bytecode interpreter"):
+ for i in xrange(seqlen):
+ last[i] = collections.deque(maxlen=i+1)
+
+ match = rx.search(l)
+ if match:
+ total += 1
+ bc = match.group('bc')
+ for i in xrange(seqlen):
+ last[i].append(bc)
+ key = ' --> '.join(last[i])
+ bc_cnt[i][key] = bc_cnt[i].get(key,0) + 1
+
+ l = f.readline()
+ return bc_cnt, total
+
+def print_most_common(d, seqlen, total):
+ sorted_d = sorted(d.items(), key=lambda kv: kv[1], reverse=True)
+ for (k,v) in sorted_d:
+ if v*100/total < 1.0:
+ return
+ print("{}: {} ({} %)".format(k,v,(v*100/total)))
+
+def main(argv):
+ max_seq = 7
+ bc_cnt, total = parse(argv[1],max_seq)
+ for i in xrange(max_seq):
+ print()
+ print("Most common of length {}".format(i+1))
+ print()
+ print_most_common(bc_cnt[i], i, total)
+
+if __name__ == '__main__':
+ main(sys.argv)
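
The counting core of the script above is a set of bounded deques acting as sliding windows, one per sequence length, so each new bytecode extends the 1-gram through max_seq-gram counts in a single pass. Reduced to its essence (the bytecode names below are illustrative):

    import collections

    def count_sequences(bytecodes, max_len):
        counts = [collections.Counter() for _ in range(max_len)]
        windows = [collections.deque(maxlen=i + 1) for i in range(max_len)]
        for bc in bytecodes:
            for i in range(max_len):
                windows[i].append(bc)  # the deque drops the oldest entry itself
                counts[i][' --> '.join(windows[i])] += 1
        return counts

    counts = count_sequences(['PUSH_BT', 'PUSH_BT', 'PUSH_BT'], 2)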
diff --git a/deps/v8/tools/release/merge_to_branch.py b/deps/v8/tools/release/merge_to_branch.py
index c9594292b1..64f2a1981d 100755
--- a/deps/v8/tools/release/merge_to_branch.py
+++ b/deps/v8/tools/release/merge_to_branch.py
@@ -47,7 +47,7 @@ class Preparation(Step):
if self._options.force:
os.remove(self.Config("ALREADY_MERGING_SENTINEL_FILE"))
elif self._options.step == 0: # pragma: no cover
- self.Die("A merge is already in progress")
+ self.Die("A merge is already in progress. Use -f to continue")
open(self.Config("ALREADY_MERGING_SENTINEL_FILE"), "a").close()
self.InitialEnvironmentChecks(self.default_cwd)
diff --git a/deps/v8/tools/testrunner/base_runner.py b/deps/v8/tools/testrunner/base_runner.py
index cb23366aa4..d999d87a27 100644
--- a/deps/v8/tools/testrunner/base_runner.py
+++ b/deps/v8/tools/testrunner/base_runner.py
@@ -46,8 +46,10 @@ DEFAULT_OUT_GN = 'out.gn'
# Map of test name synonyms to lists of test suites. Should be ordered by
# expected runtimes (suites with slow test cases first). These groups are
# invoked in separate steps on the bots.
+# The mapping from names used here to GN targets (which must stay in sync)
+# is defined in infra/mb/gn_isolate_map.pyl.
TEST_MAP = {
- # This needs to stay in sync with test/bot_default.isolate.
+ # This needs to stay in sync with group("v8_bot_default") in test/BUILD.gn.
"bot_default": [
"debugger",
"mjsunit",
@@ -62,8 +64,9 @@ TEST_MAP = {
"preparser",
"intl",
"unittests",
+ "wasm-api-tests",
],
- # This needs to stay in sync with test/default.isolate.
+ # This needs to stay in sync with group("v8_default") in test/BUILD.gn.
"default": [
"debugger",
"mjsunit",
@@ -77,8 +80,9 @@ TEST_MAP = {
"preparser",
"intl",
"unittests",
+ "wasm-api-tests",
],
- # This needs to stay in sync with test/d8_default.isolate.
+ # This needs to stay in sync with group("v8_d8_default") in test/BUILD.gn.
"d8_default": [
"debugger",
"mjsunit",
@@ -87,7 +91,7 @@ TEST_MAP = {
"preparser",
"intl",
],
- # This needs to stay in sync with test/optimize_for_size.isolate.
+ # This needs to stay in sync with "v8_optimize_for_size" in test/BUILD.gn.
"optimize_for_size": [
"debugger",
"mjsunit",
@@ -190,7 +194,9 @@ class BuildConfig(object):
self.is_full_debug = build_config['is_full_debug']
self.msan = build_config['is_msan']
self.no_i18n = not build_config['v8_enable_i18n_support']
- self.no_snap = not build_config['v8_use_snapshot']
+ # TODO(https://crbug.com/v8/8531)
+ # 'v8_use_snapshot' was removed, 'no_snap' can be removed as well.
+ self.no_snap = False
self.predictable = build_config['v8_enable_verify_predictable']
self.tsan = build_config['is_tsan']
# TODO(machenbach): We only have ubsan not ubsan_vptr.
@@ -315,13 +321,11 @@ class BaseTestRunner(object):
default=False, action="store_true")
parser.add_option("--outdir", help="Base directory with compile output",
default="out")
- parser.add_option("--buildbot", help="DEPRECATED!",
- default=False, action="store_true")
parser.add_option("--arch",
help="The architecture to run tests for")
parser.add_option("-m", "--mode",
- help="The test mode in which to run (uppercase for ninja"
- " and buildbot builds): %s" % MODES.keys())
+ help="The test mode in which to run (uppercase for builds"
+ " in CI): %s" % MODES.keys())
parser.add_option("--shell-dir", help="DEPRECATED! Executables from build "
"directory will be used")
parser.add_option("--test-root", help="Root directory of the test suites",
@@ -350,9 +354,6 @@ class BaseTestRunner(object):
"color, mono)")
parser.add_option("--json-test-results",
help="Path to a file for storing json results.")
- parser.add_option("--junitout", help="File name of the JUnit output")
- parser.add_option("--junittestsuite", default="v8tests",
- help="The testsuite name in the JUnit output file")
parser.add_option("--exit-after-n-failures", type="int", default=100,
help="Exit after the first N failures instead of "
"running all tests. Pass 0 to disable this feature.")
@@ -436,7 +437,7 @@ class BaseTestRunner(object):
# gn
# outdir
# outdir/arch.mode
- # Each path is provided in two versions: <path> and <path>/mode for buildbot.
+ # Each path is provided in two versions: <path> and <path>/mode for bots.
def _possible_outdirs(self, options):
def outdirs():
if options.gn:
@@ -451,7 +452,7 @@ class BaseTestRunner(object):
for outdir in outdirs():
yield os.path.join(self.basedir, outdir)
- # buildbot option
+ # bot option
if options.mode:
yield os.path.join(self.basedir, outdir, options.mode)
@@ -493,9 +494,9 @@ class BaseTestRunner(object):
def _process_default_options(self, options):
# We don't use the mode for more path-magic.
- # Therefore transform the buildbot mode here to fix build_config value.
+ # Therefore transform the bot mode here to fix build_config value.
if options.mode:
- options.mode = self._buildbot_to_v8_mode(options.mode)
+ options.mode = self._bot_to_v8_mode(options.mode)
build_config_mode = 'debug' if self.build_config.is_debug else 'release'
if options.mode:
@@ -535,8 +536,8 @@ class BaseTestRunner(object):
options.command_prefix = shlex.split(options.command_prefix)
options.extra_flags = sum(map(shlex.split, options.extra_flags), [])
- def _buildbot_to_v8_mode(self, config):
- """Convert buildbot build configs to configs understood by the v8 runner.
+ def _bot_to_v8_mode(self, config):
+ """Convert build configs from bots to configs understood by the v8 runner.
V8 configs are always lower case and without the additional _x64 suffix
for 64 bit builds on windows with ninja.
@@ -799,9 +800,6 @@ class BaseTestRunner(object):
def _create_progress_indicators(self, test_count, options):
procs = [PROGRESS_INDICATORS[options.progress]()]
- if options.junitout:
- procs.append(progress.JUnitTestProgressIndicator(options.junitout,
- options.junittestsuite))
if options.json_test_results:
procs.append(progress.JsonTestProgressIndicator(
self.framework_name,
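
Per the docstring above, _bot_to_v8_mode lower-cases the bot config name and drops the _x64 suffix used for 64-bit ninja builds on Windows. Its body is not shown in this hunk; a sketch consistent with the docstring:

    def bot_to_v8_mode(config):
        # "Release_x64" -> "release"; V8 modes are lower case and have no
        # 64-bit Windows/ninja suffix.
        mode = config.lower()
        if mode.endswith('_x64'):
            mode = mode[:-len('_x64')]
        return mode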
diff --git a/deps/v8/tools/testrunner/local/junit_output.py b/deps/v8/tools/testrunner/local/junit_output.py
deleted file mode 100644
index 52f31ec422..0000000000
--- a/deps/v8/tools/testrunner/local/junit_output.py
+++ /dev/null
@@ -1,49 +0,0 @@
-# Copyright 2013 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-import xml.etree.ElementTree as xml
-
-
-class JUnitTestOutput:
- def __init__(self, test_suite_name):
- self.root = xml.Element("testsuite")
- self.root.attrib["name"] = test_suite_name
-
- def HasRunTest(self, test_name, test_cmd, test_duration, test_failure):
- testCaseElement = xml.Element("testcase")
- testCaseElement.attrib["name"] = test_name
- testCaseElement.attrib["cmd"] = test_cmd
- testCaseElement.attrib["time"] = str(round(test_duration, 3))
- if len(test_failure):
- failureElement = xml.Element("failure")
- failureElement.text = test_failure
- testCaseElement.append(failureElement)
- self.root.append(testCaseElement)
-
- def FinishAndWrite(self, f):
- xml.ElementTree(self.root).write(f, "UTF-8")
diff --git a/deps/v8/tools/testrunner/local/statusfile.py b/deps/v8/tools/testrunner/local/statusfile.py
index e4778326a9..db07a62885 100644
--- a/deps/v8/tools/testrunner/local/statusfile.py
+++ b/deps/v8/tools/testrunner/local/statusfile.py
@@ -300,6 +300,8 @@ JS_TEST_PATHS = {
'webkit': [[]],
}
+FILE_EXTENSIONS = [".js", ".mjs"]
+
def PresubmitCheck(path):
with open(path) as f:
contents = ReadContent(f.read())
@@ -326,8 +328,11 @@ def PresubmitCheck(path):
_assert('*' not in rule or (rule.count('*') == 1 and rule[-1] == '*'),
"Only the last character of a rule key can be a wildcard")
if basename in JS_TEST_PATHS and '*' not in rule:
- _assert(any(os.path.exists(os.path.join(os.path.dirname(path),
- *(paths + [rule + ".js"])))
+ def _any_exist(paths):
+ return any(os.path.exists(os.path.join(os.path.dirname(path),
+ *(paths + [rule + ext])))
+ for ext in FILE_EXTENSIONS)
+ _assert(any(_any_exist(paths)
for paths in JS_TEST_PATHS[basename]),
"missing file for %s test %s" % (basename, rule))
return status["success"]
diff --git a/deps/v8/tools/testrunner/local/variants.py b/deps/v8/tools/testrunner/local/variants.py
index fe63d0b935..57c16c0af1 100644
--- a/deps/v8/tools/testrunner/local/variants.py
+++ b/deps/v8/tools/testrunner/local/variants.py
@@ -32,6 +32,8 @@ ALL_VARIANT_FLAGS = {
"stress_sampling": [["--stress-sampling-allocation-profiler=16384"]],
"trusted": [["--no-untrusted-code-mitigations"]],
"no_wasm_traps": [["--no-wasm-trap-handler"]],
+ "turboprop": [["--turboprop"]],
+ "top_level_await": [["--harmony-top-level-await"]],
}
SLOW_VARIANTS = set([
diff --git a/deps/v8/tools/testrunner/standard_runner.py b/deps/v8/tools/testrunner/standard_runner.py
index 51e78608cb..4d9c73f2fc 100755
--- a/deps/v8/tools/testrunner/standard_runner.py
+++ b/deps/v8/tools/testrunner/standard_runner.py
@@ -8,8 +8,11 @@
from __future__ import print_function
from functools import reduce
+import datetime
+import json
import os
import sys
+import tempfile
# Adds testrunner to the path, hence it has to be imported at the beginning.
import base_runner
@@ -43,7 +46,7 @@ VARIANT_ALIASES = {
# Shortcut for the two above ('more' first - it has the longer running tests)
'exhaustive': MORE_VARIANTS + VARIANTS,
# Additional variants, run on a subset of bots.
- 'extra': ['nooptimization', 'future', 'no_wasm_traps'],
+ 'extra': ['nooptimization', 'future', 'no_wasm_traps', 'turboprop'],
}
GC_STRESS_FLAGS = ['--gc-interval=500', '--stress-compaction',
@@ -120,6 +123,10 @@ class StandardTestRunner(base_runner.BaseTestRunner):
'with test processors: 0 means infinite '
'generation.')
+ # Extra features.
+ parser.add_option('--time', help='Print timing information after running',
+ default=False, action='store_true')
+
# Noop
parser.add_option('--cfi-vptr',
help='Run tests with UBSAN cfi_vptr option.',
@@ -146,8 +153,6 @@ class StandardTestRunner(base_runner.BaseTestRunner):
default=False, action='store_true')
parser.add_option('--flakiness-results',
help='Path to a file for storing flakiness json.')
- parser.add_option('--time', help='Print timing information after running',
- default=False, action='store_true')
parser.add_option('--warn-unused', help='Report unused rules',
default=False, action='store_true')
parser.add_option('--report', default=False, action='store_true',
@@ -168,7 +173,6 @@ class StandardTestRunner(base_runner.BaseTestRunner):
if self.build_config.asan:
options.extra_flags.append('--invoke-weak-callbacks')
- options.extra_flags.append('--omit-quit')
if self.build_config.no_snap:
# Speed up slow nosnap runs. Allocation verification is covered by
@@ -231,6 +235,14 @@ class StandardTestRunner(base_runner.BaseTestRunner):
# TODO(machenbach): uncomment after infra side lands.
# base_runner.TEST_MAP['d8_default'].remove('intl')
+ if options.time and not options.json_test_results:
+ # We retrieve the slowest tests from the JSON output file, so create
+ # a temporary output file (which will automatically get deleted on exit)
+ # if the user didn't specify one.
+ self._temporary_json_output_file = tempfile.NamedTemporaryFile(
+ prefix="v8-test-runner-")
+ options.json_test_results = self._temporary_json_output_file.name
+
def _parse_variants(self, aliases_str):
# Use developer defaults if no variant was specified.
aliases_str = aliases_str or 'dev'
@@ -341,9 +353,47 @@ class StandardTestRunner(base_runner.BaseTestRunner):
if not results.total:
exit_code = utils.EXIT_CODE_NO_TESTS
+ if options.time:
+ self._print_durations(options)
+
# Indicate if a SIGINT or SIGTERM happened.
return max(exit_code, sigproc.exit_code)
+ def _print_durations(self, options):
+
+ def format_duration(duration_in_seconds):
+ duration = datetime.timedelta(seconds=duration_in_seconds)
+ time = (datetime.datetime.min + duration).time()
+ return time.strftime('%M:%S:') + '%03i' % int(time.microsecond / 1000)
+
+ def _duration_results_text(test):
+ return [
+ 'Test: %s' % test['name'],
+ 'Flags: %s' % ' '.join(test['flags']),
+ 'Command: %s' % test['command'],
+ 'Duration: %s' % format_duration(test['duration']),
+ ]
+
+ assert os.path.exists(options.json_test_results)
+ complete_results = []
+ with open(options.json_test_results, "r") as f:
+ complete_results = json.loads(f.read())
+ output = complete_results[0]
+ lines = []
+ for test in output['slowest_tests']:
+ suffix = ''
+ if test.get('marked_slow') is False:
+ suffix = ' *'
+ lines.append(
+ '%s %s%s' % (format_duration(test['duration']),
+ test['name'], suffix))
+
+ # Slowest tests duration details.
+ lines.extend(['', 'Details:', ''])
+ for test in output['slowest_tests']:
+ lines.extend(_duration_results_text(test))
+ print("\n".join(lines))
+
def _create_predictable_filter(self):
if not self.build_config.predictable:
return None
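
For reference, format_duration above renders a duration as minutes:seconds:milliseconds, so 75.5 seconds prints as 01:15:500. This can be checked directly with the function as written:

    import datetime

    def format_duration(duration_in_seconds):
        duration = datetime.timedelta(seconds=duration_in_seconds)
        time = (datetime.datetime.min + duration).time()
        return time.strftime('%M:%S:') + '%03i' % int(time.microsecond / 1000)

    assert format_duration(75.5) == '01:15:500'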
diff --git a/deps/v8/tools/testrunner/testproc/progress.py b/deps/v8/tools/testrunner/testproc/progress.py
index 6957cdc423..98f08ac842 100644
--- a/deps/v8/tools/testrunner/testproc/progress.py
+++ b/deps/v8/tools/testrunner/testproc/progress.py
@@ -5,6 +5,7 @@
# for py2/py3 compatibility
from __future__ import print_function
+import datetime
import json
import os
import platform
@@ -13,7 +14,6 @@ import sys
import time
from . import base
-from ..local import junit_output
# Base dir of the build products for Release and Debug.
@@ -152,8 +152,11 @@ class VerboseProgressIndicator(SimpleProgressIndicator):
except:
pass
+ def _ensure_delay(self, delay):
+ return time.time() - self._last_printed_time > delay
+
def _on_heartbeat(self):
- if time.time() - self._last_printed_time > 30:
+ if self._ensure_delay(30):
# Print something every 30 seconds to not get killed by an output
# timeout.
self._print('Still working...')
@@ -170,6 +173,16 @@ class CIProgressIndicator(VerboseProgressIndicator):
if self.options.ci_test_completion:
with open(self.options.ci_test_completion, "a") as f:
f.write(self._message(test, result) + "\n")
+ self._output_feedback()
+
+ def _output_feedback(self):
+ """Reduced the verbosity leads to getting killed by an ouput timeout.
+ We ensure output every minute.
+ """
+ if self._ensure_delay(60):
+ dt = time.time()
+ st = datetime.datetime.fromtimestamp(dt).strftime('%Y-%m-%d %H:%M:%S')
+ self._print(st)
class DotsProgressIndicator(SimpleProgressIndicator):
@@ -303,45 +316,6 @@ class MonochromeProgressIndicator(CompactProgressIndicator):
print(("\r" + (" " * last_length) + "\r"), end='')
-class JUnitTestProgressIndicator(ProgressIndicator):
- def __init__(self, junitout, junittestsuite):
- super(JUnitTestProgressIndicator, self).__init__()
- self._requirement = base.DROP_PASS_STDOUT
-
- self.outputter = junit_output.JUnitTestOutput(junittestsuite)
- if junitout:
- self.outfile = open(junitout, "w")
- else:
- self.outfile = sys.stdout
-
- def _on_result_for(self, test, result):
- # TODO(majeski): Support for dummy/grouped results
- fail_text = ""
- output = result.output
- if result.has_unexpected_output:
- stdout = output.stdout.strip()
- if len(stdout):
- fail_text += "stdout:\n%s\n" % stdout
- stderr = output.stderr.strip()
- if len(stderr):
- fail_text += "stderr:\n%s\n" % stderr
- fail_text += "Command: %s" % result.cmd.to_string()
- if output.HasCrashed():
- fail_text += "exit code: %d\n--- CRASHED ---" % output.exit_code
- if output.HasTimedOut():
- fail_text += "--- TIMEOUT ---"
- self.outputter.HasRunTest(
- test_name=str(test),
- test_cmd=result.cmd.to_string(relative=True),
- test_duration=output.duration,
- test_failure=fail_text)
-
- def finished(self):
- self.outputter.FinishAndWrite(self.outfile)
- if self.outfile != sys.stdout:
- self.outfile.close()
-
-
class JsonTestProgressIndicator(ProgressIndicator):
def __init__(self, framework_name, json_test_results, arch, mode):
super(JsonTestProgressIndicator, self).__init__()
@@ -400,7 +374,7 @@ class JsonTestProgressIndicator(ProgressIndicator):
complete_results = []
if os.path.exists(self.json_test_results):
with open(self.json_test_results, "r") as f:
- # Buildbot might start out with an empty file.
+ # On bots we might start out with an empty file.
complete_results = json.loads(f.read() or "[]")
duration_mean = None
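
The heartbeat changes boil down to rate-limited keep-alive output: print only when at least N seconds have passed since the last print, so CI output watchdogs are satisfied without flooding the log. A stripped-down sketch:

    import time

    class Heartbeat(object):
        def __init__(self):
            self._last_printed_time = time.time()

        def _ensure_delay(self, delay):
            return time.time() - self._last_printed_time > delay

        def _print(self, text):
            print(text)
            self._last_printed_time = time.time()

        def on_heartbeat(self):
            # Emit something every 30 seconds so the output watchdog does not
            # kill the run; the CI indicator above uses a 60-second variant.
            if self._ensure_delay(30):
                self._print('Still working...')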
diff --git a/deps/v8/tools/torque/format-torque.py b/deps/v8/tools/torque/format-torque.py
index 2e04e659c1..3b90faa73c 100755
--- a/deps/v8/tools/torque/format-torque.py
+++ b/deps/v8/tools/torque/format-torque.py
@@ -33,7 +33,7 @@ def preprocess(input):
break;
input = re.sub(r'\bgenerates\s+\'([^\']+)\'\s*',
- r' _GeNeRaTeS00_/*\1@*/', input)
+ r'_GeNeRaTeS00_/*\1@*/', input)
input = re.sub(r'\bconstexpr\s+\'([^\']+)\'\s*',
r' _CoNsExP_/*\1@*/', input)
input = re.sub(r'\notherwise',
diff --git a/deps/v8/tools/ubsan/blacklist.txt b/deps/v8/tools/ubsan/blacklist.txt
index 0705adc0b4..ea4e79bf52 100644
--- a/deps/v8/tools/ubsan/blacklist.txt
+++ b/deps/v8/tools/ubsan/blacklist.txt
@@ -9,3 +9,9 @@ fun:*v8*internal*InvokeAccessorGetterCallback*
# Bug 8735: WeakCallbackInfo<void> vs. WeakCallbackInfo<T>.
fun:*v8*internal*GlobalHandles*PendingPhantomCallback*Invoke*
fun:*v8*internal*GlobalHandles*Node*PostGarbageCollectionProcessing*
+
+# Simulators casting C++ functions to a generic signature.
+fun:*v8*internal*UnsafeDirectApiCall*
+fun:*v8*internal*UnsafeDirectGetterCall*
+fun:*v8*internal*UnsafeGenericFunctionCall*
+fun:*v8*internal*UnsafeProfilingApiCall*
diff --git a/deps/v8/tools/unittests/run_tests_test.py b/deps/v8/tools/unittests/run_tests_test.py
index 93b10f5fd9..a40bb6756d 100755
--- a/deps/v8/tools/unittests/run_tests_test.py
+++ b/deps/v8/tools/unittests/run_tests_test.py
@@ -338,7 +338,7 @@ class SystemTest(unittest.TestCase):
basedir, dcheck_always_on=True, is_asan=True, is_cfi=True,
is_msan=True, is_tsan=True, is_ubsan_vptr=True, target_cpu='x86',
v8_enable_i18n_support=False, v8_target_cpu='x86',
- v8_use_snapshot=False, v8_enable_embedded_builtins=False,
+ v8_enable_embedded_builtins=False,
v8_enable_verify_csa=False, v8_enable_lite_mode=False,
v8_enable_pointer_compression=False)
result = run_tests(
@@ -355,7 +355,6 @@ class SystemTest(unittest.TestCase):
'dcheck_always_on\n'
'msan\n'
'no_i18n\n'
- 'no_snap\n'
'tsan\n'
'ubsan_vptr\n'
'>>> Running tests for ia32.release')
@@ -572,7 +571,7 @@ class SystemTest(unittest.TestCase):
variants.
"""
with temp_base() as basedir:
- override_build_config(basedir, v8_use_snapshot=False)
+ override_build_config(basedir, is_asan=True)
result = run_tests(
basedir,
'--mode=Release',
diff --git a/deps/v8/tools/unittests/testdata/testroot1/test/sweet/sweet.status b/deps/v8/tools/unittests/testdata/testroot1/test/sweet/sweet.status
index d823cfd231..a0bd517739 100644
--- a/deps/v8/tools/unittests/testdata/testroot1/test/sweet/sweet.status
+++ b/deps/v8/tools/unittests/testdata/testroot1/test/sweet/sweet.status
@@ -29,7 +29,7 @@
'regress/*': [CRASH],
}],
-['no_snap', {
+['asan', {
'bananas': [PASS, NO_VARIANTS],
'raspberries': [FAIL, NO_VARIANTS],
}],
diff --git a/deps/v8/tools/unittests/testdata/testroot1/v8_build_config.json b/deps/v8/tools/unittests/testdata/testroot1/v8_build_config.json
index 0192fd8ee3..0d2148bc21 100644
--- a/deps/v8/tools/unittests/testdata/testroot1/v8_build_config.json
+++ b/deps/v8/tools/unittests/testdata/testroot1/v8_build_config.json
@@ -17,7 +17,6 @@
"v8_enable_i18n_support": true,
"v8_enable_verify_predictable": false,
"v8_target_cpu": "x64",
- "v8_use_snapshot": true,
"v8_enable_embedded_builtins": false,
"v8_enable_verify_csa": false,
"v8_enable_lite_mode": false,
diff --git a/deps/v8/tools/unittests/testdata/testroot2/v8_build_config.json b/deps/v8/tools/unittests/testdata/testroot2/v8_build_config.json
index f19c310bf8..8066096781 100644
--- a/deps/v8/tools/unittests/testdata/testroot2/v8_build_config.json
+++ b/deps/v8/tools/unittests/testdata/testroot2/v8_build_config.json
@@ -17,7 +17,6 @@
"v8_enable_i18n_support": true,
"v8_enable_verify_predictable": false,
"v8_target_cpu": "x64",
- "v8_use_snapshot": true,
"v8_enable_embedded_builtins": false,
"v8_enable_verify_csa": false,
"v8_enable_lite_mode": false,
diff --git a/deps/v8/tools/v8heapconst.py b/deps/v8/tools/v8heapconst.py
index 53aaaf74df..102f5a0753 100644
--- a/deps/v8/tools/v8heapconst.py
+++ b/deps/v8/tools/v8heapconst.py
@@ -26,401 +26,402 @@ INSTANCE_TYPES = {
50: "UNCACHED_EXTERNAL_STRING_TYPE",
58: "UNCACHED_EXTERNAL_ONE_BYTE_STRING_TYPE",
64: "SYMBOL_TYPE",
- 65: "HEAP_NUMBER_TYPE",
- 66: "BIGINT_TYPE",
+ 65: "BIG_INT_BASE_TYPE",
+ 66: "HEAP_NUMBER_TYPE",
67: "ODDBALL_TYPE",
- 68: "MAP_TYPE",
- 69: "CODE_TYPE",
+ 68: "SOURCE_TEXT_MODULE_TYPE",
+ 69: "SYNTHETIC_MODULE_TYPE",
70: "FOREIGN_TYPE",
- 71: "BYTE_ARRAY_TYPE",
- 72: "BYTECODE_ARRAY_TYPE",
- 73: "FREE_SPACE_TYPE",
- 74: "FIXED_DOUBLE_ARRAY_TYPE",
- 75: "FEEDBACK_METADATA_TYPE",
- 76: "FILLER_TYPE",
- 77: "ACCESS_CHECK_INFO_TYPE",
- 78: "ACCESSOR_INFO_TYPE",
- 79: "ACCESSOR_PAIR_TYPE",
- 80: "ALIASED_ARGUMENTS_ENTRY_TYPE",
- 81: "ALLOCATION_MEMENTO_TYPE",
- 82: "ARRAY_BOILERPLATE_DESCRIPTION_TYPE",
- 83: "ASM_WASM_DATA_TYPE",
- 84: "ASYNC_GENERATOR_REQUEST_TYPE",
- 85: "CLASS_POSITIONS_TYPE",
- 86: "DEBUG_INFO_TYPE",
- 87: "ENUM_CACHE_TYPE",
- 88: "FUNCTION_TEMPLATE_INFO_TYPE",
- 89: "FUNCTION_TEMPLATE_RARE_DATA_TYPE",
- 90: "INTERCEPTOR_INFO_TYPE",
- 91: "INTERPRETER_DATA_TYPE",
- 92: "OBJECT_TEMPLATE_INFO_TYPE",
- 93: "PROMISE_CAPABILITY_TYPE",
- 94: "PROMISE_REACTION_TYPE",
- 95: "PROTOTYPE_INFO_TYPE",
- 96: "SCRIPT_TYPE",
- 97: "SOURCE_POSITION_TABLE_WITH_FRAME_CACHE_TYPE",
- 98: "SOURCE_TEXT_MODULE_INFO_ENTRY_TYPE",
- 99: "STACK_FRAME_INFO_TYPE",
- 100: "STACK_TRACE_FRAME_TYPE",
- 101: "TEMPLATE_OBJECT_DESCRIPTION_TYPE",
- 102: "TUPLE2_TYPE",
- 103: "TUPLE3_TYPE",
- 104: "WASM_CAPI_FUNCTION_DATA_TYPE",
- 105: "WASM_DEBUG_INFO_TYPE",
- 106: "WASM_EXCEPTION_TAG_TYPE",
- 107: "WASM_EXPORTED_FUNCTION_DATA_TYPE",
- 108: "WASM_INDIRECT_FUNCTION_TABLE_TYPE",
- 109: "WASM_JS_FUNCTION_DATA_TYPE",
- 110: "CALLABLE_TASK_TYPE",
- 111: "CALLBACK_TASK_TYPE",
- 112: "PROMISE_FULFILL_REACTION_JOB_TASK_TYPE",
- 113: "PROMISE_REJECT_REACTION_JOB_TASK_TYPE",
- 114: "PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE",
- 115: "INTERNAL_CLASS_TYPE",
- 116: "SMI_PAIR_TYPE",
- 117: "SMI_BOX_TYPE",
- 118: "SORT_STATE_TYPE",
- 119: "SOURCE_TEXT_MODULE_TYPE",
- 120: "SYNTHETIC_MODULE_TYPE",
- 121: "ALLOCATION_SITE_TYPE",
- 122: "EMBEDDER_DATA_ARRAY_TYPE",
- 123: "FIXED_ARRAY_TYPE",
- 124: "OBJECT_BOILERPLATE_DESCRIPTION_TYPE",
- 125: "CLOSURE_FEEDBACK_CELL_ARRAY_TYPE",
- 126: "HASH_TABLE_TYPE",
- 127: "ORDERED_HASH_MAP_TYPE",
- 128: "ORDERED_HASH_SET_TYPE",
- 129: "ORDERED_NAME_DICTIONARY_TYPE",
- 130: "NAME_DICTIONARY_TYPE",
- 131: "GLOBAL_DICTIONARY_TYPE",
- 132: "NUMBER_DICTIONARY_TYPE",
- 133: "SIMPLE_NUMBER_DICTIONARY_TYPE",
- 134: "STRING_TABLE_TYPE",
- 135: "EPHEMERON_HASH_TABLE_TYPE",
- 136: "SCOPE_INFO_TYPE",
- 137: "SCRIPT_CONTEXT_TABLE_TYPE",
- 138: "AWAIT_CONTEXT_TYPE",
- 139: "BLOCK_CONTEXT_TYPE",
- 140: "CATCH_CONTEXT_TYPE",
- 141: "DEBUG_EVALUATE_CONTEXT_TYPE",
- 142: "EVAL_CONTEXT_TYPE",
- 143: "FUNCTION_CONTEXT_TYPE",
- 144: "MODULE_CONTEXT_TYPE",
- 145: "NATIVE_CONTEXT_TYPE",
- 146: "SCRIPT_CONTEXT_TYPE",
- 147: "WITH_CONTEXT_TYPE",
- 148: "WEAK_FIXED_ARRAY_TYPE",
- 149: "TRANSITION_ARRAY_TYPE",
- 150: "CALL_HANDLER_INFO_TYPE",
- 151: "CELL_TYPE",
- 152: "CODE_DATA_CONTAINER_TYPE",
- 153: "DESCRIPTOR_ARRAY_TYPE",
- 154: "FEEDBACK_CELL_TYPE",
- 155: "FEEDBACK_VECTOR_TYPE",
- 156: "LOAD_HANDLER_TYPE",
- 157: "PREPARSE_DATA_TYPE",
- 158: "PROPERTY_ARRAY_TYPE",
- 159: "PROPERTY_CELL_TYPE",
- 160: "SHARED_FUNCTION_INFO_TYPE",
- 161: "SMALL_ORDERED_HASH_MAP_TYPE",
- 162: "SMALL_ORDERED_HASH_SET_TYPE",
- 163: "SMALL_ORDERED_NAME_DICTIONARY_TYPE",
- 164: "STORE_HANDLER_TYPE",
- 165: "UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_TYPE",
- 166: "UNCOMPILED_DATA_WITH_PREPARSE_DATA_TYPE",
+ 71: "PROMISE_FULFILL_REACTION_JOB_TASK_TYPE",
+ 72: "PROMISE_REJECT_REACTION_JOB_TASK_TYPE",
+ 73: "CALLABLE_TASK_TYPE",
+ 74: "CALLBACK_TASK_TYPE",
+ 75: "PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE",
+ 76: "LOAD_HANDLER_TYPE",
+ 77: "STORE_HANDLER_TYPE",
+ 78: "FUNCTION_TEMPLATE_INFO_TYPE",
+ 79: "OBJECT_TEMPLATE_INFO_TYPE",
+ 80: "TUPLE2_TYPE",
+ 81: "TUPLE3_TYPE",
+ 82: "ACCESS_CHECK_INFO_TYPE",
+ 83: "ACCESSOR_INFO_TYPE",
+ 84: "ACCESSOR_PAIR_TYPE",
+ 85: "ALIASED_ARGUMENTS_ENTRY_TYPE",
+ 86: "ALLOCATION_MEMENTO_TYPE",
+ 87: "ALLOCATION_SITE_TYPE",
+ 88: "ARRAY_BOILERPLATE_DESCRIPTION_TYPE",
+ 89: "ASM_WASM_DATA_TYPE",
+ 90: "ASYNC_GENERATOR_REQUEST_TYPE",
+ 91: "CALL_HANDLER_INFO_TYPE",
+ 92: "CLASS_POSITIONS_TYPE",
+ 93: "DEBUG_INFO_TYPE",
+ 94: "ENUM_CACHE_TYPE",
+ 95: "FEEDBACK_CELL_TYPE",
+ 96: "FUNCTION_TEMPLATE_RARE_DATA_TYPE",
+ 97: "INTERCEPTOR_INFO_TYPE",
+ 98: "INTERNAL_CLASS_TYPE",
+ 99: "INTERPRETER_DATA_TYPE",
+ 100: "PROMISE_CAPABILITY_TYPE",
+ 101: "PROMISE_REACTION_TYPE",
+ 102: "PROTOTYPE_INFO_TYPE",
+ 103: "SCRIPT_TYPE",
+ 104: "SMI_BOX_TYPE",
+ 105: "SMI_PAIR_TYPE",
+ 106: "SORT_STATE_TYPE",
+ 107: "SOURCE_POSITION_TABLE_WITH_FRAME_CACHE_TYPE",
+ 108: "SOURCE_TEXT_MODULE_INFO_ENTRY_TYPE",
+ 109: "STACK_FRAME_INFO_TYPE",
+ 110: "STACK_TRACE_FRAME_TYPE",
+ 111: "TEMPLATE_OBJECT_DESCRIPTION_TYPE",
+ 112: "WASM_CAPI_FUNCTION_DATA_TYPE",
+ 113: "WASM_DEBUG_INFO_TYPE",
+ 114: "WASM_EXCEPTION_TAG_TYPE",
+ 115: "WASM_EXPORTED_FUNCTION_DATA_TYPE",
+ 116: "WASM_INDIRECT_FUNCTION_TABLE_TYPE",
+ 117: "WASM_JS_FUNCTION_DATA_TYPE",
+ 118: "FIXED_ARRAY_TYPE",
+ 119: "HASH_TABLE_TYPE",
+ 120: "EPHEMERON_HASH_TABLE_TYPE",
+ 121: "GLOBAL_DICTIONARY_TYPE",
+ 122: "NAME_DICTIONARY_TYPE",
+ 123: "NUMBER_DICTIONARY_TYPE",
+ 124: "ORDERED_HASH_MAP_TYPE",
+ 125: "ORDERED_HASH_SET_TYPE",
+ 126: "ORDERED_NAME_DICTIONARY_TYPE",
+ 127: "SIMPLE_NUMBER_DICTIONARY_TYPE",
+ 128: "STRING_TABLE_TYPE",
+ 129: "CLOSURE_FEEDBACK_CELL_ARRAY_TYPE",
+ 130: "OBJECT_BOILERPLATE_DESCRIPTION_TYPE",
+ 131: "SCOPE_INFO_TYPE",
+ 132: "SCRIPT_CONTEXT_TABLE_TYPE",
+ 133: "BYTE_ARRAY_TYPE",
+ 134: "BYTECODE_ARRAY_TYPE",
+ 135: "FIXED_DOUBLE_ARRAY_TYPE",
+ 136: "AWAIT_CONTEXT_TYPE",
+ 137: "BLOCK_CONTEXT_TYPE",
+ 138: "CATCH_CONTEXT_TYPE",
+ 139: "DEBUG_EVALUATE_CONTEXT_TYPE",
+ 140: "EVAL_CONTEXT_TYPE",
+ 141: "FUNCTION_CONTEXT_TYPE",
+ 142: "MODULE_CONTEXT_TYPE",
+ 143: "NATIVE_CONTEXT_TYPE",
+ 144: "SCRIPT_CONTEXT_TYPE",
+ 145: "WITH_CONTEXT_TYPE",
+ 146: "SMALL_ORDERED_HASH_MAP_TYPE",
+ 147: "SMALL_ORDERED_HASH_SET_TYPE",
+ 148: "SMALL_ORDERED_NAME_DICTIONARY_TYPE",
+ 149: "UNCOMPILED_DATA_WITH_PREPARSE_DATA_TYPE",
+ 150: "UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_TYPE",
+ 151: "WEAK_FIXED_ARRAY_TYPE",
+ 152: "TRANSITION_ARRAY_TYPE",
+ 153: "CELL_TYPE",
+ 154: "CODE_TYPE",
+ 155: "CODE_DATA_CONTAINER_TYPE",
+ 156: "DESCRIPTOR_ARRAY_TYPE",
+ 157: "EMBEDDER_DATA_ARRAY_TYPE",
+ 158: "FEEDBACK_METADATA_TYPE",
+ 159: "FEEDBACK_VECTOR_TYPE",
+ 160: "FILLER_TYPE",
+ 161: "FREE_SPACE_TYPE",
+ 162: "MAP_TYPE",
+ 163: "PREPARSE_DATA_TYPE",
+ 164: "PROPERTY_ARRAY_TYPE",
+ 165: "PROPERTY_CELL_TYPE",
+ 166: "SHARED_FUNCTION_INFO_TYPE",
167: "WEAK_ARRAY_LIST_TYPE",
168: "WEAK_CELL_TYPE",
- 1024: "JS_PROXY_TYPE",
- 1025: "JS_GLOBAL_OBJECT_TYPE",
- 1026: "JS_GLOBAL_PROXY_TYPE",
- 1027: "JS_MODULE_NAMESPACE_TYPE",
+ 169: "JS_PROXY_TYPE",
+ 1057: "JS_OBJECT_TYPE",
+ 170: "JS_GLOBAL_OBJECT_TYPE",
+ 171: "JS_GLOBAL_PROXY_TYPE",
+ 172: "JS_MODULE_NAMESPACE_TYPE",
1040: "JS_SPECIAL_API_OBJECT_TYPE",
1041: "JS_PRIMITIVE_WRAPPER_TYPE",
+ 1042: "JS_MAP_KEY_ITERATOR_TYPE",
+ 1043: "JS_MAP_KEY_VALUE_ITERATOR_TYPE",
+ 1044: "JS_MAP_VALUE_ITERATOR_TYPE",
+ 1045: "JS_SET_KEY_VALUE_ITERATOR_TYPE",
+ 1046: "JS_SET_VALUE_ITERATOR_TYPE",
+ 1047: "JS_GENERATOR_OBJECT_TYPE",
+ 1048: "JS_ASYNC_FUNCTION_OBJECT_TYPE",
+ 1049: "JS_ASYNC_GENERATOR_OBJECT_TYPE",
+ 1050: "JS_DATA_VIEW_TYPE",
+ 1051: "JS_TYPED_ARRAY_TYPE",
+ 1052: "JS_MAP_TYPE",
+ 1053: "JS_SET_TYPE",
+ 1054: "JS_WEAK_MAP_TYPE",
+ 1055: "JS_WEAK_SET_TYPE",
1056: "JS_API_OBJECT_TYPE",
- 1057: "JS_OBJECT_TYPE",
- 1058: "JS_ARGUMENTS_TYPE",
- 1059: "JS_ARRAY_BUFFER_TYPE",
- 1060: "JS_ARRAY_ITERATOR_TYPE",
- 1061: "JS_ARRAY_TYPE",
+ 1058: "JS_ARGUMENTS_OBJECT_TYPE",
+ 1059: "JS_ARRAY_TYPE",
+ 1060: "JS_ARRAY_BUFFER_TYPE",
+ 1061: "JS_ARRAY_ITERATOR_TYPE",
1062: "JS_ASYNC_FROM_SYNC_ITERATOR_TYPE",
- 1063: "JS_ASYNC_FUNCTION_OBJECT_TYPE",
- 1064: "JS_ASYNC_GENERATOR_OBJECT_TYPE",
- 1065: "JS_CONTEXT_EXTENSION_OBJECT_TYPE",
- 1066: "JS_DATE_TYPE",
+ 1063: "JS_COLLATOR_TYPE",
+ 1064: "JS_CONTEXT_EXTENSION_OBJECT_TYPE",
+ 1065: "JS_DATE_TYPE",
+ 1066: "JS_DATE_TIME_FORMAT_TYPE",
1067: "JS_ERROR_TYPE",
- 1068: "JS_GENERATOR_OBJECT_TYPE",
- 1069: "JS_MAP_TYPE",
- 1070: "JS_MAP_KEY_ITERATOR_TYPE",
- 1071: "JS_MAP_KEY_VALUE_ITERATOR_TYPE",
- 1072: "JS_MAP_VALUE_ITERATOR_TYPE",
- 1073: "JS_MESSAGE_OBJECT_TYPE",
- 1074: "JS_PROMISE_TYPE",
- 1075: "JS_REGEXP_TYPE",
- 1076: "JS_REGEXP_STRING_ITERATOR_TYPE",
- 1077: "JS_SET_TYPE",
- 1078: "JS_SET_KEY_VALUE_ITERATOR_TYPE",
- 1079: "JS_SET_VALUE_ITERATOR_TYPE",
- 1080: "JS_STRING_ITERATOR_TYPE",
- 1081: "JS_WEAK_REF_TYPE",
- 1082: "JS_FINALIZATION_GROUP_CLEANUP_ITERATOR_TYPE",
- 1083: "JS_FINALIZATION_GROUP_TYPE",
- 1084: "JS_WEAK_MAP_TYPE",
- 1085: "JS_WEAK_SET_TYPE",
- 1086: "JS_TYPED_ARRAY_TYPE",
- 1087: "JS_DATA_VIEW_TYPE",
- 1088: "JS_INTL_V8_BREAK_ITERATOR_TYPE",
- 1089: "JS_INTL_COLLATOR_TYPE",
- 1090: "JS_INTL_DATE_TIME_FORMAT_TYPE",
- 1091: "JS_INTL_LIST_FORMAT_TYPE",
- 1092: "JS_INTL_LOCALE_TYPE",
- 1093: "JS_INTL_NUMBER_FORMAT_TYPE",
- 1094: "JS_INTL_PLURAL_RULES_TYPE",
- 1095: "JS_INTL_RELATIVE_TIME_FORMAT_TYPE",
- 1096: "JS_INTL_SEGMENT_ITERATOR_TYPE",
- 1097: "JS_INTL_SEGMENTER_TYPE",
- 1098: "WASM_EXCEPTION_TYPE",
- 1099: "WASM_GLOBAL_TYPE",
- 1100: "WASM_INSTANCE_TYPE",
- 1101: "WASM_MEMORY_TYPE",
- 1102: "WASM_MODULE_TYPE",
- 1103: "WASM_TABLE_TYPE",
- 1104: "JS_BOUND_FUNCTION_TYPE",
- 1105: "JS_FUNCTION_TYPE",
+ 1068: "JS_FINALIZATION_GROUP_TYPE",
+ 1069: "JS_FINALIZATION_GROUP_CLEANUP_ITERATOR_TYPE",
+ 1070: "JS_LIST_FORMAT_TYPE",
+ 1071: "JS_LOCALE_TYPE",
+ 1072: "JS_MESSAGE_OBJECT_TYPE",
+ 1073: "JS_NUMBER_FORMAT_TYPE",
+ 1074: "JS_PLURAL_RULES_TYPE",
+ 1075: "JS_PROMISE_TYPE",
+ 1076: "JS_REG_EXP_TYPE",
+ 1077: "JS_REG_EXP_STRING_ITERATOR_TYPE",
+ 1078: "JS_RELATIVE_TIME_FORMAT_TYPE",
+ 1079: "JS_SEGMENT_ITERATOR_TYPE",
+ 1080: "JS_SEGMENTER_TYPE",
+ 1081: "JS_STRING_ITERATOR_TYPE",
+ 1082: "JS_V8_BREAK_ITERATOR_TYPE",
+ 1083: "JS_WEAK_REF_TYPE",
+ 1084: "WASM_EXCEPTION_OBJECT_TYPE",
+ 1085: "WASM_GLOBAL_OBJECT_TYPE",
+ 1086: "WASM_INSTANCE_OBJECT_TYPE",
+ 1087: "WASM_MEMORY_OBJECT_TYPE",
+ 1088: "WASM_MODULE_OBJECT_TYPE",
+ 1089: "WASM_TABLE_OBJECT_TYPE",
+ 1090: "JS_BOUND_FUNCTION_TYPE",
+ 1091: "JS_FUNCTION_TYPE",
}
# List of known V8 maps.
KNOWN_MAPS = {
- ("read_only_space", 0x00119): (73, "FreeSpaceMap"),
- ("read_only_space", 0x00169): (68, "MetaMap"),
- ("read_only_space", 0x001e9): (67, "NullMap"),
- ("read_only_space", 0x00251): (153, "DescriptorArrayMap"),
- ("read_only_space", 0x002b1): (148, "WeakFixedArrayMap"),
- ("read_only_space", 0x00301): (76, "OnePointerFillerMap"),
- ("read_only_space", 0x00351): (76, "TwoPointerFillerMap"),
- ("read_only_space", 0x003d1): (67, "UninitializedMap"),
- ("read_only_space", 0x00441): (8, "OneByteInternalizedStringMap"),
- ("read_only_space", 0x004e1): (67, "UndefinedMap"),
- ("read_only_space", 0x00541): (65, "HeapNumberMap"),
- ("read_only_space", 0x005c1): (67, "TheHoleMap"),
- ("read_only_space", 0x00669): (67, "BooleanMap"),
- ("read_only_space", 0x00741): (71, "ByteArrayMap"),
- ("read_only_space", 0x00791): (123, "FixedArrayMap"),
- ("read_only_space", 0x007e1): (123, "FixedCOWArrayMap"),
- ("read_only_space", 0x00831): (126, "HashTableMap"),
- ("read_only_space", 0x00881): (64, "SymbolMap"),
- ("read_only_space", 0x008d1): (40, "OneByteStringMap"),
- ("read_only_space", 0x00921): (136, "ScopeInfoMap"),
- ("read_only_space", 0x00971): (160, "SharedFunctionInfoMap"),
- ("read_only_space", 0x009c1): (69, "CodeMap"),
- ("read_only_space", 0x00a11): (143, "FunctionContextMap"),
- ("read_only_space", 0x00a61): (151, "CellMap"),
- ("read_only_space", 0x00ab1): (159, "GlobalPropertyCellMap"),
- ("read_only_space", 0x00b01): (70, "ForeignMap"),
- ("read_only_space", 0x00b51): (149, "TransitionArrayMap"),
- ("read_only_space", 0x00ba1): (155, "FeedbackVectorMap"),
- ("read_only_space", 0x00c41): (67, "ArgumentsMarkerMap"),
- ("read_only_space", 0x00ce1): (67, "ExceptionMap"),
- ("read_only_space", 0x00d81): (67, "TerminationExceptionMap"),
- ("read_only_space", 0x00e29): (67, "OptimizedOutMap"),
- ("read_only_space", 0x00ec9): (67, "StaleRegisterMap"),
- ("read_only_space", 0x00f39): (145, "NativeContextMap"),
- ("read_only_space", 0x00f89): (144, "ModuleContextMap"),
- ("read_only_space", 0x00fd9): (142, "EvalContextMap"),
- ("read_only_space", 0x01029): (146, "ScriptContextMap"),
- ("read_only_space", 0x01079): (138, "AwaitContextMap"),
- ("read_only_space", 0x010c9): (139, "BlockContextMap"),
- ("read_only_space", 0x01119): (140, "CatchContextMap"),
- ("read_only_space", 0x01169): (147, "WithContextMap"),
- ("read_only_space", 0x011b9): (141, "DebugEvaluateContextMap"),
- ("read_only_space", 0x01209): (137, "ScriptContextTableMap"),
- ("read_only_space", 0x01259): (125, "ClosureFeedbackCellArrayMap"),
- ("read_only_space", 0x012a9): (75, "FeedbackMetadataArrayMap"),
- ("read_only_space", 0x012f9): (123, "ArrayListMap"),
- ("read_only_space", 0x01349): (66, "BigIntMap"),
- ("read_only_space", 0x01399): (124, "ObjectBoilerplateDescriptionMap"),
- ("read_only_space", 0x013e9): (72, "BytecodeArrayMap"),
- ("read_only_space", 0x01439): (152, "CodeDataContainerMap"),
- ("read_only_space", 0x01489): (74, "FixedDoubleArrayMap"),
- ("read_only_space", 0x014d9): (131, "GlobalDictionaryMap"),
- ("read_only_space", 0x01529): (154, "ManyClosuresCellMap"),
- ("read_only_space", 0x01579): (123, "ModuleInfoMap"),
- ("read_only_space", 0x015c9): (130, "NameDictionaryMap"),
- ("read_only_space", 0x01619): (154, "NoClosuresCellMap"),
- ("read_only_space", 0x01669): (132, "NumberDictionaryMap"),
- ("read_only_space", 0x016b9): (154, "OneClosureCellMap"),
- ("read_only_space", 0x01709): (127, "OrderedHashMapMap"),
- ("read_only_space", 0x01759): (128, "OrderedHashSetMap"),
- ("read_only_space", 0x017a9): (129, "OrderedNameDictionaryMap"),
- ("read_only_space", 0x017f9): (157, "PreparseDataMap"),
- ("read_only_space", 0x01849): (158, "PropertyArrayMap"),
- ("read_only_space", 0x01899): (150, "SideEffectCallHandlerInfoMap"),
- ("read_only_space", 0x018e9): (150, "SideEffectFreeCallHandlerInfoMap"),
- ("read_only_space", 0x01939): (150, "NextCallSideEffectFreeCallHandlerInfoMap"),
- ("read_only_space", 0x01989): (133, "SimpleNumberDictionaryMap"),
- ("read_only_space", 0x019d9): (123, "SloppyArgumentsElementsMap"),
- ("read_only_space", 0x01a29): (161, "SmallOrderedHashMapMap"),
- ("read_only_space", 0x01a79): (162, "SmallOrderedHashSetMap"),
- ("read_only_space", 0x01ac9): (163, "SmallOrderedNameDictionaryMap"),
- ("read_only_space", 0x01b19): (119, "SourceTextModuleMap"),
- ("read_only_space", 0x01b69): (134, "StringTableMap"),
- ("read_only_space", 0x01bb9): (120, "SyntheticModuleMap"),
- ("read_only_space", 0x01c09): (165, "UncompiledDataWithoutPreparseDataMap"),
- ("read_only_space", 0x01c59): (166, "UncompiledDataWithPreparseDataMap"),
- ("read_only_space", 0x01ca9): (167, "WeakArrayListMap"),
- ("read_only_space", 0x01cf9): (135, "EphemeronHashTableMap"),
- ("read_only_space", 0x01d49): (122, "EmbedderDataArrayMap"),
- ("read_only_space", 0x01d99): (168, "WeakCellMap"),
- ("read_only_space", 0x01de9): (58, "NativeSourceStringMap"),
- ("read_only_space", 0x01e39): (32, "StringMap"),
- ("read_only_space", 0x01e89): (41, "ConsOneByteStringMap"),
- ("read_only_space", 0x01ed9): (33, "ConsStringMap"),
- ("read_only_space", 0x01f29): (45, "ThinOneByteStringMap"),
- ("read_only_space", 0x01f79): (37, "ThinStringMap"),
- ("read_only_space", 0x01fc9): (35, "SlicedStringMap"),
- ("read_only_space", 0x02019): (43, "SlicedOneByteStringMap"),
- ("read_only_space", 0x02069): (34, "ExternalStringMap"),
- ("read_only_space", 0x020b9): (42, "ExternalOneByteStringMap"),
- ("read_only_space", 0x02109): (50, "UncachedExternalStringMap"),
- ("read_only_space", 0x02159): (0, "InternalizedStringMap"),
- ("read_only_space", 0x021a9): (2, "ExternalInternalizedStringMap"),
- ("read_only_space", 0x021f9): (10, "ExternalOneByteInternalizedStringMap"),
- ("read_only_space", 0x02249): (18, "UncachedExternalInternalizedStringMap"),
- ("read_only_space", 0x02299): (26, "UncachedExternalOneByteInternalizedStringMap"),
- ("read_only_space", 0x022e9): (58, "UncachedExternalOneByteStringMap"),
- ("read_only_space", 0x02339): (67, "SelfReferenceMarkerMap"),
- ("read_only_space", 0x023a1): (87, "EnumCacheMap"),
- ("read_only_space", 0x02441): (82, "ArrayBoilerplateDescriptionMap"),
- ("read_only_space", 0x02631): (90, "InterceptorInfoMap"),
- ("read_only_space", 0x04eb1): (77, "AccessCheckInfoMap"),
- ("read_only_space", 0x04f01): (78, "AccessorInfoMap"),
- ("read_only_space", 0x04f51): (79, "AccessorPairMap"),
- ("read_only_space", 0x04fa1): (80, "AliasedArgumentsEntryMap"),
- ("read_only_space", 0x04ff1): (81, "AllocationMementoMap"),
- ("read_only_space", 0x05041): (83, "AsmWasmDataMap"),
- ("read_only_space", 0x05091): (84, "AsyncGeneratorRequestMap"),
- ("read_only_space", 0x050e1): (85, "ClassPositionsMap"),
- ("read_only_space", 0x05131): (86, "DebugInfoMap"),
- ("read_only_space", 0x05181): (88, "FunctionTemplateInfoMap"),
- ("read_only_space", 0x051d1): (89, "FunctionTemplateRareDataMap"),
- ("read_only_space", 0x05221): (91, "InterpreterDataMap"),
- ("read_only_space", 0x05271): (92, "ObjectTemplateInfoMap"),
- ("read_only_space", 0x052c1): (93, "PromiseCapabilityMap"),
- ("read_only_space", 0x05311): (94, "PromiseReactionMap"),
- ("read_only_space", 0x05361): (95, "PrototypeInfoMap"),
- ("read_only_space", 0x053b1): (96, "ScriptMap"),
- ("read_only_space", 0x05401): (97, "SourcePositionTableWithFrameCacheMap"),
- ("read_only_space", 0x05451): (98, "SourceTextModuleInfoEntryMap"),
- ("read_only_space", 0x054a1): (99, "StackFrameInfoMap"),
- ("read_only_space", 0x054f1): (100, "StackTraceFrameMap"),
- ("read_only_space", 0x05541): (101, "TemplateObjectDescriptionMap"),
- ("read_only_space", 0x05591): (102, "Tuple2Map"),
- ("read_only_space", 0x055e1): (103, "Tuple3Map"),
- ("read_only_space", 0x05631): (104, "WasmCapiFunctionDataMap"),
- ("read_only_space", 0x05681): (105, "WasmDebugInfoMap"),
- ("read_only_space", 0x056d1): (106, "WasmExceptionTagMap"),
- ("read_only_space", 0x05721): (107, "WasmExportedFunctionDataMap"),
- ("read_only_space", 0x05771): (108, "WasmIndirectFunctionTableMap"),
- ("read_only_space", 0x057c1): (109, "WasmJSFunctionDataMap"),
- ("read_only_space", 0x05811): (110, "CallableTaskMap"),
- ("read_only_space", 0x05861): (111, "CallbackTaskMap"),
- ("read_only_space", 0x058b1): (112, "PromiseFulfillReactionJobTaskMap"),
- ("read_only_space", 0x05901): (113, "PromiseRejectReactionJobTaskMap"),
- ("read_only_space", 0x05951): (114, "PromiseResolveThenableJobTaskMap"),
- ("read_only_space", 0x059a1): (115, "InternalClassMap"),
- ("read_only_space", 0x059f1): (116, "SmiPairMap"),
- ("read_only_space", 0x05a41): (117, "SmiBoxMap"),
- ("read_only_space", 0x05a91): (118, "SortStateMap"),
- ("read_only_space", 0x05ae1): (121, "AllocationSiteWithWeakNextMap"),
- ("read_only_space", 0x05b31): (121, "AllocationSiteWithoutWeakNextMap"),
- ("read_only_space", 0x05b81): (156, "LoadHandler1Map"),
- ("read_only_space", 0x05bd1): (156, "LoadHandler2Map"),
- ("read_only_space", 0x05c21): (156, "LoadHandler3Map"),
- ("read_only_space", 0x05c71): (164, "StoreHandler0Map"),
- ("read_only_space", 0x05cc1): (164, "StoreHandler1Map"),
- ("read_only_space", 0x05d11): (164, "StoreHandler2Map"),
- ("read_only_space", 0x05d61): (164, "StoreHandler3Map"),
- ("map_space", 0x00119): (1057, "ExternalMap"),
- ("map_space", 0x00169): (1073, "JSMessageObjectMap"),
+ ("read_only_space", 0x00121): (161, "FreeSpaceMap"),
+ ("read_only_space", 0x00171): (162, "MetaMap"),
+ ("read_only_space", 0x001f1): (67, "NullMap"),
+ ("read_only_space", 0x00259): (156, "DescriptorArrayMap"),
+ ("read_only_space", 0x002b9): (151, "WeakFixedArrayMap"),
+ ("read_only_space", 0x00309): (160, "OnePointerFillerMap"),
+ ("read_only_space", 0x00359): (160, "TwoPointerFillerMap"),
+ ("read_only_space", 0x003d9): (67, "UninitializedMap"),
+ ("read_only_space", 0x00449): (8, "OneByteInternalizedStringMap"),
+ ("read_only_space", 0x004e9): (67, "UndefinedMap"),
+ ("read_only_space", 0x00549): (66, "HeapNumberMap"),
+ ("read_only_space", 0x005c9): (67, "TheHoleMap"),
+ ("read_only_space", 0x00671): (67, "BooleanMap"),
+ ("read_only_space", 0x00749): (133, "ByteArrayMap"),
+ ("read_only_space", 0x00799): (118, "FixedArrayMap"),
+ ("read_only_space", 0x007e9): (118, "FixedCOWArrayMap"),
+ ("read_only_space", 0x00839): (119, "HashTableMap"),
+ ("read_only_space", 0x00889): (64, "SymbolMap"),
+ ("read_only_space", 0x008d9): (40, "OneByteStringMap"),
+ ("read_only_space", 0x00929): (131, "ScopeInfoMap"),
+ ("read_only_space", 0x00979): (166, "SharedFunctionInfoMap"),
+ ("read_only_space", 0x009c9): (154, "CodeMap"),
+ ("read_only_space", 0x00a19): (141, "FunctionContextMap"),
+ ("read_only_space", 0x00a69): (153, "CellMap"),
+ ("read_only_space", 0x00ab9): (165, "GlobalPropertyCellMap"),
+ ("read_only_space", 0x00b09): (70, "ForeignMap"),
+ ("read_only_space", 0x00b59): (152, "TransitionArrayMap"),
+ ("read_only_space", 0x00ba9): (159, "FeedbackVectorMap"),
+ ("read_only_space", 0x00c49): (67, "ArgumentsMarkerMap"),
+ ("read_only_space", 0x00ce9): (67, "ExceptionMap"),
+ ("read_only_space", 0x00d89): (67, "TerminationExceptionMap"),
+ ("read_only_space", 0x00e31): (67, "OptimizedOutMap"),
+ ("read_only_space", 0x00ed1): (67, "StaleRegisterMap"),
+ ("read_only_space", 0x00f41): (143, "NativeContextMap"),
+ ("read_only_space", 0x00f91): (142, "ModuleContextMap"),
+ ("read_only_space", 0x00fe1): (140, "EvalContextMap"),
+ ("read_only_space", 0x01031): (144, "ScriptContextMap"),
+ ("read_only_space", 0x01081): (136, "AwaitContextMap"),
+ ("read_only_space", 0x010d1): (137, "BlockContextMap"),
+ ("read_only_space", 0x01121): (138, "CatchContextMap"),
+ ("read_only_space", 0x01171): (145, "WithContextMap"),
+ ("read_only_space", 0x011c1): (139, "DebugEvaluateContextMap"),
+ ("read_only_space", 0x01211): (132, "ScriptContextTableMap"),
+ ("read_only_space", 0x01261): (129, "ClosureFeedbackCellArrayMap"),
+ ("read_only_space", 0x012b1): (158, "FeedbackMetadataArrayMap"),
+ ("read_only_space", 0x01301): (118, "ArrayListMap"),
+ ("read_only_space", 0x01351): (65, "BigIntMap"),
+ ("read_only_space", 0x013a1): (130, "ObjectBoilerplateDescriptionMap"),
+ ("read_only_space", 0x013f1): (134, "BytecodeArrayMap"),
+ ("read_only_space", 0x01441): (155, "CodeDataContainerMap"),
+ ("read_only_space", 0x01491): (135, "FixedDoubleArrayMap"),
+ ("read_only_space", 0x014e1): (121, "GlobalDictionaryMap"),
+ ("read_only_space", 0x01531): (95, "ManyClosuresCellMap"),
+ ("read_only_space", 0x01581): (118, "ModuleInfoMap"),
+ ("read_only_space", 0x015d1): (122, "NameDictionaryMap"),
+ ("read_only_space", 0x01621): (95, "NoClosuresCellMap"),
+ ("read_only_space", 0x01671): (123, "NumberDictionaryMap"),
+ ("read_only_space", 0x016c1): (95, "OneClosureCellMap"),
+ ("read_only_space", 0x01711): (124, "OrderedHashMapMap"),
+ ("read_only_space", 0x01761): (125, "OrderedHashSetMap"),
+ ("read_only_space", 0x017b1): (126, "OrderedNameDictionaryMap"),
+ ("read_only_space", 0x01801): (163, "PreparseDataMap"),
+ ("read_only_space", 0x01851): (164, "PropertyArrayMap"),
+ ("read_only_space", 0x018a1): (91, "SideEffectCallHandlerInfoMap"),
+ ("read_only_space", 0x018f1): (91, "SideEffectFreeCallHandlerInfoMap"),
+ ("read_only_space", 0x01941): (91, "NextCallSideEffectFreeCallHandlerInfoMap"),
+ ("read_only_space", 0x01991): (127, "SimpleNumberDictionaryMap"),
+ ("read_only_space", 0x019e1): (118, "SloppyArgumentsElementsMap"),
+ ("read_only_space", 0x01a31): (146, "SmallOrderedHashMapMap"),
+ ("read_only_space", 0x01a81): (147, "SmallOrderedHashSetMap"),
+ ("read_only_space", 0x01ad1): (148, "SmallOrderedNameDictionaryMap"),
+ ("read_only_space", 0x01b21): (68, "SourceTextModuleMap"),
+ ("read_only_space", 0x01b71): (128, "StringTableMap"),
+ ("read_only_space", 0x01bc1): (69, "SyntheticModuleMap"),
+ ("read_only_space", 0x01c11): (150, "UncompiledDataWithoutPreparseDataMap"),
+ ("read_only_space", 0x01c61): (149, "UncompiledDataWithPreparseDataMap"),
+ ("read_only_space", 0x01cb1): (167, "WeakArrayListMap"),
+ ("read_only_space", 0x01d01): (120, "EphemeronHashTableMap"),
+ ("read_only_space", 0x01d51): (157, "EmbedderDataArrayMap"),
+ ("read_only_space", 0x01da1): (168, "WeakCellMap"),
+ ("read_only_space", 0x01df1): (58, "NativeSourceStringMap"),
+ ("read_only_space", 0x01e41): (32, "StringMap"),
+ ("read_only_space", 0x01e91): (41, "ConsOneByteStringMap"),
+ ("read_only_space", 0x01ee1): (33, "ConsStringMap"),
+ ("read_only_space", 0x01f31): (45, "ThinOneByteStringMap"),
+ ("read_only_space", 0x01f81): (37, "ThinStringMap"),
+ ("read_only_space", 0x01fd1): (35, "SlicedStringMap"),
+ ("read_only_space", 0x02021): (43, "SlicedOneByteStringMap"),
+ ("read_only_space", 0x02071): (34, "ExternalStringMap"),
+ ("read_only_space", 0x020c1): (42, "ExternalOneByteStringMap"),
+ ("read_only_space", 0x02111): (50, "UncachedExternalStringMap"),
+ ("read_only_space", 0x02161): (0, "InternalizedStringMap"),
+ ("read_only_space", 0x021b1): (2, "ExternalInternalizedStringMap"),
+ ("read_only_space", 0x02201): (10, "ExternalOneByteInternalizedStringMap"),
+ ("read_only_space", 0x02251): (18, "UncachedExternalInternalizedStringMap"),
+ ("read_only_space", 0x022a1): (26, "UncachedExternalOneByteInternalizedStringMap"),
+ ("read_only_space", 0x022f1): (58, "UncachedExternalOneByteStringMap"),
+ ("read_only_space", 0x02341): (67, "SelfReferenceMarkerMap"),
+ ("read_only_space", 0x023a9): (94, "EnumCacheMap"),
+ ("read_only_space", 0x02449): (88, "ArrayBoilerplateDescriptionMap"),
+ ("read_only_space", 0x02639): (97, "InterceptorInfoMap"),
+ ("read_only_space", 0x04f79): (71, "PromiseFulfillReactionJobTaskMap"),
+ ("read_only_space", 0x04fc9): (72, "PromiseRejectReactionJobTaskMap"),
+ ("read_only_space", 0x05019): (73, "CallableTaskMap"),
+ ("read_only_space", 0x05069): (74, "CallbackTaskMap"),
+ ("read_only_space", 0x050b9): (75, "PromiseResolveThenableJobTaskMap"),
+ ("read_only_space", 0x05109): (78, "FunctionTemplateInfoMap"),
+ ("read_only_space", 0x05159): (79, "ObjectTemplateInfoMap"),
+ ("read_only_space", 0x051a9): (80, "Tuple2Map"),
+ ("read_only_space", 0x051f9): (81, "Tuple3Map"),
+ ("read_only_space", 0x05249): (82, "AccessCheckInfoMap"),
+ ("read_only_space", 0x05299): (83, "AccessorInfoMap"),
+ ("read_only_space", 0x052e9): (84, "AccessorPairMap"),
+ ("read_only_space", 0x05339): (85, "AliasedArgumentsEntryMap"),
+ ("read_only_space", 0x05389): (86, "AllocationMementoMap"),
+ ("read_only_space", 0x053d9): (89, "AsmWasmDataMap"),
+ ("read_only_space", 0x05429): (90, "AsyncGeneratorRequestMap"),
+ ("read_only_space", 0x05479): (92, "ClassPositionsMap"),
+ ("read_only_space", 0x054c9): (93, "DebugInfoMap"),
+ ("read_only_space", 0x05519): (96, "FunctionTemplateRareDataMap"),
+ ("read_only_space", 0x05569): (99, "InterpreterDataMap"),
+ ("read_only_space", 0x055b9): (100, "PromiseCapabilityMap"),
+ ("read_only_space", 0x05609): (101, "PromiseReactionMap"),
+ ("read_only_space", 0x05659): (102, "PrototypeInfoMap"),
+ ("read_only_space", 0x056a9): (103, "ScriptMap"),
+ ("read_only_space", 0x056f9): (107, "SourcePositionTableWithFrameCacheMap"),
+ ("read_only_space", 0x05749): (108, "SourceTextModuleInfoEntryMap"),
+ ("read_only_space", 0x05799): (109, "StackFrameInfoMap"),
+ ("read_only_space", 0x057e9): (110, "StackTraceFrameMap"),
+ ("read_only_space", 0x05839): (111, "TemplateObjectDescriptionMap"),
+ ("read_only_space", 0x05889): (112, "WasmCapiFunctionDataMap"),
+ ("read_only_space", 0x058d9): (113, "WasmDebugInfoMap"),
+ ("read_only_space", 0x05929): (114, "WasmExceptionTagMap"),
+ ("read_only_space", 0x05979): (115, "WasmExportedFunctionDataMap"),
+ ("read_only_space", 0x059c9): (116, "WasmIndirectFunctionTableMap"),
+ ("read_only_space", 0x05a19): (117, "WasmJSFunctionDataMap"),
+ ("read_only_space", 0x05a69): (98, "InternalClassMap"),
+ ("read_only_space", 0x05ab9): (105, "SmiPairMap"),
+ ("read_only_space", 0x05b09): (104, "SmiBoxMap"),
+ ("read_only_space", 0x05b59): (106, "SortStateMap"),
+ ("read_only_space", 0x05ba9): (87, "AllocationSiteWithWeakNextMap"),
+ ("read_only_space", 0x05bf9): (87, "AllocationSiteWithoutWeakNextMap"),
+ ("read_only_space", 0x05c49): (76, "LoadHandler1Map"),
+ ("read_only_space", 0x05c99): (76, "LoadHandler2Map"),
+ ("read_only_space", 0x05ce9): (76, "LoadHandler3Map"),
+ ("read_only_space", 0x05d39): (77, "StoreHandler0Map"),
+ ("read_only_space", 0x05d89): (77, "StoreHandler1Map"),
+ ("read_only_space", 0x05dd9): (77, "StoreHandler2Map"),
+ ("read_only_space", 0x05e29): (77, "StoreHandler3Map"),
+ ("map_space", 0x00121): (1057, "ExternalMap"),
+ ("map_space", 0x00171): (1072, "JSMessageObjectMap"),
}
# List of known V8 objects.
KNOWN_OBJECTS = {
- ("read_only_space", 0x001b9): "NullValue",
- ("read_only_space", 0x00239): "EmptyDescriptorArray",
- ("read_only_space", 0x002a1): "EmptyWeakFixedArray",
- ("read_only_space", 0x003a1): "UninitializedValue",
- ("read_only_space", 0x004b1): "UndefinedValue",
- ("read_only_space", 0x00531): "NanValue",
- ("read_only_space", 0x00591): "TheHoleValue",
- ("read_only_space", 0x00629): "HoleNanValue",
- ("read_only_space", 0x00639): "TrueValue",
- ("read_only_space", 0x006e9): "FalseValue",
- ("read_only_space", 0x00731): "empty_string",
- ("read_only_space", 0x00bf1): "EmptyScopeInfo",
- ("read_only_space", 0x00c01): "EmptyFixedArray",
- ("read_only_space", 0x00c11): "ArgumentsMarker",
- ("read_only_space", 0x00cb1): "Exception",
- ("read_only_space", 0x00d51): "TerminationException",
- ("read_only_space", 0x00df9): "OptimizedOut",
- ("read_only_space", 0x00e99): "StaleRegister",
- ("read_only_space", 0x02389): "EmptyEnumCache",
- ("read_only_space", 0x023f1): "EmptyPropertyArray",
- ("read_only_space", 0x02401): "EmptyByteArray",
- ("read_only_space", 0x02411): "EmptyObjectBoilerplateDescription",
- ("read_only_space", 0x02429): "EmptyArrayBoilerplateDescription",
- ("read_only_space", 0x02491): "EmptyClosureFeedbackCellArray",
- ("read_only_space", 0x024a1): "EmptySloppyArgumentsElements",
- ("read_only_space", 0x024c1): "EmptySlowElementDictionary",
- ("read_only_space", 0x02509): "EmptyOrderedHashMap",
- ("read_only_space", 0x02531): "EmptyOrderedHashSet",
- ("read_only_space", 0x02559): "EmptyFeedbackMetadata",
- ("read_only_space", 0x02569): "EmptyPropertyCell",
- ("read_only_space", 0x02591): "EmptyPropertyDictionary",
- ("read_only_space", 0x025e1): "NoOpInterceptorInfo",
- ("read_only_space", 0x02681): "EmptyWeakArrayList",
- ("read_only_space", 0x02699): "InfinityValue",
- ("read_only_space", 0x026a9): "MinusZeroValue",
- ("read_only_space", 0x026b9): "MinusInfinityValue",
- ("read_only_space", 0x026c9): "SelfReferenceMarker",
- ("read_only_space", 0x02721): "OffHeapTrampolineRelocationInfo",
- ("read_only_space", 0x02739): "TrampolineTrivialCodeDataContainer",
- ("read_only_space", 0x02751): "TrampolinePromiseRejectionCodeDataContainer",
- ("read_only_space", 0x02769): "GlobalThisBindingScopeInfo",
- ("read_only_space", 0x027d1): "EmptyFunctionScopeInfo",
- ("read_only_space", 0x02821): "HashSeed",
- ("old_space", 0x00119): "ArgumentsIteratorAccessor",
- ("old_space", 0x00189): "ArrayLengthAccessor",
- ("old_space", 0x001f9): "BoundFunctionLengthAccessor",
- ("old_space", 0x00269): "BoundFunctionNameAccessor",
- ("old_space", 0x002d9): "ErrorStackAccessor",
- ("old_space", 0x00349): "FunctionArgumentsAccessor",
- ("old_space", 0x003b9): "FunctionCallerAccessor",
- ("old_space", 0x00429): "FunctionNameAccessor",
- ("old_space", 0x00499): "FunctionLengthAccessor",
- ("old_space", 0x00509): "FunctionPrototypeAccessor",
- ("old_space", 0x00579): "StringLengthAccessor",
- ("old_space", 0x005e9): "InvalidPrototypeValidityCell",
- ("old_space", 0x005f9): "EmptyScript",
- ("old_space", 0x00679): "ManyClosuresCell",
- ("old_space", 0x00691): "ArrayConstructorProtector",
- ("old_space", 0x006a1): "NoElementsProtector",
- ("old_space", 0x006c9): "IsConcatSpreadableProtector",
- ("old_space", 0x006d9): "ArraySpeciesProtector",
- ("old_space", 0x00701): "TypedArraySpeciesProtector",
- ("old_space", 0x00729): "PromiseSpeciesProtector",
- ("old_space", 0x00751): "StringLengthProtector",
- ("old_space", 0x00761): "ArrayIteratorProtector",
- ("old_space", 0x00789): "ArrayBufferDetachingProtector",
- ("old_space", 0x007b1): "PromiseHookProtector",
- ("old_space", 0x007d9): "PromiseResolveProtector",
- ("old_space", 0x007e9): "MapIteratorProtector",
- ("old_space", 0x00811): "PromiseThenProtector",
- ("old_space", 0x00839): "SetIteratorProtector",
- ("old_space", 0x00861): "StringIteratorProtector",
- ("old_space", 0x00889): "SingleCharacterStringCache",
- ("old_space", 0x01099): "StringSplitCache",
- ("old_space", 0x018a9): "RegExpMultipleCache",
- ("old_space", 0x020b9): "BuiltinsConstantsTable",
+ ("read_only_space", 0x001c1): "NullValue",
+ ("read_only_space", 0x00241): "EmptyDescriptorArray",
+ ("read_only_space", 0x002a9): "EmptyWeakFixedArray",
+ ("read_only_space", 0x003a9): "UninitializedValue",
+ ("read_only_space", 0x004b9): "UndefinedValue",
+ ("read_only_space", 0x00539): "NanValue",
+ ("read_only_space", 0x00599): "TheHoleValue",
+ ("read_only_space", 0x00631): "HoleNanValue",
+ ("read_only_space", 0x00641): "TrueValue",
+ ("read_only_space", 0x006f1): "FalseValue",
+ ("read_only_space", 0x00739): "empty_string",
+ ("read_only_space", 0x00bf9): "EmptyScopeInfo",
+ ("read_only_space", 0x00c09): "EmptyFixedArray",
+ ("read_only_space", 0x00c19): "ArgumentsMarker",
+ ("read_only_space", 0x00cb9): "Exception",
+ ("read_only_space", 0x00d59): "TerminationException",
+ ("read_only_space", 0x00e01): "OptimizedOut",
+ ("read_only_space", 0x00ea1): "StaleRegister",
+ ("read_only_space", 0x02391): "EmptyEnumCache",
+ ("read_only_space", 0x023f9): "EmptyPropertyArray",
+ ("read_only_space", 0x02409): "EmptyByteArray",
+ ("read_only_space", 0x02419): "EmptyObjectBoilerplateDescription",
+ ("read_only_space", 0x02431): "EmptyArrayBoilerplateDescription",
+ ("read_only_space", 0x02499): "EmptyClosureFeedbackCellArray",
+ ("read_only_space", 0x024a9): "EmptySloppyArgumentsElements",
+ ("read_only_space", 0x024c9): "EmptySlowElementDictionary",
+ ("read_only_space", 0x02511): "EmptyOrderedHashMap",
+ ("read_only_space", 0x02539): "EmptyOrderedHashSet",
+ ("read_only_space", 0x02561): "EmptyFeedbackMetadata",
+ ("read_only_space", 0x02571): "EmptyPropertyCell",
+ ("read_only_space", 0x02599): "EmptyPropertyDictionary",
+ ("read_only_space", 0x025e9): "NoOpInterceptorInfo",
+ ("read_only_space", 0x02689): "EmptyWeakArrayList",
+ ("read_only_space", 0x026a1): "InfinityValue",
+ ("read_only_space", 0x026b1): "MinusZeroValue",
+ ("read_only_space", 0x026c1): "MinusInfinityValue",
+ ("read_only_space", 0x026d1): "SelfReferenceMarker",
+ ("read_only_space", 0x02729): "OffHeapTrampolineRelocationInfo",
+ ("read_only_space", 0x02741): "TrampolineTrivialCodeDataContainer",
+ ("read_only_space", 0x02759): "TrampolinePromiseRejectionCodeDataContainer",
+ ("read_only_space", 0x02771): "GlobalThisBindingScopeInfo",
+ ("read_only_space", 0x027d9): "EmptyFunctionScopeInfo",
+ ("read_only_space", 0x02829): "HashSeed",
+ ("old_space", 0x00121): "ArgumentsIteratorAccessor",
+ ("old_space", 0x00191): "ArrayLengthAccessor",
+ ("old_space", 0x00201): "BoundFunctionLengthAccessor",
+ ("old_space", 0x00271): "BoundFunctionNameAccessor",
+ ("old_space", 0x002e1): "ErrorStackAccessor",
+ ("old_space", 0x00351): "FunctionArgumentsAccessor",
+ ("old_space", 0x003c1): "FunctionCallerAccessor",
+ ("old_space", 0x00431): "FunctionNameAccessor",
+ ("old_space", 0x004a1): "FunctionLengthAccessor",
+ ("old_space", 0x00511): "FunctionPrototypeAccessor",
+ ("old_space", 0x00581): "RegExpResultIndicesAccessor",
+ ("old_space", 0x005f1): "StringLengthAccessor",
+ ("old_space", 0x00661): "InvalidPrototypeValidityCell",
+ ("old_space", 0x00671): "EmptyScript",
+ ("old_space", 0x006f1): "ManyClosuresCell",
+ ("old_space", 0x00709): "ArrayConstructorProtector",
+ ("old_space", 0x00731): "NoElementsProtector",
+ ("old_space", 0x00759): "IsConcatSpreadableProtector",
+ ("old_space", 0x00781): "ArraySpeciesProtector",
+ ("old_space", 0x007a9): "TypedArraySpeciesProtector",
+ ("old_space", 0x007d1): "PromiseSpeciesProtector",
+ ("old_space", 0x007f9): "StringLengthProtector",
+ ("old_space", 0x00821): "ArrayIteratorProtector",
+ ("old_space", 0x00849): "ArrayBufferDetachingProtector",
+ ("old_space", 0x00871): "PromiseHookProtector",
+ ("old_space", 0x00899): "PromiseResolveProtector",
+ ("old_space", 0x008c1): "MapIteratorProtector",
+ ("old_space", 0x008e9): "PromiseThenProtector",
+ ("old_space", 0x00911): "SetIteratorProtector",
+ ("old_space", 0x00939): "StringIteratorProtector",
+ ("old_space", 0x00961): "SingleCharacterStringCache",
+ ("old_space", 0x01171): "StringSplitCache",
+ ("old_space", 0x01981): "RegExpMultipleCache",
+ ("old_space", 0x02191): "BuiltinsConstantsTable",
}
# List of known V8 Frame Markers.
diff --git a/deps/v8/tools/wasm-compilation-hints/OWNERS b/deps/v8/tools/wasm-compilation-hints/OWNERS
index 4c00a60a00..89abec33f3 100644
--- a/deps/v8/tools/wasm-compilation-hints/OWNERS
+++ b/deps/v8/tools/wasm-compilation-hints/OWNERS
@@ -1,2 +1,2 @@
-clemensh@chromium.org
+clemensb@chromium.org
mstarzinger@chromium.org
diff --git a/deps/v8/tools/wasm/update-wasm-spec-tests.sh b/deps/v8/tools/wasm/update-wasm-spec-tests.sh
index 01688648eb..b3e9185c4d 100755
--- a/deps/v8/tools/wasm/update-wasm-spec-tests.sh
+++ b/deps/v8/tools/wasm/update-wasm-spec-tests.sh
@@ -71,7 +71,7 @@ log_and_run cp -r ${TMP_DIR}/spec/test/js-api/* ${JS_API_TEST_DIR}/tests
# Generate the proposal tests.
###############################################################################
-repos='bulk-memory-operations reference-types js-types'
+repos='bulk-memory-operations reference-types js-types JS-BigInt-integration'
for repo in ${repos}; do
echo "Process ${repo}"
diff --git a/deps/v8/tools/whitespace.txt b/deps/v8/tools/whitespace.txt
index 1540f5f52a..c9a3400c49 100644
--- a/deps/v8/tools/whitespace.txt
+++ b/deps/v8/tools/whitespace.txt
@@ -7,6 +7,6 @@ A Smi balks into a war and says:
The doubles heard this and started to unbox.
The Smi looked at them when a crazy v8-autoroll account showed up...
The autoroller bought a round of Himbeerbrause. Suddenly.....
-The bartender starts to shake the bottles..............
+The bartender starts to shake the bottles..................
I can't add trailing whitespaces, so I'm adding this line.
I'm starting to think that just adding trailing whitespaces might not be bad.
diff --git a/deps/v8/tools/windbg.js b/deps/v8/tools/windbg.js
index 91877b4c61..bcf45a496b 100644
--- a/deps/v8/tools/windbg.js
+++ b/deps/v8/tools/windbg.js
@@ -20,9 +20,6 @@ function help() {
print(" e.g. !jlh(\"key\") or !jlh(\"this->receiver_\")");
print(" !job(address_or_taggedint)");
print(" prints object at the address, e.g. !job(0x235cb869f9)");
- print(" !jobs(start_address, count)");
- print(" prints 'count' objects from a continuous range of Object");
- print(" pointers, e.g. !jobs(0x5f7270, 42)");
print(" !jst() or !jst");
print(" prints javascript stack (output goes into the console)");
print(" !jsbp() or !jsbp");
@@ -53,6 +50,11 @@ function help() {
print(" !where(address)");
print(" prints name of the space and address of the MemoryChunk the");
print(" 'address' is from, e.g. !where(0x235cb869f9)");
+ print(" !rs(chunk_address, set_id = 0)");
+ print(" prints slots from the remembered set in the MemoryChunk. If");
+ print(" 'chunk_address' isn't specified, prints for all chunks in the");
+ print(" old space; 'set_id' should match RememberedSetType enum,");
+ print(" e.g. !rs, !rs 0x2fb14780000, !rs(0x2fb14780000, 1)");
print("");
print("--------------------------------------------------------------------");
@@ -114,14 +116,6 @@ function print(s) {
host.diagnostics.debugLog(s + "\n");
}
-function print_filtered(obj, filter) {
- for (let line of obj) {
- if (!filter || line.indexOf(filter) != -1) {
- print(line);
- }
- }
-}
-
function inspect(s) {
for (let k of Reflect.ownKeys(s)) {
// Attempting to print either of:
@@ -140,10 +134,23 @@ function hex(number) {
/*=============================================================================
Utils (postmortem and live)
=============================================================================*/
-// WinDbg wraps large integers into objects that fail isInteger test (and,
-// consequently fail isSafeInteger test even if the original value was a safe
-// integer). I cannot figure out how to extract the original value from the
-// wrapper object so doing it via conversion to a string. Brrr. Ugly.
+// WinDbg wraps large integers (0x80000000+) into an object of a library type
+// that fails the isInteger test (and, consequently, the isSafeInteger test,
+// even if the original value was a safe integer).
+// However, that library type does have a set of methods which you can use to
+// force the conversion:
+// .asNumber() / .valueOf(): Performs conversion to a JavaScript number.
+// Throws if the 64-bit value does not pack into a JavaScript number without
+// loss of precision.
+// .convertToNumber(): Performs conversion to a JavaScript number.
+// Does NOT throw if the 64-bit value does not pack into a JavaScript number;
+// this simply results in loss of precision.
+// The library also adds these methods to the standard Number prototype,
+// meaning you can always call .asNumber() / .convertToNumber() to turn either
+// a JavaScript number or the private Int64 type into a JavaScript number.
+// We could use the conversion functions, but doing the conversion via
+// toString is just as good and slightly more generic...
function int(val) {
if (typeof val === 'number') {
return Number.isInteger(val) ? val : undefined;
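A minimal sketch of the toString-based path that int() relies on, under the assumption that WinDbg's Int64 wrapper stringifies to a plain decimal or 0x-prefixed hex literal (the wrapper type is the debugger's, not this patch's):

function int64_via_string(val) {
  // parseInt accepts both decimal and "0x..." strings, and only loses
  // precision above Number.MAX_SAFE_INTEGER (2^53 - 1).
  const n = parseInt(val.toString());
  return Number.isSafeInteger(n) ? n : undefined;
}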
@@ -192,6 +199,26 @@ function get_register(name) {
.Registers.User[name];
}
+// JS doesn't do bitwise operations on large integers, so let's do it ourselves
+// using hex string representation.
+function bitwise_and(l, r) {
+ l = hex(l);
+ let l_length = l.length;
+ r = hex(r);
+ let r_length = r.length;
+ let res = "";
+ let length = Math.min(l_length, r_length) - 2; // to account for "0x"
+ for (let i = 1; i <= length; i++) {
+ res = (parseInt(l[l_length - i], 16) & parseInt(r[r_length - i], 16))
+ .toString(16) + res;
+ }
+ return parseInt(res, 16);
+}
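A quick illustration of bitwise_and (addresses hypothetical): truncating to the shorter hex string is safe because AND-ing against an implicit leading zero yields zero.

// mask the low 32 bits of a 64-bit heap address:
// bitwise_and(0x2fb14780123, 0xffffffff) === 0x14780123
// set_isolate_address() below uses exactly this to guess pointer compression:
// a compression-enabled isolate root is 4GB-aligned, so its low 32 bits are 0.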
+
+
+/*=============================================================================
+ Script setup
+=============================================================================*/
// In debug builds v8 code is compiled into v8.dll, and in release builds
// the code is compiled directly into the executable. If you are debugging some
// other embedder, run !set_module and provide the module name to use.
@@ -209,9 +236,18 @@ function module_name(use_this_module) {
return m.Name.indexOf("\\v8.dll") !== -1;
});
- if (v8) {
+ let v8_test = host.namespace.Debugger.State.DebuggerVariables.curprocess
+ .Modules.Where(
+ function(m) {
+ return m.Name.indexOf("\\v8_for_testing.dll") !== -1;
+ });
+
+ if (v8.Count() > 0) {
module_name_cache = "v8";
}
+ else if (v8_test.Count() > 0) {
+ module_name_cache = "v8_for_testing";
+ }
else {
for (let exe_name in known_exes) {
let exe = host.namespace.Debugger.State.DebuggerVariables.curprocess
@@ -219,7 +255,7 @@ function module_name(use_this_module) {
function(m) {
return m.Name.indexOf(`\\${exe_name}.exe`) !== -1;
});
- if (exe) {
+ if (exe.Count() > 0) {
module_name_cache = exe_name;
break;
}
@@ -234,6 +270,25 @@ function module_name(use_this_module) {
return module_name_cache;
};
+let using_ptr_compr = false;
+let isolate_address = 0;
+function set_isolate_address(addr, ptr_compr) {
+ isolate_address = addr;
+
+ if (typeof ptr_compr === 'undefined') {
+ ptr_compr = (bitwise_and(isolate_address, 0xffffffff) == 0);
+ }
+ using_ptr_compr = ptr_compr;
+
+ if (using_ptr_compr) {
+ print("The target is using pointer compression.");
+ }
+}
+
+
+/*=============================================================================
+ Wrappers around V8's printing functions and other utils for live-debugging
+=============================================================================*/
function make_call(fn) {
if (!supports_call_command()) {
print("ERROR: This command is supported in live sessions only!");
@@ -249,16 +304,8 @@ function make_call(fn) {
return output;
}
-
-/*=============================================================================
- Wrappers around V8's printing functions and other utils for live-debugging
-=============================================================================*/
-
-/*-----------------------------------------------------------------------------
- 'address' should be an int (so in hex must include '0x' prefix).
------------------------------------------------------------------------------*/
function print_object(address) {
- let output = make_call(`_v8_internal_Print_Object(${address})`);
+ let output = make_call(`_v8_internal_Print_Object(${decomp(address)})`);
// skip the first few lines with meta info of .call command
let skip_line = true;
@@ -273,43 +320,13 @@ function print_object(address) {
}
}
-/*-----------------------------------------------------------------------------
- 'handle_to_object' should be a name of a Handle which can be a local
- variable or it can be a member variable like "this->receiver_".
------------------------------------------------------------------------------*/
function print_object_from_handle(handle_to_object) {
let handle = host.evaluateExpression(handle_to_object);
let location = handle.location_;
- let pobj = poi(location.address);
+ let pobj = poi(location.address); // handles use uncompressed pointers
print_object(pobj);
}
-/*-----------------------------------------------------------------------------
- 'start_address' should be an int (so in hex must include '0x' prefix), it can
- point at any continuous memory that contains Object pointers.
------------------------------------------------------------------------------*/
-function print_objects_array(start_address, count) {
- const ptr_size = pointer_size();
- let ctl = host.namespace.Debugger.Utility.Control;
- let addr_int = start_address;
- for (let i = 0; i < count; i++) {
- const addr_hex = hex(addr_int);
-
- // TODO: Tried using createPointerObject but it throws unknown exception
- // from ChakraCore. Why?
- //let obj = host.createPointerObject(addr_hex, module, "void*");
-
- let output = ctl.ExecuteCommand(`dp ${addr_hex} l1`);
- let item = "";
- for (item of output) {} // 005f7270 34604101
- let deref = `0x${item.split(" ").pop()}`;
- print(`${addr_hex} -> ${deref}`);
- print_object(deref);
-
- addr_int += ptr_size;
- }
-}
-
function print_js_stack() {
make_call("_v8_internal_Print_StackTrace()");
}
@@ -323,21 +340,47 @@ function set_user_js_bp() {
/*=============================================================================
Managed heap related functions (live and post-mortem debugging)
=============================================================================*/
-let isolate_address = 0;
-function set_isolate_address(addr) {
- isolate_address = addr;
+/*-----------------------------------------------------------------------------
+ Pointer compression
+-----------------------------------------------------------------------------*/
+function tagged_size() {
+ return using_ptr_compr ? 4 : pointer_size();
}
+function get_compressed_ptr_base() {
+ if (!using_ptr_compr) return 0;
+
+ return isolate_address;
+}
+
+function decomp(value) {
+ if (value > 0xffffffff) return value;
+ return get_compressed_ptr_base() + value;
+}
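A worked instance of the decompression rule above, under the patch's assumption that the compression base is the isolate address itself (value hypothetical):

// with using_ptr_compr == true and isolate_address == 0x2fb14780000:
// decomp(0x00c09)       === 0x2fb14780c09  // 32-bit on-heap value, rebased
// decomp(0x2fb14780c09) === 0x2fb14780c09  // > 0xffffffff: already a full ptr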
+
+// Adjust for possible pointer compression ('address' is assumed to be on the
+// managed heap).
+function poim(address) {
+ try {
+ // readMemoryValues throws if cannot read from 'address'.
+ return host.memory.readMemoryValues(decomp(address), 1, tagged_size())[0];
+ }
+ catch (e){}
+}
+
+/*-----------------------------------------------------------------------------
+ Exploring objects
+-----------------------------------------------------------------------------*/
function is_map(addr) {
let address = int(addr);
if (!Number.isSafeInteger(address) || address % 2 == 0) return false;
// the first field in all objects, including maps, is a map pointer, but for
// maps the pointer is always the same - the meta map that points to itself.
- const map_addr = int(poi(address - 1));
+ const map_addr = int(poim(address - 1));
if (!Number.isSafeInteger(map_addr)) return false;
- const map_map_addr = int(poi(map_addr - 1));
+ const map_map_addr = int(poim(map_addr - 1));
if (!Number.isSafeInteger(map_map_addr)) return false;
return (map_addr === map_map_addr);
@@ -348,12 +391,12 @@ function is_likely_object(addr) {
if (!Number.isSafeInteger(address) || address % 2 == 0) return false;
// the first field in all objects must be a map pointer
- return is_map(poi(address - 1));
+ return is_map(poim(address - 1));
}
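The address - 1 adjustments above come from V8's pointer tagging: HeapObject pointers carry a 1 in the low bit, so the map word sits at the untagged address. A sketch of the invariant is_map() tests (addresses hypothetical):

// For a Map object M:   poim(M - 1) === MetaMap, and
//                       poim(MetaMap - 1) === MetaMap (the meta map is its
//                       own map), so the two reads in is_map() agree;
// for any other object: poim(obj - 1) is some map M != MetaMap, while
//                       poim(M - 1) === MetaMap, so the reads differ.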
function find_object_near(aligned_addr, max_distance, step_op) {
if (!step_op) {
- const step = pointer_size();
+ const step = tagged_size();
const prev =
find_object_near(aligned_addr, max_distance, x => x - step);
const next =
@@ -364,14 +407,14 @@ function find_object_near(aligned_addr, max_distance, step_op) {
return (addr - prev <= next - addr) ? prev : next;
}
- let maybe_map_addr = poi(aligned_addr);
+ let maybe_map_addr = poim(aligned_addr);
let iters = 0;
while (maybe_map_addr && iters < max_distance) {
if (is_map(maybe_map_addr)) {
return aligned_addr;
}
aligned_addr = step_op(aligned_addr);
- maybe_map_addr = poi(aligned_addr);
+ maybe_map_addr = poim(aligned_addr);
iters++;
}
}
@@ -379,7 +422,7 @@ function find_object_near(aligned_addr, max_distance, step_op) {
function find_object_prev(addr, max_distance) {
if (!Number.isSafeInteger(int(addr))) return;
- const ptr_size = pointer_size();
+ const ptr_size = tagged_size();
const aligned_addr = addr - (addr % ptr_size);
return find_object_near(aligned_addr, max_distance, x => x - ptr_size);
}
@@ -387,7 +430,7 @@ function find_object_prev(addr, max_distance) {
function find_object_next(addr, max_distance) {
if (!Number.isSafeInteger(int(addr))) return;
- const ptr_size = pointer_size();
+ const ptr_size = tagged_size();
const aligned_addr = addr - (addr % ptr_size) + ptr_size;
return find_object_near(aligned_addr, max_distance, x => x + ptr_size);
}
@@ -400,7 +443,7 @@ function print_object_prev(addr, max_slots = 100) {
}
else {
print(
- `found object: ${hex(obj_addr + 1)} : ${hex(poi(obj_addr))}`);
+ `found object: ${hex(obj_addr + 1)} : ${hex(poim(obj_addr))}`);
}
}
@@ -412,7 +455,7 @@ function print_object_next(addr, max_slots = 100) {
}
else {
print(
- `found object: ${hex(obj_addr + 1)} : ${hex(poi(obj_addr))}`);
+ `found object: ${hex(obj_addr + 1)} : ${hex(poim(obj_addr))}`);
}
}
@@ -422,10 +465,11 @@ function print_objects_in_range(start, end){
if (!Number.isSafeInteger(int(start)) || !Number.isSafeInteger(int(end))) {
return;
}
-
const ptr_size = pointer_size();
+ if (start < ptr_size || end <= start) return;
+
let iters = (end - start) / ptr_size;
- let cur = start;
+ let cur = start - ptr_size;
print(`===============================================`);
print(`objects in range ${hex(start)} - ${hex(end)}`);
print(`===============================================`);
@@ -434,7 +478,7 @@ function print_objects_in_range(start, end){
let obj = find_object_next(cur, iters);
if (obj) {
count++;
- print(`${hex(obj + 1)} : ${hex(poi(obj))}`);
+ print(`${hex(obj + 1)} : ${hex(poim(obj))}`);
iters = (end - cur) / ptr_size;
}
cur = obj + ptr_size;
@@ -454,10 +498,10 @@ function print_objects_tree(root, depth_limit) {
let path = [];
function impl(obj, depth, depth_limit) {
- const ptr_size = pointer_size();
+ const ptr_size = tagged_size();
// print the current object and its map pointer
const this_obj =
- `${" ".repeat(2 * depth)}${hex(obj)} : ${hex(poi(obj - 1))}`;
+ `${" ".repeat(2 * depth)}${hex(obj)} : ${hex(poim(obj - 1))}`;
const cutoff = depth_limit && depth == depth_limit - 1;
print(`${this_obj}${cutoff ? " (...)" : ""}`);
if (cutoff) return;
@@ -472,7 +516,7 @@ function print_objects_tree(root, depth_limit) {
let seen = new Set(path);
while (!is_likely_object(cur + 1) && iter < 100) {
iter++;
- let field = poi(cur);
+ let field = poim(cur);
if (is_likely_object(field)) {
if (seen.has(field)) {
print(
@@ -491,7 +535,7 @@ function print_objects_tree(root, depth_limit) {
}
/*-----------------------------------------------------------------------------
- Memory in each Space is organized into a linked list of memory chunks
+ Memory spaces
-----------------------------------------------------------------------------*/
const NEVER_EVACUATE = 1 << 7; // see src\heap\spaces.h
@@ -564,12 +608,6 @@ function find_chunk(address) {
return undefined;
}
-/*-----------------------------------------------------------------------------
- Print memory chunks from spaces in the current Heap
- 'isolate_address' should be an int (so in hex must include '0x' prefix).
- 'space': space separated string containing "all", "old", "new", "map",
- "code", "ro [readonly]", "lo [large]", "nlo [newlarge]"
------------------------------------------------------------------------------*/
function print_memory(space = "all") {
if (isolate_address == 0) {
print("Please call !set_iso(isolate_address) first.");
@@ -622,16 +660,13 @@ function print_memory(space = "all") {
}
}
-/*-----------------------------------------------------------------------------
- 'isolate_address' and 'address' should be ints (so in hex must include '0x'
- prefix).
------------------------------------------------------------------------------*/
function print_owning_space(address) {
if (isolate_address == 0) {
print("Please call !set_iso(isolate_address) first.");
return;
}
+ address = decomp(address);
let c = find_chunk(address);
if (c) {
print(`${hex(address)} is in ${c.space} (chunk: ${hex(c.address)})`);
@@ -642,7 +677,7 @@ function print_owning_space(address) {
}
/*-----------------------------------------------------------------------------
-
+ Handles
-----------------------------------------------------------------------------*/
function print_handles_data(print_handles = false) {
if (isolate_address == 0) {
@@ -705,6 +740,9 @@ function print_handles_data(print_handles = false) {
}
}
+/*-----------------------------------------------------------------------------
+ dp
+-----------------------------------------------------------------------------*/
function pad_right(addr) {
let addr_hex = hex(addr);
return `${addr_hex}${" ".repeat(pointer_size() * 2 + 2 - addr_hex.length)}`;
@@ -721,26 +759,109 @@ function dp(addr, count = 10) {
return;
}
- const ptr_size = pointer_size();
+ const ptr_size = tagged_size();
let aligned_addr = addr - (addr % ptr_size);
- let val = poi(aligned_addr);
+ let val = poim(aligned_addr);
let iter = 0;
while (val && iter < count) {
- const augm_map = is_map(val) ? "map" : "";
- const augm_obj = is_likely_object(val) && !is_map(val) ? "obj" : "";
- const augm_other = !is_map(val) && !is_likely_object(val) ? "val" : "";
- let c = find_chunk(val);
+ const map = is_map(val);
+ const obj = is_likely_object(val) && !map;
+
+ const augm_map = map ? "map" : "";
+ const augm_obj = obj ? "obj" : "";
+ const augm_other = !map && !obj ? "val" : "";
+
+ let c = find_chunk(decomp(val));
const augm_space = c ? ` in ${c.space}` : "";
const augm = `${augm_map}${augm_obj}${augm_other}${augm_space}`;
- print(`${pad_right(aligned_addr)} ${pad_right(val)} ${augm}`);
+ const full_ptr = using_ptr_compr ?
+ pad_right((map || obj) ? decomp(val) : val) : "";
+ print(`${pad_right(aligned_addr)} ${pad_right(val)} ${full_ptr} ${augm}`);
aligned_addr += ptr_size;
- val = poi(aligned_addr);
+ val = poim(aligned_addr);
iter++;
}
}
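With pointer compression enabled, each dp line now carries four columns: the slot address, the raw (possibly compressed) slot value, the decompressed full pointer for map/object values, and the annotation. A hypothetical line (all values invented):

// address        raw slot value  decompressed    annotation
// 0x2fb14780c08  0x00000121      0x2fb14780121   map in read_only_space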
+/*-----------------------------------------------------------------------------
+ Remembered Sets
+-----------------------------------------------------------------------------*/
+// set ids: 0 = OLD_TO_NEW, 1 = OLD_TO_OLD
+function print_remembered_set(chunk_addr, set_id = 0) {
+ if (!chunk_addr) {
+ if (isolate_address == 0) {
+ print("Please call !set_iso(isolate_address) or provide chunk address.");
+ return;
+ }
+
+ let iso = cast(isolate_address, "v8::internal::Isolate");
+ let h = iso.heap_;
+ let chunks = [];
+ get_chunks_space('old', h.old_space_.memory_chunk_list_.front_, chunks);
+ get_chunks_space('lo', h.lo_space_.memory_chunk_list_.front_, chunks);
+ for (let c of chunks) {
+ try {
+ print_remembered_set(c.address);
+ }
+ catch (e) {
+ print(`failed to process chunk ${hex(c.address)} due to ${e.message}`);
+ }
+ }
+ return;
+ }
+
+ print(`Remembered set in chunk ${hex(chunk_addr)}`);
+ let chunk = cast(chunk_addr, "v8::internal::MemoryChunk");
+
+  // chunk.slot_set_ is an array of SlotSets. For standard pages there is 0 or
+  // 1 item in the array, but for large pages there will be more.
+ const page_size = 256 * 1024;
+ const sets_count = Math.floor((chunk.size_ + page_size - 1) / page_size);
+ let rs = chunk.slot_set_[set_id];
+ if (rs.isNull) {
+ print(` <empty>`);
+ return;
+ }
+  if (rs[0].page_start_ != chunk_addr) {
+    print(`page_start_ [${hex(rs[0].page_start_)}] doesn't match chunk_addr!`);
+ return;
+ }
+
+ const ptr_size = tagged_size();
+ let count = 0;
+ for (let s = 0; s < sets_count; s++){
+ const buckets_count = rs[s].buckets_.Count();
+ for (let b = 0; b < buckets_count; b++) {
+ let bucket = rs[s].buckets_[b];
+ if (bucket.isNull) continue;
+      // there are 32 cells in each bucket; each cell is 32 bits
+ print(` bucket ${hex(bucket.address.asNumber())}:`);
+ const first_cell = bucket.address.asNumber();
+ for (let c = 0; c < 32; c++) {
+ let cell = host.memory.readMemoryValues(
+ first_cell + c * 4, 1, 4 /*size to read*/)[0];
+ if (cell == 0) continue;
+ let mask = 1;
+ for (let bit = 0; bit < 32; bit++){
+ if (cell & mask) {
+ count++;
+ const slot_offset = (b * 32 * 32 + c * 32 + bit) * ptr_size;
+ const slot = rs[s].page_start_ + slot_offset;
+ print(` ${hex(slot)} -> ${hex(poim(slot))}`);
+ }
+ mask = mask << 1;
+ }
+ }
+ }
+ }
+
+ if (count == 0) print(` <empty>`);
+ else print(` ${count} remembered pointers in chunk ${hex(chunk_addr)}`);
+}
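The slot address computed above decodes a three-level bitmap: bucket b holds 32 cells, each cell is a 32-bit word, and each set bit marks one tagged-size slot from the start of the page. A worked instance (values hypothetical, tagged size 4):

// b = 1, c = 2, bit = 5:
// slot_offset = (1*32*32 + 2*32 + 5) * 4 = (1024 + 64 + 5) * 4 = 4372 (0x1114)
// slot        = page_start_ + 0x1114  // the address whose pointee poim()
//                                     // prints next to it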
+
+
/*=============================================================================
Initialize short aliased names for the most common commands
=============================================================================*/
@@ -749,7 +870,6 @@ function initializeScript() {
new host.functionAlias(help, "help"),
new host.functionAlias(print_object_from_handle, "jlh"),
new host.functionAlias(print_object, "job"),
- new host.functionAlias(print_objects_array, "jobs"),
new host.functionAlias(print_js_stack, "jst"),
new host.functionAlias(set_isolate_address, "set_iso"),
@@ -757,6 +877,7 @@ function initializeScript() {
new host.functionAlias(print_memory, "mem"),
new host.functionAlias(print_owning_space, "where"),
new host.functionAlias(print_handles_data, "handles"),
+ new host.functionAlias(print_remembered_set, "rs"),
new host.functionAlias(print_object_prev, "jo_prev"),
new host.functionAlias(print_object_next, "jo_next"),